[ovs-dev] [PATCH v2] windows/lib: Fix Windows C++ compilation issues on common headers

Sairam Venugopal vsairam at vmware.com
Thu Dec 14 00:13:20 UTC 2017


Found when compiling the code with a C++ compiler on Windows. Most of the
issues are due to missing explicit casts.

The changes in the PADDED_MEMBERS* macros are needed because MSVC does not
allow re-defining an unnamed structure inside a union. Thus, this fix defines
the struct outside of the anonymous union in order to calculate the padded
size.

Signed-off-by: Sairam Venugopal <vsairam at vmware.com>
Signed-off-by: Shireesh Kumar Singh <shireeshkum at vmware.com>
Co-authored-by: Shireesh Kumar Singh <shireeshkum at vmware.com>
---
 AUTHORS.rst                |  1 +
 include/openvswitch/util.h | 14 +++++------
 lib/netlink.h              |  3 ++-
 lib/ovs-atomic-msvc.h      | 59 ++++++++++++++++++++++++++++------------------
 lib/ovs-thread.h           |  4 ++--
 lib/packets.h              | 15 ++++++++----
 lib/socket-util.h          |  4 ++--
 lib/unaligned.h            | 21 ++++++++++++++---
 8 files changed, 78 insertions(+), 43 deletions(-)

diff --git a/AUTHORS.rst b/AUTHORS.rst
index e17daf0..4370adc 100644
--- a/AUTHORS.rst
+++ b/AUTHORS.rst
@@ -295,6 +295,7 @@ Shan Wei                        davidshan at tencent.com
 Shashank Ram                    rams at vmware.com
 Shashwat Srivastava             shashwat.srivastava at tcs.com
 Shih-Hao Li                     shli at nicira.com
+Shireesh Kumar Singh            shireeshkum at vmware.com
 Shu Shen                        shu.shen at radisys.com
 Simon Horman                    horms at verge.net.au
 Simon Horman                    simon.horman at netronome.com
diff --git a/include/openvswitch/util.h b/include/openvswitch/util.h
index c3e60d5..ad1b184 100644
--- a/include/openvswitch/util.h
+++ b/include/openvswitch/util.h
@@ -185,11 +185,11 @@ OVS_NO_RETURN void ovs_assert_failure(const char *, const char *, const char *);
 /* C++ doesn't allow a type declaration within "sizeof", but it does support
  * scoping for member names, so we can just declare a second member, with a
  * name and the same type, and then use its size. */
-#define PADDED_MEMBERS(UNIT, MEMBERS)                           \
-    union {                                                     \
-        struct { MEMBERS };                                     \
-        struct { MEMBERS } named_member__;                      \
-        uint8_t PAD_ID[ROUND_UP(sizeof named_member__, UNIT)];  \
+#define PADDED_MEMBERS(UNIT, MEMBERS)                                       \
+    struct named_member__ { MEMBERS };                                      \
+    union {                                                                 \
+        struct { MEMBERS };                                                 \
+        uint8_t PAD_ID[ROUND_UP(sizeof(struct named_member__), UNIT)];      \
     }
 #endif
 
@@ -233,11 +233,11 @@ OVS_NO_RETURN void ovs_assert_failure(const char *, const char *, const char *);
     }
 #else
 #define PADDED_MEMBERS_CACHELINE_MARKER(UNIT, CACHELINE, MEMBERS)           \
+    struct struct_##CACHELINE { MEMBERS };                                  \
     union {                                                                 \
         OVS_CACHE_LINE_MARKER CACHELINE;                                    \
         struct { MEMBERS };                                                 \
-        struct { MEMBERS } named_member_##CACHELINE;                        \
-        uint8_t PAD_ID[ROUND_UP(sizeof named_member_##CACHELINE, UNIT)];    \
+        uint8_t PAD_ID[ROUND_UP(sizeof(struct struct_##CACHELINE), UNIT)];  \
     }
 #endif
 
diff --git a/lib/netlink.h b/lib/netlink.h
index 6dfac27..e4cb2f7 100644
--- a/lib/netlink.h
+++ b/lib/netlink.h
@@ -153,7 +153,8 @@ enum nl_attr_type
 static inline struct nlattr *
 nl_attr_next(const struct nlattr *nla)
 {
-    return (void *) ((uint8_t *) nla + NLA_ALIGN(nla->nla_len));
+    return ALIGNED_CAST(struct nlattr *,
+                        ((uint8_t *) nla + NLA_ALIGN(nla->nla_len)));
 }
 
 static inline bool
diff --git a/lib/ovs-atomic-msvc.h b/lib/ovs-atomic-msvc.h
index 81f7682..6727d2c 100644
--- a/lib/ovs-atomic-msvc.h
+++ b/lib/ovs-atomic-msvc.h
@@ -98,8 +98,8 @@ atomic_signal_fence(memory_order order)
 
 #define atomic_store32(DST, SRC, ORDER)                                 \
     if (ORDER == memory_order_seq_cst) {                                \
-        InterlockedExchange((int32_t volatile *) (DST),                 \
-                               (int32_t) (SRC));                        \
+        InterlockedExchange((long volatile *) (DST),                    \
+                               (long) (SRC));                           \
     } else {                                                            \
         *(DST) = (SRC);                                                 \
     }
@@ -128,13 +128,18 @@ atomic_signal_fence(memory_order order)
     atomic_storeX(64, DST, SRC, ORDER)
 #endif
 
-/* Used for 8 and 16 bit variations. */
-#define atomic_storeX(X, DST, SRC, ORDER)                               \
-    if (ORDER == memory_order_seq_cst) {                                \
-        InterlockedExchange##X((int##X##_t volatile *) (DST),           \
-                               (int##X##_t) (SRC));                     \
-    } else {                                                            \
-        *(DST) = (SRC);                                                 \
+#define atomic_store8(DST, SRC, ORDER)                                     \
+    if (ORDER == memory_order_seq_cst) {                                   \
+        InterlockedExchange8((char volatile *) (DST), (char) (SRC));       \
+    } else {                                                               \
+        *(DST) = (SRC);                                                    \
+    }
+
+#define atomic_store16(DST, SRC, ORDER)                                    \
+    if (ORDER == memory_order_seq_cst) {                                   \
+        InterlockedExchange16((short volatile *) (DST), (short) (SRC));    \
+    } else {                                                               \
+        *(DST) = (SRC);                                                    \
     }
 
 #define atomic_store(DST, SRC)                               \
@@ -142,9 +147,9 @@ atomic_signal_fence(memory_order order)
 
 #define atomic_store_explicit(DST, SRC, ORDER)                           \
     if (sizeof *(DST) == 1) {                                            \
-        atomic_storeX(8, DST, SRC, ORDER)                                \
+        atomic_store8(DST, SRC, ORDER)                                   \
     } else if (sizeof *(DST) == 2) {                                     \
-        atomic_storeX(16, DST, SRC, ORDER)                               \
+        atomic_store16(DST, SRC, ORDER)                                  \
     } else if (sizeof *(DST) == 4) {                                     \
         atomic_store32(DST, SRC, ORDER)                                  \
     } else if (sizeof *(DST) == 8) {                                     \
@@ -209,27 +214,33 @@ atomic_signal_fence(memory_order order)
 
 /* Arithmetic addition calls. */
 
-#define atomic_add32(RMW, ARG, ORIG, ORDER)                        \
-    *(ORIG) = InterlockedExchangeAdd((int32_t volatile *) (RMW),   \
-                                      (int32_t) (ARG));
+#define atomic_add8(RMW, ARG, ORIG, ORDER)                        \
+    *(ORIG) = _InterlockedExchangeAdd8((char volatile *) (RMW),   \
+                                      (char) (ARG));
 
-/* For 8, 16 and 64 bit variations. */
-#define atomic_add_generic(X, RMW, ARG, ORIG, ORDER)                        \
-    *(ORIG) = _InterlockedExchangeAdd##X((int##X##_t volatile *) (RMW),     \
-                                      (int##X##_t) (ARG));
+#define atomic_add16(RMW, ARG, ORIG, ORDER)                        \
+    *(ORIG) = _InterlockedExchangeAdd16((short volatile *) (RMW),   \
+                                      (short) (ARG));
+
+#define atomic_add32(RMW, ARG, ORIG, ORDER)                        \
+    *(ORIG) = InterlockedExchangeAdd((long volatile *) (RMW),   \
+                                      (long) (ARG));
+#define atomic_add64(RMW, ARG, ORIG, ORDER)                        \
+    *(ORIG) = _InterlockedExchangeAdd64((int64_t volatile *) (RMW),   \
+                                      (int64_t) (ARG));
 
 #define atomic_add(RMW, ARG, ORIG)                               \
         atomic_add_explicit(RMW, ARG, ORIG, memory_order_seq_cst)
 
 #define atomic_add_explicit(RMW, ARG, ORIG, ORDER)             \
     if (sizeof *(RMW) == 1) {                                  \
-        atomic_op(add, 8, RMW, ARG, ORIG, ORDER)               \
+        atomic_add8(RMW, ARG, ORIG, ORDER)               \
     } else if (sizeof *(RMW) == 2) {                           \
-        atomic_op(add, 16, RMW, ARG, ORIG, ORDER)              \
+        atomic_add16(RMW, ARG, ORIG, ORDER)              \
     } else if (sizeof *(RMW) == 4) {                           \
         atomic_add32(RMW, ARG, ORIG, ORDER)                    \
     } else if (sizeof *(RMW) == 8) {                           \
-        atomic_op(add, 64, RMW, ARG, ORIG, ORDER)              \
+        atomic_add64(RMW, ARG, ORIG, ORDER)              \
     } else {                                                   \
         abort();                                               \
     }
@@ -335,7 +346,8 @@ atomic_signal_fence(memory_order order)
 static inline bool
 atomic_compare_exchange8(int8_t volatile *dst, int8_t *expected, int8_t src)
 {
-    int8_t previous = _InterlockedCompareExchange8(dst, src, *expected);
+    int8_t previous = _InterlockedCompareExchange8((char volatile *)dst,
+                                                   src, *expected);
     if (previous == *expected) {
         return true;
     } else {
@@ -361,7 +373,8 @@ static inline bool
 atomic_compare_exchange32(int32_t volatile *dst, int32_t *expected,
                           int32_t src)
 {
-    int32_t previous = InterlockedCompareExchange(dst, src, *expected);
+    int32_t previous = InterlockedCompareExchange((long volatile *)dst,
+                                                  src, *expected);
     if (previous == *expected) {
         return true;
     } else {
diff --git a/lib/ovs-thread.h b/lib/ovs-thread.h
index 55e51a4..03fd804 100644
--- a/lib/ovs-thread.h
+++ b/lib/ovs-thread.h
@@ -260,7 +260,7 @@ void xpthread_join(pthread_t, void **);
     static inline NAME##_type *                                         \
     NAME##_get_unsafe(void)                                             \
     {                                                                   \
-        return &NAME##_var;                                             \
+        return (NAME##_type *)&NAME##_var;                              \
     }                                                                   \
                                                                         \
     static inline NAME##_type *                                         \
@@ -316,7 +316,7 @@ void xpthread_join(pthread_t, void **);
     static inline NAME##_type *                                         \
     NAME##_get_unsafe(void)                                             \
     {                                                                   \
-        return pthread_getspecific(NAME##_key);                         \
+        return (NAME##_type *)pthread_getspecific(NAME##_key);          \
     }                                                                   \
                                                                         \
     NAME##_type *NAME##_get(void);
diff --git a/lib/packets.h b/lib/packets.h
index 13ea46d..45e6345 100644
--- a/lib/packets.h
+++ b/lib/packets.h
@@ -1123,7 +1123,8 @@ in6_addr_set_mapped_ipv4(struct in6_addr *ip6, ovs_be32 ip4)
 static inline ovs_be32
 in6_addr_get_mapped_ipv4(const struct in6_addr *addr)
 {
-    union ovs_16aligned_in6_addr *taddr = (void *) addr;
+    union ovs_16aligned_in6_addr *taddr =
+        (union ovs_16aligned_in6_addr *) addr;
     if (IN6_IS_ADDR_V4MAPPED(addr)) {
         return get_16aligned_be32(&taddr->be32[3]);
     } else {
@@ -1134,7 +1135,8 @@ in6_addr_get_mapped_ipv4(const struct in6_addr *addr)
 static inline void
 in6_addr_solicited_node(struct in6_addr *addr, const struct in6_addr *ip6)
 {
-    union ovs_16aligned_in6_addr *taddr = (void *) addr;
+    union ovs_16aligned_in6_addr *taddr =
+        (union ovs_16aligned_in6_addr *) addr;
     memset(taddr->be16, 0, sizeof(taddr->be16));
     taddr->be16[0] = htons(0xff02);
     taddr->be16[5] = htons(0x1);
@@ -1150,8 +1152,10 @@ static inline void
 in6_generate_eui64(struct eth_addr ea, struct in6_addr *prefix,
                    struct in6_addr *lla)
 {
-    union ovs_16aligned_in6_addr *taddr = (void *) lla;
-    union ovs_16aligned_in6_addr *prefix_taddr = (void *) prefix;
+    union ovs_16aligned_in6_addr *taddr =
+        (union ovs_16aligned_in6_addr *) lla;
+    union ovs_16aligned_in6_addr *prefix_taddr =
+        (union ovs_16aligned_in6_addr *) prefix;
     taddr->be16[0] = prefix_taddr->be16[0];
     taddr->be16[1] = prefix_taddr->be16[1];
     taddr->be16[2] = prefix_taddr->be16[2];
@@ -1169,7 +1173,8 @@ in6_generate_eui64(struct eth_addr ea, struct in6_addr *prefix,
 static inline void
 in6_generate_lla(struct eth_addr ea, struct in6_addr *lla)
 {
-    union ovs_16aligned_in6_addr *taddr = (void *) lla;
+    union ovs_16aligned_in6_addr *taddr =
+        (union ovs_16aligned_in6_addr *) lla;
     memset(taddr->be16, 0, sizeof(taddr->be16));
     taddr->be16[0] = htons(0xfe80);
     taddr->be16[4] = htons(((ea.ea[0] ^ 0x02) << 8) | ea.ea[1]);
diff --git a/lib/socket-util.h b/lib/socket-util.h
index 873a59a..439f0c2 100644
--- a/lib/socket-util.h
+++ b/lib/socket-util.h
@@ -138,7 +138,7 @@ static inline int make_unix_socket(int style, bool nonblock,
 static inline int rpl_setsockopt(int sock, int level, int optname,
                                  const void *optval, socklen_t optlen)
 {
-    return (setsockopt)(sock, level, optname, optval, optlen);
+    return (setsockopt)(sock, level, optname, (const char *)optval, optlen);
 }
 
 #define getsockopt(sock, level, optname, optval, optlen) \
@@ -146,7 +146,7 @@ static inline int rpl_setsockopt(int sock, int level, int optname,
 static inline int rpl_getsockopt(int sock, int level, int optname,
                                  void *optval, socklen_t *optlen)
 {
-    return (getsockopt)(sock, level, optname, optval, optlen);
+    return (getsockopt)(sock, level, optname, (char *)optval, optlen);
 }
 #endif
 
diff --git a/lib/unaligned.h b/lib/unaligned.h
index a150d7d..6d7edd1 100644
--- a/lib/unaligned.h
+++ b/lib/unaligned.h
@@ -52,11 +52,19 @@ static inline void put_unaligned_be64(ovs_be64 *, ovs_be64);
  * Below, "sizeof (*(P) % 1)" verifies that *P has an integer type, since
  * operands to % must be integers.
  */
+#if defined(__cplusplus) && defined(_WIN32)
+#define get_unaligned_u64(P)                                \
+    ([P]{BUILD_ASSERT(sizeof *(P) == 8);},                  \
+     BUILD_ASSERT_GCCONLY(!TYPE_IS_SIGNED(typeof(*(P)))),   \
+     (void) sizeof (*(P) % 1),                              \
+     get_unaligned_u64__((const uint64_t *) (P)))
+#else
 #define get_unaligned_u64(P)                                \
     (BUILD_ASSERT(sizeof *(P) == 8),                        \
      BUILD_ASSERT_GCCONLY(!TYPE_IS_SIGNED(typeof(*(P)))),   \
      (void) sizeof (*(P) % 1),                              \
      get_unaligned_u64__((const uint64_t *) (P)))
+#endif
 
 #ifdef __GNUC__
 /* GCC implementations. */
@@ -220,7 +228,11 @@ put_32aligned_u64(ovs_32aligned_u64 *x, uint64_t value)
 static inline ovs_u128
 get_32aligned_u128(const ovs_32aligned_u128 *x)
 {
-    ovs_u128 u = { .u32 = { x->u32[0], x->u32[1], x->u32[2], x->u32[3] } };
+    ovs_u128 u;
+    u.u32[0] = x->u32[0];
+    u.u32[1] = x->u32[1];
+    u.u32[2] = x->u32[2];
+    u.u32[3] = x->u32[3];
     return u;
 }
 
@@ -287,8 +299,11 @@ put_32aligned_be64(ovs_32aligned_be64 *x, ovs_be64 value)
 static inline ovs_be128
 get_32aligned_be128(const ovs_32aligned_be128 *x)
 {
-    ovs_be128 u = { .be32 = { x->be32[0], x->be32[1],
-                              x->be32[2], x->be32[3] } };
+    ovs_be128 u;
+    u.be32[0] = x->be32[0];
+    u.be32[1] = x->be32[1];
+    u.be32[2] = x->be32[2];
+    u.be32[3] = x->be32[3];
     return u;
 }
 
-- 
2.9.0.windows.1



More information about the dev mailing list