author    Thomas Haller <thaller@redhat.com>  2022-07-28 10:40:05 +0200
committer Thomas Haller <thaller@redhat.com>  2022-07-28 13:07:50 +0200
commit    8153b3ff0c3d235b59a1f636e67f62a90b088f38 (patch)
tree      eebbcfd8232dca6b3fcf48d346a9d638a3ba6522
parent    6501f741fc71d7c84852266e663ba3bf8b69570d (diff)
std-aux: use unique local variable in NM_IN_SET() macro
If you write

    nm_assert_addr_family(NMP_OBJECT_CAST_MPTCP_ADDR(obj)->addr_family);

then there are two nested NM_IN_SET() macro invocations. First, NMP_OBJECT_CAST_MPTCP_ADDR() checks (via NM_IN_SET()) that the object type is one of a few selected types. The result is then passed to nm_assert_addr_family(), which checks NM_IN_SET(addr_family, AF_INET, AF_INET6).

In general, it's easy to end up in a situation like this, and it mostly works just fine. The only problem is that NM_IN_SET() uses an internal local variable "_x", so the nested expansion makes the compiler emit a verbose failure about the shadowed variable:

    ./src/libnm-std-aux/nm-std-aux.h:802:14: error: declaration of '_x' shadows a previous local [-Werror=shadow]
      802 |         type _x = (x);                                           \

NM_UNIQ_T() exists for exactly this purpose. Use it. NM_IN_SET() is popular enough to warrant special treatment to avoid this pitfall.
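For illustration, here is a minimal, self-contained sketch of the pitfall and of the fix. The IN_SET3()/UNIQ_T() names are hypothetical stand-ins for NM_IN_SET()/NM_UNIQ_T(), and the sketch assumes NM_UNIQ expands to a __COUNTER__-style per-expansion token, as the patch's use of NM_UNIQ_T(xx, uniq) suggests:

    #include <stdio.h>

    /* The pitfall: every expansion declares the same local "_x", so a
     * nested invocation shadows the outer one and -Wshadow (with
     * -Werror) rejects the build. */
    #define IN_SET3_NAIVE(x, a, b, c)                    \
        ({                                               \
            typeof(x) _x = (x);                          \
            (_x == (a)) || (_x == (b)) || (_x == (c));   \
        })

    /* The fix: paste a per-expansion __COUNTER__ token into the variable
     * name, so each expansion gets its own identifier (xx0, xx1, ...). */
    #define UNIQ_T_(name, uniq) name##uniq
    #define UNIQ_T(name, uniq)  UNIQ_T_(name, uniq)

    #define _IN_SET3(uniq, x, a, b, c)                             \
        ({                                                         \
            typeof(x) UNIQ_T(xx, uniq) = (x);                      \
            (UNIQ_T(xx, uniq) == (a)) || (UNIQ_T(xx, uniq) == (b)) \
                || (UNIQ_T(xx, uniq) == (c));                      \
        })
    #define IN_SET3(x, a, b, c) _IN_SET3(__COUNTER__, x, a, b, c)

    int
    main(void)
    {
        /* Nested use compiles cleanly even under -Wshadow -Werror: the
         * inner and outer expansions use distinct variable names. */
        int r = IN_SET3(IN_SET3(2, 1, 2, 3), 0, 1, 9);

        printf("%d\n", r); /* inner matches 2 -> 1; outer matches 1 -> prints 1 */
        return 0;
    }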
-rw-r--r--  src/libnm-std-aux/nm-std-aux.h  26
1 file changed, 13 insertions(+), 13 deletions(-)
diff --git a/src/libnm-std-aux/nm-std-aux.h b/src/libnm-std-aux/nm-std-aux.h
index ad9e9d656c..18c467b3b3 100644
--- a/src/libnm-std-aux/nm-std-aux.h
+++ b/src/libnm-std-aux/nm-std-aux.h
@@ -796,33 +796,33 @@ nm_memeq(const void *s1, const void *s2, size_t len)
/*****************************************************************************/
-#define _NM_IN_SET_OP(x, idx, op_arg) ((int) (_x == (x)))
-#define _NM_IN_SET(op, type, x, ...) \
- ({ \
- type _x = (x); \
- \
- /* trigger a -Wenum-compare warning */ \
- nm_assert(true || _x == (x)); \
- \
- !!(NM_VA_ARGS_FOREACH(, , op, _NM_IN_SET_OP, , __VA_ARGS__)); \
+#define _NM_IN_SET_OP(x, idx, uniq) ((int) (NM_UNIQ_T(xx, uniq) == (x)))
+#define _NM_IN_SET(uniq, op, type, x, ...) \
+ ({ \
+ type NM_UNIQ_T(xx, uniq) = (x); \
+ \
+ /* trigger a -Wenum-compare warning */ \
+ nm_assert(true || NM_UNIQ_T(xx, uniq) == (x)); \
+ \
+ !!(NM_VA_ARGS_FOREACH(, , op, _NM_IN_SET_OP, uniq, __VA_ARGS__)); \
})
/* Beware that this does short-circuit evaluation (use "||" instead of "|")
* which has a possibly unexpected non-function-like behavior.
* Use NM_IN_SET_SE if you need all arguments to be evaluated. */
-#define NM_IN_SET(x, ...) _NM_IN_SET(||, typeof(x), x, __VA_ARGS__)
+#define NM_IN_SET(x, ...) _NM_IN_SET(NM_UNIQ, ||, typeof(x), x, __VA_ARGS__)
/* "SE" stands for "side-effect". Contrary to NM_IN_SET(), this does not do
* short-circuit evaluation, which can make a difference if the arguments have
* side-effects. */
-#define NM_IN_SET_SE(x, ...) _NM_IN_SET(|, typeof(x), x, __VA_ARGS__)
+#define NM_IN_SET_SE(x, ...) _NM_IN_SET(NM_UNIQ, |, typeof(x), x, __VA_ARGS__)
/* the *_TYPED forms allow to explicitly select the type of "x". This is useful
* if "x" doesn't support typeof (bitfields) or you want to gracefully convert
* a type using automatic type conversion rules (but not forcing the conversion
* with a cast). */
-#define NM_IN_SET_TYPED(type, x, ...) _NM_IN_SET(||, type, x, __VA_ARGS__)
-#define NM_IN_SET_SE_TYPED(type, x, ...) _NM_IN_SET(|, type, x, __VA_ARGS__)
+#define NM_IN_SET_TYPED(type, x, ...) _NM_IN_SET(NM_UNIQ, ||, type, x, __VA_ARGS__)
+#define NM_IN_SET_SE_TYPED(type, x, ...) _NM_IN_SET(NM_UNIQ, |, type, x, __VA_ARGS__)
/*****************************************************************************/
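To make the evaluation difference between the two variants concrete, here is a small usage sketch. It assumes nm-std-aux.h can be included standalone and that the build enables GCC statement expressions; probe() is a hypothetical helper added for the demonstration:

    #include <stdio.h>

    #include "libnm-std-aux/nm-std-aux.h"

    static int n_evaluated;

    /* Hypothetical helper: returns its argument and counts how often
     * the set members actually get evaluated. */
    static int
    probe(int v)
    {
        n_evaluated++;
        return v;
    }

    int
    main(void)
    {
        int r;

        n_evaluated = 0;
        r = NM_IN_SET(5, probe(5), probe(6), probe(7));
        /* "||" short-circuits: probe(5) matches, so probe(6) and
         * probe(7) never run. */
        printf("NM_IN_SET:    r=%d, %d evaluations\n", r, n_evaluated); /* r=1, 1 */

        n_evaluated = 0;
        r = NM_IN_SET_SE(5, probe(5), probe(6), probe(7));
        /* "|" evaluates every member, side effects included. */
        printf("NM_IN_SET_SE: r=%d, %d evaluations\n", r, n_evaluated); /* r=1, 3 */
        return 0;
    }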
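Similarly, a short sketch of when the *_TYPED variants are needed: GCC rejects typeof() on a bit-field, so the comparison type must be spelled out explicitly. The struct here is hypothetical:

    #include <stdio.h>

    #include "libnm-std-aux/nm-std-aux.h"

    /* Hypothetical struct: bit-fields don't support typeof(). */
    struct msg_info {
        unsigned msg_type : 3;
    };

    int
    main(void)
    {
        struct msg_info info = { .msg_type = 2 };

        /* NM_IN_SET(info.msg_type, 1, 2, 3) would not compile, because
         * _NM_IN_SET() applies typeof() to "x". Naming the type
         * explicitly side-steps that. */
        if (NM_IN_SET_TYPED(unsigned, info.msg_type, 1, 2, 3))
            printf("msg_type is in the set\n");
        return 0;
    }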