-rw-r--r--  lib/classifier.c   |  4
-rw-r--r--  lib/cmap.h         | 57
-rw-r--r--  lib/dpif-netdev.c  |  2
3 files changed, 24 insertions(+), 39 deletions(-)
diff --git a/lib/classifier.c b/lib/classifier.c
index 334d0dab5..a6a582cd3 100644
--- a/lib/classifier.c
+++ b/lib/classifier.c
@@ -485,12 +485,12 @@ classifier_destroy(struct classifier *cls)
trie_destroy(&cls->tries[i].root);
}
- CMAP_FOR_EACH_SAFE (subtable, cmap_node, &cls->subtables_map) {
+ CMAP_FOR_EACH (subtable, cmap_node, &cls->subtables_map) {
destroy_subtable(cls, subtable);
}
cmap_destroy(&cls->subtables_map);
- CMAP_FOR_EACH_SAFE (partition, cmap_node, &cls->partitions) {
+ CMAP_FOR_EACH (partition, cmap_node, &cls->partitions) {
ovsrcu_postpone(free, partition);
}
cmap_destroy(&cls->partitions);
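
With the merged semantics, teardown loops like classifier_destroy() above need
no special variant: CMAP_FOR_EACH now advances the cursor before running the
loop body, so the body may free the current node. A minimal sketch of that
pattern, assuming OVS's lib/cmap.h and that the code runs at postponed time
(after the RCU grace period has expired); the element type and function name
here are hypothetical, not part of this commit:

    /* Hypothetical element type; 'node' lives inside a cmap. */
    struct my_elem {
        struct cmap_node node;
        int key;
    };

    static void
    my_map_destroy(struct cmap *map)
    {
        struct my_elem *elem;

        /* Safe to free 'elem' here: the cursor already points past it
         * when the body runs, and at postponed time no other thread can
         * still be reading it. */
        CMAP_FOR_EACH (elem, node, map) {
            free(elem);
        }
        cmap_destroy(map);
    }
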
diff --git a/lib/cmap.h b/lib/cmap.h
index 87a1d531f..038db6c04 100644
--- a/lib/cmap.h
+++ b/lib/cmap.h
@@ -163,33 +163,29 @@ struct cmap_node *cmap_find_protected(const struct cmap *, uint32_t hash);
* ...operate on my_node...
* }
*
- * CMAP_FOR_EACH_SAFE variant is useful only in deallocation code already
- * executing at postponed time, when it is known that the RCU grace period
- * has already expired.
+ * CMAP_FOR_EACH is "safe" in the sense of HMAP_FOR_EACH_SAFE. That is, it is
+ * safe to free the current node before going on to the next iteration. Most
+ * of the time, though, this doesn't matter for a cmap because node
+ * deallocation has to be postponed until the next grace period. This means
+ * that this guarantee is useful only in deallocation code already executing at
+ * postponed time, when it is known that the RCU grace period has already
+ * expired.
*/
-#define CMAP_CURSOR_FOR_EACH(NODE, MEMBER, CURSOR, CMAP) \
- for (*(CURSOR) = cmap_cursor_start(CMAP); \
- ((CURSOR)->node \
- ? (ASSIGN_CONTAINER(NODE, (CURSOR)->node, MEMBER), true) \
- : false); \
- cmap_cursor_advance(CURSOR))
-
-#define CMAP_CURSOR_FOR_EACH_SAFE(NODE, MEMBER, CURSOR, CMAP) \
- for (*(CURSOR) = cmap_cursor_start(CMAP); \
- ((CURSOR)->node \
- ? (ASSIGN_CONTAINER(NODE, (CURSOR)->node, MEMBER), \
- cmap_cursor_advance(CURSOR), \
- true) \
- : false); \
+#define CMAP_CURSOR_FOR_EACH__(NODE, CURSOR, MEMBER) \
+ ((CURSOR)->node \
+ ? (ASSIGN_CONTAINER(NODE, (CURSOR)->node, MEMBER), \
+ cmap_cursor_advance(CURSOR), \
+ true) \
+ : false)
+
+#define CMAP_CURSOR_FOR_EACH(NODE, MEMBER, CURSOR, CMAP) \
+ for (*(CURSOR) = cmap_cursor_start(CMAP); \
+ CMAP_CURSOR_FOR_EACH__(NODE, CURSOR, MEMBER); \
)
-#define CMAP_CURSOR_FOR_EACH_CONTINUE(NODE, MEMBER, CURSOR) \
- for (cmap_cursor_advance(CURSOR); \
- ((CURSOR)->node \
- ? (ASSIGN_CONTAINER(NODE, (CURSOR)->node, MEMBER), true) \
- : false); \
- cmap_cursor_advance(CURSOR))
+#define CMAP_CURSOR_FOR_EACH_CONTINUE(NODE, MEMBER, CURSOR) \
+ while (CMAP_CURSOR_FOR_EACH__(NODE, CURSOR, MEMBER))
struct cmap_cursor {
const struct cmap_impl *impl;
@@ -201,20 +197,9 @@ struct cmap_cursor {
struct cmap_cursor cmap_cursor_start(const struct cmap *);
void cmap_cursor_advance(struct cmap_cursor *);
-#define CMAP_FOR_EACH(NODE, MEMBER, CMAP) \
- for (struct cmap_cursor cursor__ = cmap_cursor_start(CMAP); \
- (cursor__.node \
- ? (ASSIGN_CONTAINER(NODE, cursor__.node, MEMBER), true) \
- : false); \
- cmap_cursor_advance(&cursor__))
-
-#define CMAP_FOR_EACH_SAFE(NODE, MEMBER, CMAP) \
+#define CMAP_FOR_EACH(NODE, MEMBER, CMAP) \
for (struct cmap_cursor cursor__ = cmap_cursor_start(CMAP); \
- (cursor__.node \
- ? (ASSIGN_CONTAINER(NODE, cursor__.node, MEMBER), \
- cmap_cursor_advance(&cursor__), \
- true) \
- : false); \
+ CMAP_CURSOR_FOR_EACH__(NODE, &cursor__, MEMBER); \
)
static inline struct cmap_node *cmap_first(const struct cmap *);
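
The refactored macros share one condition expression, CMAP_CURSOR_FOR_EACH__,
which assigns the container and advances the cursor before the body executes.
One consequence worth noting: after a break, an explicit cursor already points
past the node last visited, so CMAP_CURSOR_FOR_EACH_CONTINUE picks up at the
following node without an extra advance, which is why the new CONTINUE form is
a plain while loop. A sketch, reusing the hypothetical my_elem from above:

    struct cmap_cursor cursor;
    struct my_elem *elem;

    CMAP_CURSOR_FOR_EACH (elem, node, &cursor, map) {
        if (elem->key == 42) {
            break;          /* 'cursor' is already past this node. */
        }
    }
    CMAP_CURSOR_FOR_EACH_CONTINUE (elem, node, &cursor) {
        /* Visits the nodes after the one that matched. */
    }
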
diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
index 20813a8fd..bbc4eea0c 100644
--- a/lib/dpif-netdev.c
+++ b/lib/dpif-netdev.c
@@ -925,7 +925,7 @@ dp_netdev_flow_flush(struct dp_netdev *dp)
struct dp_netdev_flow *netdev_flow;
ovs_mutex_lock(&dp->flow_mutex);
- CMAP_FOR_EACH_SAFE (netdev_flow, node, &dp->flow_table) {
+ CMAP_FOR_EACH (netdev_flow, node, &dp->flow_table) {
dp_netdev_remove_flow(dp, netdev_flow);
}
ovs_mutex_unlock(&dp->flow_mutex);
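
The dpif-netdev hunk shows the other common shape: removing entries while
iterating. dp_netdev_remove_flow() unlinks each flow with cmap_remove() and
defers the actual free past the grace period, so plain CMAP_FOR_EACH suffices
even though concurrent readers may still see the unlinked node. A hedged
sketch of that remove-while-iterating pattern, again with hypothetical names
and assuming lib/cmap.h, lib/hash.h, and lib/ovs-rcu.h:

    static void
    my_map_flush(struct cmap *map)
    {
        struct my_elem *elem;

        CMAP_FOR_EACH (elem, node, map) {
            /* Unlink now; free only after a grace period, since
             * concurrent readers may still hold a pointer to 'elem'. */
            cmap_remove(map, &elem->node, hash_int(elem->key, 0));
            ovsrcu_postpone(free, elem);
        }
    }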