author     H. Peter Anvin (Intel) <hpa@zytor.com>    2020-07-08 08:56:03 -0700
committer  H. Peter Anvin (Intel) <hpa@zytor.com>    2020-07-08 09:01:34 -0700
commit     c341ad7300afa3f71db5cd9813bbeebf32f9195b (patch)
tree       2782b25d1c02108abba12904369a34d1f75357d6
parent     f293842b8a5e85506d0e67e20cbe5b62b60e6e61 (diff)
download   nasm-c341ad7300afa3f71db5cd9813bbeebf32f9195b.tar.gz
rbtree: implement rb_first(), rb_last() operations
Add operations to get the first and last entry in the tree, respectively. Searching for 0 or ~UINT64_C(0) is not sufficient in the presence of duplicated keys, and is less efficient anyway.

rb_first() followed by rb_next() until NULL, or equivalently rb_last() followed by rb_prev() until NULL, can be used to walk the tree in key order (ascending or descending), including all duplicate key entries. Since this is a *threaded* tree now, this walk can safely free entries as it goes along, as long as the whole tree is destroyed; once any one entry has been freed, the tree is no longer valid for anything other than proceeding with the same tree walk.

Signed-off-by: H. Peter Anvin (Intel) <hpa@zytor.com>
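For illustration, a minimal sketch of the walk-with-destruction pattern described above (not part of the commit: destroy_tree() and the bare free() call are assumptions for the example, since real callers embed struct rbtree inside a larger node and would free that instead):

#include <stdlib.h>
#include "rbtree.h"

/* Free every entry in ascending key order. The successor is fetched
 * before the current node is freed; after the first free() the tree
 * is valid only for continuing this same walk. */
static void destroy_tree(struct rbtree *tree)
{
    struct rbtree *node, *next;

    for (node = rb_first(tree); node; node = next) {
        next = rb_next(node);
        free(node);
    }
}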
-rw-r--r--  include/rbtree.h  | 23
-rw-r--r--  nasmlib/rbtree.c  | 36
2 files changed, 48 insertions(+), 11 deletions(-)
diff --git a/include/rbtree.h b/include/rbtree.h
index 8ccd129c..332f6f85 100644
--- a/include/rbtree.h
+++ b/include/rbtree.h
@@ -74,9 +74,30 @@ struct rbtree *rb_search(const struct rbtree *, uint64_t);
/*
 * Return the immediately previous or next node in key order.
- * Returns NULL if this node is the end of the
+ * Returns NULL if this node is the end of the tree.
+ * These operations are safe for complete (but not partial!)
+ * tree walk-with-destruction in key order.
 */
struct rbtree *rb_prev(const struct rbtree *);
struct rbtree *rb_next(const struct rbtree *);
+/*
+ * Return the very first or very last node in key order.
+ */
+struct rbtree *rb_first(const struct rbtree *);
+struct rbtree *rb_last(const struct rbtree *);
+
+/*
+ * Left and right nodes, if real. These operations are
+ * safe for tree destruction, but not for splitting a tree.
+ */
+static inline struct rbtree *rb_left(const struct rbtree *rb)
+{
+    return (rb->m.flags & RBTREE_NODE_PRED) ? NULL : rb->m.left;
+}
+static inline struct rbtree *rb_right(const struct rbtree *rb)
+{
+    return (rb->m.flags & RBTREE_NODE_SUCC) ? NULL : rb->m.right;
+}
+
#endif /* NASM_RBTREE_H */
diff --git a/nasmlib/rbtree.c b/nasmlib/rbtree.c
index 8b74c0c4..510f34b1 100644
--- a/nasmlib/rbtree.c
+++ b/nasmlib/rbtree.c
@@ -208,17 +208,36 @@ _rb_insert(struct rbtree *tree, struct rbtree *node)
    return tree;
}
+struct rbtree *rb_first(const struct rbtree *tree)
+{
+    if (unlikely(!tree))
+        return NULL;
+
+    while (!(tree->m.flags & RBTREE_NODE_PRED))
+        tree = tree->m.left;
+
+    return (struct rbtree *)tree;
+}
+
+struct rbtree *rb_last(const struct rbtree *tree)
+{
+    if (unlikely(!tree))
+        return NULL;
+
+    while (!(tree->m.flags & RBTREE_NODE_SUCC))
+        tree = tree->m.right;
+
+    return (struct rbtree *)tree;
+}
+
struct rbtree *rb_prev(const struct rbtree *node)
{
    struct rbtree *np = node->m.left;
    if (node->m.flags & RBTREE_NODE_PRED)
        return np;
-
-    while (!(np->m.flags & RBTREE_NODE_SUCC))
-        np = np->m.right;
-
-    return np;
+    else
+        return rb_last(np);
}
struct rbtree *rb_next(const struct rbtree *node)
@@ -227,9 +246,6 @@ struct rbtree *rb_next(const struct rbtree *node)
    if (node->m.flags & RBTREE_NODE_SUCC)
        return np;
-
-    while (!(np->m.flags & RBTREE_NODE_PRED))
-        np = np->m.left;
-
-    return np;
+    else
+        return rb_first(np);
}
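As a companion sketch (again an assumption for illustration, not code from this commit), the new rb_left()/rb_right() accessors permit a plain postorder destruction: because they return NULL for threaded links, each real node is visited exactly once. Per the header comment, this is safe only when destroying the whole tree, not for splitting one:

/* Postorder destruction via the structural accessors. */
static void destroy_subtree(struct rbtree *node)
{
    if (!node)
        return;
    destroy_subtree(rb_left(node));
    destroy_subtree(rb_right(node));
    free(node);
}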