[nasm:master] rbtree: implement rb_first(), rb_last() operations

nasm-bot for H. Peter Anvin (Intel) hpa at zytor.com
Wed Jul 8 09:03:03 PDT 2020


Commit-ID:  c341ad7300afa3f71db5cd9813bbeebf32f9195b
Gitweb:     http://repo.or.cz/w/nasm.git?a=commitdiff;h=c341ad7300afa3f71db5cd9813bbeebf32f9195b
Author:     H. Peter Anvin (Intel) <hpa at zytor.com>
AuthorDate: Wed, 8 Jul 2020 08:56:03 -0700
Committer:  H. Peter Anvin (Intel) <hpa at zytor.com>
CommitDate: Wed, 8 Jul 2020 09:01:34 -0700

rbtree: implement rb_first(), rb_last() operations

Add operations to get the first and last entry in the tree,
respectively. Searching for 0 or ~UINT64_C(0) is not sufficient in the
presence of duplicate keys, and is less efficient anyway.

rb_first() followed by rb_next() until NULL, or equivalently rb_last()
followed by rb_prev() until NULL, can be used to walk the tree in key
order (ascending or descending), including all duplicate key
entries.
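
As an illustration (not part of the commit), an ascending walk might
look like the following sketch; do_something() is a hypothetical
per-node processing step:

    #include "rbtree.h"

    /* Visit every node in ascending key order, duplicates included. */
    static void walk_ascending(const struct rbtree *tree)
    {
        const struct rbtree *np;

        for (np = rb_first(tree); np; np = rb_next(np))
            do_something(np->key);  /* hypothetical per-node action */
    }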

Since this is a *threaded* tree now, this walk can safely free entries
as it goes along, as long as the whole tree is destroyed; once any one
entry has been freed, the tree is no longer valid for anything other
than proceeding with the same tree walk.
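
In other words, a complete walk-with-destruction only needs to save
the successor pointer before freeing each node. A sketch under that
assumption; free_entry() is a hypothetical destructor for the object
containing the node:

    #include "rbtree.h"

    /* Free every node during one forward walk. Safe only if the walk
     * runs to completion: rb_next() never revisits a node behind the
     * cursor, so already-freed entries are never touched. */
    static void destroy_tree(struct rbtree *tree)
    {
        struct rbtree *np, *next;

        for (np = rb_first(tree); np; np = next) {
            next = rb_next(np);  /* grab the successor before freeing */
            free_entry(np);      /* hypothetical: frees the entry */
        }
    }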

Signed-off-by: H. Peter Anvin (Intel) <hpa at zytor.com>


---
 include/rbtree.h | 23 ++++++++++++++++++++++-
 nasmlib/rbtree.c | 36 ++++++++++++++++++++++++++----------
 2 files changed, 48 insertions(+), 11 deletions(-)

diff --git a/include/rbtree.h b/include/rbtree.h
index 8ccd129c..332f6f85 100644
--- a/include/rbtree.h
+++ b/include/rbtree.h
@@ -74,9 +74,30 @@ struct rbtree *rb_search(const struct rbtree *, uint64_t);
 
 /*
  * Return the immediately previous or next node in key order.
- * Returns NULL if this node is the end of the
+ * Returns NULL if this node is the end of the tree.
+ * These operations are safe for complete (but not partial!)
+ * tree walk-with-destruction in key order.
  */
 struct rbtree *rb_prev(const struct rbtree *);
 struct rbtree *rb_next(const struct rbtree *);
 
+/*
+ * Return the very first or very last node in key order.
+ */
+struct rbtree *rb_first(const struct rbtree *);
+struct rbtree *rb_last(const struct rbtree *);
+
+/*
+ * Left and right nodes, if real. These operations are
+ * safe for tree destruction, but not for splitting a tree.
+ */
+static inline struct rbtree *rb_left(const struct rbtree *rb)
+{
+    return (rb->m.flags & RBTREE_NODE_PRED) ? NULL : rb->m.left;
+}
+static inline struct rbtree *rb_right(const struct rbtree *rb)
+{
+    return (rb->m.flags & RBTREE_NODE_SUCC) ? NULL : rb->m.right;
+}
+
 #endif /* NASM_RBTREE_H */
diff --git a/nasmlib/rbtree.c b/nasmlib/rbtree.c
index 8b74c0c4..510f34b1 100644
--- a/nasmlib/rbtree.c
+++ b/nasmlib/rbtree.c
@@ -208,17 +208,36 @@ _rb_insert(struct rbtree *tree, struct rbtree *node)
     return tree;
 }
 
+struct rbtree *rb_first(const struct rbtree *tree)
+{
+    if (unlikely(!tree))
+        return NULL;
+
+    while (!(tree->m.flags & RBTREE_NODE_PRED))
+        tree = tree->m.left;
+
+    return (struct rbtree *)tree;
+}
+
+struct rbtree *rb_last(const struct rbtree *tree)
+{
+    if (unlikely(!tree))
+        return NULL;
+
+    while (!(tree->m.flags & RBTREE_NODE_SUCC))
+        tree = tree->m.right;
+
+    return (struct rbtree *)tree;
+}
+
 struct rbtree *rb_prev(const struct rbtree *node)
 {
     struct rbtree *np = node->m.left;
 
     if (node->m.flags & RBTREE_NODE_PRED)
         return np;
-
-    while (!(np->m.flags & RBTREE_NODE_SUCC))
-        np = np->m.right;
-
-    return np;
+    else
+        return rb_last(np);
 }
 
 struct rbtree *rb_next(const struct rbtree *node)
@@ -227,9 +246,6 @@ struct rbtree *rb_next(const struct rbtree *node)
 
     if (node->m.flags & RBTREE_NODE_SUCC)
         return np;
-
-    while (!(np->m.flags & RBTREE_NODE_PRED))
-        np = np->m.left;
-
-    return np;
+    else
+        return rb_first(np);
 }
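
The new rb_left()/rb_right() accessors also support the conventional
recursive post-order destruction, since thread pointers read back as
NULL and each real child is therefore visited exactly once. A sketch,
with free_entry() again standing in for a caller-supplied destructor:

    #include "rbtree.h"

    /* Post-order destruction via the child accessors. */
    static void free_subtree(struct rbtree *rb)
    {
        if (!rb)
            return;
        free_subtree(rb_left(rb));   /* NULL if left is a thread */
        free_subtree(rb_right(rb));  /* NULL if right is a thread */
        free_entry(rb);              /* hypothetical destructor */
    }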

