/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
  Red Black Trees
  (C) 1999  Andrea Arcangeli <andrea@suse.de>

  linux/include/linux/rbtree.h

  To use rbtrees you'll have to implement your own insert and search cores.
  This avoids callbacks, which would dramatically hurt performance.
  I know it's not the cleanest way, but in C (not in C++) that is the
  price of getting both performance and genericity...

  See Documentation/rbtree.txt for documentation and samples.
*/
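/*
 * A minimal sketch of such a search core, assuming a hypothetical
 * struct mytype keyed by an int (neither is part of this header):
 *
 *	struct mytype {
 *		struct rb_node node;
 *		int key;
 *	};
 *
 *	static struct mytype *my_search(struct rb_root *root, int key)
 *	{
 *		struct rb_node *n = root->rb_node;
 *
 *		while (n) {
 *			struct mytype *cur = rb_entry(n, struct mytype, node);
 *
 *			if (key < cur->key)
 *				n = n->rb_left;
 *			else if (key > cur->key)
 *				n = n->rb_right;
 *			else
 *				return cur;
 *		}
 *		return NULL;
 *	}
 */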

#ifndef __TOOLS_LINUX_PERF_RBTREE_H
#define __TOOLS_LINUX_PERF_RBTREE_H

#include <linux/kernel.h>
#include <linux/stddef.h>

struct rb_node {
	unsigned long  __rb_parent_color;
	struct rb_node *rb_right;
	struct rb_node *rb_left;
} __attribute__((aligned(sizeof(long))));
/* The alignment might seem pointless, but allegedly CRIS needs it */

struct rb_root {
	struct rb_node *rb_node;
};

/*
 * Leftmost-cached rbtrees.
 *
 * We do not cache the rightmost node: weighing the extra footprint
 * against the number of potential users that would benefit from an
 * O(1) rb_last(), it is just not worth it. Users that want this
 * feature can always implement the logic explicitly. Furthermore,
 * users that want to cache both pointers may find it a bit
 * asymmetric, but that's ok.
 */
struct rb_root_cached {
	struct rb_root rb_root;
	struct rb_node *rb_leftmost;
};

#define rb_parent(r)   ((struct rb_node *)((r)->__rb_parent_color & ~3))

#define RB_ROOT	(struct rb_root) { NULL, }
#define RB_ROOT_CACHED (struct rb_root_cached) { {NULL, }, NULL }
#define	rb_entry(ptr, type, member) container_of(ptr, type, member)

#define RB_EMPTY_ROOT(root)  (READ_ONCE((root)->rb_node) == NULL)

/* 'empty' nodes are nodes that are known not to be inserted in an rbtree */
#define RB_EMPTY_NODE(node)  \
	((node)->__rb_parent_color == (unsigned long)(node))
#define RB_CLEAR_NODE(node)  \
	((node)->__rb_parent_color = (unsigned long)(node))
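/*
 * Sketch of the 'empty node' convention (hypothetical mytype from the
 * comment above): clear the node at init time so its inserted state can
 * be tested later, e.g. before a conditional erase:
 *
 *	RB_CLEAR_NODE(&m->node);
 *	...
 *	if (!RB_EMPTY_NODE(&m->node))
 *		rb_erase(&m->node, root);
 */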


extern void rb_insert_color(struct rb_node *, struct rb_root *);
extern void rb_erase(struct rb_node *, struct rb_root *);


/* Find logical next and previous nodes in a tree */
extern struct rb_node *rb_next(const struct rb_node *);
extern struct rb_node *rb_prev(const struct rb_node *);
extern struct rb_node *rb_first(const struct rb_root *);
extern struct rb_node *rb_last(const struct rb_root *);

extern void rb_insert_color_cached(struct rb_node *,
				   struct rb_root_cached *, bool);
extern void rb_erase_cached(struct rb_node *node, struct rb_root_cached *);
/* Same as rb_first(), but O(1) */
#define rb_first_cached(root) ((root)->rb_leftmost)
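/*
 * Sketch of leftmost-cached insertion (hypothetical mytype as above):
 * the caller tracks whether the descent only ever went left, and passes
 * that to rb_insert_color_cached() so the leftmost pointer stays valid:
 *
 *	static void my_insert_cached(struct rb_root_cached *tree,
 *				     struct mytype *new)
 *	{
 *		struct rb_node **link = &tree->rb_root.rb_node;
 *		struct rb_node *parent = NULL;
 *		bool leftmost = true;
 *
 *		while (*link) {
 *			struct mytype *cur = rb_entry(*link, struct mytype, node);
 *
 *			parent = *link;
 *			if (new->key < cur->key) {
 *				link = &parent->rb_left;
 *			} else {
 *				link = &parent->rb_right;
 *				leftmost = false;
 *			}
 *		}
 *
 *		rb_link_node(&new->node, parent, link);
 *		rb_insert_color_cached(&new->node, tree, leftmost);
 *	}
 */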

/* Postorder iteration - always visit the parent after its children */
extern struct rb_node *rb_first_postorder(const struct rb_root *);
extern struct rb_node *rb_next_postorder(const struct rb_node *);

/* Fast replacement of a single node without remove/rebalance/add/rebalance */
extern void rb_replace_node(struct rb_node *victim, struct rb_node *new,
			    struct rb_root *root);
extern void rb_replace_node_cached(struct rb_node *victim, struct rb_node *new,
				   struct rb_root_cached *root);
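/*
 * Sketch of replacement (hypothetical mytype as above): since no
 * rebalancing is done, the new node must sort identically to the victim:
 *
 *	new->key = victim->key;
 *	rb_replace_node(&victim->node, &new->node, root);
 */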

static inline void rb_link_node(struct rb_node *node, struct rb_node *parent,
				struct rb_node **rb_link)
{
	node->__rb_parent_color = (unsigned long)parent;
	node->rb_left = node->rb_right = NULL;

	*rb_link = node;
}
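/*
 * Sketch of the two-step insert core the header comment refers to
 * (hypothetical mytype as above): descend to the insertion point, link
 * the new node in place as a leaf with rb_link_node(), then let
 * rb_insert_color() do the non-trivial rebalancing work:
 *
 *	static void my_insert(struct rb_root *root, struct mytype *new)
 *	{
 *		struct rb_node **link = &root->rb_node, *parent = NULL;
 *
 *		while (*link) {
 *			struct mytype *cur = rb_entry(*link, struct mytype, node);
 *
 *			parent = *link;
 *			if (new->key < cur->key)
 *				link = &parent->rb_left;
 *			else
 *				link = &parent->rb_right;
 *		}
 *
 *		rb_link_node(&new->node, parent, link);
 *		rb_insert_color(&new->node, root);
 *	}
 */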

#define rb_entry_safe(ptr, type, member) \
	({ typeof(ptr) ____ptr = (ptr); \
	   ____ptr ? rb_entry(____ptr, type, member) : NULL; \
	})

/**
 * rbtree_postorder_for_each_entry_safe - iterate in post-order over rb_root of
 * given type allowing the backing memory of @pos to be invalidated
 *
 * @pos:	the 'type *' to use as a loop cursor.
 * @n:		another 'type *' to use as temporary storage
 * @root:	'rb_root *' of the rbtree.
 * @field:	the name of the rb_node field within 'type'.
 *
 * rbtree_postorder_for_each_entry_safe() provides a similar guarantee as
 * list_for_each_entry_safe() and allows the iteration to continue independent
 * of changes to @pos by the body of the loop.
 *
 * Note, however, that it cannot handle other modifications that re-order the
 * rbtree it is iterating over. This includes calling rb_erase() on @pos, as
 * rb_erase() may rebalance the tree, causing us to miss some nodes.
 */
#define rbtree_postorder_for_each_entry_safe(pos, n, root, field) \
	for (pos = rb_entry_safe(rb_first_postorder(root), typeof(*pos), field); \
	     pos && ({ n = rb_entry_safe(rb_next_postorder(&pos->field), \
			typeof(*pos), field); 1; }); \
	     pos = n)
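/*
 * Sketch of tearing down a whole tree with the postorder iterator
 * (hypothetical mytype as above; free() stands in for whatever releases
 * an entry). Because parents are visited after their children, the
 * backing memory of @pos may be freed inside the loop body:
 *
 *	struct mytype *pos, *n;
 *
 *	rbtree_postorder_for_each_entry_safe(pos, n, &root, node)
 *		free(pos);
 *	root = RB_ROOT;
 */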

static inline void rb_erase_init(struct rb_node *n, struct rb_root *root)
{
	rb_erase(n, root);
	RB_CLEAR_NODE(n);
}
#endif /* __TOOLS_LINUX_PERF_RBTREE_H */