kbtree.h
/*-
 * Copyright 1997-1999, 2001, John-Mark Gurney.
 * 2008-2009, Attractive Chaos <[email protected]>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

// Gotchas
// -------
//
// If you delete from a kbtree while iterating over it, you must use
// kb_del_itr and not kb_del; otherwise the iterator might point to freed
// memory. See the sketch below.

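// Example (an illustrative sketch, not part of this header): deleting
// entries while iterating. The instantiation name `itree` and the minimum
// degree 10 are arbitrary choices for the sketch.
//
//   KBTREE_INIT(itree, int, kb_generic_cmp, 10)
//
//   static void drop_negative_keys(kbtree_t(itree) *t)
//   {
//     kbitr_t(itree) itr;
//     kb_itr_first(itree, t, &itr);
//     for (; kb_itr_valid(&itr); kb_itr_next(itree, t, &itr)) {
//       if (kb_itr_key(&itr) < 0) {
//         // kb_del_itr re-seeks the iterator after removing the key;
//         // calling kb_del here instead could leave `itr` dangling.
//         kb_del_itr(itree, t, &itr);
//       }
//     }
//   }
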
#ifndef NVIM_LIB_KBTREE_H
#define NVIM_LIB_KBTREE_H

#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <stdbool.h>
#include <assert.h>

#include "nvim/memory.h"

#define KB_MAX_DEPTH 64

#define __KB_KEY(type, x) (x->key)
#define __KB_PTR(btr, x) (x->ptr)

#define __KB_TREE_T(name, key_t, T) \
  typedef struct kbnode_##name##_s kbnode_##name##_t; \
  struct kbnode_##name##_s { \
    int32_t n; \
    bool is_internal; \
    key_t key[2*T-1]; \
    kbnode_##name##_t *ptr[]; \
  }; \
  \
  typedef struct { \
    kbnode_##name##_t *root; \
    int n_keys, n_nodes; \
  } kbtree_##name##_t; \
  \
  typedef struct { \
    kbnode_##name##_t *x; \
    int i; \
  } kbpos_##name##_t; \
  typedef struct { \
    kbpos_##name##_t stack[KB_MAX_DEPTH], *p; \
  } kbitr_##name##_t; \


#define __kb_destroy(kbnode_t, b) do { \
    int i; \
    unsigned int max = 8; \
    kbnode_t *x, **top, **stack = 0; \
    if (b->root) { \
      top = stack = (kbnode_t **)xcalloc(max, sizeof(kbnode_t *)); \
      *top++ = (b)->root; \
      while (top != stack) { \
        x = *--top; \
        if (x->is_internal == 0) { XFREE_CLEAR(x); continue; } \
        for (i = 0; i <= x->n; ++i) \
          if (__KB_PTR(b, x)[i]) { \
            if (top - stack == (int)max) { \
              max <<= 1; \
              stack = (kbnode_t **)xrealloc(stack, max * sizeof(kbnode_t *)); \
              top = stack + (max>>1); \
            } \
            *top++ = __KB_PTR(b, x)[i]; \
          } \
        XFREE_CLEAR(x); \
      } \
    } \
    XFREE_CLEAR(stack); \
  } while (0)
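
// Illustrative lifecycle note (assumes the `itree` instantiation sketched
// above): __kb_destroy frees every node with an explicit stack but does not
// reset the tree header, so re-run kb_init before reusing the tree.
//
//   kbtree_t(itree) t;
//   kb_init(&t);
//   kb_put(itree, &t, 42);
//   kb_destroy(itree, &t);
//   kb_init(&t);  // required before the tree is used again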

#define __KB_GET_AUX1(name, key_t, kbnode_t, __cmp) \
  static inline int __kb_getp_aux_##name(const kbnode_t * __restrict x, key_t * __restrict k, int *r) \
  { \
    int tr, *rr, begin = 0, end = x->n; \
    if (x->n == 0) return -1; \
    rr = r? r : &tr; \
    while (begin < end) { \
      int mid = (begin + end) >> 1; \
      if (__cmp(__KB_KEY(key_t, x)[mid], *k) < 0) begin = mid + 1; \
      else end = mid; \
    } \
    if (begin == x->n) { *rr = 1; return x->n - 1; } \
    if ((*rr = __cmp(*k, __KB_KEY(key_t, x)[begin])) < 0) --begin; \
    return begin; \
  }
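
// Worked example of the __kb_getp_aux_##name contract, for a node holding
// the keys {10, 20, 30} under kb_generic_cmp:
//   *k == 20  ->  returns 1, *r == 0   (exact match at index 1)
//   *k == 25  ->  returns 1, *r != 0   (key[1] == 20 is the last key <= *k)
//   *k == 5   ->  returns -1           (every key in the node is > *k)
// i.e. the result is the index of the last key not greater than *k (or -1),
// and *r is zero only on an exact match.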

#define __KB_GET(name, key_t, kbnode_t) \
  static key_t *kb_getp_##name(kbtree_##name##_t *b, key_t * __restrict k) \
  { \
    if (!b->root) { \
      return 0; \
    } \
    int i, r = 0; \
    kbnode_t *x = b->root; \
    while (x) { \
      i = __kb_getp_aux_##name(x, k, &r); \
      if (i >= 0 && r == 0) return &__KB_KEY(key_t, x)[i]; \
      if (x->is_internal == 0) return 0; \
      x = __KB_PTR(b, x)[i + 1]; \
    } \
    return 0; \
  } \
  static inline key_t *kb_get_##name(kbtree_##name##_t *b, key_t k) \
  { \
    return kb_getp_##name(b, &k); \
  }
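
// Example lookup (illustrative; reuses the `itree` instantiation and a
// kb_init'd tree `t` from the sketches above). kb_get returns a pointer to
// the stored key, or NULL when it is absent; kb_getp takes the query by
// pointer. The returned pointer may be invalidated by a later put or del.
//
//   int *hit = kb_get(itree, &t, 42);
//   if (hit != NULL) {
//     // *hit == 42
//   }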

#define __KB_INTERVAL(name, key_t, kbnode_t) \
  static inline void kb_intervalp_##name(kbtree_##name##_t *b, key_t * __restrict k, key_t **lower, key_t **upper) \
  { \
    if (!b->root) { \
      return; \
    } \
    int i, r = 0; \
    kbnode_t *x = b->root; \
    *lower = *upper = 0; \
    while (x) { \
      i = __kb_getp_aux_##name(x, k, &r); \
      if (i >= 0 && r == 0) { \
        *lower = *upper = &__KB_KEY(key_t, x)[i]; \
        return; \
      } \
      if (i >= 0) *lower = &__KB_KEY(key_t, x)[i]; \
      if (i < x->n - 1) *upper = &__KB_KEY(key_t, x)[i + 1]; \
      if (x->is_internal == 0) return; \
      x = __KB_PTR(b, x)[i + 1]; \
    } \
  } \
  static inline void kb_interval_##name(kbtree_##name##_t *b, key_t k, key_t **lower, key_t **upper) \
  { \
    kb_intervalp_##name(b, &k, lower, upper); \
  }
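
// Example bound query (illustrative; `itree`/`t` as above). On return,
// *lower points at the largest stored key <= k and *upper at the smallest
// stored key >= k; a pointer comes back NULL when no such key exists. Note
// the empty-tree case returns without touching either pointer, so initialise
// both to NULL before the call.
//
//   int *lower = NULL, *upper = NULL;
//   kb_interval(itree, &t, 25, &lower, &upper);
//   // with keys {10, 20, 30} stored: *lower == 20, *upper == 30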

#define __KB_PUT(name, key_t, kbnode_t, __cmp, T, ILEN) \
  /* x must be an internal node */ \
  static inline void __kb_split_##name(kbtree_##name##_t *b, kbnode_t *x, int i, kbnode_t *y) \
  { \
    kbnode_t *z; \
    z = (kbnode_t *)xcalloc(1, y->is_internal? ILEN : sizeof(kbnode_##name##_t)); \
    ++b->n_nodes; \
    z->is_internal = y->is_internal; \
    z->n = T - 1; \
    memcpy(__KB_KEY(key_t, z), &__KB_KEY(key_t, y)[T], sizeof(key_t) * (T - 1)); \
    if (y->is_internal) memcpy(__KB_PTR(b, z), &__KB_PTR(b, y)[T], sizeof(void *) * T); \
    y->n = T - 1; \
    memmove(&__KB_PTR(b, x)[i + 2], &__KB_PTR(b, x)[i + 1], sizeof(void *) * (unsigned int)(x->n - i)); \
    __KB_PTR(b, x)[i + 1] = z; \
    memmove(&__KB_KEY(key_t, x)[i + 1], &__KB_KEY(key_t, x)[i], sizeof(key_t) * (unsigned int)(x->n - i)); \
    __KB_KEY(key_t, x)[i] = __KB_KEY(key_t, y)[T - 1]; \
    ++x->n; \
  } \
  static inline key_t *__kb_putp_aux_##name(kbtree_##name##_t *b, kbnode_t *x, key_t * __restrict k) \
  { \
    int i = x->n - 1; \
    key_t *ret; \
    if (x->is_internal == 0) { \
      i = __kb_getp_aux_##name(x, k, 0); \
      if (i != x->n - 1) \
        memmove(&__KB_KEY(key_t, x)[i + 2], &__KB_KEY(key_t, x)[i + 1], (unsigned int)(x->n - i - 1) * sizeof(key_t)); \
      ret = &__KB_KEY(key_t, x)[i + 1]; \
      *ret = *k; \
      ++x->n; \
    } else { \
      i = __kb_getp_aux_##name(x, k, 0) + 1; \
      if (__KB_PTR(b, x)[i]->n == 2 * T - 1) { \
        __kb_split_##name(b, x, i, __KB_PTR(b, x)[i]); \
        if (__cmp(*k, __KB_KEY(key_t, x)[i]) > 0) ++i; \
      } \
      ret = __kb_putp_aux_##name(b, __KB_PTR(b, x)[i], k); \
    } \
    return ret; \
  } \
  static inline key_t *kb_putp_##name(kbtree_##name##_t *b, key_t * __restrict k) \
  { \
    if (!b->root) { \
      b->root = (kbnode_t *)xcalloc(1, ILEN); \
      ++b->n_nodes; \
    } \
    kbnode_t *r, *s; \
    ++b->n_keys; \
    r = b->root; \
    if (r->n == 2 * T - 1) { \
      ++b->n_nodes; \
      s = (kbnode_t *)xcalloc(1, ILEN); \
      b->root = s; s->is_internal = 1; s->n = 0; \
      __KB_PTR(b, s)[0] = r; \
      __kb_split_##name(b, s, 0, r); \
      r = s; \
    } \
    return __kb_putp_aux_##name(b, r, k); \
  } \
  static inline void kb_put_##name(kbtree_##name##_t *b, key_t k) \
  { \
    kb_putp_##name(b, &k); \
  }
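
// Example insertion (illustrative; `itree`/`t` as above). kb_put copies the
// key into the tree; duplicates are not checked for and are stored as
// separate entries. kb_putp takes the key by pointer and returns a pointer
// to the stored copy.
//
//   kb_put(itree, &t, 42);
//   int k = 7;
//   int *stored = kb_putp(itree, &t, &k);  // *stored == 7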

#define __KB_DEL(name, key_t, kbnode_t, T) \
  static inline key_t __kb_delp_aux_##name(kbtree_##name##_t *b, kbnode_t *x, key_t * __restrict k, int s) \
  { \
    int yn, zn, i, r = 0; \
    kbnode_t *xp, *y, *z; \
    key_t kp; \
    if (x == 0) return *k; \
    if (s) { /* s can only be 0, 1 or 2 */ \
      r = x->is_internal == 0? 0 : s == 1? 1 : -1; \
      i = s == 1? x->n - 1 : -1; \
    } else i = __kb_getp_aux_##name(x, k, &r); \
    if (x->is_internal == 0) { \
      if (s == 2) ++i; \
      kp = __KB_KEY(key_t, x)[i]; \
      memmove(&__KB_KEY(key_t, x)[i], &__KB_KEY(key_t, x)[i + 1], (unsigned int)(x->n - i - 1) * sizeof(key_t)); \
      --x->n; \
      return kp; \
    } \
    if (r == 0) { \
      if ((yn = __KB_PTR(b, x)[i]->n) >= T) { \
        xp = __KB_PTR(b, x)[i]; \
        kp = __KB_KEY(key_t, x)[i]; \
        __KB_KEY(key_t, x)[i] = __kb_delp_aux_##name(b, xp, 0, 1); \
        return kp; \
      } else if ((zn = __KB_PTR(b, x)[i + 1]->n) >= T) { \
        xp = __KB_PTR(b, x)[i + 1]; \
        kp = __KB_KEY(key_t, x)[i]; \
        __KB_KEY(key_t, x)[i] = __kb_delp_aux_##name(b, xp, 0, 2); \
        return kp; \
      } else if (yn == T - 1 && zn == T - 1) { \
        y = __KB_PTR(b, x)[i]; z = __KB_PTR(b, x)[i + 1]; \
        __KB_KEY(key_t, y)[y->n++] = *k; \
        memmove(&__KB_KEY(key_t, y)[y->n], __KB_KEY(key_t, z), (unsigned int)z->n * sizeof(key_t)); \
        if (y->is_internal) memmove(&__KB_PTR(b, y)[y->n], __KB_PTR(b, z), (unsigned int)(z->n + 1) * sizeof(void *)); \
        y->n += z->n; \
        memmove(&__KB_KEY(key_t, x)[i], &__KB_KEY(key_t, x)[i + 1], (unsigned int)(x->n - i - 1) * sizeof(key_t)); \
        memmove(&__KB_PTR(b, x)[i + 1], &__KB_PTR(b, x)[i + 2], (unsigned int)(x->n - i - 1) * sizeof(void *)); \
        --x->n; \
        XFREE_CLEAR(z); \
        return __kb_delp_aux_##name(b, y, k, s); \
      } \
    } \
    ++i; \
    if ((xp = __KB_PTR(b, x)[i])->n == T - 1) { \
      if (i > 0 && (y = __KB_PTR(b, x)[i - 1])->n >= T) { \
        memmove(&__KB_KEY(key_t, xp)[1], __KB_KEY(key_t, xp), (unsigned int)xp->n * sizeof(key_t)); \
        if (xp->is_internal) memmove(&__KB_PTR(b, xp)[1], __KB_PTR(b, xp), (unsigned int)(xp->n + 1) * sizeof(void *)); \
        __KB_KEY(key_t, xp)[0] = __KB_KEY(key_t, x)[i - 1]; \
        __KB_KEY(key_t, x)[i - 1] = __KB_KEY(key_t, y)[y->n - 1]; \
        if (xp->is_internal) __KB_PTR(b, xp)[0] = __KB_PTR(b, y)[y->n]; \
        --y->n; ++xp->n; \
      } else if (i < x->n && (y = __KB_PTR(b, x)[i + 1])->n >= T) { \
        __KB_KEY(key_t, xp)[xp->n++] = __KB_KEY(key_t, x)[i]; \
        __KB_KEY(key_t, x)[i] = __KB_KEY(key_t, y)[0]; \
        if (xp->is_internal) __KB_PTR(b, xp)[xp->n] = __KB_PTR(b, y)[0]; \
        --y->n; \
        memmove(__KB_KEY(key_t, y), &__KB_KEY(key_t, y)[1], (unsigned int)y->n * sizeof(key_t)); \
        if (y->is_internal) memmove(__KB_PTR(b, y), &__KB_PTR(b, y)[1], (unsigned int)(y->n + 1) * sizeof(void *)); \
      } else if (i > 0 && (y = __KB_PTR(b, x)[i - 1])->n == T - 1) { \
        __KB_KEY(key_t, y)[y->n++] = __KB_KEY(key_t, x)[i - 1]; \
        memmove(&__KB_KEY(key_t, y)[y->n], __KB_KEY(key_t, xp), (unsigned int)xp->n * sizeof(key_t)); \
        if (y->is_internal) memmove(&__KB_PTR(b, y)[y->n], __KB_PTR(b, xp), (unsigned int)(xp->n + 1) * sizeof(void *)); \
        y->n += xp->n; \
        memmove(&__KB_KEY(key_t, x)[i - 1], &__KB_KEY(key_t, x)[i], (unsigned int)(x->n - i) * sizeof(key_t)); \
        memmove(&__KB_PTR(b, x)[i], &__KB_PTR(b, x)[i + 1], (unsigned int)(x->n - i) * sizeof(void *)); \
        --x->n; \
        XFREE_CLEAR(xp); \
        xp = y; \
      } else if (i < x->n && (y = __KB_PTR(b, x)[i + 1])->n == T - 1) { \
        __KB_KEY(key_t, xp)[xp->n++] = __KB_KEY(key_t, x)[i]; \
        memmove(&__KB_KEY(key_t, xp)[xp->n], __KB_KEY(key_t, y), (unsigned int)y->n * sizeof(key_t)); \
        if (xp->is_internal) memmove(&__KB_PTR(b, xp)[xp->n], __KB_PTR(b, y), (unsigned int)(y->n + 1) * sizeof(void *)); \
        xp->n += y->n; \
        memmove(&__KB_KEY(key_t, x)[i], &__KB_KEY(key_t, x)[i + 1], (unsigned int)(x->n - i - 1) * sizeof(key_t)); \
        memmove(&__KB_PTR(b, x)[i + 1], &__KB_PTR(b, x)[i + 2], (unsigned int)(x->n - i - 1) * sizeof(void *)); \
        --x->n; \
        XFREE_CLEAR(y); \
      } \
    } \
    return __kb_delp_aux_##name(b, xp, k, s); \
  } \
  static inline key_t kb_delp_##name(kbtree_##name##_t *b, key_t * __restrict k) \
  { \
    kbnode_t *x; \
    key_t ret; \
    ret = __kb_delp_aux_##name(b, b->root, k, 0); \
    --b->n_keys; \
    if (b->root->n == 0 && b->root->is_internal) { \
      --b->n_nodes; \
      x = b->root; \
      b->root = __KB_PTR(b, x)[0]; \
      XFREE_CLEAR(x); \
    } \
    return ret; \
  } \
  static inline key_t kb_del_##name(kbtree_##name##_t *b, key_t k) \
  { \
    return kb_delp_##name(b, &k); \
  }
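
// Example removal (illustrative; `itree`/`t` as above). kb_del returns the
// removed key by value. The deletion path assumes the key is present (and
// n_keys is decremented unconditionally), so check with kb_get first when
// in doubt.
//
//   if (kb_get(itree, &t, 42) != NULL) {
//     int removed = kb_del(itree, &t, 42);  // removed == 42
//   }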

#define __KB_ITR(name, key_t, kbnode_t) \
  static inline void kb_itr_first_##name(kbtree_##name##_t *b, kbitr_##name##_t *itr) \
  { \
    itr->p = NULL; \
    if (b->n_keys == 0) return; \
    itr->p = itr->stack; \
    itr->p->x = b->root; itr->p->i = 0; \
    while (itr->p->x->is_internal && __KB_PTR(b, itr->p->x)[0] != 0) { \
      kbnode_t *x = itr->p->x; \
      ++itr->p; \
      itr->p->x = __KB_PTR(b, x)[0]; itr->p->i = 0; \
    } \
  } \
  static inline int kb_itr_next_##name(kbtree_##name##_t *b, kbitr_##name##_t *itr) \
  { \
    if (itr->p == NULL) return 0; \
    for (;;) { \
      ++itr->p->i; \
      assert(itr->p->i <= 21); \
      while (itr->p->x && itr->p->i <= itr->p->x->n) { \
        itr->p[1].i = 0; \
        itr->p[1].x = itr->p->x->is_internal? __KB_PTR(b, itr->p->x)[itr->p->i] : 0; \
        ++itr->p; \
      } \
      if (itr->p == itr->stack) { \
        itr->p = NULL; \
        return 0; \
      } \
      --itr->p; \
      if (itr->p->x && itr->p->i < itr->p->x->n) return 1; \
    } \
  } \
  static inline int kb_itr_prev_##name(kbtree_##name##_t *b, kbitr_##name##_t *itr) \
  { \
    if (itr->p == NULL) return 0; \
    for (;;) { \
      while (itr->p->x && itr->p->i >= 0) { \
        itr->p[1].x = itr->p->x->is_internal? __KB_PTR(b, itr->p->x)[itr->p->i] : 0; \
        itr->p[1].i = itr->p[1].x ? itr->p[1].x->n : -1; \
        ++itr->p; \
      } \
      if (itr->p == itr->stack) { \
        itr->p = NULL; \
        return 0; \
      } \
      --itr->p; \
      --itr->p->i; \
      if (itr->p->x && itr->p->i >= 0) return 1; \
    } \
  } \
  static inline int kb_itr_getp_##name(kbtree_##name##_t *b, key_t * __restrict k, kbitr_##name##_t *itr) \
  { \
    if (b->n_keys == 0) { \
      itr->p = NULL; \
      return 0; \
    } \
    int i, r = 0; \
    itr->p = itr->stack; \
    itr->p->x = b->root; \
    while (itr->p->x) { \
      i = __kb_getp_aux_##name(itr->p->x, k, &r); \
      itr->p->i = i; \
      if (i >= 0 && r == 0) return 1; \
      ++itr->p->i; \
      assert(itr->p->i <= 21); \
      itr->p[1].x = itr->p->x->is_internal? __KB_PTR(b, itr->p->x)[i + 1] : 0; \
      ++itr->p; \
    } \
    itr->p->i = 0; \
    return 0; \
  } \
  static inline int kb_itr_get_##name(kbtree_##name##_t *b, key_t k, kbitr_##name##_t *itr) \
  { \
    return kb_itr_getp_##name(b, &k, itr); \
  } \
  static inline void kb_del_itr_##name(kbtree_##name##_t *b, kbitr_##name##_t *itr) \
  { \
    key_t k = kb_itr_key(itr); \
    kb_delp_##name(b, &k); \
    kb_itr_getp_##name(b, &k, itr); \
  }
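
// Example in-order traversal (illustrative; `itree`/`t` as above).
// kb_itr_first positions the iterator on the smallest key and kb_itr_next
// advances it; kb_itr_valid becomes false once the tree is exhausted.
//
//   kbitr_t(itree) itr;
//   kb_itr_first(itree, &t, &itr);
//   for (; kb_itr_valid(&itr); kb_itr_next(itree, &t, &itr)) {
//     int key = kb_itr_key(&itr);
//     // keys are visited in ascending kb_generic_cmp order
//   }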

#define KBTREE_INIT(name, key_t, __cmp, T) \
  KBTREE_INIT_IMPL(name, key_t, kbnode_##name##_t, __cmp, T, (sizeof(kbnode_##name##_t) + (2*T) * sizeof(void *)))

#define KBTREE_INIT_IMPL(name, key_t, kbnode_t, __cmp, T, ILEN) \
  __KB_TREE_T(name, key_t, T) \
  __KB_GET_AUX1(name, key_t, kbnode_t, __cmp) \
  __KB_GET(name, key_t, kbnode_t) \
  __KB_INTERVAL(name, key_t, kbnode_t) \
  __KB_PUT(name, key_t, kbnode_t, __cmp, T, ILEN) \
  __KB_DEL(name, key_t, kbnode_t, T) \
  __KB_ITR(name, key_t, kbnode_t)

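// KBTREE_INIT(name, key_t, __cmp, T) instantiates the whole API for one key
// type: the types kbtree_##name##_t and kbitr_##name##_t plus the
// kb_*_##name functions wrapped by the convenience macros below. T is the
// minimum degree of the B-tree: every node except the root holds between
// T-1 and 2*T-1 keys. A hypothetical instantiation for C strings:
//
//   KBTREE_INIT(stree, char *, kb_str_cmp, 10)
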
#define KB_DEFAULT_SIZE 512

#define kbtree_t(name) kbtree_##name##_t
#define kbitr_t(name) kbitr_##name##_t
#define kb_init(b) ((b)->n_keys = (b)->n_nodes = 0, (b)->root = 0)
#define kb_destroy(name, b) __kb_destroy(kbnode_##name##_t, b)
#define kb_get(name, b, k) kb_get_##name(b, k)
#define kb_put(name, b, k) kb_put_##name(b, k)
#define kb_del(name, b, k) kb_del_##name(b, k)
#define kb_interval(name, b, k, l, u) kb_interval_##name(b, k, l, u)
#define kb_getp(name, b, k) kb_getp_##name(b, k)
#define kb_putp(name, b, k) kb_putp_##name(b, k)
#define kb_delp(name, b, k) kb_delp_##name(b, k)
#define kb_intervalp(name, b, k, l, u) kb_intervalp_##name(b, k, l, u)

#define kb_itr_first(name, b, i) kb_itr_first_##name(b, i)
#define kb_itr_get(name, b, k, i) kb_itr_get_##name(b, k, i)
#define kb_itr_getp(name, b, k, i) kb_itr_getp_##name(b, k, i)
#define kb_itr_next(name, b, i) kb_itr_next_##name(b, i)
#define kb_itr_prev(name, b, i) kb_itr_prev_##name(b, i)
#define kb_del_itr(name, b, i) kb_del_itr_##name(b, i)
#define kb_itr_key(itr) __KB_KEY(dummy, (itr)->p->x)[(itr)->p->i]
#define kb_itr_valid(itr) ((itr)->p >= (itr)->stack)

#define kb_size(b) ((b)->n_keys)

#define kb_generic_cmp(a, b) (((b) < (a)) - ((a) < (b)))
#define kb_str_cmp(a, b) strcmp(a, b)
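
// kb_generic_cmp works for any type ordered by `<`; a custom comparator only
// needs the same negative/zero/positive convention. A hypothetical tree
// keyed on a struct field could look like:
//
//   typedef struct { int line; void *data; } mark_t;
//   #define mark_cmp(a, b) kb_generic_cmp((a).line, (b).line)
//   KBTREE_INIT(marks, mark_t, mark_cmp, 10)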

#endif // NVIM_LIB_KBTREE_H