// kbtree.h: generic B-tree implemented entirely in macros (from klib).
1 /*-
2  * Copyright 1997-1999, 2001, John-Mark Gurney.
3  * 2008-2009, Attractive Chaos <[email protected]>
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright
10  * notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  * notice, this list of conditions and the following disclaimer in the
13  * documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  */
27 
// Gotchas
// -------
//
// If you delete from a kbtree while iterating over it, you must use
// kb_del_itr rather than kb_del; otherwise the iterator may point to
// freed memory.
34 #ifndef NVIM_LIB_KBTREE_H
35 #define NVIM_LIB_KBTREE_H
36 
#include <assert.h>
#include <stdbool.h>  // `bool is_internal` in __KB_TREE_T
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include "nvim/memory.h"
43 
// Maximum B-tree height an iterator's path stack can record.
#define KB_MAX_DEPTH 64

// Accessors for a node's key array and child-pointer array.  The first
// argument of each is unused and kept only for API symmetry.  `x` is
// parenthesized so any pointer-valued expression may be passed.
#define __KB_KEY(type, x) ((x)->key)
#define __KB_PTR(btr, x) ((x)->ptr)
48 
/* Declares the node, tree, position and iterator types for one
 * instantiation named `name`, holding keys of type `key_t`, with B-tree
 * minimum degree T (each node stores at most 2*T-1 keys and 2*T children). */
#define __KB_TREE_T(name, key_t, T) \
  typedef struct kbnode_##name##_s kbnode_##name##_t; \
  struct kbnode_##name##_s { \
    int32_t n; /* number of keys currently stored in this node */ \
    bool is_internal; /* true when the node has children in ptr[] */ \
    key_t key[2*T-1]; \
    kbnode_##name##_t *ptr[]; /* flexible array: 2*T children; allocated for internal nodes only */ \
  }; \
  typedef struct { \
    kbnode_##name##_t *root; \
    int n_keys, n_nodes; /* running totals for the whole tree */ \
  } kbtree_##name##_t; \
  typedef struct { \
    kbnode_##name##_t *x; /* node on the iterator's path */ \
    int i; /* key index within x */ \
  } kbpos_##name##_t; \
  typedef struct { \
    kbpos_##name##_t stack[KB_MAX_DEPTH], *p; /* root-to-current path; p == NULL when exhausted */ \
  } kbitr_##name##_t; \

69 
/* Frees every node of tree `b` iteratively, using an explicit growable
 * worklist instead of recursion, then frees the worklist itself.
 * Safe to call on an empty tree (b->root == NULL). */
#define __kb_destroy(kbnode_t, b) do { \
    int i; \
    unsigned int max = 8; /* current worklist capacity */ \
    kbnode_t *x, **top, **stack = 0; \
    if (b->root) { \
      top = stack = (kbnode_t **)xcalloc(max, sizeof(kbnode_t *)); \
      *top++ = (b)->root; \
      while (top != stack) { \
        x = *--top; \
        /* leaves have no children: free and move on */ \
        if (x->is_internal == 0) { XFREE_CLEAR(x); continue; } \
        for (i = 0; i <= x->n; ++i) \
          if (__KB_PTR(b, x)[i]) { \
            if (top - stack == (int)max) { \
              /* worklist full: double the capacity, then re-derive top */ \
              max <<= 1; \
              stack = (kbnode_t **)xrealloc(stack, max * sizeof(kbnode_t *)); \
              top = stack + (max>>1); \
            } \
            *top++ = __KB_PTR(b, x)[i]; \
          } \
        XFREE_CLEAR(x); \
      } \
    } \
    XFREE_CLEAR(stack); \
} while (0)
94 
/* Binary search for *k among the keys of node x.
 * Returns the index of the rightmost key <= *k, or -1 when *k precedes
 * every key (also -1 when x is empty; in that case *r is NOT written).
 * *r (or an internal throwaway when r == NULL) receives the result of the
 * final comparison: 0 means an exact match at the returned index. */
#define __KB_GET_AUX1(name, key_t, kbnode_t, __cmp) \
  static inline int __kb_getp_aux_##name(const kbnode_t * __restrict x, key_t * __restrict k, \
                                         int *r) \
  { \
    int tr, *rr, begin = 0, end = x->n; \
    if (x->n == 0) return -1; \
    rr = r? r : &tr; /* caller may not care about the comparison result */ \
    while (begin < end) { /* classic lower-bound loop */ \
      int mid = (begin + end) >> 1; \
      if (__cmp(__KB_KEY(key_t, x)[mid], *k) < 0) begin = mid + 1; \
      else end = mid; \
    } \
    /* begin is now the first key >= *k */ \
    if (begin == x->n) { *rr = 1; return x->n - 1; } /* every key < *k */ \
    if ((*rr = __cmp(*k, __KB_KEY(key_t, x)[begin])) < 0) --begin; \
    return begin; \
  }
111 
#define __KB_GET(name, key_t, kbnode_t) \
  /* Looks up *k in tree b.  Returns a pointer to the stored key on an \
   * exact match, or NULL when the tree is empty or the key is absent. */ \
  static key_t *kb_getp_##name(kbtree_##name##_t *b, key_t * __restrict k) \
  { \
    int pos, cmp = 0; \
    /* descend from the root, at each level following the child just \
     * right of the rightmost key <= *k */ \
    for (kbnode_t *node = b->root; node != 0; node = __KB_PTR(b, node)[pos + 1]) { \
      pos = __kb_getp_aux_##name(node, k, &cmp); \
      if (pos >= 0 && cmp == 0) { \
        return &__KB_KEY(key_t, node)[pos]; \
      } \
      if (!node->is_internal) { \
        return 0; /* reached a leaf without a hit */ \
      } \
    } \
    return 0; \
  } \
  /* By-value convenience wrapper around kb_getp_##name. */ \
  static inline key_t *kb_get_##name(kbtree_##name##_t *b, key_t k) \
  { \
    return kb_getp_##name(b, &k); \
  }
132 
/* Finds the neighbors of *k: on return, *lower points to the largest
 * stored key <= *k and *upper to the smallest stored key > *k; either
 * remains NULL when no such key exists.  On an exact match both point at
 * the matching key.  Fix over the previous version: *lower/*upper are
 * now zeroed BEFORE the empty-tree guard, so callers always get defined
 * results instead of reading their own uninitialized pointers. */
#define __KB_INTERVAL(name, key_t, kbnode_t) \
  static inline void kb_intervalp_##name(kbtree_##name##_t *b, key_t * __restrict k, key_t **lower, \
                                         key_t **upper) \
  { \
    *lower = *upper = 0; /* defined outputs even for an empty tree */ \
    if (!b->root) { \
      return; \
    } \
    int i, r = 0; \
    kbnode_t *x = b->root; \
    while (x) { \
      i = __kb_getp_aux_##name(x, k, &r); \
      if (i >= 0 && r == 0) { \
        /* exact hit: both bounds collapse onto the stored key */ \
        *lower = *upper = &__KB_KEY(key_t, x)[i]; \
        return; \
      } \
      /* tighten the bounds using position i within this node */ \
      if (i >= 0) *lower = &__KB_KEY(key_t, x)[i]; \
      if (i < x->n - 1) *upper = &__KB_KEY(key_t, x)[i + 1]; \
      if (x->is_internal == 0) return; \
      x = __KB_PTR(b, x)[i + 1]; \
    } \
  } \
  /* By-value convenience wrapper around kb_intervalp_##name. */ \
  static inline void kb_interval_##name(kbtree_##name##_t *b, key_t k, key_t **lower, key_t **upper) \
  { \
    kb_intervalp_##name(b, &k, lower, upper); \
  }
159 
#define __KB_PUT(name, key_t, kbnode_t, __cmp, T, ILEN) \
  /* x must be an internal node */ \
  /* Splits full child y (== ptr[i] of x, holding 2*T-1 keys) into y and a \
   * fresh node z, hoisting y's median key into x at slot i. */ \
  static inline void __kb_split_##name(kbtree_##name##_t *b, kbnode_t *x, int i, kbnode_t *y) \
  { \
    kbnode_t *z; \
    z = (kbnode_t *)xcalloc(1, y->is_internal? ILEN : sizeof(kbnode_##name##_t)); \
    ++b->n_nodes; \
    z->is_internal = y->is_internal; \
    z->n = T - 1; \
    /* upper half of y's keys (and children, when internal) move into z */ \
    memcpy(__KB_KEY(key_t, z), &__KB_KEY(key_t, y)[T], sizeof(key_t) * (T - 1)); \
    if (y->is_internal) memcpy(__KB_PTR(b, z), &__KB_PTR(b, y)[T], sizeof(void *) * T); \
    y->n = T - 1; \
    /* open a gap in x for z and the median key */ \
    memmove(&__KB_PTR(b, x)[i + 2], &__KB_PTR(b, \
                                             x)[i + 1], sizeof(void *) * (unsigned int)(x->n - i)); \
    __KB_PTR(b, x)[i + 1] = z; \
    memmove(&__KB_KEY(key_t, x)[i + 1], &__KB_KEY(key_t, x)[i], \
            sizeof(key_t) * (unsigned int)(x->n - i)); \
    __KB_KEY(key_t, x)[i] = __KB_KEY(key_t, y)[T - 1]; /* median moves up */ \
    ++x->n; \
  } \
  /* Inserts *k into the subtree rooted at the non-full node x and returns \
   * a pointer to the stored copy.  Duplicates are allowed. */ \
  static inline key_t *__kb_putp_aux_##name(kbtree_##name##_t *b, kbnode_t *x, key_t * __restrict k) \
  { \
    int i = x->n - 1; \
    key_t *ret; \
    if (x->is_internal == 0) { \
      /* leaf: shift the tail right and drop the key into slot i+1 */ \
      i = __kb_getp_aux_##name(x, k, 0); \
      if (i != x->n - 1) \
        memmove(&__KB_KEY(key_t, x)[i + 2], &__KB_KEY(key_t, \
                                                      x)[i + 1], \
                (unsigned int)(x->n - i - 1) * sizeof(key_t)); \
      ret = &__KB_KEY(key_t, x)[i + 1]; \
      *ret = *k; \
      ++x->n; \
    } else { \
      /* internal: pre-split a full child before descending into it, so \
       * recursion always enters a non-full node */ \
      i = __kb_getp_aux_##name(x, k, 0) + 1; \
      if (__KB_PTR(b, x)[i]->n == 2 * T - 1) { \
        __kb_split_##name(b, x, i, __KB_PTR(b, x)[i]); \
        if (__cmp(*k, __KB_KEY(key_t, x)[i]) > 0) ++i; \
      } \
      ret = __kb_putp_aux_##name(b, __KB_PTR(b, x)[i], k); \
    } \
    return ret; \
  } \
  /* Public insert: lazily allocates the root, and grows a new root above \
   * a full one so the descent never starts at a full node. */ \
  static inline key_t *kb_putp_##name(kbtree_##name##_t *b, key_t * __restrict k) \
  { \
    if (!b->root) { \
      b->root = (kbnode_t *)xcalloc(1, ILEN); \
      ++b->n_nodes; \
    } \
    kbnode_t *r, *s; \
    ++b->n_keys; \
    r = b->root; \
    if (r->n == 2 * T - 1) { \
      /* root full: push a new root on top and split the old one */ \
      ++b->n_nodes; \
      s = (kbnode_t *)xcalloc(1, ILEN); \
      b->root = s; s->is_internal = 1; s->n = 0; \
      __KB_PTR(b, s)[0] = r; \
      __kb_split_##name(b, s, 0, r); \
      r = s; \
    } \
    return __kb_putp_aux_##name(b, r, k); \
  } \
  /* By-value convenience wrapper around kb_putp_##name. */ \
  static inline void kb_put_##name(kbtree_##name##_t *b, key_t k) \
  { \
    kb_putp_##name(b, &k); \
  }
226 
227 
#define __KB_DEL(name, key_t, kbnode_t, T) \
  /* Recursive delete helper (CLRS-style B-tree deletion). \
   * Mode s selects the target: 0 = delete *k, 1 = delete the subtree \
   * maximum, 2 = delete the subtree minimum (modes 1/2 fetch replacement \
   * keys for an internal-node hit).  The caller's rebalancing guarantees \
   * every node descended into has >= T keys, so removal cannot underflow. \
   * Returns the removed key. */ \
  static inline key_t __kb_delp_aux_##name(kbtree_##name##_t *b, kbnode_t *x, key_t * __restrict k, \
                                           int s) \
  { \
    int yn, zn, i, r = 0; \
    kbnode_t *xp, *y, *z; \
    key_t kp; \
    if (x == 0) return *k; \
    if (s) { /* s can only be 0, 1 or 2 */ \
      r = x->is_internal == 0? 0 : s == 1? 1 : -1; \
      i = s == 1? x->n - 1 : -1; \
    } else i = __kb_getp_aux_##name(x, k, &r); \
    if (x->is_internal == 0) { \
      /* leaf: remove key i (the minimum when s == 2) by shifting left */ \
      if (s == 2) ++i; \
      kp = __KB_KEY(key_t, x)[i]; \
      memmove(&__KB_KEY(key_t, x)[i], &__KB_KEY(key_t, \
                                                x)[i + 1], \
              (unsigned int)(x->n - i - 1) * sizeof(key_t)); \
      --x->n; \
      return kp; \
    } \
    if (r == 0) { \
      /* exact hit in an internal node (all three branches return) */ \
      if ((yn = __KB_PTR(b, x)[i]->n) >= T) { \
        /* replace with the predecessor pulled from the left child */ \
        xp = __KB_PTR(b, x)[i]; \
        kp = __KB_KEY(key_t, x)[i]; \
        __KB_KEY(key_t, x)[i] = __kb_delp_aux_##name(b, xp, 0, 1); \
        return kp; \
      } else if ((zn = __KB_PTR(b, x)[i + 1]->n) >= T) { \
        /* replace with the successor pulled from the right child */ \
        xp = __KB_PTR(b, x)[i + 1]; \
        kp = __KB_KEY(key_t, x)[i]; \
        __KB_KEY(key_t, x)[i] = __kb_delp_aux_##name(b, xp, 0, 2); \
        return kp; \
      } else if (yn == T - 1 && zn == T - 1) { \
        /* both children minimal: merge y + *k + z into y, free z, recurse */ \
        y = __KB_PTR(b, x)[i]; z = __KB_PTR(b, x)[i + 1]; \
        __KB_KEY(key_t, y)[y->n++] = *k; \
        memmove(&__KB_KEY(key_t, y)[y->n], __KB_KEY(key_t, z), (unsigned int)z->n * sizeof(key_t)); \
        if (y->is_internal) memmove(&__KB_PTR(b, y)[y->n], __KB_PTR(b, \
                                                                    z), \
                                    (unsigned int)(z->n + 1) * sizeof(void *)); \
        y->n += z->n; \
        memmove(&__KB_KEY(key_t, x)[i], &__KB_KEY(key_t, \
                                                  x)[i + 1], \
                (unsigned int)(x->n - i - 1) * sizeof(key_t)); \
        memmove(&__KB_PTR(b, x)[i + 1], &__KB_PTR(b, \
                                                  x)[i + 2], \
                (unsigned int)(x->n - i - 1) * sizeof(void *)); \
        --x->n; \
        XFREE_CLEAR(z); \
        return __kb_delp_aux_##name(b, y, k, s); \
      } \
    } \
    ++i; \
    /* descending into child i: ensure it has >= T keys first — borrow \
     * from a sibling when possible, otherwise merge with one */ \
    if ((xp = __KB_PTR(b, x)[i])->n == T - 1) { \
      if (i > 0 && (y = __KB_PTR(b, x)[i - 1])->n >= T) { \
        /* rotate one key in from the left sibling via the separator */ \
        memmove(&__KB_KEY(key_t, xp)[1], __KB_KEY(key_t, xp), (unsigned int)xp->n * sizeof(key_t)); \
        if (xp->is_internal) memmove(&__KB_PTR(b, xp)[1], __KB_PTR(b, \
                                                                   xp), \
                                     (unsigned int)(xp->n + 1) * sizeof(void *)); \
        __KB_KEY(key_t, xp)[0] = __KB_KEY(key_t, x)[i - 1]; \
        __KB_KEY(key_t, x)[i - 1] = __KB_KEY(key_t, y)[y->n - 1]; \
        if (xp->is_internal) __KB_PTR(b, xp)[0] = __KB_PTR(b, y)[y->n]; \
        --y->n; ++xp->n; \
      } else if (i < x->n && (y = __KB_PTR(b, x)[i + 1])->n >= T) { \
        /* rotate one key in from the right sibling via the separator */ \
        __KB_KEY(key_t, xp)[xp->n++] = __KB_KEY(key_t, x)[i]; \
        __KB_KEY(key_t, x)[i] = __KB_KEY(key_t, y)[0]; \
        if (xp->is_internal) __KB_PTR(b, xp)[xp->n] = __KB_PTR(b, y)[0]; \
        --y->n; \
        memmove(__KB_KEY(key_t, y), &__KB_KEY(key_t, y)[1], (unsigned int)y->n * sizeof(key_t)); \
        if (y->is_internal) memmove(__KB_PTR(b, y), &__KB_PTR(b, \
                                                              y)[1], \
                                    (unsigned int)(y->n + 1) * sizeof(void *)); \
      } else if (i > 0 && (y = __KB_PTR(b, x)[i - 1])->n == T - 1) { \
        /* left sibling also minimal: merge xp into it, free xp */ \
        __KB_KEY(key_t, y)[y->n++] = __KB_KEY(key_t, x)[i - 1]; \
        memmove(&__KB_KEY(key_t, y)[y->n], __KB_KEY(key_t, xp), \
                (unsigned int)xp->n * sizeof(key_t)); \
        if (y->is_internal) memmove(&__KB_PTR(b, y)[y->n], __KB_PTR(b, \
                                                                    xp), \
                                    (unsigned int)(xp->n + 1) * sizeof(void *)); \
        y->n += xp->n; \
        memmove(&__KB_KEY(key_t, x)[i - 1], &__KB_KEY(key_t, \
                                                      x)[i], \
                (unsigned int)(x->n - i) * sizeof(key_t)); \
        memmove(&__KB_PTR(b, x)[i], &__KB_PTR(b, \
                                              x)[i + 1], (unsigned int)(x->n - i) * sizeof(void *)); \
        --x->n; \
        XFREE_CLEAR(xp); \
        xp = y; \
      } else if (i < x->n && (y = __KB_PTR(b, x)[i + 1])->n == T - 1) { \
        /* right sibling minimal: merge it into xp, free y */ \
        __KB_KEY(key_t, xp)[xp->n++] = __KB_KEY(key_t, x)[i]; \
        memmove(&__KB_KEY(key_t, xp)[xp->n], __KB_KEY(key_t, y), \
                (unsigned int)y->n * sizeof(key_t)); \
        if (xp->is_internal) memmove(&__KB_PTR(b, xp)[xp->n], __KB_PTR(b, y), \
                                     (unsigned int)(y->n + 1) * sizeof(void *)); \
        xp->n += y->n; \
        memmove(&__KB_KEY(key_t, x)[i], &__KB_KEY(key_t, \
                                                  x)[i + 1], \
                (unsigned int)(x->n - i - 1) * sizeof(key_t)); \
        memmove(&__KB_PTR(b, x)[i + 1], &__KB_PTR(b, \
                                                  x)[i + 2], \
                (unsigned int)(x->n - i - 1) * sizeof(void *)); \
        --x->n; \
        XFREE_CLEAR(y); \
      } \
    } \
    return __kb_delp_aux_##name(b, xp, k, s); \
  } \
  /* Deletes *k and returns the removed key. \
   * Preconditions: the tree must be non-empty (b->root is dereferenced \
   * unconditionally, so NULL root is a crash) and *k should be present — \
   * when *k is absent, a neighboring key is removed instead. */ \
  static inline key_t kb_delp_##name(kbtree_##name##_t *b, key_t * __restrict k) \
  { \
    kbnode_t *x; \
    key_t ret; \
    ret = __kb_delp_aux_##name(b, b->root, k, 0); \
    --b->n_keys; \
    if (b->root->n == 0 && b->root->is_internal) { \
      /* root emptied by a merge: drop one tree level */ \
      --b->n_nodes; \
      x = b->root; \
      b->root = __KB_PTR(b, x)[0]; \
      XFREE_CLEAR(x); \
    } \
    return ret; \
  } \
  /* By-value convenience wrapper around kb_delp_##name. */ \
  static inline key_t kb_del_##name(kbtree_##name##_t *b, key_t k) \
  { \
    return kb_delp_##name(b, &k); \
  }
352 
#define __KB_ITR(name, key_t, kbnode_t) \
  /* Positions itr on the smallest key: walks leftmost children from the \
   * root, recording the path on itr->stack.  p == NULL means "no keys". */ \
  static inline void kb_itr_first_##name(kbtree_##name##_t *b, kbitr_##name##_t *itr) \
  { \
    itr->p = NULL; \
    if (b->n_keys == 0) return; \
    itr->p = itr->stack; \
    itr->p->x = b->root; itr->p->i = 0; \
    while (itr->p->x->is_internal && __KB_PTR(b, itr->p->x)[0] != 0) { \
      kbnode_t *x = itr->p->x; \
      ++itr->p; \
      itr->p->x = __KB_PTR(b, x)[0]; itr->p->i = 0; \
    } \
  } \
  /* Advances itr to the next key in order; returns 0 (and sets p = NULL) \
   * when the iteration is exhausted. */ \
  static inline int kb_itr_next_##name(kbtree_##name##_t *b, kbitr_##name##_t *itr) \
  { \
    if (itr->p == NULL) return 0; \
    for (;;) { \
      ++itr->p->i; \
      /* NOTE(review): the bound 21 is a magic constant that only holds \
       * for instantiations with 2*T-1 <= 20; a generic invariant would \
       * be i <= x->n + 1 — confirm the intended T before reusing this \
       * header with larger node sizes. */ \
      assert(itr->p->i <= 21); \
      while (itr->p->x && itr->p->i <= itr->p->x->n) { \
        itr->p[1].i = 0; \
        /* descend into child i; leaves push a NULL sentinel entry */ \
        itr->p[1].x = itr->p->x->is_internal? __KB_PTR(b, itr->p->x)[itr->p->i] : 0; \
        ++itr->p; \
      } \
      if (itr->p == itr->stack) { \
        itr->p = NULL; \
        return 0; \
      } \
      --itr->p; \
      if (itr->p->x && itr->p->i < itr->p->x->n) return 1; \
    } \
  } \
  /* Moves itr to the previous key in order; returns 0 (and sets \
   * p = NULL) when already at the first key. */ \
  static inline int kb_itr_prev_##name(kbtree_##name##_t *b, kbitr_##name##_t *itr) \
  { \
    if (itr->p == NULL) return 0; \
    for (;;) { \
      while (itr->p->x && itr->p->i >= 0) { \
        itr->p[1].x = itr->p->x->is_internal? __KB_PTR(b, itr->p->x)[itr->p->i] : 0; \
        itr->p[1].i = itr->p[1].x ? itr->p[1].x->n : -1; \
        ++itr->p; \
      } \
      if (itr->p == itr->stack) { \
        itr->p = NULL; \
        return 0; \
      } \
      --itr->p; \
      --itr->p->i; \
      if (itr->p->x && itr->p->i >= 0) return 1; \
    } \
  } \
  /* Seeks itr to key *k; returns 1 on an exact match.  On a miss it \
   * returns 0 with itr left at the end of the search path, so a \
   * following kb_itr_next yields the smallest key greater than *k. */ \
  static inline int kb_itr_getp_##name(kbtree_##name##_t *b, key_t * __restrict k, \
                                       kbitr_##name##_t *itr) \
  { \
    if (b->n_keys == 0) { \
      itr->p = NULL; \
      return 0; \
    } \
    int i, r = 0; \
    itr->p = itr->stack; \
    itr->p->x = b->root; \
    while (itr->p->x) { \
      i = __kb_getp_aux_##name(itr->p->x, k, &r); \
      itr->p->i = i; \
      if (i >= 0 && r == 0) return 1; \
      ++itr->p->i; \
      /* NOTE(review): same instantiation-specific magic bound as in \
       * kb_itr_next — see the note there. */ \
      assert(itr->p->i <= 21); \
      itr->p[1].x = itr->p->x->is_internal? __KB_PTR(b, itr->p->x)[i + 1] : 0; \
      ++itr->p; \
    } \
    itr->p->i = 0; \
    return 0; \
  } \
  /* By-value convenience wrapper around kb_itr_getp_##name. */ \
  static inline int kb_itr_get_##name(kbtree_##name##_t *b, key_t k, kbitr_##name##_t *itr) \
  { \
    return kb_itr_getp_##name(b, &k, itr); \
  } \
  /* Deletes the key under itr and re-seeks the iterator, keeping it safe \
   * to continue iterating (see the "Gotchas" note at the top of file). */ \
  static inline void kb_del_itr_##name(kbtree_##name##_t *b, kbitr_##name##_t *itr) \
  { \
    key_t k = kb_itr_key(itr); \
    kb_delp_##name(b, &k); \
    kb_itr_getp_##name(b, &k, itr); \
  }
435 
/* Instantiates a complete B-tree implementation named `name` for keys of
 * type key_t, ordered by __cmp, with minimum degree T.  The computed last
 * argument is the byte size of an internal node: the node struct plus
 * 2*T child pointers for the flexible ptr[] member. */
#define KBTREE_INIT(name, key_t, __cmp, T) \
  KBTREE_INIT_IMPL(name, key_t, kbnode_##name##_t, __cmp, T, \
                   (sizeof(kbnode_##name##_t)+(2*T)*sizeof(void *)))

/* Expands every component macro in dependency order: types first, then
 * search, interval, insert, delete, and iterator functions. */
#define KBTREE_INIT_IMPL(name, key_t, kbnode_t, __cmp, T, ILEN) \
  __KB_TREE_T(name, key_t, T) \
  __KB_GET_AUX1(name, key_t, kbnode_t, __cmp) \
  __KB_GET(name, key_t, kbnode_t) \
  __KB_INTERVAL(name, key_t, kbnode_t) \
  __KB_PUT(name, key_t, kbnode_t, __cmp, T, ILEN) \
  __KB_DEL(name, key_t, kbnode_t, T) \
  __KB_ITR(name, key_t, kbnode_t)
448 
#define KB_DEFAULT_SIZE 512

// Generic front-end macros: `name` selects a KBTREE_INIT instantiation.
#define kbtree_t(name) kbtree_##name##_t
#define kbitr_t(name) kbitr_##name##_t
// Zero-initializes a tree object; no allocation happens until first put.
#define kb_init(b) ((b)->n_keys = (b)->n_nodes = 0, (b)->root = 0)
#define kb_destroy(name, b) __kb_destroy(kbnode_##name##_t, b)
#define kb_get(name, b, k) kb_get_##name(b, k)
#define kb_put(name, b, k) kb_put_##name(b, k)
#define kb_del(name, b, k) kb_del_##name(b, k)
#define kb_interval(name, b, k, l, u) kb_interval_##name(b, k, l, u)
// The "p" variants take the key by pointer instead of by value.
#define kb_getp(name, b, k) kb_getp_##name(b, k)
#define kb_putp(name, b, k) kb_putp_##name(b, k)
#define kb_delp(name, b, k) kb_delp_##name(b, k)
#define kb_intervalp(name, b, k, l, u) kb_intervalp_##name(b, k, l, u)

#define kb_itr_first(name, b, i) kb_itr_first_##name(b, i)
#define kb_itr_get(name, b, k, i) kb_itr_get_##name(b, k, i)
#define kb_itr_getp(name, b, k, i) kb_itr_getp_##name(b, k, i)
#define kb_itr_next(name, b, i) kb_itr_next_##name(b, i)
#define kb_itr_prev(name, b, i) kb_itr_prev_##name(b, i)
#define kb_del_itr(name, b, i) kb_del_itr_##name(b, i)
// Key at the iterator's current position.  __KB_KEY ignores its first
// argument, hence the literal `dummy`.
#define kb_itr_key(itr) __KB_KEY(dummy, (itr)->p->x)[(itr)->p->i]
// NOTE(review): p is set to NULL once the iterator is exhausted, and
// comparing NULL with >= against the stack array is formally unspecified
// pointer comparison, although it behaves as intended on common ABIs.
#define kb_itr_valid(itr) ((itr)->p >= (itr)->stack)

#define kb_size(b) ((b)->n_keys)

// Branch-free three-way compare; avoids the signed overflow of (a) - (b).
#define kb_generic_cmp(a, b) (((b) < (a)) - ((a) < (b)))
#define kb_str_cmp(a, b) strcmp(a, b)
477 
478 #endif // NVIM_LIB_KBTREE_H