Diffstat (limited to 'src/nvim/lib')
 -rw-r--r--  src/nvim/lib/kbtree.h   | 431
 -rw-r--r--  src/nvim/lib/kvec.h     |  22
 -rw-r--r--  src/nvim/lib/queue.h    |  18
 -rw-r--r--  src/nvim/lib/ringbuf.h  |  27
 4 files changed, 482 insertions(+), 16 deletions(-)
diff --git a/src/nvim/lib/kbtree.h b/src/nvim/lib/kbtree.h
new file mode 100644
index 0000000000..e2688064a8
--- /dev/null
+++ b/src/nvim/lib/kbtree.h
@@ -0,0 +1,431 @@
+/*-
+ * Copyright 1997-1999, 2001, John-Mark Gurney.
+ * 2008-2009, Attractive Chaos <attractor@live.co.uk>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef NVIM_LIB_KBTREE_H
+#define NVIM_LIB_KBTREE_H
+
+#include <stdlib.h>
+#include <string.h>
+#include <stdint.h>
+
+#include "nvim/memory.h"
+
+#define KB_MAX_DEPTH 64
+
+#define __KB_KEY(type, x) (x->key)
+#define __KB_PTR(btr, x) (x->ptr)
+
+#define __KB_TREE_T(name,key_t,T) \
+ typedef struct kbnode_##name##_s kbnode_##name##_t; \
+ struct kbnode_##name##_s { \
+ int32_t n; \
+ bool is_internal; \
+ key_t key[2*T-1]; \
+ kbnode_##name##_t *ptr[]; \
+ } ; \
+ \
+ typedef struct { \
+ kbnode_##name##_t *root; \
+ int n_keys, n_nodes; \
+ } kbtree_##name##_t; \
+ \
+ typedef struct { \
+ kbnode_##name##_t *x; \
+ int i; \
+ } kbpos_##name##_t; \
+ typedef struct { \
+ kbpos_##name##_t stack[KB_MAX_DEPTH], *p; \
+ } kbitr_##name##_t; \
+
+
+#define __kb_destroy(kbnode_t,b) do { \
+ int i; \
+ unsigned int max = 8; \
+ kbnode_t *x, **top, **stack = 0; \
+ if (b->root) { \
+ top = stack = (kbnode_t**)xcalloc(max, sizeof(kbnode_t*)); \
+ *top++ = (b)->root; \
+ while (top != stack) { \
+ x = *--top; \
+ if (x->is_internal == 0) { xfree(x); continue; } \
+ for (i = 0; i <= x->n; ++i) \
+ if (__KB_PTR(b, x)[i]) { \
+ if (top - stack == (int)max) { \
+ max <<= 1; \
+ stack = (kbnode_t**)xrealloc(stack, max * sizeof(kbnode_t*)); \
+ top = stack + (max>>1); \
+ } \
+ *top++ = __KB_PTR(b, x)[i]; \
+ } \
+ xfree(x); \
+ } \
+ } \
+ xfree(stack); \
+ } while (0)
+
+#define __KB_GET_AUX1(name, key_t, kbnode_t, __cmp) \
+ static inline int __kb_getp_aux_##name(const kbnode_t * __restrict x, key_t * __restrict k, int *r) \
+ { \
+ int tr, *rr, begin = 0, end = x->n; \
+ if (x->n == 0) return -1; \
+ rr = r? r : &tr; \
+ while (begin < end) { \
+ int mid = (begin + end) >> 1; \
+ if (__cmp(__KB_KEY(key_t, x)[mid], *k) < 0) begin = mid + 1; \
+ else end = mid; \
+ } \
+ if (begin == x->n) { *rr = 1; return x->n - 1; } \
+ if ((*rr = __cmp(*k, __KB_KEY(key_t, x)[begin])) < 0) --begin; \
+ return begin; \
+ }
+
+#define __KB_GET(name, key_t, kbnode_t) \
+ static key_t *kb_getp_##name(kbtree_##name##_t *b, key_t * __restrict k) \
+ { \
+ if (!b->root) { \
+ return 0; \
+ } \
+ int i, r = 0; \
+ kbnode_t *x = b->root; \
+ while (x) { \
+ i = __kb_getp_aux_##name(x, k, &r); \
+ if (i >= 0 && r == 0) return &__KB_KEY(key_t, x)[i]; \
+ if (x->is_internal == 0) return 0; \
+ x = __KB_PTR(b, x)[i + 1]; \
+ } \
+ return 0; \
+ } \
+ static inline key_t *kb_get_##name(kbtree_##name##_t *b, key_t k) \
+ { \
+ return kb_getp_##name(b, &k); \
+ }
+
+#define __KB_INTERVAL(name, key_t, kbnode_t) \
+ static inline void kb_intervalp_##name(kbtree_##name##_t *b, key_t * __restrict k, key_t **lower, key_t **upper) \
+ { \
+ if (!b->root) { \
+ return; \
+ } \
+ int i, r = 0; \
+ kbnode_t *x = b->root; \
+ *lower = *upper = 0; \
+ while (x) { \
+ i = __kb_getp_aux_##name(x, k, &r); \
+ if (i >= 0 && r == 0) { \
+ *lower = *upper = &__KB_KEY(key_t, x)[i]; \
+ return; \
+ } \
+ if (i >= 0) *lower = &__KB_KEY(key_t, x)[i]; \
+ if (i < x->n - 1) *upper = &__KB_KEY(key_t, x)[i + 1]; \
+ if (x->is_internal == 0) return; \
+ x = __KB_PTR(b, x)[i + 1]; \
+ } \
+ } \
+ static inline void kb_interval_##name(kbtree_##name##_t *b, key_t k, key_t **lower, key_t **upper) \
+ { \
+ kb_intervalp_##name(b, &k, lower, upper); \
+ }
+
+#define __KB_PUT(name, key_t, kbnode_t, __cmp, T, ILEN) \
+ /* x must be an internal node */ \
+ static inline void __kb_split_##name(kbtree_##name##_t *b, kbnode_t *x, int i, kbnode_t *y) \
+ { \
+ kbnode_t *z; \
+ z = (kbnode_t*)xcalloc(1, y->is_internal? ILEN : sizeof(kbnode_##name##_t)); \
+ ++b->n_nodes; \
+ z->is_internal = y->is_internal; \
+ z->n = T - 1; \
+ memcpy(__KB_KEY(key_t, z), &__KB_KEY(key_t, y)[T], sizeof(key_t) * (T - 1)); \
+ if (y->is_internal) memcpy(__KB_PTR(b, z), &__KB_PTR(b, y)[T], sizeof(void*) * T); \
+ y->n = T - 1; \
+ memmove(&__KB_PTR(b, x)[i + 2], &__KB_PTR(b, x)[i + 1], sizeof(void*) * (unsigned int)(x->n - i)); \
+ __KB_PTR(b, x)[i + 1] = z; \
+ memmove(&__KB_KEY(key_t, x)[i + 1], &__KB_KEY(key_t, x)[i], sizeof(key_t) * (unsigned int)(x->n - i)); \
+ __KB_KEY(key_t, x)[i] = __KB_KEY(key_t, y)[T - 1]; \
+ ++x->n; \
+ } \
+ static inline key_t *__kb_putp_aux_##name(kbtree_##name##_t *b, kbnode_t *x, key_t * __restrict k) \
+ { \
+ int i = x->n - 1; \
+ key_t *ret; \
+ if (x->is_internal == 0) { \
+ i = __kb_getp_aux_##name(x, k, 0); \
+ if (i != x->n - 1) \
+ memmove(&__KB_KEY(key_t, x)[i + 2], &__KB_KEY(key_t, x)[i + 1], (unsigned int)(x->n - i - 1) * sizeof(key_t)); \
+ ret = &__KB_KEY(key_t, x)[i + 1]; \
+ *ret = *k; \
+ ++x->n; \
+ } else { \
+ i = __kb_getp_aux_##name(x, k, 0) + 1; \
+ if (__KB_PTR(b, x)[i]->n == 2 * T - 1) { \
+ __kb_split_##name(b, x, i, __KB_PTR(b, x)[i]); \
+ if (__cmp(*k, __KB_KEY(key_t, x)[i]) > 0) ++i; \
+ } \
+ ret = __kb_putp_aux_##name(b, __KB_PTR(b, x)[i], k); \
+ } \
+ return ret; \
+ } \
+ static inline key_t *kb_putp_##name(kbtree_##name##_t *b, key_t * __restrict k) \
+ { \
+ if (!b->root) { \
+ b->root = (kbnode_t*)xcalloc(1, ILEN); \
+ ++b->n_nodes; \
+ } \
+ kbnode_t *r, *s; \
+ ++b->n_keys; \
+ r = b->root; \
+ if (r->n == 2 * T - 1) { \
+ ++b->n_nodes; \
+ s = (kbnode_t*)xcalloc(1, ILEN); \
+ b->root = s; s->is_internal = 1; s->n = 0; \
+ __KB_PTR(b, s)[0] = r; \
+ __kb_split_##name(b, s, 0, r); \
+ r = s; \
+ } \
+ return __kb_putp_aux_##name(b, r, k); \
+ } \
+ static inline void kb_put_##name(kbtree_##name##_t *b, key_t k) \
+ { \
+ kb_putp_##name(b, &k); \
+ }
+
+
+#define __KB_DEL(name, key_t, kbnode_t, T) \
+ static inline key_t __kb_delp_aux_##name(kbtree_##name##_t *b, kbnode_t *x, key_t * __restrict k, int s) \
+ { \
+ int yn, zn, i, r = 0; \
+ kbnode_t *xp, *y, *z; \
+ key_t kp; \
+ if (x == 0) return *k; \
+ if (s) { /* s can only be 0, 1 or 2 */ \
+ r = x->is_internal == 0? 0 : s == 1? 1 : -1; \
+ i = s == 1? x->n - 1 : -1; \
+ } else i = __kb_getp_aux_##name(x, k, &r); \
+ if (x->is_internal == 0) { \
+ if (s == 2) ++i; \
+ kp = __KB_KEY(key_t, x)[i]; \
+ memmove(&__KB_KEY(key_t, x)[i], &__KB_KEY(key_t, x)[i + 1], (unsigned int)(x->n - i - 1) * sizeof(key_t)); \
+ --x->n; \
+ return kp; \
+ } \
+ if (r == 0) { \
+ if ((yn = __KB_PTR(b, x)[i]->n) >= T) { \
+ xp = __KB_PTR(b, x)[i]; \
+ kp = __KB_KEY(key_t, x)[i]; \
+ __KB_KEY(key_t, x)[i] = __kb_delp_aux_##name(b, xp, 0, 1); \
+ return kp; \
+ } else if ((zn = __KB_PTR(b, x)[i + 1]->n) >= T) { \
+ xp = __KB_PTR(b, x)[i + 1]; \
+ kp = __KB_KEY(key_t, x)[i]; \
+ __KB_KEY(key_t, x)[i] = __kb_delp_aux_##name(b, xp, 0, 2); \
+ return kp; \
+ } else if (yn == T - 1 && zn == T - 1) { \
+ y = __KB_PTR(b, x)[i]; z = __KB_PTR(b, x)[i + 1]; \
+ __KB_KEY(key_t, y)[y->n++] = *k; \
+ memmove(&__KB_KEY(key_t, y)[y->n], __KB_KEY(key_t, z), (unsigned int)z->n * sizeof(key_t)); \
+ if (y->is_internal) memmove(&__KB_PTR(b, y)[y->n], __KB_PTR(b, z), (unsigned int)(z->n + 1) * sizeof(void*)); \
+ y->n += z->n; \
+ memmove(&__KB_KEY(key_t, x)[i], &__KB_KEY(key_t, x)[i + 1], (unsigned int)(x->n - i - 1) * sizeof(key_t)); \
+ memmove(&__KB_PTR(b, x)[i + 1], &__KB_PTR(b, x)[i + 2], (unsigned int)(x->n - i - 1) * sizeof(void*)); \
+ --x->n; \
+ xfree(z); \
+ return __kb_delp_aux_##name(b, y, k, s); \
+ } \
+ } \
+ ++i; \
+ if ((xp = __KB_PTR(b, x)[i])->n == T - 1) { \
+ if (i > 0 && (y = __KB_PTR(b, x)[i - 1])->n >= T) { \
+ memmove(&__KB_KEY(key_t, xp)[1], __KB_KEY(key_t, xp), (unsigned int)xp->n * sizeof(key_t)); \
+ if (xp->is_internal) memmove(&__KB_PTR(b, xp)[1], __KB_PTR(b, xp), (unsigned int)(xp->n + 1) * sizeof(void*)); \
+ __KB_KEY(key_t, xp)[0] = __KB_KEY(key_t, x)[i - 1]; \
+ __KB_KEY(key_t, x)[i - 1] = __KB_KEY(key_t, y)[y->n - 1]; \
+ if (xp->is_internal) __KB_PTR(b, xp)[0] = __KB_PTR(b, y)[y->n]; \
+ --y->n; ++xp->n; \
+ } else if (i < x->n && (y = __KB_PTR(b, x)[i + 1])->n >= T) { \
+ __KB_KEY(key_t, xp)[xp->n++] = __KB_KEY(key_t, x)[i]; \
+ __KB_KEY(key_t, x)[i] = __KB_KEY(key_t, y)[0]; \
+ if (xp->is_internal) __KB_PTR(b, xp)[xp->n] = __KB_PTR(b, y)[0]; \
+ --y->n; \
+ memmove(__KB_KEY(key_t, y), &__KB_KEY(key_t, y)[1], (unsigned int)y->n * sizeof(key_t)); \
+ if (y->is_internal) memmove(__KB_PTR(b, y), &__KB_PTR(b, y)[1], (unsigned int)(y->n + 1) * sizeof(void*)); \
+ } else if (i > 0 && (y = __KB_PTR(b, x)[i - 1])->n == T - 1) { \
+ __KB_KEY(key_t, y)[y->n++] = __KB_KEY(key_t, x)[i - 1]; \
+ memmove(&__KB_KEY(key_t, y)[y->n], __KB_KEY(key_t, xp), (unsigned int)xp->n * sizeof(key_t)); \
+ if (y->is_internal) memmove(&__KB_PTR(b, y)[y->n], __KB_PTR(b, xp), (unsigned int)(xp->n + 1) * sizeof(void*)); \
+ y->n += xp->n; \
+ memmove(&__KB_KEY(key_t, x)[i - 1], &__KB_KEY(key_t, x)[i], (unsigned int)(x->n - i) * sizeof(key_t)); \
+ memmove(&__KB_PTR(b, x)[i], &__KB_PTR(b, x)[i + 1], (unsigned int)(x->n - i) * sizeof(void*)); \
+ --x->n; \
+ xfree(xp); \
+ xp = y; \
+ } else if (i < x->n && (y = __KB_PTR(b, x)[i + 1])->n == T - 1) { \
+ __KB_KEY(key_t, xp)[xp->n++] = __KB_KEY(key_t, x)[i]; \
+ memmove(&__KB_KEY(key_t, xp)[xp->n], __KB_KEY(key_t, y), (unsigned int)y->n * sizeof(key_t)); \
+ if (xp->is_internal) memmove(&__KB_PTR(b, xp)[xp->n], __KB_PTR(b, y), (unsigned int)(y->n + 1) * sizeof(void*)); \
+ xp->n += y->n; \
+ memmove(&__KB_KEY(key_t, x)[i], &__KB_KEY(key_t, x)[i + 1], (unsigned int)(x->n - i - 1) * sizeof(key_t)); \
+ memmove(&__KB_PTR(b, x)[i + 1], &__KB_PTR(b, x)[i + 2], (unsigned int)(x->n - i - 1) * sizeof(void*)); \
+ --x->n; \
+ xfree(y); \
+ } \
+ } \
+ return __kb_delp_aux_##name(b, xp, k, s); \
+ } \
+ static inline key_t kb_delp_##name(kbtree_##name##_t *b, key_t * __restrict k) \
+ { \
+ kbnode_t *x; \
+ key_t ret; \
+ ret = __kb_delp_aux_##name(b, b->root, k, 0); \
+ --b->n_keys; \
+ if (b->root->n == 0 && b->root->is_internal) { \
+ --b->n_nodes; \
+ x = b->root; \
+ b->root = __KB_PTR(b, x)[0]; \
+ xfree(x); \
+ } \
+ return ret; \
+ } \
+ static inline key_t kb_del_##name(kbtree_##name##_t *b, key_t k) \
+ { \
+ return kb_delp_##name(b, &k); \
+ }
+
+#define __KB_ITR(name, key_t, kbnode_t) \
+ static inline void kb_itr_first_##name(kbtree_##name##_t *b, kbitr_##name##_t *itr) \
+ { \
+ itr->p = 0; \
+ if (b->n_keys == 0) return; \
+ itr->p = itr->stack; \
+ itr->p->x = b->root; itr->p->i = 0; \
+ while (itr->p->x->is_internal && __KB_PTR(b, itr->p->x)[0] != 0) { \
+ kbnode_t *x = itr->p->x; \
+ ++itr->p; \
+ itr->p->x = __KB_PTR(b, x)[0]; itr->p->i = 0; \
+ } \
+ } \
+ static inline int kb_itr_next_##name(kbtree_##name##_t *b, kbitr_##name##_t *itr) \
+ { \
+ if (itr->p < itr->stack) return 0; \
+ for (;;) { \
+ ++itr->p->i; \
+ while (itr->p->x && itr->p->i <= itr->p->x->n) { \
+ itr->p[1].i = 0; \
+ itr->p[1].x = itr->p->x->is_internal? __KB_PTR(b, itr->p->x)[itr->p->i] : 0; \
+ ++itr->p; \
+ } \
+ --itr->p; \
+ if (itr->p < itr->stack) return 0; \
+ if (itr->p->x && itr->p->i < itr->p->x->n) return 1; \
+ } \
+ } \
+ static inline int kb_itr_prev_##name(kbtree_##name##_t *b, kbitr_##name##_t *itr) \
+ { \
+ if (itr->p < itr->stack) return 0; \
+ for (;;) { \
+ while (itr->p->x && itr->p->i >= 0) { \
+ itr->p[1].x = itr->p->x->is_internal? __KB_PTR(b, itr->p->x)[itr->p->i] : 0; \
+ itr->p[1].i = itr->p[1].x ? itr->p[1].x->n : -1; \
+ ++itr->p; \
+ } \
+ --itr->p; \
+ if (itr->p < itr->stack) return 0; \
+ --itr->p->i; \
+ if (itr->p->x && itr->p->i >= 0) return 1; \
+ } \
+ } \
+ static inline int kb_itr_getp_##name(kbtree_##name##_t *b, key_t * __restrict k, kbitr_##name##_t *itr) \
+ { \
+ if (b->n_keys == 0) { \
+ itr->p = NULL; \
+ return 0; \
+ } \
+ int i, r = 0; \
+ itr->p = itr->stack; \
+ itr->p->x = b->root; \
+ while (itr->p->x) { \
+ i = __kb_getp_aux_##name(itr->p->x, k, &r); \
+ itr->p->i = i; \
+ if (i >= 0 && r == 0) return 1; \
+ ++itr->p->i; \
+ itr->p[1].x = itr->p->x->is_internal? __KB_PTR(b, itr->p->x)[i + 1] : 0; \
+ ++itr->p; \
+ } \
+ return 0; \
+ } \
+ static inline int kb_itr_get_##name(kbtree_##name##_t *b, key_t k, kbitr_##name##_t *itr) \
+ { \
+ return kb_itr_getp_##name(b,&k,itr); \
+ } \
+ static inline void kb_del_itr_##name(kbtree_##name##_t *b, kbitr_##name##_t *itr) \
+ { \
+ key_t k = kb_itr_key(itr); \
+ kb_delp_##name(b, &k); \
+ kb_itr_getp_##name(b, &k, itr); \
+ }
+
+#define KBTREE_INIT(name, key_t, __cmp, T) \
+ KBTREE_INIT_IMPL(name, key_t, kbnode_##name##_t, __cmp, T, (sizeof(kbnode_##name##_t)+(2*T)*sizeof(void *)))
+
+#define KBTREE_INIT_IMPL(name, key_t, kbnode_t, __cmp, T, ILEN) \
+ __KB_TREE_T(name, key_t, T) \
+ __KB_GET_AUX1(name, key_t, kbnode_t, __cmp) \
+ __KB_GET(name, key_t, kbnode_t) \
+ __KB_INTERVAL(name, key_t, kbnode_t) \
+ __KB_PUT(name, key_t, kbnode_t, __cmp, T, ILEN) \
+ __KB_DEL(name, key_t, kbnode_t, T) \
+ __KB_ITR(name, key_t, kbnode_t)
+
+#define KB_DEFAULT_SIZE 512
+
+#define kbtree_t(name) kbtree_##name##_t
+#define kbitr_t(name) kbitr_##name##_t
+#define kb_init(b) ((b)->n_keys = (b)->n_nodes = 0, (b)->root = 0)
+#define kb_destroy(name, b) __kb_destroy(kbnode_##name##_t, b)
+#define kb_get(name, b, k) kb_get_##name(b, k)
+#define kb_put(name, b, k) kb_put_##name(b, k)
+#define kb_del(name, b, k) kb_del_##name(b, k)
+#define kb_interval(name, b, k, l, u) kb_interval_##name(b, k, l, u)
+#define kb_getp(name, b, k) kb_getp_##name(b, k)
+#define kb_putp(name, b, k) kb_putp_##name(b, k)
+#define kb_delp(name, b, k) kb_delp_##name(b, k)
+#define kb_intervalp(name, b, k, l, u) kb_intervalp_##name(b, k, l, u)
+
+#define kb_itr_first(name, b, i) kb_itr_first_##name(b, i)
+#define kb_itr_get(name, b, k, i) kb_itr_get_##name(b, k, i)
+#define kb_itr_getp(name, b, k, i) kb_itr_getp_##name(b, k, i)
+#define kb_itr_next(name, b, i) kb_itr_next_##name(b, i)
+#define kb_itr_prev(name, b, i) kb_itr_prev_##name(b, i)
+#define kb_del_itr(name, b, i) kb_del_itr_##name(b, i)
+#define kb_itr_key(itr) __KB_KEY(dummy, (itr)->p->x)[(itr)->p->i]
+#define kb_itr_valid(itr) ((itr)->p >= (itr)->stack)
+
+#define kb_size(b) ((b)->n_keys)
+
+#define kb_generic_cmp(a, b) (((b) < (a)) - ((a) < (b)))
+#define kb_str_cmp(a, b) strcmp(a, b)
+
+#endif // NVIM_LIB_KBTREE_H
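
The macros above compose into a full ordered-container API once KBTREE_INIT is
invoked. A minimal usage sketch (the instantiation name `u32`, the demo
function, and the keys are hypothetical; kb_generic_cmp and minimum degree 10
are just example choices):

#include "nvim/lib/kbtree.h"

// Generates kbtree_u32_t, kbitr_u32_t, and the kb_*_u32 functions.
// With minimum degree T = 10, a node holds at most 2*10-1 = 19 keys.
KBTREE_INIT(u32, uint32_t, kb_generic_cmp, 10)

static void u32_tree_demo(void)
{
  kbtree_t(u32) tree;
  kb_init(&tree);

  kb_put(u32, &tree, 42);
  kb_put(u32, &tree, 7);
  kb_put(u32, &tree, 19);

  uint32_t *found = kb_get(u32, &tree, 19);   // NULL when the key is absent
  (void)found;

  // Nearest neighbours of 20: *lower is the largest key <= 20, *upper the
  // smallest key >= 20; either pointer may remain NULL.
  uint32_t *lower = NULL, *upper = NULL;
  kb_interval(u32, &tree, 20, &lower, &upper);

  // In-order traversal via the iterator's explicit stack.
  kbitr_t(u32) itr;
  kb_itr_first(u32, &tree, &itr);
  for (; kb_itr_valid(&itr); kb_itr_next(u32, &tree, &itr)) {
    uint32_t key = kb_itr_key(&itr);
    (void)key;
  }

  kb_del(u32, &tree, 7);     // returns the removed key; kb_size is now 2
  kb_destroy(u32, &tree);    // frees every node (plain keys need no cleanup)
}

Note the allocation asymmetry visible in KBTREE_INIT: internal nodes are
allocated with ILEN bytes, leaving room for 2*T child pointers behind the
flexible `ptr[]` member, while leaves are allocated at sizeof(kbnode_t) with
no pointer array at all.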
diff --git a/src/nvim/lib/kvec.h b/src/nvim/lib/kvec.h
index 584282d773..93b2f053bc 100644
--- a/src/nvim/lib/kvec.h
+++ b/src/nvim/lib/kvec.h
@@ -41,6 +41,7 @@
#include <string.h>
#include "nvim/memory.h"
+#include "nvim/os/os_defs.h"
#define kv_roundup32(x) \
((--(x)), \
@@ -62,7 +63,16 @@
#define kv_pop(v) ((v).items[--(v).size])
#define kv_size(v) ((v).size)
#define kv_max(v) ((v).capacity)
-#define kv_last(v) kv_A(v, kv_size(v) - 1)
+#define kv_Z(v, i) kv_A(v, kv_size(v) - (i) - 1)
+#define kv_last(v) kv_Z(v, 0)
+
+/// Drop last n items from kvec without resizing
+///
+/// Previously spelled as `(void)kv_pop(v)`, repeated n times.
+///
+/// @param[out] v Kvec to drop items from.
+/// @param[in] n Number of elements to drop.
+#define kv_drop(v, n) ((v).size -= (n))
#define kv_resize(v, s) \
((v).capacity = (s), \
@@ -88,14 +98,14 @@
(*kv_pushp(v) = (x))
#define kv_a(v, i) \
- (((v).capacity <= (size_t) (i) \
+ (*(((v).capacity <= (size_t) (i) \
? ((v).capacity = (v).size = (i) + 1, \
kv_roundup32((v).capacity), \
- kv_resize((v), (v).capacity), 0) \
+ kv_resize((v), (v).capacity), 0UL) \
: ((v).size <= (size_t) (i) \
? (v).size = (i) + 1 \
- : 0)), \
- (v).items[(i)])
+ : 0UL)), \
+ &(v).items[(i)]))
/// Type of a vector with a few first members allocated on stack
///
@@ -132,6 +142,8 @@ static inline void *_memcpy_free(void *const restrict dest,
return dest;
}
+// -V:kvi_push:512
+
/// Resize vector with preallocated array
///
/// @note May not resize to an array smaller than init_array: if requested,
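
A short sketch of the new kvec macros in use (the demo function and values are
hypothetical; kv_init, kv_push, and kv_destroy come from parts of kvec.h
outside this diff):

#include "nvim/lib/kvec.h"

static void kvec_demo(void)
{
  kvec_t(int) v;
  kv_init(v);

  for (int i = 0; i < 5; i++) {
    kv_push(v, i * 10);           // v: 0, 10, 20, 30, 40
  }

  int last = kv_Z(v, 0);          // 40, by definition equal to kv_last(v)
  int penultimate = kv_Z(v, 1);   // 30: kv_Z indexes from the end
  (void)last; (void)penultimate;

  kv_drop(v, 2);                  // size 5 -> 3 in O(1); capacity untouched

  // kv_a now expands to a dereferenced element pointer, so it is an lvalue
  // and can be assigned to directly. It grows the vector on demand; slots
  // between the old size and the new index stay uninitialized.
  kv_a(v, 7) = 99;                // size becomes 8

  kv_destroy(v);
}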
diff --git a/src/nvim/lib/queue.h b/src/nvim/lib/queue.h
index 9fcedf298f..ab9270081e 100644
--- a/src/nvim/lib/queue.h
+++ b/src/nvim/lib/queue.h
@@ -1,3 +1,8 @@
+// Queue implemented by circularly-linked list.
+//
+// Adapted from libuv. Simpler and more efficient than klist.h for implementing
+// queues that support arbitrary insertion/removal.
+//
// Copyright (c) 2013, Ben Noordhuis <info@bnoordhuis.nl>
//
// Permission to use, copy, modify, and/or distribute this software for any
@@ -28,6 +33,8 @@ typedef struct _queue {
#define QUEUE_DATA(ptr, type, field) \
((type *)((char *)(ptr) - offsetof(type, field)))
+// Important note: mutating the list while QUEUE_FOREACH is
+// iterating over its elements results in undefined behavior.
#define QUEUE_FOREACH(q, h) \
for ( /* NOLINT(readability/braces) */ \
(q) = (h)->next; (q) != (h); (q) = (q)->next)
@@ -56,17 +63,6 @@ static inline void QUEUE_ADD(QUEUE *const h, QUEUE *const n)
h->prev->next = h;
}
-static inline void QUEUE_SPLIT(QUEUE *const h, QUEUE *const q, QUEUE *const n)
- FUNC_ATTR_ALWAYS_INLINE
-{
- n->prev = h->prev;
- n->prev->next = n;
- n->next = q;
- h->prev = q->prev;
- h->prev->next = h;
- q->prev = n;
-}
-
static inline void QUEUE_INSERT_HEAD(QUEUE *const h, QUEUE *const q)
FUNC_ATTR_ALWAYS_INLINE
{
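
The intrusive-node pattern is easiest to see in use. A minimal sketch (the
Item type is hypothetical, and QUEUE_INIT, QUEUE_INSERT_TAIL, and QUEUE_REMOVE
come from parts of this header outside the hunk):

#include "nvim/lib/queue.h"

typedef struct item {
  int value;
  QUEUE node;   // intrusive link; QUEUE_DATA recovers the enclosing Item
} Item;

static void queue_demo(void)
{
  QUEUE head;
  QUEUE_INIT(&head);

  Item a = { .value = 1 }, b = { .value = 2 };
  QUEUE_INSERT_TAIL(&head, &a.node);
  QUEUE_INSERT_TAIL(&head, &b.node);

  QUEUE *q;
  QUEUE_FOREACH(q, &head) {
    Item *it = QUEUE_DATA(q, Item, node);
    (void)it->value;
    // Per the note above: no insertion or removal while iterating.
  }

  QUEUE_REMOVE(&a.node);   // mutation outside the loop is fine
}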
diff --git a/src/nvim/lib/ringbuf.h b/src/nvim/lib/ringbuf.h
index 12b75ec65a..e63eae70b0 100644
--- a/src/nvim/lib/ringbuf.h
+++ b/src/nvim/lib/ringbuf.h
@@ -15,6 +15,7 @@
#ifndef NVIM_LIB_RINGBUF_H
#define NVIM_LIB_RINGBUF_H
+#include <stddef.h>
#include <string.h>
#include <assert.h>
#include <stdint.h>
@@ -73,6 +74,32 @@ typedef struct { \
RBType *buf_end; \
} TypeName##RingBuffer;
+/// Dummy item free macro, for use in RINGBUF_INIT
+///
+/// This macro does nothing.
+///
+/// @param[in] item Item to be freed.
+#define RINGBUF_DUMMY_FREE(item)
+
+/// Static ring buffer
+///
+/// @warning Ring buffers created with this macro must neither be freed nor
+/// deallocated.
+///
+/// @param scope Ring buffer scope.
+/// @param TypeName Ring buffer type name.
+/// @param RBType Type of the single ring buffer element.
+/// @param varname Variable name.
+/// @param rbsize Ring buffer size.
+#define RINGBUF_STATIC(scope, TypeName, RBType, varname, rbsize) \
+static RBType _##varname##_buf[rbsize]; \
+scope TypeName##RingBuffer varname = { \
+ .buf = _##varname##_buf, \
+ .next = _##varname##_buf, \
+ .first = NULL, \
+ .buf_end = _##varname##_buf + rbsize - 1, \
+};
+
/// Initialize a new ring buffer
///
/// @param TypeName Ring buffer type name. Actual type name will be
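
A rough sketch of RINGBUF_STATIC in use (the Event type and names are
hypothetical; RINGBUF_TYPEDEF, defined earlier in this header, declares the
EventRingBuffer struct whose fields the static initializer above fills in):

#include "nvim/lib/ringbuf.h"

typedef struct { int code; } Event;   // hypothetical element type

// Declares the EventRingBuffer struct used below.
RINGBUF_TYPEDEF(Event, Event)

// File-scope ring buffer backed by a static 32-element array. Per the
// warning above it must never be freed or deallocated; `.first == NULL`
// marks the buffer as empty.
RINGBUF_STATIC(static, Event, Event, event_rb, 32)

RINGBUF_DUMMY_FREE pairs with RINGBUF_INIT for value types like this one,
which own no resources and therefore need no per-item cleanup.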