aboutsummaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
Diffstat (limited to 'src')
-rw-r--r--  src/klib/kvec.h              |   23
-rw-r--r--  src/nvim/README.md           |    3
-rw-r--r--  src/nvim/api/autocmd.c       |   73
-rw-r--r--  src/nvim/api/buffer.c        |   99
-rw-r--r--  src/nvim/api/command.c       |   11
-rw-r--r--  src/nvim/api/extmark.c       |  139
-rw-r--r--  src/nvim/api/keysets.h       |   10
-rw-r--r--  src/nvim/api/vim.c           |   33
-rw-r--r--  src/nvim/api/win_config.c    |   19
-rw-r--r--  src/nvim/autocmd.c           |    2
-rw-r--r--  src/nvim/buffer.c            |    1
-rw-r--r--  src/nvim/bufwrite.c          |    2
-rw-r--r--  src/nvim/decoration.c        |  198
-rw-r--r--  src/nvim/decoration.h        |    2
-rw-r--r--  src/nvim/decoration_provider.c |    9
-rw-r--r--  src/nvim/drawline.c          |   84
-rw-r--r--  src/nvim/edit.c              |    5
-rw-r--r--  src/nvim/eval.lua            |    2
-rw-r--r--  src/nvim/eval/funcs.c        |    2
-rw-r--r--  src/nvim/eval/vars.c         |    2
-rw-r--r--  src/nvim/ex_cmds.c           |    4
-rw-r--r--  src/nvim/ex_getln.c          |    6
-rw-r--r--  src/nvim/extmark.c           |  119
-rw-r--r--  src/nvim/grid_defs.h         |    2
-rw-r--r--  src/nvim/highlight.c         |   64
-rw-r--r--  src/nvim/highlight_defs.h    |    1
-rw-r--r--  src/nvim/highlight_group.c   |    8
-rw-r--r--  src/nvim/keycodes.c          |    3
-rw-r--r--  src/nvim/macros.h            |    9
-rw-r--r--  src/nvim/map.c               |   13
-rw-r--r--  src/nvim/map.h               |    6
-rw-r--r--  src/nvim/marktree.c          | 1596
-rw-r--r--  src/nvim/marktree.h          |   84
-rw-r--r--  src/nvim/mbyte.c             |    2
-rw-r--r--  src/nvim/memfile.c           |  279
-rw-r--r--  src/nvim/memfile_defs.h      |   66
-rw-r--r--  src/nvim/memline.c           |  131
-rw-r--r--  src/nvim/menu.c              |    2
-rw-r--r--  src/nvim/mouse.c             |   14
-rw-r--r--  src/nvim/msgpack_rpc/channel.c |    8
-rw-r--r--  src/nvim/option_defs.h       |    1
-rw-r--r--  src/nvim/options.lua         |    8
-rw-r--r--  src/nvim/os/shell.c          |    2
-rw-r--r--  src/nvim/plines.c            |    4
-rw-r--r--  src/nvim/spell.c             |    2
-rw-r--r--  src/nvim/usercmd.c           |  105
-rw-r--r--  src/nvim/version.h           |    3
-rw-r--r--  src/nvim/window.c            |   14
48 files changed, 2174 insertions, 1101 deletions
diff --git a/src/klib/kvec.h b/src/klib/kvec.h
index f6674a0adf..fd9096e1ad 100644
--- a/src/klib/kvec.h
+++ b/src/klib/kvec.h
@@ -207,6 +207,16 @@ static inline void *_memcpy_free(void *const restrict dest, void *const restrict
/* 2^x initial array size. */ \
kvi_resize(v, (v).capacity << 1)
+/// fit at least "len" more items
+#define kvi_ensure_more_space(v, len) \
+ do { \
+ if ((v).capacity < (v).size + len) { \
+ (v).capacity = (v).size + len; \
+ kv_roundup32((v).capacity); \
+ kvi_resize((v), (v).capacity); \
+ } \
+ } while (0)
+
/// Get location where to store new element to a vector with preallocated array
///
/// @param[in,out] v Vector to push to.
@@ -223,6 +233,19 @@ static inline void *_memcpy_free(void *const restrict dest, void *const restrict
#define kvi_push(v, x) \
(*kvi_pushp(v) = (x))
+/// Copy a vector to a preallocated vector
+///
+/// @param[out] v1 destination
+/// @param[in] v0 source (can be either vector or preallocated vector)
+#define kvi_copy(v1, v0) \
+ do { \
+ if ((v1).capacity < (v0).size) { \
+ kvi_resize(v1, (v0).size); \
+ } \
+ (v1).size = (v0).size; \
+ memcpy((v1).items, (v0).items, sizeof((v1).items[0]) * (v0).size); \
+ } while (0)
+
/// Free array of elements of a vector with preallocated array if needed
///
/// @param[out] v Vector to free.
diff --git a/src/nvim/README.md b/src/nvim/README.md
index 0de0fb9a3f..4029777031 100644
--- a/src/nvim/README.md
+++ b/src/nvim/README.md
@@ -1,8 +1,7 @@
Nvim core
=========
-Module-specific details are documented at the top of each module (`terminal.c`,
-`screen.c`, …).
+Module-specific details are documented at the top of each module (`terminal.c`, `undo.c`, …).
See `:help dev` for guidelines.
diff --git a/src/nvim/api/autocmd.c b/src/nvim/api/autocmd.c
index aa0c2695ad..2e4d2a622d 100644
--- a/src/nvim/api/autocmd.c
+++ b/src/nvim/api/autocmd.c
@@ -49,19 +49,20 @@ static int64_t next_autocmd_id = 1;
/// Get all autocommands that match the corresponding {opts}.
///
/// These examples will get autocommands matching ALL the given criteria:
-/// <pre>lua
-/// -- Matches all criteria
-/// autocommands = vim.api.nvim_get_autocmds({
-/// group = "MyGroup",
-/// event = {"BufEnter", "BufWinEnter"},
-/// pattern = {"*.c", "*.h"}
-/// })
///
-/// -- All commands from one group
-/// autocommands = vim.api.nvim_get_autocmds({
-/// group = "MyGroup",
-/// })
-/// </pre>
+/// ```lua
+/// -- Matches all criteria
+/// autocommands = vim.api.nvim_get_autocmds({
+/// group = "MyGroup",
+/// event = {"BufEnter", "BufWinEnter"},
+/// pattern = {"*.c", "*.h"}
+/// })
+///
+/// -- All commands from one group
+/// autocommands = vim.api.nvim_get_autocmds({
+/// group = "MyGroup",
+/// })
+/// ```
///
/// NOTE: When multiple patterns or events are provided, it will find all the autocommands that
/// match any combination of them.
@@ -344,28 +345,31 @@ cleanup:
/// function _name_ string) or `command` (Ex command string).
///
/// Example using Lua callback:
-/// <pre>lua
-/// vim.api.nvim_create_autocmd({"BufEnter", "BufWinEnter"}, {
-/// pattern = {"*.c", "*.h"},
-/// callback = function(ev)
-/// print(string.format('event fired: \%s', vim.inspect(ev)))
-/// end
-/// })
-/// </pre>
+///
+/// ```lua
+/// vim.api.nvim_create_autocmd({"BufEnter", "BufWinEnter"}, {
+/// pattern = {"*.c", "*.h"},
+/// callback = function(ev)
+/// print(string.format('event fired: %s', vim.inspect(ev)))
+/// end
+/// })
+/// ```
///
/// Example using an Ex command as the handler:
-/// <pre>lua
-/// vim.api.nvim_create_autocmd({"BufEnter", "BufWinEnter"}, {
-/// pattern = {"*.c", "*.h"},
-/// command = "echo 'Entering a C or C++ file'",
-/// })
-/// </pre>
+///
+/// ```lua
+/// vim.api.nvim_create_autocmd({"BufEnter", "BufWinEnter"}, {
+/// pattern = {"*.c", "*.h"},
+/// command = "echo 'Entering a C or C++ file'",
+/// })
+/// ```
///
/// Note: `pattern` is NOT automatically expanded (unlike with |:autocmd|), thus names like "$HOME"
/// and "~" must be expanded explicitly:
-/// <pre>lua
-/// pattern = vim.fn.expand("~") .. "/some/path/*.py"
-/// </pre>
+///
+/// ```lua
+/// pattern = vim.fn.expand("~") .. "/some/path/*.py"
+/// ```
///
/// @param event (string|array) Event(s) that will trigger the handler (`callback` or `command`).
/// @param opts Options dict:
@@ -619,11 +623,12 @@ cleanup:
/// Create or get an autocommand group |autocmd-groups|.
///
/// To get an existing group id, do:
-/// <pre>lua
-/// local id = vim.api.nvim_create_augroup("MyGroup", {
-/// clear = false
-/// })
-/// </pre>
+///
+/// ```lua
+/// local id = vim.api.nvim_create_augroup("MyGroup", {
+/// clear = false
+/// })
+/// ```
///
/// @param name String: The name of the group
/// @param opts Dictionary Parameters
diff --git a/src/nvim/api/buffer.c b/src/nvim/api/buffer.c
index b8cb09ceb3..e8f9f809f2 100644
--- a/src/nvim/api/buffer.c
+++ b/src/nvim/api/buffer.c
@@ -85,11 +85,15 @@ Integer nvim_buf_line_count(Buffer buffer, Error *err)
///
/// Example (Lua): capture buffer updates in a global `events` variable
/// (use "vim.print(events)" to see its contents):
-/// <pre>lua
-/// events = {}
-/// vim.api.nvim_buf_attach(0, false, {
-/// on_lines=function(...) table.insert(events, {...}) end})
-/// </pre>
+///
+/// ```lua
+/// events = {}
+/// vim.api.nvim_buf_attach(0, false, {
+/// on_lines = function(...)
+/// table.insert(events, {...})
+/// end,
+/// })
+/// ```
///
/// @see |nvim_buf_detach()|
/// @see |api-buffer-updates-lua|
@@ -504,7 +508,10 @@ end:
///
/// Prefer |nvim_buf_set_lines()| if you are only adding or deleting entire lines.
///
+/// Prefer |nvim_put()| if you want to insert text at the cursor position.
+///
/// @see |nvim_buf_set_lines()|
+/// @see |nvim_put()|
///
/// @param channel_id
/// @param buffer Buffer handle, or 0 for current buffer
@@ -725,11 +732,12 @@ void nvim_buf_set_text(uint64_t channel_id, Buffer buffer, Integer start_row, In
FOR_ALL_TAB_WINDOWS(tp, win) {
if (win->w_buffer == buf) {
- // adjust cursor like an extmark ( i e it was inside last_part_len)
- if (win->w_cursor.lnum == end_row && win->w_cursor.col > end_col) {
- win->w_cursor.col -= col_extent - (colnr_T)last_item.size;
+ if (win->w_cursor.lnum >= start_row && win->w_cursor.lnum <= end_row) {
+ fix_cursor_cols(win, (linenr_T)start_row, (colnr_T)start_col, (linenr_T)end_row,
+ (colnr_T)end_col, (linenr_T)new_len, (colnr_T)last_item.size);
+ } else {
+ fix_cursor(win, (linenr_T)start_row, (linenr_T)end_row, (linenr_T)extra);
}
- fix_cursor(win, (linenr_T)start_row, (linenr_T)end_row, (linenr_T)extra);
}
}
@@ -1339,6 +1347,79 @@ static void fix_cursor(win_T *win, linenr_T lo, linenr_T hi, linenr_T extra)
invalidate_botline(win);
}
+/// Fix cursor position after replacing text
+/// between (start_row, start_col) and (end_row, end_col).
+///
+/// win->w_cursor.lnum is assumed to be >= start_row and <= end_row.
+static void fix_cursor_cols(win_T *win, linenr_T start_row, colnr_T start_col, linenr_T end_row,
+ colnr_T end_col, linenr_T new_rows, colnr_T new_cols_at_end_row)
+{
+ colnr_T mode_col_adj = win == curwin && (State & MODE_INSERT) ? 0 : 1;
+
+ colnr_T end_row_change_start = new_rows == 1 ? start_col : 0;
+ colnr_T end_row_change_end = end_row_change_start + new_cols_at_end_row;
+
+ // check if cursor is after replaced range or not
+ if (win->w_cursor.lnum == end_row && win->w_cursor.col + mode_col_adj > end_col) {
+ // if cursor is after replaced range, it's shifted
+    // to keep its position the same, relative to end_col
+
+ linenr_T old_rows = end_row - start_row + 1;
+ win->w_cursor.lnum += new_rows - old_rows;
+ win->w_cursor.col += end_row_change_end - end_col;
+ } else {
+ // if cursor is inside replaced range
+ // and the new range got smaller,
+ // it's shifted to keep it inside the new range
+ //
+    // if cursor is before range or range did not
+    // get smaller, position is not changed
+
+ colnr_T old_coladd = win->w_cursor.coladd;
+
+ // it's easier to work with a single value here.
+ // col and coladd are fixed by a later call
+ // to check_cursor_col_win when necessary
+ win->w_cursor.col += win->w_cursor.coladd;
+ win->w_cursor.coladd = 0;
+
+ linenr_T new_end_row = start_row + new_rows - 1;
+
+ // make sure cursor row is in the new row range
+ if (win->w_cursor.lnum > new_end_row) {
+ win->w_cursor.lnum = new_end_row;
+
+ // don't simply move cursor up, but to the end
+ // of new_end_row, if it's not at or after
+ // it already (in case virtualedit is active)
+ // column might be additionally adjusted below
+ // to keep it inside col range if needed
+ colnr_T len = (colnr_T)strlen(ml_get_buf(win->w_buffer, new_end_row));
+ if (win->w_cursor.col < len) {
+ win->w_cursor.col = len;
+ }
+ }
+
+ // if cursor is at the last row and
+ // it wasn't after eol before, move it exactly
+ // to end_row_change_end
+ if (win->w_cursor.lnum == new_end_row
+ && win->w_cursor.col > end_row_change_end && old_coladd == 0) {
+ win->w_cursor.col = end_row_change_end;
+
+ // make sure cursor is inside range, not after it,
+ // except when doing so would move it before new range
+ if (win->w_cursor.col - mode_col_adj >= end_row_change_start) {
+ win->w_cursor.col -= mode_col_adj;
+ }
+ }
+ }
+
+ check_cursor_col_win(win);
+ changed_cline_bef_curs(win);
+ invalidate_botline(win);
+}
+
/// Initialise a string array either:
/// - on the Lua stack (as a table) (if lstate is not NULL)
/// - as an API array object (if lstate is NULL).
diff --git a/src/nvim/api/command.c b/src/nvim/api/command.c
index 2b09cfc4b2..808d4e0b8d 100644
--- a/src/nvim/api/command.c
+++ b/src/nvim/api/command.c
@@ -860,11 +860,12 @@ static void build_cmdline_str(char **cmdlinep, exarg_T *eap, CmdParseInfo *cmdin
/// For Lua usage see |lua-guide-commands-create|.
///
/// Example:
-/// <pre>vim
-/// :call nvim_create_user_command('SayHello', 'echo "Hello world!"', {'bang': v:true})
-/// :SayHello
-/// Hello world!
-/// </pre>
+///
+/// ```vim
+/// :call nvim_create_user_command('SayHello', 'echo "Hello world!"', {'bang': v:true})
+/// :SayHello
+/// Hello world!
+/// ```
///
/// @param name Name of the new user command. Must begin with an uppercase letter.
/// @param command Replacement command to execute when this user command is executed. When called
diff --git a/src/nvim/api/extmark.c b/src/nvim/api/extmark.c
index 6ec1fc4ee0..05f62f6c7c 100644
--- a/src/nvim/api/extmark.c
+++ b/src/nvim/api/extmark.c
@@ -300,29 +300,35 @@ ArrayOf(Integer) nvim_buf_get_extmark_by_id(Buffer buffer, Integer ns_id,
/// Region can be given as (row,col) tuples, or valid extmark ids (whose
/// positions define the bounds). 0 and -1 are understood as (0,0) and (-1,-1)
/// respectively, thus the following are equivalent:
-/// <pre>lua
-/// vim.api.nvim_buf_get_extmarks(0, my_ns, 0, -1, {})
-/// vim.api.nvim_buf_get_extmarks(0, my_ns, {0,0}, {-1,-1}, {})
-/// </pre>
+///
+/// ```lua
+/// vim.api.nvim_buf_get_extmarks(0, my_ns, 0, -1, {})
+/// vim.api.nvim_buf_get_extmarks(0, my_ns, {0,0}, {-1,-1}, {})
+/// ```
///
/// If `end` is less than `start`, traversal works backwards. (Useful
/// with `limit`, to get the first marks prior to a given position.)
///
+/// Note: when using extmark ranges (marks with an end_row/end_col position)
+/// the `overlap` option might be useful. Otherwise only the start position
+/// of an extmark will be considered.
+///
/// Example:
-/// <pre>lua
-/// local api = vim.api
-/// local pos = api.nvim_win_get_cursor(0)
-/// local ns = api.nvim_create_namespace('my-plugin')
-/// -- Create new extmark at line 1, column 1.
-/// local m1 = api.nvim_buf_set_extmark(0, ns, 0, 0, {})
-/// -- Create new extmark at line 3, column 1.
-/// local m2 = api.nvim_buf_set_extmark(0, ns, 2, 0, {})
-/// -- Get extmarks only from line 3.
-/// local ms = api.nvim_buf_get_extmarks(0, ns, {2,0}, {2,0}, {})
-/// -- Get all marks in this buffer + namespace.
-/// local all = api.nvim_buf_get_extmarks(0, ns, 0, -1, {})
-/// vim.print(ms)
-/// </pre>
+///
+/// ```lua
+/// local api = vim.api
+/// local pos = api.nvim_win_get_cursor(0)
+/// local ns = api.nvim_create_namespace('my-plugin')
+/// -- Create new extmark at line 1, column 1.
+/// local m1 = api.nvim_buf_set_extmark(0, ns, 0, 0, {})
+/// -- Create new extmark at line 3, column 1.
+/// local m2 = api.nvim_buf_set_extmark(0, ns, 2, 0, {})
+/// -- Get extmarks only from line 3.
+/// local ms = api.nvim_buf_get_extmarks(0, ns, {2,0}, {2,0}, {})
+/// -- Get all marks in this buffer + namespace.
+/// local all = api.nvim_buf_get_extmarks(0, ns, 0, -1, {})
+/// vim.print(ms)
+/// ```
///
/// @param buffer Buffer handle, or 0 for current buffer
/// @param ns_id Namespace id from |nvim_create_namespace()| or -1 for all namespaces
@@ -334,11 +340,13 @@ ArrayOf(Integer) nvim_buf_get_extmark_by_id(Buffer buffer, Integer ns_id,
/// - limit: Maximum number of marks to return
/// - details: Whether to include the details dict
/// - hl_name: Whether to include highlight group name instead of id, true if omitted
+/// - overlap: Also include marks which overlap the range, even if
+/// their start position is less than `start`
/// - type: Filter marks by type: "highlight", "sign", "virt_text" and "virt_lines"
/// @param[out] err Error details, if any
/// @return List of [extmark_id, row, col] tuples in "traversal order".
-Array nvim_buf_get_extmarks(Buffer buffer, Integer ns_id, Object start, Object end, Dictionary opts,
- Error *err)
+Array nvim_buf_get_extmarks(Buffer buffer, Integer ns_id, Object start, Object end,
+ Dict(get_extmarks) *opts, Error *err)
FUNC_API_SINCE(7)
{
Array rv = ARRAY_DICT_INIT;
@@ -348,63 +356,32 @@ Array nvim_buf_get_extmarks(Buffer buffer, Integer ns_id, Object start, Object e
return rv;
}
- bool all_ns;
- if (ns_id == -1) {
- all_ns = true;
- } else {
- VALIDATE_INT(ns_initialized((uint32_t)ns_id), "ns_id", ns_id, {
- return rv;
- });
- all_ns = false;
- }
+ VALIDATE_INT(ns_id == -1 || ns_initialized((uint32_t)ns_id), "ns_id", ns_id, {
+ return rv;
+ });
- Integer limit = -1;
- bool details = false;
- bool hl_name = true;
- ExtmarkType type = kExtmarkNone;
+ bool details = opts->details;
+ bool hl_name = GET_BOOL_OR_TRUE(opts, get_extmarks, hl_name);
- for (size_t i = 0; i < opts.size; i++) {
- String k = opts.items[i].key;
- Object *v = &opts.items[i].value;
- if (strequal("limit", k.data)) {
- VALIDATE_T("limit", kObjectTypeInteger, v->type, {
- return rv;
- });
- limit = v->data.integer;
- } else if (strequal("details", k.data)) {
- details = api_object_to_bool(*v, "details", false, err);
- if (ERROR_SET(err)) {
- return rv;
- }
- } else if (strequal("hl_name", k.data)) {
- hl_name = api_object_to_bool(*v, "hl_name", false, err);
- if (ERROR_SET(err)) {
- return rv;
- }
- } else if (strequal("type", k.data)) {
- VALIDATE_EXP(v->type == kObjectTypeString, "type", "String", api_typename(v->type), {
- return rv;
- });
- if (strequal(v->data.string.data, "sign")) {
- type = kExtmarkSign;
- } else if (strequal(v->data.string.data, "virt_text")) {
- type = kExtmarkVirtText;
- } else if (strequal(v->data.string.data, "virt_lines")) {
- type = kExtmarkVirtLines;
- } else if (strequal(v->data.string.data, "highlight")) {
- type = kExtmarkHighlight;
- } else {
- VALIDATE_EXP(false, "type", "sign, virt_text, virt_lines or highlight", v->data.string.data, {
- return rv;
- });
- }
+ ExtmarkType type = kExtmarkNone;
+ if (HAS_KEY(opts, get_extmarks, type)) {
+ if (strequal(opts->type.data, "sign")) {
+ type = kExtmarkSign;
+ } else if (strequal(opts->type.data, "virt_text")) {
+ type = kExtmarkVirtText;
+ } else if (strequal(opts->type.data, "virt_lines")) {
+ type = kExtmarkVirtLines;
+ } else if (strequal(opts->type.data, "highlight")) {
+ type = kExtmarkHighlight;
} else {
- VALIDATE_S(false, "'opts' key", k.data, {
+ VALIDATE_EXP(false, "type", "sign, virt_text, virt_lines or highlight", opts->type.data, {
return rv;
});
}
}
+ Integer limit = HAS_KEY(opts, get_extmarks, limit) ? opts->limit : -1;
+
if (limit == 0) {
return rv;
} else if (limit < 0) {
@@ -429,11 +406,12 @@ Array nvim_buf_get_extmarks(Buffer buffer, Integer ns_id, Object start, Object e
reverse = true;
}
+ // note: ns_id=-1 allowed, represented as UINT32_MAX
ExtmarkInfoArray marks = extmark_get(buf, (uint32_t)ns_id, l_row, l_col, u_row,
- u_col, (int64_t)limit, reverse, all_ns, type);
+ u_col, (int64_t)limit, reverse, type, opts->overlap);
for (size_t i = 0; i < kv_size(marks); i++) {
- ADD(rv, ARRAY_OBJ(extmark_to_array(&kv_A(marks, i), true, (bool)details, hl_name)));
+ ADD(rv, ARRAY_OBJ(extmark_to_array(&kv_A(marks, i), true, details, hl_name)));
}
kv_destroy(marks);
@@ -451,6 +429,11 @@ Array nvim_buf_get_extmarks(Buffer buffer, Integer ns_id, Object start, Object e
/// Using the optional arguments, it is possible to use this to highlight
/// a range of text, and also to associate virtual text to the mark.
///
+/// If present, the position defined by `end_col` and `end_row` should be after
+/// the start position in order for the extmark to cover a range.
+/// An earlier end position is not an error, but then it behaves like an empty
+/// range (no highlighting).
+///
/// @param buffer Buffer handle, or 0 for current buffer
/// @param ns_id Namespace id from |nvim_create_namespace()|
/// @param line Line where to place the mark, 0-based. |api-indexing|
@@ -1035,7 +1018,8 @@ void nvim_buf_clear_namespace(Buffer buffer, Integer ns_id, Integer line_start,
/// window callbacks)
/// ["buf", bufnr, tick]
/// - on_win: called when starting to redraw a
-/// specific window.
+/// specific window. botline_guess is an approximation
+/// that does not exceed the last line number.
/// ["win", winid, bufnr, topline, botline_guess]
/// - on_line: called for each buffer line being redrawn.
/// (The interaction with fold lines is subject to change)
@@ -1229,3 +1213,14 @@ free_exit:
clear_virttext(&virt_text);
return virt_text;
}
+
+String nvim__buf_debug_extmarks(Buffer buffer, Boolean keys, Boolean dot, Error *err)
+ FUNC_API_SINCE(7)
+{
+ buf_T *buf = find_buffer_by_handle(buffer, err);
+ if (!buf) {
+ return NULL_STRING;
+ }
+
+ return mt_inspect(buf->b_marktree, keys, dot);
+}
diff --git a/src/nvim/api/keysets.h b/src/nvim/api/keysets.h
index 1f5c7069a9..4e5e7af619 100644
--- a/src/nvim/api/keysets.h
+++ b/src/nvim/api/keysets.h
@@ -51,6 +51,15 @@ typedef struct {
} Dict(set_extmark);
typedef struct {
+ OptionalKeys is_set__get_extmarks_;
+ Integer limit;
+ Boolean details;
+ Boolean hl_name;
+ Boolean overlap;
+ String type;
+} Dict(get_extmarks);
+
+typedef struct {
OptionalKeys is_set__keymap_;
Boolean noremap;
Boolean nowait;
@@ -181,6 +190,7 @@ typedef struct {
Integer id;
String name;
Boolean link;
+ Boolean create;
} Dict(get_highlight);
typedef struct {
diff --git a/src/nvim/api/vim.c b/src/nvim/api/vim.c
index 411d63b921..916409b973 100644
--- a/src/nvim/api/vim.c
+++ b/src/nvim/api/vim.c
@@ -96,6 +96,7 @@ Integer nvim_get_hl_id_by_name(String name)
/// - name: (string) Get a highlight definition by name.
/// - id: (integer) Get a highlight definition by id.
/// - link: (boolean, default true) Show linked group name instead of effective definition |:hi-link|.
+/// - create: (boolean, default true) When highlight group doesn't exist create it.
///
/// @param[out] err Error details, if any.
/// @return Highlight groups as a map from group name to a highlight definition map as in |nvim_set_hl()|,
@@ -211,10 +212,11 @@ void nvim_set_hl_ns_fast(Integer ns_id, Error *err)
/// nvim_feedkeys().
///
/// Example:
-/// <pre>vim
-/// :let key = nvim_replace_termcodes("<C-o>", v:true, v:false, v:true)
-/// :call nvim_feedkeys(key, 'n', v:false)
-/// </pre>
+///
+/// ```vim
+/// :let key = nvim_replace_termcodes("<C-o>", v:true, v:false, v:true)
+/// :call nvim_feedkeys(key, 'n', v:false)
+/// ```
///
/// @param keys to be typed
/// @param mode behavior flags, see |feedkeys()|
@@ -1279,10 +1281,11 @@ void nvim_unsubscribe(uint64_t channel_id, String event)
/// "#rrggbb" hexadecimal string.
///
/// Example:
-/// <pre>vim
-/// :echo nvim_get_color_by_name("Pink")
-/// :echo nvim_get_color_by_name("#cbcbcb")
-/// </pre>
+///
+/// ```vim
+/// :echo nvim_get_color_by_name("Pink")
+/// :echo nvim_get_color_by_name("#cbcbcb")
+/// ```
///
/// @param name Color name or "#rrggbb" string
/// @return 24-bit RGB value, or -1 for invalid argument.
@@ -1419,14 +1422,16 @@ ArrayOf(Dictionary) nvim_get_keymap(String mode)
/// Empty {rhs} is |<Nop>|. |keycodes| are replaced as usual.
///
/// Example:
-/// <pre>vim
-/// call nvim_set_keymap('n', ' <NL>', '', {'nowait': v:true})
-/// </pre>
+///
+/// ```vim
+/// call nvim_set_keymap('n', ' <NL>', '', {'nowait': v:true})
+/// ```
///
/// is equivalent to:
-/// <pre>vim
-/// nmap <nowait> <Space><NL> <Nop>
-/// </pre>
+///
+/// ```vim
+/// nmap <nowait> <Space><NL> <Nop>
+/// ```
///
/// @param channel_id
/// @param mode Mode short-name (map command prefix: "n", "i", "v", "x", …)
diff --git a/src/nvim/api/win_config.c b/src/nvim/api/win_config.c
index 3a3e6da2b1..63cf3bb701 100644
--- a/src/nvim/api/win_config.c
+++ b/src/nvim/api/win_config.c
@@ -56,16 +56,19 @@
/// this should not be used to specify arbitrary WM screen positions.
///
/// Example (Lua): window-relative float
-/// <pre>lua
-/// vim.api.nvim_open_win(0, false,
-/// {relative='win', row=3, col=3, width=12, height=3})
-/// </pre>
+///
+/// ```lua
+/// vim.api.nvim_open_win(0, false,
+/// {relative='win', row=3, col=3, width=12, height=3})
+/// ```
///
/// Example (Lua): buffer-relative float (travels as buffer is scrolled)
-/// <pre>lua
-/// vim.api.nvim_open_win(0, false,
-/// {relative='win', width=12, height=3, bufpos={100,10}})
-/// </pre>
+///
+/// ```lua
+/// vim.api.nvim_open_win(0, false,
+///   {relative='win', width=12, height=3, bufpos={100,10}})
+/// ```
///
/// @param buffer Buffer to display, or 0 for current buffer
/// @param enter Enter the window (make it the current window)
diff --git a/src/nvim/autocmd.c b/src/nvim/autocmd.c
index 5f94fbb014..3fa20f4e48 100644
--- a/src/nvim/autocmd.c
+++ b/src/nvim/autocmd.c
@@ -2524,7 +2524,7 @@ static bool arg_autocmd_flag_get(bool *flag, char **cmd_ptr, char *pattern, int
}
/// When kFalse: VimSuspend should be triggered next.
-/// When kTrue: VimResume should be triggerd next.
+/// When kTrue: VimResume should be triggered next.
/// When kNone: Currently triggering VimSuspend or VimResume.
static TriState pending_vimresume = kFalse;
diff --git a/src/nvim/buffer.c b/src/nvim/buffer.c
index 8eacec4d5e..d2a5eab0a5 100644
--- a/src/nvim/buffer.c
+++ b/src/nvim/buffer.c
@@ -747,6 +747,7 @@ void buf_clear_file(buf_T *buf)
void buf_clear(void)
{
linenr_T line_count = curbuf->b_ml.ml_line_count;
+ extmark_free_all(curbuf); // delete any extmarks
while (!(curbuf->b_ml.ml_flags & ML_EMPTY)) {
ml_delete((linenr_T)1, false);
}
diff --git a/src/nvim/bufwrite.c b/src/nvim/bufwrite.c
index 2796ed1f0b..445e946543 100644
--- a/src/nvim/bufwrite.c
+++ b/src/nvim/bufwrite.c
@@ -1402,7 +1402,7 @@ int buf_write(buf_T *buf, char *fname, char *sfname, linenr_T start, linenr_T en
while ((fd = os_open(wfname, fflags, mode)) < 0) {
// A forced write will try to create a new file if the old one
// is still readonly. This may also happen when the directory
- // is read-only. In that case the mch_remove() will fail.
+ // is read-only. In that case the os_remove() will fail.
if (err.msg == NULL) {
#ifdef UNIX
FileInfo file_info;
diff --git a/src/nvim/decoration.c b/src/nvim/decoration.c
index d9d1417d2a..f4ca31040a 100644
--- a/src/nvim/decoration.c
+++ b/src/nvim/decoration.c
@@ -158,7 +158,7 @@ Decoration *decor_find_virttext(buf_T *buf, int row, uint64_t ns_id)
MarkTreeIter itr[1] = { 0 };
marktree_itr_get(buf->b_marktree, row, 0, itr);
while (true) {
- mtkey_t mark = marktree_itr_current(itr);
+ MTKey mark = marktree_itr_current(itr);
if (mark.pos.row < 0 || mark.pos.row > row) {
break;
} else if (marktree_decor_level(mark) < kDecorLevelVisible) {
@@ -189,7 +189,7 @@ bool decor_redraw_reset(win_T *wp, DecorState *state)
return wp->w_buffer->b_marktree->n_keys;
}
-Decoration get_decor(mtkey_t mark)
+Decoration get_decor(MTKey mark)
{
if (mark.decor_full) {
return *mark.decor_full;
@@ -211,50 +211,20 @@ bool decor_redraw_start(win_T *wp, int top_row, DecorState *state)
{
buf_T *buf = wp->w_buffer;
state->top_row = top_row;
- marktree_itr_get(buf->b_marktree, top_row, 0, state->itr);
- if (!state->itr->node) {
+ if (!marktree_itr_get_overlap(buf->b_marktree, top_row, 0, state->itr)) {
return false;
}
- marktree_itr_rewind(buf->b_marktree, state->itr);
- while (true) {
- mtkey_t mark = marktree_itr_current(state->itr);
- if (mark.pos.row < 0) { // || mark.row > end_row
- break;
- }
- if ((mark.pos.row < top_row && mt_end(mark))
- || marktree_decor_level(mark) < kDecorLevelVisible) {
- goto next_mark;
- }
-
- Decoration decor = get_decor(mark);
+ MTPair pair;
- mtpos_t altpos = marktree_get_altpos(buf->b_marktree, mark, NULL);
-
- // Exclude start marks if the end mark position is above the top row
- // Exclude end marks if we have already added the start mark
- if ((mt_start(mark) && altpos.row < top_row && !decor_virt_pos(&decor))
- || (mt_end(mark) && altpos.row >= top_row)) {
- goto next_mark;
+ while (marktree_itr_step_overlap(buf->b_marktree, state->itr, &pair)) {
+ if (marktree_decor_level(pair.start) < kDecorLevelVisible) {
+ continue;
}
- if (mt_end(mark)) {
- decor_add(state, altpos.row, altpos.col, mark.pos.row, mark.pos.col,
- &decor, false, mark.ns, mark.id);
- } else {
- if (altpos.row == -1) {
- altpos.row = mark.pos.row;
- altpos.col = mark.pos.col;
- }
- decor_add(state, mark.pos.row, mark.pos.col, altpos.row, altpos.col,
- &decor, false, mark.ns, mark.id);
- }
+ Decoration decor = get_decor(pair.start);
-next_mark:
- if (marktree_itr_node_done(state->itr)) {
- marktree_itr_next(buf->b_marktree, state->itr);
- break;
- }
- marktree_itr_next(buf->b_marktree, state->itr);
+ decor_add(state, pair.start.pos.row, pair.start.pos.col, pair.end_pos.row, pair.end_pos.col,
+ &decor, false, pair.start.ns, pair.start.id);
}
return true; // TODO(bfredl): check if available in the region
@@ -268,7 +238,13 @@ bool decor_redraw_line(win_T *wp, int row, DecorState *state)
state->row = row;
state->col_until = -1;
state->eol_col = -1;
- return true; // TODO(bfredl): be more precise
+
+ if (kv_size(state->active)) {
+ return true;
+ }
+
+ MTKey k = marktree_itr_current(state->itr);
+ return (k.pos.row >= 0 && k.pos.row <= row);
}
static void decor_add(DecorState *state, int start_row, int start_col, int end_row, int end_col,
@@ -292,6 +268,28 @@ static void decor_add(DecorState *state, int start_row, int start_col, int end_r
kv_A(state->active, index) = range;
}
+/// Initialize the draw_col of a newly-added non-inline virtual text item.
+static void decor_init_draw_col(int win_col, bool hidden, DecorRange *item)
+{
+ if (win_col < 0) {
+ item->draw_col = win_col;
+ } else if (item->decor.virt_text_pos == kVTOverlay) {
+ item->draw_col = (item->decor.virt_text_hide && hidden) ? INT_MIN : win_col;
+ } else {
+ item->draw_col = -1;
+ }
+}
+
+void decor_recheck_draw_col(int win_col, bool hidden, DecorState *state)
+{
+ for (size_t i = 0; i < kv_size(state->active); i++) {
+ DecorRange *item = &kv_A(state->active, i);
+ if (item->draw_col == -3) {
+ decor_init_draw_col(win_col, hidden, item);
+ }
+ }
+}
+
int decor_redraw_col(win_T *wp, int col, int win_col, bool hidden, DecorState *state)
{
buf_T *buf = wp->w_buffer;
@@ -302,7 +300,7 @@ int decor_redraw_col(win_T *wp, int col, int win_col, bool hidden, DecorState *s
while (true) {
// TODO(bfredl): check duplicate entry in "intersection"
// branch
- mtkey_t mark = marktree_itr_current(state->itr);
+ MTKey mark = marktree_itr_current(state->itr);
if (mark.pos.row < 0 || mark.pos.row > state->row) {
break;
} else if (mark.pos.row == state->row && mark.pos.col > col) {
@@ -317,8 +315,7 @@ int decor_redraw_col(win_T *wp, int col, int win_col, bool hidden, DecorState *s
Decoration decor = get_decor(mark);
- mtpos_t endpos = marktree_get_altpos(buf->b_marktree, mark, NULL);
-
+ MTPos endpos = marktree_get_altpos(buf->b_marktree, mark, NULL);
if (endpos.row == -1) {
endpos = mark.pos;
}
@@ -373,19 +370,10 @@ next_mark:
if (active && item.decor.spell != kNone) {
spell = item.decor.spell;
}
- if (item.start_row == state->row && decor_virt_pos(&item.decor)
- && item.draw_col != INT_MIN) {
- if (item.start_col <= col) {
- if (item.decor.virt_text_pos == kVTOverlay && item.draw_col == -1) {
- item.draw_col = (item.decor.virt_text_hide && hidden) ? INT_MIN : win_col;
- } else if (item.draw_col == -3) {
- item.draw_col = -1;
- }
- } else if (wp->w_p_wrap
- && (item.decor.virt_text_pos == kVTRightAlign
- || item.decor.virt_text_pos == kVTWinCol)) {
- item.draw_col = -3;
- }
+ if (item.start_row == state->row && item.start_col <= col
+ && decor_virt_pos(&item.decor) && item.draw_col == -1
+ && item.decor.virt_text_pos != kVTInline) {
+ decor_init_draw_col(win_col, hidden, &item);
}
if (keep) {
kv_A(state->active, j++) = item;
@@ -412,8 +400,28 @@ void decor_redraw_signs(buf_T *buf, int row, int *num_signs, SignTextAttrs sattr
MarkTreeIter itr[1] = { 0 };
marktree_itr_get(buf->b_marktree, row, 0, itr);
+ // TODO(bfredl): integrate with main decor loop.
+ if (!marktree_itr_get_overlap(buf->b_marktree, row, 0, itr)) {
+ return;
+ }
+
+ MTPair pair;
+ while (marktree_itr_step_overlap(buf->b_marktree, itr, &pair)) {
+ if (marktree_decor_level(pair.start) < kDecorLevelVisible) {
+ continue;
+ }
+
+ Decoration *decor = pair.start.decor_full;
+
+ if (!decor || !decor_has_sign(decor)) {
+ continue;
+ }
+
+ decor_to_sign(decor, num_signs, sattrs, num_id, line_id, cul_id);
+ }
+
while (true) {
- mtkey_t mark = marktree_itr_current(itr);
+ MTKey mark = marktree_itr_current(itr);
if (mark.pos.row < 0 || mark.pos.row > row) {
break;
}
@@ -428,43 +436,49 @@ void decor_redraw_signs(buf_T *buf, int row, int *num_signs, SignTextAttrs sattr
goto next_mark;
}
- if (decor->sign_text) {
- int j;
- for (j = (*num_signs); j > 0; j--) {
- if (sattrs[j - 1].priority >= decor->priority) {
- break;
- }
- if (j < SIGN_SHOW_MAX) {
- sattrs[j] = sattrs[j - 1];
- }
+ decor_to_sign(decor, num_signs, sattrs, num_id, line_id, cul_id);
+
+next_mark:
+ marktree_itr_next(buf->b_marktree, itr);
+ }
+}
+
+static void decor_to_sign(Decoration *decor, int *num_signs, SignTextAttrs sattrs[],
+ HlPriId *num_id, HlPriId *line_id, HlPriId *cul_id)
+{
+ if (decor->sign_text) {
+ int j;
+ for (j = (*num_signs); j > 0; j--) {
+ if (sattrs[j - 1].priority >= decor->priority) {
+ break;
}
if (j < SIGN_SHOW_MAX) {
- sattrs[j] = (SignTextAttrs) {
- .text = decor->sign_text,
- .hl_id = decor->sign_hl_id,
- .priority = decor->priority
- };
- (*num_signs)++;
+ sattrs[j] = sattrs[j - 1];
}
}
-
- struct { HlPriId *dest; int hl; } cattrs[] = {
- { line_id, decor->line_hl_id },
- { num_id, decor->number_hl_id },
- { cul_id, decor->cursorline_hl_id },
- { NULL, -1 },
- };
- for (int i = 0; cattrs[i].dest; i++) {
- if (cattrs[i].hl != 0 && decor->priority >= cattrs[i].dest->priority) {
- *cattrs[i].dest = (HlPriId) {
- .hl_id = cattrs[i].hl,
- .priority = decor->priority
- };
- }
+ if (j < SIGN_SHOW_MAX) {
+ sattrs[j] = (SignTextAttrs) {
+ .text = decor->sign_text,
+ .hl_id = decor->sign_hl_id,
+ .priority = decor->priority
+ };
+ (*num_signs)++;
}
+ }
-next_mark:
- marktree_itr_next(buf->b_marktree, itr);
+ struct { HlPriId *dest; int hl; } cattrs[] = {
+ { line_id, decor->line_hl_id },
+ { num_id, decor->number_hl_id },
+ { cul_id, decor->cursorline_hl_id },
+ { NULL, -1 },
+ };
+ for (int i = 0; cattrs[i].dest; i++) {
+ if (cattrs[i].hl != 0 && decor->priority >= cattrs[i].dest->priority) {
+ *cattrs[i].dest = (HlPriId) {
+ .hl_id = cattrs[i].hl,
+ .priority = decor->priority
+ };
+ }
}
}
@@ -488,7 +502,7 @@ int decor_signcols(buf_T *buf, DecorState *state, int row, int end_row, int max)
MarkTreeIter itr[1] = { 0 };
marktree_itr_get(buf->b_marktree, 0, -1, itr);
while (true) {
- mtkey_t mark = marktree_itr_current(itr);
+ MTKey mark = marktree_itr_current(itr);
if (mark.pos.row < 0 || mark.pos.row > end_row) {
break;
}
@@ -525,7 +539,7 @@ int decor_signcols(buf_T *buf, DecorState *state, int row, int end_row, int max)
goto next_mark;
}
- mtpos_t altpos = marktree_get_altpos(buf->b_marktree, mark, NULL);
+ MTPos altpos = marktree_get_altpos(buf->b_marktree, mark, NULL);
if (mt_end(mark)) {
if (mark.pos.row >= row && altpos.row <= end_row) {
@@ -610,7 +624,7 @@ int decor_virt_lines(win_T *wp, linenr_T lnum, VirtLines *lines, TriState has_fo
MarkTreeIter itr[1] = { 0 };
marktree_itr_get(buf->b_marktree, start_row, 0, itr);
while (true) {
- mtkey_t mark = marktree_itr_current(itr);
+ MTKey mark = marktree_itr_current(itr);
if (mark.pos.row < 0 || mark.pos.row >= end_row) {
break;
} else if (mt_end(mark)
diff --git a/src/nvim/decoration.h b/src/nvim/decoration.h
index 3d16aa803e..0f191aa870 100644
--- a/src/nvim/decoration.h
+++ b/src/nvim/decoration.h
@@ -84,7 +84,7 @@ typedef struct {
bool virt_text_owned;
/// Screen column to draw the virtual text.
/// When -1, the virtual text may be drawn after deciding where.
- /// When -3, the virtual text should be drawn on a later screen line.
+ /// When -3, the virtual text should be drawn on the next screen line.
/// When INT_MIN, the virtual text should no longer be drawn.
int draw_col;
uint64_t ns_id;
diff --git a/src/nvim/decoration_provider.c b/src/nvim/decoration_provider.c
index 8e5809c4e0..63c9772fb8 100644
--- a/src/nvim/decoration_provider.c
+++ b/src/nvim/decoration_provider.c
@@ -126,9 +126,10 @@ void decor_providers_invoke_win(win_T *wp, DecorProviders *providers,
{
kvi_init(*line_providers);
- linenr_T knownmax = ((wp->w_valid & VALID_BOTLINE)
- ? wp->w_botline
- : (wp->w_topline + wp->w_height_inner));
+ linenr_T knownmax = MIN(wp->w_buffer->b_ml.ml_line_count,
+ ((wp->w_valid & VALID_BOTLINE)
+ ? wp->w_botline
+ : MAX(wp->w_topline + wp->w_height_inner, wp->w_botline)));
for (size_t k = 0; k < kv_size(*providers); k++) {
DecorProvider *p = kv_A(*providers, k);
@@ -138,7 +139,7 @@ void decor_providers_invoke_win(win_T *wp, DecorProviders *providers,
ADD_C(args, BUFFER_OBJ(wp->w_buffer->handle));
// TODO(bfredl): we are not using this, but should be first drawn line?
ADD_C(args, INTEGER_OBJ(wp->w_topline - 1));
- ADD_C(args, INTEGER_OBJ(knownmax));
+ ADD_C(args, INTEGER_OBJ(knownmax - 1));
if (decor_provider_invoke(p, "win", p->redraw_win, args, true)) {
kvi_push(*line_providers, p);
}
diff --git a/src/nvim/drawline.c b/src/nvim/drawline.c
index 4b989fa59a..e1550e0ece 100644
--- a/src/nvim/drawline.c
+++ b/src/nvim/drawline.c
@@ -136,8 +136,6 @@ typedef struct {
///< or w_skipcol or concealing
int skipped_cells; ///< nr of skipped cells for virtual text
///< to be added to wlv.vcol later
- bool more_virt_inline_chunks; ///< indicates if there is more inline virtual text
- ///< after n_extra
} winlinevars_T;
/// for line_putchar. Contains the state that needs to be remembered from
@@ -868,10 +866,12 @@ static void apply_cursorline_highlight(win_T *wp, winlinevars_T *wlv)
}
}
-/// Checks if there is more inline virtual text that need to be drawn
-/// and sets has_more_virt_inline_chunks to reflect that.
+/// Checks if there is more inline virtual text that need to be drawn.
static bool has_more_inline_virt(winlinevars_T *wlv, ptrdiff_t v)
{
+ if (wlv->virt_inline_i < kv_size(wlv->virt_inline)) {
+ return true;
+ }
DecorState *state = &decor_state;
for (size_t i = 0; i < kv_size(state->active); i++) {
DecorRange *item = &kv_A(state->active, i);
@@ -911,7 +911,6 @@ static void handle_inline_virtual_text(win_T *wp, winlinevars_T *wlv, ptrdiff_t
break;
}
}
- wlv->more_virt_inline_chunks = has_more_inline_virt(wlv, v);
if (!kv_size(wlv->virt_inline)) {
// no more inline virtual text here
break;
@@ -929,11 +928,6 @@ static void handle_inline_virtual_text(win_T *wp, winlinevars_T *wlv, ptrdiff_t
wlv->c_final = NUL;
wlv->extra_attr = vtc.hl_id ? syn_id2attr(vtc.hl_id) : 0;
wlv->n_attr = mb_charlen(vtc.text);
-
- // Checks if there is more inline virtual text chunks that need to be drawn.
- wlv->more_virt_inline_chunks = has_more_inline_virt(wlv, v)
- || wlv->virt_inline_i < kv_size(wlv->virt_inline);
-
// If the text didn't reach until the first window
// column we need to skip cells.
if (wlv->skip_cells > 0) {
@@ -1147,6 +1141,8 @@ int win_line(win_T *wp, linenr_T lnum, int startrow, int endrow, bool number_onl
bool saved_search_attr_from_match = false;
int win_col_offset = 0; // offset for window columns
+ bool area_active = false; // whether in Visual selection, for virtual text
+ bool decor_need_recheck = false; // call decor_recheck_draw_col() at next char
char buf_fold[FOLD_TEXT_LEN]; // Hold value returned by get_foldtext
@@ -1788,9 +1784,27 @@ int win_line(win_T *wp, linenr_T lnum, int startrow, int endrow, bool number_onl
}
if (has_decor && wlv.n_extra == 0) {
- bool selected = (area_highlighting
- && ((wlv.vcol >= wlv.fromcol && wlv.vcol < wlv.tocol)
- || (noinvcur && wlv.vcol == wp->w_virtcol)));
+ // Duplicate the Visual area check after this block,
+ // but don't check inside p_extra here.
+ if (wlv.vcol == wlv.fromcol
+ || (wlv.vcol + 1 == wlv.fromcol
+ && (wlv.n_extra == 0 && utf_ptr2cells(ptr) > 1))
+ || (vcol_prev == fromcol_prev
+ && vcol_prev < wlv.vcol
+ && wlv.vcol < wlv.tocol)) {
+ area_active = true;
+ } else if (area_active
+ && (wlv.vcol == wlv.tocol
+ || (noinvcur && wlv.vcol == wp->w_virtcol))) {
+ area_active = false;
+ }
+
+ bool selected = (area_active || (area_highlighting && noinvcur
+ && wlv.vcol == wp->w_virtcol));
+ if (decor_need_recheck) {
+ decor_recheck_draw_col(wlv.off, selected, &decor_state);
+ decor_need_recheck = false;
+ }
extmark_attr = decor_redraw_col(wp, (colnr_T)v, wlv.off, selected, &decor_state);
if (!has_fold && wp->w_buffer->b_virt_text_inline > 0) {
@@ -1824,10 +1838,12 @@ int win_line(win_T *wp, linenr_T lnum, int startrow, int endrow, bool number_onl
&& vcol_prev < wlv.vcol // not at margin
&& wlv.vcol < wlv.tocol)) {
*area_attr_p = vi_attr; // start highlighting
+ area_active = true;
} else if (*area_attr_p != 0
&& (wlv.vcol == wlv.tocol
|| (noinvcur && wlv.vcol == wp->w_virtcol))) {
*area_attr_p = 0; // stop highlighting
+ area_active = false;
}
if (!has_fold && wlv.n_extra == 0) {
@@ -2889,15 +2905,20 @@ int win_line(win_T *wp, linenr_T lnum, int startrow, int endrow, bool number_onl
&& !wp->w_p_wrap
&& wlv.filler_todo <= 0
&& (wp->w_p_rl ? wlv.col == 0 : wlv.col == grid->cols - 1)
- && !has_fold
- && (*ptr != NUL
- || lcs_eol_one > 0
- || (wlv.n_extra > 0 && (wlv.c_extra != NUL || *wlv.p_extra != NUL))
- || wlv.more_virt_inline_chunks)) {
- c = wp->w_p_lcs_chars.ext;
- wlv.char_attr = win_hl_attr(wp, HLF_AT);
- mb_c = c;
- mb_utf8 = check_mb_utf8(&c, u8cc);
+ && !has_fold) {
+ if (has_decor && *ptr == NUL && lcs_eol_one == 0) {
+ // Tricky: there might be a virtual text just _after_ the last char
+ decor_redraw_col(wp, (colnr_T)v, wlv.off, false, &decor_state);
+ }
+ if (*ptr != NUL
+ || lcs_eol_one > 0
+ || (wlv.n_extra > 0 && (wlv.c_extra != NUL || *wlv.p_extra != NUL))
+ || has_more_inline_virt(&wlv, v)) {
+ c = wp->w_p_lcs_chars.ext;
+ wlv.char_attr = win_hl_attr(wp, HLF_AT);
+ mb_c = c;
+ mb_utf8 = check_mb_utf8(&c, u8cc);
+ }
}
// advance to the next 'colorcolumn'
@@ -3079,6 +3100,21 @@ int win_line(win_T *wp, linenr_T lnum, int startrow, int endrow, bool number_onl
wlv.char_attr = saved_attr2;
}
+ if (has_decor && wlv.filler_todo <= 0
+ && (wp->w_p_rl ? (wlv.col < 0) : (wlv.col >= grid->cols))) {
+ // At the end of screen line: might need to peek for decorations just after
+ // this position.
+ if (!has_fold && wp->w_p_wrap && wlv.n_extra == 0) {
+ decor_redraw_col(wp, (int)(ptr - line), -3, false, &decor_state);
+ // Check position/hiding of virtual text again on next screen line.
+ decor_need_recheck = true;
+ } else if (has_fold || !wp->w_p_wrap) {
+ // Without wrapping, we might need to display right_align and win_col
+ // virt_text for the entire text line.
+ decor_redraw_col(wp, MAXCOL, -1, true, &decor_state);
+ }
+ }
+
// At end of screen line and there is more to come: Display the line
// so far. If there is no more to display it is caught above.
if ((wp->w_p_rl ? (wlv.col < 0) : (wlv.col >= grid->cols))
@@ -3089,7 +3125,7 @@ int win_line(win_T *wp, linenr_T lnum, int startrow, int endrow, bool number_onl
|| (wp->w_p_list && wp->w_p_lcs_chars.eol != NUL
&& wlv.p_extra != at_end_str)
|| (wlv.n_extra != 0 && (wlv.c_extra != NUL || *wlv.p_extra != NUL))
- || wlv.more_virt_inline_chunks)) {
+ || has_more_inline_virt(&wlv, v))) {
bool wrap = wp->w_p_wrap // Wrapping enabled.
&& wlv.filler_todo <= 0 // Not drawing diff filler lines.
&& lcs_eol_one != -1 // Haven't printed the lcs_eol character.
@@ -3102,7 +3138,7 @@ int win_line(win_T *wp, linenr_T lnum, int startrow, int endrow, bool number_onl
if (virt_line_offset >= 0) {
draw_virt_text_item(buf, virt_line_offset, kv_A(virt_lines, virt_line_index).line,
kHlModeReplace, grid->cols, 0);
- } else {
+ } else if (wlv.filler_todo <= 0) {
draw_virt_text(wp, buf, win_col_offset, &draw_col, grid->cols, wlv.row);
}
diff --git a/src/nvim/edit.c b/src/nvim/edit.c
index 06eb81be92..216f8a67db 100644
--- a/src/nvim/edit.c
+++ b/src/nvim/edit.c
@@ -4429,9 +4429,8 @@ static bool ins_tab(void)
}
}
if (!(State & VREPLACE_FLAG)) {
- extmark_splice_cols(curbuf, (int)fpos.lnum - 1, change_col,
- cursor->col - change_col, fpos.col - change_col,
- kExtmarkUndo);
+ inserted_bytes(fpos.lnum, change_col,
+ cursor->col - change_col, fpos.col - change_col);
}
}
cursor->col -= i;
diff --git a/src/nvim/eval.lua b/src/nvim/eval.lua
index a9200d1f5f..e6efbe85d8 100644
--- a/src/nvim/eval.lua
+++ b/src/nvim/eval.lua
@@ -7472,7 +7472,7 @@ M.funcs = {
*printf-$*
In certain languages, error and informative messages are
more readable when the order of words is different from the
- corresponding message in English. To accomodate translations
+ corresponding message in English. To accommodate translations
having a different word order, positional arguments may be
used to indicate this. For instance: >vim
diff --git a/src/nvim/eval/funcs.c b/src/nvim/eval/funcs.c
index 1ed0994222..93075cf0e2 100644
--- a/src/nvim/eval/funcs.c
+++ b/src/nvim/eval/funcs.c
@@ -6329,7 +6329,7 @@ static void reduce_string(typval_T *argvars, typval_T *expr, typval_T *rettv)
}
}
-/// Implementaion of reduce() for Blob "argvars[0]" using the function "expr"
+/// Implementation of reduce() for Blob "argvars[0]" using the function "expr"
/// starting with the optional initial value "argvars[2]" and return the result
/// in "rettv".
static void reduce_blob(typval_T *argvars, typval_T *expr, typval_T *rettv)
diff --git a/src/nvim/eval/vars.c b/src/nvim/eval/vars.c
index 9b6427fef7..e5b1b88eef 100644
--- a/src/nvim/eval/vars.c
+++ b/src/nvim/eval/vars.c
@@ -1810,7 +1810,7 @@ static void getwinvar(typval_T *argvars, typval_T *rettv, int off)
/// @param[in] tv typval to convert.
/// @param[in] option Option name.
/// @param[in] flags Option flags.
-/// @param[out] error Whether an error occured.
+/// @param[out] error Whether an error occurred.
///
/// @return Typval converted to OptVal. Must be freed by caller.
/// Returns NIL_OPTVAL for invalid option name.
diff --git a/src/nvim/ex_cmds.c b/src/nvim/ex_cmds.c
index a0618ce7d7..4f6b8f2c8f 100644
--- a/src/nvim/ex_cmds.c
+++ b/src/nvim/ex_cmds.c
@@ -3888,6 +3888,10 @@ static int do_sub(exarg_T *eap, const proftime_T timeout, const long cmdpreview_
nmatch = curbuf->b_ml.ml_line_count - sub_firstlnum + 1;
current_match.end.lnum = sub_firstlnum + (linenr_T)nmatch;
skip_match = true;
+ // safety check
+ if (nmatch < 0) {
+ goto skip;
+ }
}
// Save the line numbers for the preview buffer
diff --git a/src/nvim/ex_getln.c b/src/nvim/ex_getln.c
index 08b010c153..09781f392a 100644
--- a/src/nvim/ex_getln.c
+++ b/src/nvim/ex_getln.c
@@ -2410,6 +2410,9 @@ static void cmdpreview_restore_state(CpInfo *cpinfo)
buf->b_changed = cp_bufinfo.save_b_changed;
+ // Clear preview highlights.
+ extmark_clear(buf, (uint32_t)cmdpreview_ns, 0, 0, MAXLNUM, MAXCOL);
+
if (buf->b_u_seq_cur != cp_bufinfo.undo_info.save_b_u_seq_cur) {
int count = 0;
@@ -2439,9 +2442,6 @@ static void cmdpreview_restore_state(CpInfo *cpinfo)
}
buf->b_p_ul = cp_bufinfo.save_b_p_ul; // Restore 'undolevels'
-
- // Clear preview highlights.
- extmark_clear(buf, (uint32_t)cmdpreview_ns, 0, 0, MAXLNUM, MAXCOL);
}
for (size_t i = 0; i < cpinfo->win_info.size; i++) {
diff --git a/src/nvim/extmark.c b/src/nvim/extmark.c
index 77e41cb5cf..5140fe199e 100644
--- a/src/nvim/extmark.c
+++ b/src/nvim/extmark.c
@@ -82,7 +82,7 @@ void extmark_set(buf_T *buf, uint32_t ns_id, uint32_t *idp, int row, colnr_T col
id = ++*ns;
} else {
MarkTreeIter itr[1] = { 0 };
- mtkey_t old_mark = marktree_lookup_ns(buf->b_marktree, ns_id, id, false, itr);
+ MTKey old_mark = marktree_lookup_ns(buf->b_marktree, ns_id, id, false, itr);
if (old_mark.id) {
if (decor_state.running_on_lines) {
if (err) {
@@ -124,8 +124,8 @@ void extmark_set(buf_T *buf, uint32_t ns_id, uint32_t *idp, int row, colnr_T col
}
}
- mtkey_t mark = { { row, col }, ns_id, id, 0,
- mt_flags(right_gravity, decor_level), 0, NULL };
+ MTKey mark = { { row, col }, ns_id, id, 0,
+ mt_flags(right_gravity, decor_level), 0, NULL };
if (decor_full) {
mark.decor_full = decor;
} else if (decor) {
@@ -180,7 +180,7 @@ error:
static bool extmark_setraw(buf_T *buf, uint64_t mark, int row, colnr_T col)
{
MarkTreeIter itr[1] = { 0 };
- mtkey_t key = marktree_lookup(buf->b_marktree, mark, itr);
+ MTKey key = marktree_lookup(buf->b_marktree, mark, itr);
if (key.pos.row == -1) {
return false;
}
@@ -199,14 +199,14 @@ static bool extmark_setraw(buf_T *buf, uint64_t mark, int row, colnr_T col)
bool extmark_del(buf_T *buf, uint32_t ns_id, uint32_t id)
{
MarkTreeIter itr[1] = { 0 };
- mtkey_t key = marktree_lookup_ns(buf->b_marktree, ns_id, id, false, itr);
+ MTKey key = marktree_lookup_ns(buf->b_marktree, ns_id, id, false, itr);
if (!key.id) {
return false;
}
assert(key.pos.row >= 0);
uint64_t other = marktree_del_itr(buf->b_marktree, itr, false);
- mtkey_t key2 = key;
+ MTKey key2 = key;
if (other) {
key2 = marktree_lookup(buf->b_marktree, other, itr);
@@ -250,7 +250,7 @@ bool extmark_clear(buf_T *buf, uint32_t ns_id, int l_row, colnr_T l_col, int u_r
MarkTreeIter itr[1] = { 0 };
marktree_itr_get(buf->b_marktree, l_row, l_col, itr);
while (true) {
- mtkey_t mark = marktree_itr_current(itr);
+ MTKey mark = marktree_itr_current(itr);
if (mark.pos.row < 0
|| mark.pos.row > u_row
|| (mark.pos.row == u_row && mark.pos.col > u_col)) {
@@ -292,7 +292,7 @@ bool extmark_clear(buf_T *buf, uint32_t ns_id, int l_row, colnr_T l_col, int u_r
uint64_t id;
ssize_t decor_id;
map_foreach(&delete_set, id, decor_id, {
- mtkey_t mark = marktree_lookup(buf->b_marktree, id, itr);
+ MTKey mark = marktree_lookup(buf->b_marktree, id, itr);
assert(marktree_itr_valid(itr));
marktree_del_itr(buf->b_marktree, itr, false);
if (decor_id >= 0) {
@@ -313,17 +313,31 @@ bool extmark_clear(buf_T *buf, uint32_t ns_id, int l_row, colnr_T l_col, int u_r
/// dir can be set to control the order of the array
/// amount = amount of marks to find or -1 for all
ExtmarkInfoArray extmark_get(buf_T *buf, uint32_t ns_id, int l_row, colnr_T l_col, int u_row,
- colnr_T u_col, int64_t amount, bool reverse, bool all_ns,
- ExtmarkType type_filter)
+ colnr_T u_col, int64_t amount, bool reverse, ExtmarkType type_filter,
+ bool overlap)
{
ExtmarkInfoArray array = KV_INITIAL_VALUE;
MarkTreeIter itr[1];
- // Find all the marks
- marktree_itr_get_ext(buf->b_marktree, mtpos_t(l_row, l_col),
- itr, reverse, false, NULL);
+
+ if (overlap) {
+ // Find all the marks overlapping the start position
+ if (!marktree_itr_get_overlap(buf->b_marktree, l_row, l_col, itr)) {
+ return array;
+ }
+
+ MTPair pair;
+ while (marktree_itr_step_overlap(buf->b_marktree, itr, &pair)) {
+ push_mark(&array, ns_id, type_filter, pair.start, pair.end_pos, pair.end_right_gravity);
+ }
+ } else {
+ // Find all the marks beginning with the start position
+ marktree_itr_get_ext(buf->b_marktree, MTPos(l_row, l_col),
+ itr, reverse, false, NULL);
+ }
+
int order = reverse ? -1 : 1;
while ((int64_t)kv_size(array) < amount) {
- mtkey_t mark = marktree_itr_current(itr);
+ MTKey mark = marktree_itr_current(itr);
if (mark.pos.row < 0
|| (mark.pos.row - u_row) * order > 0
|| (mark.pos.row == u_row && (mark.pos.col - u_col) * order > 0)) {
@@ -333,35 +347,8 @@ ExtmarkInfoArray extmark_get(buf_T *buf, uint32_t ns_id, int l_row, colnr_T l_co
goto next_mark;
}
- uint16_t type_flags = kExtmarkNone;
- if (type_filter != kExtmarkNone) {
- Decoration *decor = mark.decor_full;
- if (decor && (decor->sign_text || decor->number_hl_id)) {
- type_flags |= kExtmarkSign;
- }
- if (decor && decor->virt_text.size) {
- type_flags |= kExtmarkVirtText;
- }
- if (decor && decor->virt_lines.size) {
- type_flags |= kExtmarkVirtLines;
- }
- if ((decor && (decor->line_hl_id || decor->cursorline_hl_id))
- || mark.hl_id) {
- type_flags |= kExtmarkHighlight;
- }
- }
-
- if ((all_ns || mark.ns == ns_id) && type_flags & type_filter) {
- mtkey_t end = marktree_get_alt(buf->b_marktree, mark, NULL);
- kv_push(array, ((ExtmarkInfo) { .ns_id = mark.ns,
- .mark_id = mark.id,
- .row = mark.pos.row, .col = mark.pos.col,
- .end_row = end.pos.row,
- .end_col = end.pos.col,
- .right_gravity = mt_right(mark),
- .end_right_gravity = mt_right(end),
- .decor = get_decor(mark) }));
- }
+ MTKey end = marktree_get_alt(buf->b_marktree, mark, NULL);
+ push_mark(&array, ns_id, type_filter, mark, end.pos, mt_right(end));
next_mark:
if (reverse) {
marktree_itr_prev(buf->b_marktree, itr);
@@ -372,16 +359,54 @@ next_mark:
return array;
}
+static void push_mark(ExtmarkInfoArray *array, uint32_t ns_id, ExtmarkType type_filter, MTKey mark,
+ MTPos end_pos, bool end_right)
+{
+ if (!(ns_id == UINT32_MAX || mark.ns == ns_id)) {
+ return;
+ }
+ uint16_t type_flags = kExtmarkNone;
+ if (type_filter != kExtmarkNone) {
+ Decoration *decor = mark.decor_full;
+ if (decor && (decor->sign_text || decor->number_hl_id)) {
+ type_flags |= kExtmarkSign;
+ }
+ if (decor && decor->virt_text.size) {
+ type_flags |= kExtmarkVirtText;
+ }
+ if (decor && decor->virt_lines.size) {
+ type_flags |= kExtmarkVirtLines;
+ }
+ if ((decor && (decor->line_hl_id || decor->cursorline_hl_id))
+ || mark.hl_id) {
+ type_flags |= kExtmarkHighlight;
+ }
+
+ if (!(type_flags & type_filter)) {
+ return;
+ }
+ }
+
+ kv_push(*array, ((ExtmarkInfo) { .ns_id = mark.ns,
+ .mark_id = mark.id,
+ .row = mark.pos.row, .col = mark.pos.col,
+ .end_row = end_pos.row,
+ .end_col = end_pos.col,
+ .right_gravity = mt_right(mark),
+ .end_right_gravity = end_right,
+ .decor = get_decor(mark) }));
+}
+
/// Lookup an extmark by id
ExtmarkInfo extmark_from_id(buf_T *buf, uint32_t ns_id, uint32_t id)
{
ExtmarkInfo ret = { 0, 0, -1, -1, -1, -1, false, false, DECORATION_INIT };
- mtkey_t mark = marktree_lookup_ns(buf->b_marktree, ns_id, id, false, NULL);
+ MTKey mark = marktree_lookup_ns(buf->b_marktree, ns_id, id, false, NULL);
if (!mark.id) {
return ret;
}
assert(mark.pos.row >= 0);
- mtkey_t end = marktree_get_alt(buf->b_marktree, mark, NULL);
+ MTKey end = marktree_get_alt(buf->b_marktree, mark, NULL);
ret.ns_id = ns_id;
ret.mark_id = id;
@@ -406,7 +431,7 @@ void extmark_free_all(buf_T *buf)
MarkTreeIter itr[1] = { 0 };
marktree_itr_get(buf->b_marktree, 0, 0, itr);
while (true) {
- mtkey_t mark = marktree_itr_current(itr);
+ MTKey mark = marktree_itr_current(itr);
if (mark.pos.row < 0) {
break;
}
@@ -462,7 +487,7 @@ void u_extmark_copy(buf_T *buf, int l_row, colnr_T l_col, int u_row, colnr_T u_c
MarkTreeIter itr[1] = { 0 };
marktree_itr_get(buf->b_marktree, (int32_t)l_row, l_col, itr);
while (true) {
- mtkey_t mark = marktree_itr_current(itr);
+ MTKey mark = marktree_itr_current(itr);
if (mark.pos.row < 0
|| mark.pos.row > u_row
|| (mark.pos.row == u_row && mark.pos.col > u_col)) {
diff --git a/src/nvim/grid_defs.h b/src/nvim/grid_defs.h
index aeaadea73c..4ad7d4cdb4 100644
--- a/src/nvim/grid_defs.h
+++ b/src/nvim/grid_defs.h
@@ -39,7 +39,7 @@ enum {
///
/// attrs[] contains the highlighting attribute for each cell.
///
-/// vcols[] countain the virtual columns in the line. -1 means not available
+/// vcols[] contains the virtual columns in the line. -1 means not available
/// (below last line), MAXCOL means after the end of the line.
///
/// line_offset[n] is the offset from chars[], attrs[] and vcols[] for the start
diff --git a/src/nvim/highlight.c b/src/nvim/highlight.c
index 29e5db7a96..df4bffdac3 100644
--- a/src/nvim/highlight.c
+++ b/src/nvim/highlight.c
@@ -40,13 +40,13 @@
static bool hlstate_active = false;
-static kvec_t(HlEntry) attr_entries = KV_INITIAL_VALUE;
-
-static Map(HlEntry, int) attr_entry_ids = MAP_INIT;
+static Set(HlEntry) attr_entries = SET_INIT;
static Map(int, int) combine_attr_entries = MAP_INIT;
static Map(int, int) blend_attr_entries = MAP_INIT;
static Map(int, int) blendthrough_attr_entries = MAP_INIT;
+#define attr_entry(i) attr_entries.keys[i]
+
/// highlight entries private to a namespace
static Map(ColorKey, ColorItem) ns_hls;
typedef int NSHlAttr[HLF_COUNT + 1];
@@ -55,8 +55,8 @@ static PMap(int) ns_hl_attr;
void highlight_init(void)
{
// index 0 is no attribute, add dummy entry:
- kv_push(attr_entries, ((HlEntry){ .attr = HLATTRS_INIT, .kind = kHlUnknown,
- .id1 = 0, .id2 = 0 }));
+ set_put(HlEntry, &attr_entries, ((HlEntry){ .attr = HLATTRS_INIT, .kind = kHlInvalid,
+ .id1 = 0, .id2 = 0 }));
}
/// @return true if hl table was reset
@@ -77,6 +77,7 @@ bool highlight_use_hlstate(void)
/// @return 0 for error.
static int get_attr_entry(HlEntry entry)
{
+ bool retried = false;
if (!hlstate_active) {
// This information will not be used, erase it and reduce the table size.
entry.kind = kHlUnknown;
@@ -84,17 +85,19 @@ static int get_attr_entry(HlEntry entry)
entry.id2 = 0;
}
- int id = map_get(HlEntry, int)(&attr_entry_ids, entry);
- if (id > 0) {
- return id;
+retry: {}
+ MhPutStatus status;
+ uint32_t k = set_put_idx(HlEntry, &attr_entries, entry, &status);
+ if (status == kMHExisting) {
+ return (int)k;
}
static bool recursive = false;
- if (kv_size(attr_entries) > MAX_TYPENR) {
+ if (set_size(&attr_entries) > MAX_TYPENR) {
// Running out of attribute entries! remove all attributes, and
// compute new ones for all groups.
// When called recursively, we are really out of numbers.
- if (recursive) {
+ if (recursive || retried) {
emsg(_("E424: Too many different highlighting attributes in use"));
return 0;
}
@@ -107,17 +110,12 @@ static int get_attr_entry(HlEntry entry)
// This entry is now invalid, don't put it
return 0;
}
+ retried = true;
+ goto retry;
}
- size_t next_id = kv_size(attr_entries);
- if (next_id > INT_MAX) {
- ELOG("The index on attr_entries has overflowed");
- return 0;
- }
- id = (int)next_id;
- kv_push(attr_entries, entry);
-
- map_put(HlEntry, int)(&attr_entry_ids, entry, id);
+ // new attr id, send event to remote ui:s
+ int id = (int)k;
Array inspect = hl_inspect(id);
@@ -131,10 +129,10 @@ static int get_attr_entry(HlEntry entry)
/// When a UI connects, we need to send it the table of highlights used so far.
void ui_send_all_hls(UI *ui)
{
- for (size_t i = 1; i < kv_size(attr_entries); i++) {
+ for (size_t i = 1; i < set_size(&attr_entries); i++) {
Array inspect = hl_inspect((int)i);
- remote_ui_hl_attr_define(ui, (Integer)i, kv_A(attr_entries, i).attr,
- kv_A(attr_entries, i).attr, inspect);
+ HlAttrs attr = attr_entry(i).attr;
+ remote_ui_hl_attr_define(ui, (Integer)i, attr, attr, inspect);
api_free_array(inspect);
}
for (size_t hlf = 0; hlf < HLF_COUNT; hlf++) {
@@ -364,7 +362,7 @@ void update_window_hl(win_T *wp, bool invalid)
// if blend= attribute is not set, 'winblend' value overrides it.
if (wp->w_floating && wp->w_p_winbl > 0) {
- HlEntry entry = kv_A(attr_entries, wp->w_hl_attr_normal);
+ HlEntry entry = attr_entry(wp->w_hl_attr_normal);
if (entry.attr.hl_blend == -1) {
entry.attr.hl_blend = (int)wp->w_p_winbl;
wp->w_hl_attr_normal = get_attr_entry(entry);
@@ -401,7 +399,7 @@ void update_window_hl(win_T *wp, bool invalid)
// if blend= attribute is not set, 'winblend' value overrides it.
if (wp->w_floating && wp->w_p_winbl > 0) {
- HlEntry entry = kv_A(attr_entries, wp->w_hl_attr_normalnc);
+ HlEntry entry = attr_entry(wp->w_hl_attr_normalnc);
if (entry.attr.hl_blend == -1) {
entry.attr.hl_blend = (int)wp->w_p_winbl;
wp->w_hl_attr_normalnc = get_attr_entry(entry);
@@ -490,8 +488,8 @@ int hl_get_term_attr(HlAttrs *aep)
void clear_hl_tables(bool reinit)
{
if (reinit) {
- kv_size(attr_entries) = 1;
- map_clear(HlEntry, &attr_entry_ids);
+ set_clear(HlEntry, &attr_entries);
+ highlight_init();
map_clear(int, &combine_attr_entries);
map_clear(int, &blend_attr_entries);
map_clear(int, &blendthrough_attr_entries);
@@ -500,8 +498,7 @@ void clear_hl_tables(bool reinit)
highlight_changed();
screen_invalidate_highlights();
} else {
- kv_destroy(attr_entries);
- map_destroy(HlEntry, &attr_entry_ids);
+ set_destroy(HlEntry, &attr_entries);
map_destroy(int, &combine_attr_entries);
map_destroy(int, &blend_attr_entries);
map_destroy(int, &blendthrough_attr_entries);
@@ -809,11 +806,11 @@ static int hl_cterm2rgb_color(int nr)
/// Get highlight attributes for a attribute code
HlAttrs syn_attr2entry(int attr)
{
- if (attr <= 0 || attr >= (int)kv_size(attr_entries)) {
+ if (attr <= 0 || attr >= (int)set_size(&attr_entries)) {
// invalid attribute code, or the tables were cleared
return HLATTRS_INIT;
}
- return kv_A(attr_entries, attr).attr;
+ return attr_entry(attr).attr;
}
/// Gets highlight description for id `attr_id` as a map.
@@ -825,7 +822,7 @@ Dictionary hl_get_attr_by_id(Integer attr_id, Boolean rgb, Arena *arena, Error *
return dic;
}
- if (attr_id <= 0 || attr_id >= (int)kv_size(attr_entries)) {
+ if (attr_id <= 0 || attr_id >= (int)set_size(&attr_entries)) {
api_set_error(err, kErrorTypeException,
"Invalid attribute id: %" PRId64, attr_id);
return dic;
@@ -1132,11 +1129,11 @@ Array hl_inspect(int attr)
static void hl_inspect_impl(Array *arr, int attr)
{
Dictionary item = ARRAY_DICT_INIT;
- if (attr <= 0 || attr >= (int)kv_size(attr_entries)) {
+ if (attr <= 0 || attr >= (int)set_size(&attr_entries)) {
return;
}
- HlEntry e = kv_A(attr_entries, attr);
+ HlEntry e = attr_entry(attr);
switch (e.kind) {
case kHlSyntax:
PUT(item, "kind", CSTR_TO_OBJ("syntax"));
@@ -1165,6 +1162,7 @@ static void hl_inspect_impl(Array *arr, int attr)
return;
case kHlUnknown:
+ case kHlInvalid:
return;
}
PUT(item, "id", INTEGER_OBJ(attr));
diff --git a/src/nvim/highlight_defs.h b/src/nvim/highlight_defs.h
index f690b57148..ae58ff8696 100644
--- a/src/nvim/highlight_defs.h
+++ b/src/nvim/highlight_defs.h
@@ -219,6 +219,7 @@ typedef enum {
kHlCombine,
kHlBlend,
kHlBlendThrough,
+ kHlInvalid,
} HlKind;
typedef struct {
diff --git a/src/nvim/highlight_group.c b/src/nvim/highlight_group.c
index c4d140d1e1..b970e752bb 100644
--- a/src/nvim/highlight_group.c
+++ b/src/nvim/highlight_group.c
@@ -1569,7 +1569,13 @@ Dictionary ns_get_hl_defs(NS ns_id, Dict(get_highlight) *opts, Arena *arena, Err
Boolean link = GET_BOOL_OR_TRUE(opts, get_highlight, link);
int id = -1;
if (HAS_KEY(opts, get_highlight, name)) {
- id = syn_check_group(opts->name.data, opts->name.size);
+ Boolean create = GET_BOOL_OR_TRUE(opts, get_highlight, create);
+ id = create ? syn_check_group(opts->name.data, opts->name.size)
+ : syn_name2id_len(opts->name.data, opts->name.size);
+ if (id == 0 && !create) {
+ Dictionary attrs = ARRAY_DICT_INIT;
+ return attrs;
+ }
} else if (HAS_KEY(opts, get_highlight, id)) {
id = (int)opts->id;
}
diff --git a/src/nvim/keycodes.c b/src/nvim/keycodes.c
index f2391cc541..55cd22f181 100644
--- a/src/nvim/keycodes.c
+++ b/src/nvim/keycodes.c
@@ -919,7 +919,8 @@ char *replace_termcodes(const char *const from, const size_t from_len, char **co
// Check for special <> keycodes, like "<C-S-LeftMouse>"
if (do_special && ((flags & REPTERM_DO_LT) || ((end - src) >= 3
&& strncmp(src, "<lt>", 4) != 0))) {
- // Replace <SID> by K_SNR <script-nr> _.
+ // Change <SID>Func to K_SNR <script-nr> _Func. This name is used
+ // for script-local user functions.
// (room: 5 * 6 = 30 bytes; needed: 3 + <nr> + 1 <= 14)
if (end - src >= 4 && STRNICMP(src, "<SID>", 5) == 0) {
if (sid_arg < 0 || (sid_arg == 0 && current_sctx.sc_sid <= 0)) {
diff --git a/src/nvim/macros.h b/src/nvim/macros.h
index 57cb298572..5eaf97ff87 100644
--- a/src/nvim/macros.h
+++ b/src/nvim/macros.h
@@ -82,15 +82,6 @@
#define READBIN "rb"
#define APPENDBIN "ab"
-// mch_open_rw(): invoke os_open() with third argument for user R/W.
-#if defined(UNIX) // open in rw------- mode
-# define MCH_OPEN_RW(n, f) os_open((n), (f), (mode_t)0600)
-#elif defined(MSWIN)
-# define MCH_OPEN_RW(n, f) os_open((n), (f), S_IREAD | S_IWRITE)
-#else
-# define MCH_OPEN_RW(n, f) os_open((n), (f), 0)
-#endif
-
#define REPLACE_NORMAL(s) (((s)& REPLACE_FLAG) && !((s)& VREPLACE_FLAG))
// MB_PTR_ADV(): advance a pointer to the next character, taking care of
diff --git a/src/nvim/map.c b/src/nvim/map.c
index e2c6443245..e1d0646083 100644
--- a/src/nvim/map.c
+++ b/src/nvim/map.c
@@ -25,6 +25,8 @@
#define equal_uint32_t equal_simple
#define hash_int(x) hash_uint32_t((uint32_t)(x))
#define equal_int equal_simple
+#define hash_int64_t(key) hash_uint64_t((uint64_t)key)
+#define equal_int64_t equal_simple
#if defined(ARCH_64)
# define hash_ptr_t(key) hash_uint64_t((uint64_t)(key))
@@ -182,13 +184,20 @@ void mh_clear(MapHash *h)
#undef VAL_NAME
#undef KEY_NAME
-#define KEY_NAME(x) x##HlEntry
+#define KEY_NAME(x) x##int64_t
#include "nvim/map_key_impl.c.h"
-#define VAL_NAME(x) quasiquote(x, int)
+#define VAL_NAME(x) quasiquote(x, ptr_t)
+#include "nvim/map_value_impl.c.h"
+#undef VAL_NAME
+#define VAL_NAME(x) quasiquote(x, int64_t)
#include "nvim/map_value_impl.c.h"
#undef VAL_NAME
#undef KEY_NAME
+#define KEY_NAME(x) x##HlEntry
+#include "nvim/map_key_impl.c.h"
+#undef KEY_NAME
+
#define KEY_NAME(x) x##ColorKey
#include "nvim/map_key_impl.c.h"
#define VAL_NAME(x) quasiquote(x, ColorItem)
diff --git a/src/nvim/map.h b/src/nvim/map.h
index a84d533262..23a5ea36a3 100644
--- a/src/nvim/map.h
+++ b/src/nvim/map.h
@@ -27,6 +27,7 @@ static const ptr_t value_init_ptr_t = NULL;
static const ssize_t value_init_ssize_t = -1;
static const uint32_t value_init_uint32_t = 0;
static const uint64_t value_init_uint64_t = 0;
+static const int64_t value_init_int64_t = 0;
static const String value_init_String = STRING_INIT;
static const ColorItem value_init_ColorItem = COLOR_ITEM_INITIALIZER;
@@ -123,6 +124,7 @@ KEY_DECLS(int)
KEY_DECLS(cstr_t)
KEY_DECLS(ptr_t)
KEY_DECLS(uint64_t)
+KEY_DECLS(int64_t)
KEY_DECLS(uint32_t)
KEY_DECLS(String)
KEY_DECLS(HlEntry)
@@ -137,8 +139,9 @@ MAP_DECLS(uint32_t, ptr_t)
MAP_DECLS(uint64_t, ptr_t)
MAP_DECLS(uint64_t, ssize_t)
MAP_DECLS(uint64_t, uint64_t)
+MAP_DECLS(int64_t, int64_t)
+MAP_DECLS(int64_t, ptr_t)
MAP_DECLS(uint32_t, uint32_t)
-MAP_DECLS(HlEntry, int)
MAP_DECLS(String, int)
MAP_DECLS(int, String)
MAP_DECLS(ColorKey, ColorItem)
@@ -146,6 +149,7 @@ MAP_DECLS(ColorKey, ColorItem)
#define set_has(T, set, key) set_has_##T(set, key)
#define set_put(T, set, key) set_put_##T(set, key, NULL)
#define set_put_ref(T, set, key, key_alloc) set_put_##T(set, key, key_alloc)
+#define set_put_idx(T, set, key, status) mh_put_##T(set, key, status)
#define set_del(T, set, key) set_del_##T(set, key)
#define set_destroy(T, set) (xfree((set)->keys), xfree((set)->h.hash))
#define set_clear(T, set) mh_clear(&(set)->h)
diff --git a/src/nvim/marktree.c b/src/nvim/marktree.c
index d07d176b6d..d8b8dbba29 100644
--- a/src/nvim/marktree.c
+++ b/src/nvim/marktree.c
@@ -14,8 +14,6 @@
// Use marktree_itr_current and marktree_itr_next/prev to read marks in a loop.
// marktree_del_itr deletes the current mark of the iterator and implicitly
// moves the iterator to the next mark.
-//
-// Work is ongoing to fully support ranges (mark pairs).
// Copyright notice for kbtree (included in heavily modified form):
//
@@ -58,19 +56,27 @@
#include "nvim/memory.h"
#include "nvim/pos.h"
+// only for debug functions
+#include "nvim/api/private/helpers.h"
+
#define T MT_BRANCH_FACTOR
-#define ILEN (sizeof(mtnode_t) + (2 * T) * sizeof(void *))
+#define ILEN (sizeof(MTNode) + (2 * T) * sizeof(void *))
#define ID_INCR (((uint64_t)1) << 2)
-#define rawkey(itr) ((itr)->node->key[(itr)->i])
+#define rawkey(itr) ((itr)->x->key[(itr)->i])
-static bool pos_leq(mtpos_t a, mtpos_t b)
+static bool pos_leq(MTPos a, MTPos b)
{
return a.row < b.row || (a.row == b.row && a.col <= b.col);
}
-static void relative(mtpos_t base, mtpos_t *val)
+static bool pos_less(MTPos a, MTPos b)
+{
+ return !pos_leq(b, a);
+}
+
+static void relative(MTPos base, MTPos *val)
{
assert(pos_leq(base, *val));
if (val->row == base.row) {
@@ -81,7 +87,7 @@ static void relative(mtpos_t base, mtpos_t *val)
}
}
-static void unrelative(mtpos_t base, mtpos_t *val)
+static void unrelative(MTPos base, MTPos *val)
{
if (val->row == 0) {
val->row = base.row;
@@ -91,7 +97,7 @@ static void unrelative(mtpos_t base, mtpos_t *val)
}
}
-static void compose(mtpos_t *base, mtpos_t val)
+static void compose(MTPos *base, MTPos val)
{
if (val.row == 0) {
base->col += val.col;
@@ -101,12 +107,21 @@ static void compose(mtpos_t *base, mtpos_t val)
}
}
+// Used by `marktree_splice`. Need to keep track of marks which moved
+// in order to repair intersections.
+typedef struct {
+ uint64_t id;
+ MTNode *old, *new;
+ int old_i, new_i;
+} Damage;
+typedef kvec_withinit_t(Damage, 8) DamageList;
+
#ifdef INCLUDE_GENERATED_DECLARATIONS
# include "marktree.c.generated.h"
#endif
#define mt_generic_cmp(a, b) (((b) < (a)) - ((a) < (b)))
-static int key_cmp(mtkey_t a, mtkey_t b)
+static int key_cmp(MTKey a, MTKey b)
{
int cmp = mt_generic_cmp(a.pos.row, b.pos.row);
if (cmp != 0) {
@@ -116,18 +131,25 @@ static int key_cmp(mtkey_t a, mtkey_t b)
if (cmp != 0) {
return cmp;
}
- // NB: keeping the events at the same pos sorted by id is actually not
- // necessary only make sure that START is before END etc.
- return mt_generic_cmp(a.flags, b.flags);
+
+ // TODO(bfredl): MT_FLAG_REAL could go away if we fix marktree_getp_aux for real
+ const uint16_t cmp_mask = MT_FLAG_RIGHT_GRAVITY | MT_FLAG_END | MT_FLAG_REAL | MT_FLAG_LAST;
+ return mt_generic_cmp(a.flags & cmp_mask, b.flags & cmp_mask);
}
-static inline int marktree_getp_aux(const mtnode_t *x, mtkey_t k, int *r)
+/// @return position of k if it exists in the node, otherwise the position
+/// it should be inserted, which ranges from 0 to x->n _inclusively_
+/// @param match (optional) set to TRUE if match (pos, gravity) was found
+static inline int marktree_getp_aux(const MTNode *x, MTKey k, bool *match)
{
- int tr, *rr, begin = 0, end = x->n;
+ bool dummy_match;
+ bool *m = match ? match : &dummy_match;
+
+ int begin = 0, end = x->n;
if (x->n == 0) {
+ *m = false;
return -1;
}
- rr = r ? r : &tr;
while (begin < end) {
int mid = (begin + end) >> 1;
if (key_cmp(x->key[mid], k) < 0) {
@@ -137,47 +159,84 @@ static inline int marktree_getp_aux(const mtnode_t *x, mtkey_t k, int *r)
}
}
if (begin == x->n) {
- *rr = 1; return x->n - 1;
+ *m = false;
+ return x->n - 1;
}
- if ((*rr = key_cmp(k, x->key[begin])) < 0) {
+ if (!(*m = (key_cmp(k, x->key[begin]) == 0))) {
begin--;
}
return begin;
}
-static inline void refkey(MarkTree *b, mtnode_t *x, int i)
+static inline void refkey(MarkTree *b, MTNode *x, int i)
{
pmap_put(uint64_t)(b->id2node, mt_lookup_key(x->key[i]), x);
}
+static MTNode *id2node(MarkTree *b, uint64_t id)
+{
+ return pmap_get(uint64_t)(b->id2node, id);
+}
+
// put functions
// x must be an internal node, which is not full
// x->ptr[i] should be a full node, i e x->ptr[i]->n == 2*T-1
-static inline void split_node(MarkTree *b, mtnode_t *x, const int i)
+static inline void split_node(MarkTree *b, MTNode *x, const int i, MTKey next)
{
- mtnode_t *y = x->ptr[i];
- mtnode_t *z;
- z = (mtnode_t *)xcalloc(1, y->level ? ILEN : sizeof(mtnode_t));
- b->n_nodes++;
+ MTNode *y = x->ptr[i];
+ MTNode *z = marktree_alloc_node(b, y->level);
z->level = y->level;
z->n = T - 1;
- memcpy(z->key, &y->key[T], sizeof(mtkey_t) * (T - 1));
+
+ // tricky: we might split a node in between inserting the start node and the end
+ // node of the same pair. Then we must not intersect this id yet (done later
+ // in marktree_intersect_pair).
+ uint64_t last_start = mt_end(next) ? mt_lookup_id(next.ns, next.id, false) : MARKTREE_END_FLAG;
+
+ // no alloc in the common case (less than 4 intersects)
+ kvi_copy(z->intersect, y->intersect);
+
+ if (!y->level) {
+ uint64_t pi = pseudo_index(y, 0); // note: sloppy pseudo-index
+ for (int j = 0; j < T; j++) {
+ MTKey k = y->key[j];
+ uint64_t pi_end = pseudo_index_for_id(b, mt_lookup_id(k.ns, k.id, true), true);
+ if (mt_start(k) && pi_end > pi && mt_lookup_key(k) != last_start) {
+ intersect_node(b, z, mt_lookup_id(k.ns, k.id, false));
+ }
+ }
+
+ // note: y->key[T-1] is moved up and thus checked for both
+ for (int j = T - 1; j < (T * 2) - 1; j++) {
+ MTKey k = y->key[j];
+ uint64_t pi_start = pseudo_index_for_id(b, mt_lookup_id(k.ns, k.id, false), true);
+ if (mt_end(k) && pi_start > 0 && pi_start < pi) {
+ intersect_node(b, y, mt_lookup_id(k.ns, k.id, false));
+ }
+ }
+ }
+
+ memcpy(z->key, &y->key[T], sizeof(MTKey) * (T - 1));
for (int j = 0; j < T - 1; j++) {
refkey(b, z, j);
}
if (y->level) {
- memcpy(z->ptr, &y->ptr[T], sizeof(mtnode_t *) * T);
+ memcpy(z->ptr, &y->ptr[T], sizeof(MTNode *) * T);
for (int j = 0; j < T; j++) {
z->ptr[j]->parent = z;
+ z->ptr[j]->p_idx = (int16_t)j;
}
}
y->n = T - 1;
memmove(&x->ptr[i + 2], &x->ptr[i + 1],
- sizeof(mtnode_t *) * (size_t)(x->n - i));
+ sizeof(MTNode *) * (size_t)(x->n - i));
x->ptr[i + 1] = z;
z->parent = x; // == y->parent
- memmove(&x->key[i + 1], &x->key[i], sizeof(mtkey_t) * (size_t)(x->n - i));
+ for (int j = i + 1; j < x->n + 2; j++) {
+ x->ptr[j]->p_idx = (int16_t)j;
+ }
+ memmove(&x->key[i + 1], &x->key[i], sizeof(MTKey) * (size_t)(x->n - i));
// move key to internal layer:
x->key[i] = y->key[T - 1];
@@ -190,25 +249,32 @@ static inline void split_node(MarkTree *b, mtnode_t *x, const int i)
if (i > 0) {
unrelative(x->key[i - 1].pos, &x->key[i].pos);
}
+
+ if (y->level) {
+ bubble_up(y);
+ bubble_up(z);
+ } else {
+ // code above goes here
+ }
}
// x must not be a full node (even if there might be internal space)
-static inline void marktree_putp_aux(MarkTree *b, mtnode_t *x, mtkey_t k)
+static inline void marktree_putp_aux(MarkTree *b, MTNode *x, MTKey k)
{
- int i;
+ // TODO(bfredl): ugh, make sure this is the _last_ valid (pos, gravity) position,
+ // to minimize movement
+ int i = marktree_getp_aux(x, k, NULL) + 1;
if (x->level == 0) {
- i = marktree_getp_aux(x, k, 0);
- if (i != x->n - 1) {
- memmove(&x->key[i + 2], &x->key[i + 1],
- (size_t)(x->n - i - 1) * sizeof(mtkey_t));
+ if (i != x->n) {
+ memmove(&x->key[i + 1], &x->key[i],
+ (size_t)(x->n - i) * sizeof(MTKey));
}
- x->key[i + 1] = k;
- refkey(b, x, i + 1);
+ x->key[i] = k;
+ refkey(b, x, i);
x->n++;
} else {
- i = marktree_getp_aux(x, k, 0) + 1;
if (x->ptr[i]->n == 2 * T - 1) {
- split_node(b, x, i);
+ split_node(b, x, i, k);
if (key_cmp(k, x->key[i]) > 0) {
i++;
}
@@ -220,7 +286,7 @@ static inline void marktree_putp_aux(MarkTree *b, mtnode_t *x, mtkey_t k)
}
}
-void marktree_put(MarkTree *b, mtkey_t key, int end_row, int end_col, bool end_right)
+void marktree_put(MarkTree *b, MTKey key, int end_row, int end_col, bool end_right)
{
assert(!(key.flags & ~MT_FLAG_EXTERNAL_MASK));
if (end_row >= 0) {
@@ -230,32 +296,151 @@ void marktree_put(MarkTree *b, mtkey_t key, int end_row, int end_col, bool end_r
marktree_put_key(b, key);
if (end_row >= 0) {
- mtkey_t end_key = key;
+ MTKey end_key = key;
end_key.flags = (uint16_t)((uint16_t)(key.flags & ~MT_FLAG_RIGHT_GRAVITY)
|(uint16_t)MT_FLAG_END
|(uint16_t)(end_right ? MT_FLAG_RIGHT_GRAVITY : 0));
- end_key.pos = (mtpos_t){ end_row, end_col };
+ end_key.pos = (MTPos){ end_row, end_col };
marktree_put_key(b, end_key);
+ MarkTreeIter itr[1] = { 0 }, end_itr[1] = { 0 };
+ marktree_lookup(b, mt_lookup_key(key), itr);
+ marktree_lookup(b, mt_lookup_key(end_key), end_itr);
+
+ marktree_intersect_pair(b, mt_lookup_key(key), itr, end_itr, false);
+ }
+}
+
+// this is currently not used very often, but if it was it should use binary search
+static bool intersection_has(Intersection *x, uint64_t id)
+{
+ for (size_t i = 0; i < kv_size(*x); i++) {
+ if (kv_A(*x, i) == id) {
+ return true;
+ } else if (kv_A(*x, i) >= id) {
+ return false;
+ }
}
+ return false;
}
-void marktree_put_key(MarkTree *b, mtkey_t k)
+static void intersect_node(MarkTree *b, MTNode *x, uint64_t id)
+{
+ assert(!(id & MARKTREE_END_FLAG));
+ kvi_pushp(x->intersect);
+ // optimized for the common case: new key is always in the end
+ for (ssize_t i = (ssize_t)kv_size(x->intersect) - 1; i >= 0; i--) {
+ if (i > 0 && kv_A(x->intersect, i - 1) > id) {
+ kv_A(x->intersect, i) = kv_A(x->intersect, i - 1);
+ } else {
+ kv_A(x->intersect, i) = id;
+ break;
+ }
+ }
+}
+
+static void unintersect_node(MarkTree *b, MTNode *x, uint64_t id, bool strict)
+{
+ assert(!(id & MARKTREE_END_FLAG));
+ bool seen = false;
+ size_t i;
+ for (i = 0; i < kv_size(x->intersect); i++) {
+ if (kv_A(x->intersect, i) < id) {
+ continue;
+ } else if (kv_A(x->intersect, i) == id) {
+ seen = true;
+ break;
+ } else { // (kv_A(x->intersect, i) > id)
+ break;
+ }
+ }
+ if (strict) {
+ assert(seen);
+ }
+
+ if (seen) {
+ if (i < kv_size(x->intersect) - 1) {
+ memmove(&kv_A(x->intersect, i), &kv_A(x->intersect, i + 1), (kv_size(x->intersect) - i - 1) *
+ sizeof(kv_A(x->intersect, i)));
+ }
+ kv_size(x->intersect)--;
+ }
+}
+
+/// @param itr mutated
+/// @param end_itr not mutated
+void marktree_intersect_pair(MarkTree *b, uint64_t id, MarkTreeIter *itr, MarkTreeIter *end_itr,
+ bool delete)
+{
+ int lvl = 0, maxlvl = MIN(itr->lvl, end_itr->lvl);
+#define iat(itr, l, q) ((l == itr->lvl) ? itr->i + q : itr->s[l].i)
+ for (; lvl < maxlvl; lvl++) {
+ if (itr->s[lvl].i > end_itr->s[lvl].i) {
+ return; // empty range
+ } else if (itr->s[lvl].i < end_itr->s[lvl].i) {
+ break; // work to do
+ }
+ }
+ if (lvl == maxlvl && iat(itr, lvl, 1) > iat(end_itr, lvl, 0)) {
+ return; // empty range
+ }
+
+ while (itr->x) {
+ bool skip = false;
+ if (itr->x == end_itr->x) {
+ if (itr->x->level == 0 || itr->i >= end_itr->i) {
+ break;
+ } else {
+ skip = true;
+ }
+ } else if (itr->lvl > lvl) {
+ skip = true;
+ } else {
+ if (iat(itr, lvl, 1) < iat(end_itr, lvl, 1)) {
+ skip = true;
+ } else {
+ lvl++;
+ }
+ }
+
+ if (skip) {
+ if (itr->x->level) {
+ MTNode *x = itr->x->ptr[itr->i + 1];
+ if (delete) {
+ unintersect_node(b, x, id, true);
+ } else {
+ intersect_node(b, x, id);
+ }
+ }
+ }
+ marktree_itr_next_skip(b, itr, skip, true, NULL);
+ }
+#undef iat
+}
+
+static MTNode *marktree_alloc_node(MarkTree *b, bool internal)
+{
+ MTNode *x = xcalloc(1, internal ? ILEN : sizeof(MTNode));
+ kvi_init(x->intersect);
+ b->n_nodes++;
+ return x;
+}
+
+void marktree_put_key(MarkTree *b, MTKey k)
{
k.flags |= MT_FLAG_REAL; // let's be real.
if (!b->root) {
- b->root = (mtnode_t *)xcalloc(1, ILEN);
- b->n_nodes++;
+ b->root = marktree_alloc_node(b, true);
}
- mtnode_t *r, *s;
+ MTNode *r, *s;
b->n_keys++;
r = b->root;
if (r->n == 2 * T - 1) {
- b->n_nodes++;
- s = (mtnode_t *)xcalloc(1, ILEN);
+ s = marktree_alloc_node(b, true);
b->root = s; s->level = r->level + 1; s->n = 0;
s->ptr[0] = r;
r->parent = s;
- split_node(b, s, 0);
+ r->p_idx = 0;
+ split_node(b, s, 0, k);
r = s;
}
marktree_putp_aux(b, r, k);
@@ -289,22 +474,31 @@ uint64_t marktree_del_itr(MarkTree *b, MarkTreeIter *itr, bool rev)
{
int adjustment = 0;
- mtnode_t *cur = itr->node;
+ MTNode *cur = itr->x;
int curi = itr->i;
uint64_t id = mt_lookup_key(cur->key[curi]);
- // fprintf(stderr, "\nDELET %lu\n", id);
- mtkey_t raw = rawkey(itr);
+ MTKey raw = rawkey(itr);
uint64_t other = 0;
- if (mt_paired(raw)) {
- other = mt_lookup_id(raw.ns, raw.id, !mt_end(raw));
+ if (mt_paired(raw) && !(raw.flags & MT_FLAG_ORPHANED)) {
+ other = mt_lookup_key_side(raw, !mt_end(raw));
+
+ MarkTreeIter other_itr[1];
+ marktree_lookup(b, other, other_itr);
+ rawkey(other_itr).flags |= MT_FLAG_ORPHANED;
+ // Remove intersect markers. NB: must match exactly!
+ if (mt_start(raw)) {
+ MarkTreeIter this_itr[1] = { *itr }; // mutated copy
+ marktree_intersect_pair(b, id, this_itr, other_itr, true);
+ } else {
+ marktree_intersect_pair(b, other, other_itr, itr, true);
+ }
}
- if (itr->node->level) {
+ if (itr->x->level) {
if (rev) {
abort();
} else {
- // fprintf(stderr, "INTERNAL %d\n", cur->level);
// steal previous node
marktree_itr_prev(b, itr);
adjustment = -1;
@@ -312,41 +506,72 @@ uint64_t marktree_del_itr(MarkTree *b, MarkTreeIter *itr, bool rev)
}
// 3.
- mtnode_t *x = itr->node;
+ MTNode *x = itr->x;
assert(x->level == 0);
- mtkey_t intkey = x->key[itr->i];
+ MTKey intkey = x->key[itr->i];
if (x->n > itr->i + 1) {
memmove(&x->key[itr->i], &x->key[itr->i + 1],
- sizeof(mtkey_t) * (size_t)(x->n - itr->i - 1));
+ sizeof(MTKey) * (size_t)(x->n - itr->i - 1));
}
x->n--;
+ b->n_keys--;
+ pmap_del(uint64_t)(b->id2node, id, NULL);
+
// 4.
// if (adjustment == 1) {
// abort();
// }
if (adjustment == -1) {
int ilvl = itr->lvl - 1;
- const mtnode_t *lnode = x;
+ MTNode *lnode = x;
+ uint64_t start_id = 0;
+ bool did_bubble = false;
+ if (mt_end(intkey)) {
+ start_id = mt_lookup_key_side(intkey, false);
+ }
do {
- const mtnode_t *const p = lnode->parent;
+ MTNode *p = lnode->parent;
if (ilvl < 0) {
abort();
}
- const int i = itr->s[ilvl].i;
+ int i = itr->s[ilvl].i;
assert(p->ptr[i] == lnode);
if (i > 0) {
unrelative(p->key[i - 1].pos, &intkey.pos);
}
+
+ if (p != cur && start_id) {
+ if (intersection_has(&p->ptr[0]->intersect, start_id)) {
+ // if not the first time, we need to undo the addition in the
+ // previous step (`intersect_node` just below)
+ int last = (lnode != x) ? 1 : 0;
+ for (int k = 0; k < p->n + last; k++) { // one less as p->ptr[n] is the last
+ unintersect_node(b, p->ptr[k], start_id, true);
+ }
+ intersect_node(b, p, start_id);
+ did_bubble = true;
+ }
+ }
+
lnode = p;
ilvl--;
} while (lnode != cur);
- mtkey_t deleted = cur->key[curi];
+ MTKey deleted = cur->key[curi];
cur->key[curi] = intkey;
refkey(b, cur, curi);
+ // if `did_bubble` then we already added `start_id` to some parent
+ if (mt_end(cur->key[curi]) && !did_bubble) {
+ uint64_t pi = pseudo_index(x, 0); // note: sloppy pseudo-index
+ uint64_t pi_start = pseudo_index_for_id(b, start_id, true);
+ if (pi_start > 0 && pi_start < pi) {
+ intersect_node(b, x, start_id);
+ }
+ }
+
relative(intkey.pos, &deleted.pos);
- mtnode_t *y = cur->ptr[curi + 1];
+ MTNode *y = cur->ptr[curi + 1];
if (deleted.pos.row || deleted.pos.col) {
while (y) {
for (int k = 0; k < y->n; k++) {
@@ -358,46 +583,48 @@ uint64_t marktree_del_itr(MarkTree *b, MarkTreeIter *itr, bool rev)
itr->i--;
}
- b->n_keys--;
- pmap_del(uint64_t)(b->id2node, id, NULL);
-
// 5.
bool itr_dirty = false;
int rlvl = itr->lvl - 1;
int *lasti = &itr->i;
+ MTPos ppos = itr->pos;
while (x != b->root) {
assert(rlvl >= 0);
- mtnode_t *p = x->parent;
+ MTNode *p = x->parent;
if (x->n >= T - 1) {
// we are done, if this node is fine the rest of the tree will be
break;
}
int pi = itr->s[rlvl].i;
assert(p->ptr[pi] == x);
+ if (pi > 0) {
+ ppos.row -= p->key[pi - 1].pos.row;
+ ppos.col = itr->s[rlvl].oldcol;
+ }
+ // ppos is now the pos of p
+
if (pi > 0 && p->ptr[pi - 1]->n > T - 1) {
*lasti += 1;
itr_dirty = true;
// steal one key from the left neighbour
- pivot_right(b, p, pi - 1);
+ pivot_right(b, ppos, p, pi - 1);
break;
} else if (pi < p->n && p->ptr[pi + 1]->n > T - 1) {
// steal one key from right neighbour
- pivot_left(b, p, pi);
+ pivot_left(b, ppos, p, pi);
break;
} else if (pi > 0) {
- // fprintf(stderr, "LEFT ");
assert(p->ptr[pi - 1]->n == T - 1);
// merge with left neighbour
*lasti += T;
x = merge_node(b, p, pi - 1);
if (lasti == &itr->i) {
// TRICKY: we merged the node the iterator was on
- itr->node = x;
+ itr->x = x;
}
itr->s[rlvl].i--;
itr_dirty = true;
} else {
- // fprintf(stderr, "RIGHT ");
assert(pi < p->n && p->ptr[pi + 1]->n == T - 1);
merge_node(b, p, pi);
// no iter adjustment needed
@@ -414,18 +641,18 @@ uint64_t marktree_del_itr(MarkTree *b, MarkTreeIter *itr, bool rev)
itr->lvl--;
}
if (b->root->level) {
- mtnode_t *oldroot = b->root;
+ MTNode *oldroot = b->root;
b->root = b->root->ptr[0];
b->root->parent = NULL;
- xfree(oldroot);
+ marktree_free_node(b, oldroot);
} else {
// no items, nothing for iterator to point to
// not strictly needed, should handle delete right-most mark anyway
- itr->node = NULL;
+ itr->x = NULL;
}
}
- if (itr->node && itr_dirty) {
+ if (itr->x && itr_dirty) {
marktree_itr_fix_pos(b, itr);
}
@@ -441,10 +668,10 @@ uint64_t marktree_del_itr(MarkTree *b, MarkTreeIter *itr, bool rev)
marktree_itr_next(b, itr);
marktree_itr_next(b, itr);
} else {
- if (itr->node && itr->i >= itr->node->n) {
+ if (itr->x && itr->i >= itr->x->n) {
// we deleted the last key of a leaf node
// go to the inner key after that.
- assert(itr->node->level == 0);
+ assert(itr->x->level == 0);
marktree_itr_next(b, itr);
}
}
@@ -452,9 +679,229 @@ uint64_t marktree_del_itr(MarkTree *b, MarkTreeIter *itr, bool rev)
return other;
}
-static mtnode_t *merge_node(MarkTree *b, mtnode_t *p, int i)
+/// similar to intersect_common but modify x and y in place to retain
+/// only the items which are NOT in common
+static void intersect_merge(Intersection *restrict m, Intersection *restrict x,
+ Intersection *restrict y)
{
- mtnode_t *x = p->ptr[i], *y = p->ptr[i + 1];
+ size_t xi = 0, yi = 0;
+ size_t xn = 0, yn = 0;
+ while (xi < kv_size(*x) && yi < kv_size(*y)) {
+ if (kv_A(*x, xi) == kv_A(*y, yi)) {
+ // TODO(bfredl): kvi_pushp is actually quite complex, break out kvi_resize() to a function?
+ kvi_push(*m, kv_A(*x, xi));
+ xi++;
+ yi++;
+ } else if (kv_A(*x, xi) < kv_A(*y, yi)) {
+ kv_A(*x, xn++) = kv_A(*x, xi++);
+ } else {
+ kv_A(*y, yn++) = kv_A(*y, yi++);
+ }
+ }
+
+ if (xi < kv_size(*x)) {
+ memmove(&kv_A(*x, xn), &kv_A(*x, xi), sizeof(kv_A(*x, xn)) * (kv_size(*x) - xi));
+ xn += kv_size(*x) - xi;
+ }
+ if (yi < kv_size(*y)) {
+ memmove(&kv_A(*y, yn), &kv_A(*y, yi), sizeof(kv_A(*y, yn)) * (kv_size(*y) - yi));
+ yn += kv_size(*y) - yi;
+ }
+
+ kv_size(*x) = xn;
+ kv_size(*y) = yn;
+}
+
+// w used to be a child of x but it is now a child of y, adjust intersections accordingly
+// @param[out] d are intersections which should be added to the old children of y
+static void intersect_mov(Intersection *restrict x, Intersection *restrict y,
+ Intersection *restrict w, Intersection *restrict d)
+{
+ size_t wi = 0, yi = 0;
+ size_t wn = 0, yn = 0;
+ size_t xi = 0;
+ while (wi < kv_size(*w) || xi < kv_size(*x)) {
+ if (wi < kv_size(*w) && (xi >= kv_size(*x) || kv_A(*x, xi) >= kv_A(*w, wi))) {
+ if (xi < kv_size(*x) && kv_A(*x, xi) == kv_A(*w, wi)) {
+ xi++;
+ }
+ // now w < x strictly
+ while (yi < kv_size(*y) && kv_A(*y, yi) < kv_A(*w, wi)) {
+ kvi_push(*d, kv_A(*y, yi));
+ yi++;
+ }
+ if (yi < kv_size(*y) && kv_A(*y, yi) == kv_A(*w, wi)) {
+ kv_A(*y, yn++) = kv_A(*y, yi++);
+ wi++;
+ } else {
+ kv_A(*w, wn++) = kv_A(*w, wi++);
+ }
+ } else {
+ // x < w strictly
+ while (yi < kv_size(*y) && kv_A(*y, yi) < kv_A(*x, xi)) {
+ kvi_push(*d, kv_A(*y, yi));
+ yi++;
+ }
+ if (yi < kv_size(*y) && kv_A(*y, yi) == kv_A(*x, xi)) {
+ kv_A(*y, yn++) = kv_A(*y, yi++);
+ xi++;
+ } else {
+ // add kv_A(x, xi) at kv_A(w, wn), pushing up wi if wi == wn
+ if (wi == wn) {
+ size_t n = kv_size(*w) - wn;
+ kvi_pushp(*w);
+ if (n > 0) {
+ memmove(&kv_A(*w, wn + 1), &kv_A(*w, wn), n * sizeof(kv_A(*w, 0)));
+ }
+ kv_A(*w, wi) = kv_A(*x, xi);
+ wn++;
+ wi++; // no need to consider the added element again
+ } else {
+ assert(wn < wi);
+ kv_A(*w, wn++) = kv_A(*x, xi);
+ }
+ xi++;
+ }
+ }
+ }
+ if (yi < kv_size(*y)) {
+ // move remaining items to d
+ size_t n = kv_size(*y) - yi; // at least one
+ kvi_ensure_more_space(*d, n);
+ memcpy(&kv_A(*d, kv_size(*d)), &kv_A(*y, yi), n * sizeof(kv_A(*d, 0)));
+ kv_size(*d) += n;
+ }
+ kv_size(*w) = wn;
+ kv_size(*y) = yn;
+}
+
+bool intersect_mov_test(uint64_t *x, size_t nx, uint64_t *y, size_t ny, uint64_t *win, size_t nwin,
+ uint64_t *wout, size_t *nwout, uint64_t *dout, size_t *ndout)
+{
+ // x is immutable in the context of intersect_mov. y might shrink, but we
+ // don't care about that (we get the deleted ones in d)
+ Intersection xi = { .items = x, .size = nx };
+ Intersection yi = { .items = y, .size = ny };
+
+ Intersection w;
+ kvi_init(w);
+ for (size_t i = 0; i < nwin; i++) {
+ kvi_push(w, win[i]);
+ }
+ Intersection d;
+ kvi_init(d);
+
+ intersect_mov(&xi, &yi, &w, &d);
+
+ if (w.size > *nwout || d.size > *ndout) {
+ return false;
+ }
+
+ memcpy(wout, w.items, sizeof(w.items[0]) * w.size);
+ *nwout = w.size;
+
+ memcpy(dout, d.items, sizeof(d.items[0]) * d.size);
+ *ndout = d.size;
+
+ return true;
+}
+
+/// intersection: i = x & y
+static void intersect_common(Intersection *i, Intersection *x, Intersection *y)
+{
+ size_t xi = 0, yi = 0;
+ while (xi < kv_size(*x) && yi < kv_size(*y)) {
+ if (kv_A(*x, xi) == kv_A(*y, yi)) {
+ kvi_push(*i, kv_A(*x, xi));
+ xi++;
+ yi++;
+ } else if (kv_A(*x, xi) < kv_A(*y, yi)) {
+ xi++;
+ } else {
+ yi++;
+ }
+ }
+}
+
+// inplace union: x |= y
+static void intersect_add(Intersection *x, Intersection *y)
+{
+ size_t xi = 0, yi = 0;
+ while (xi < kv_size(*x) && yi < kv_size(*y)) {
+ if (kv_A(*x, xi) == kv_A(*y, yi)) {
+ xi++;
+ yi++;
+ } else if (kv_A(*y, yi) < kv_A(*x, xi)) {
+ size_t n = kv_size(*x) - xi; // at least one
+ kvi_pushp(*x);
+ memmove(&kv_A(*x, xi + 1), &kv_A(*x, xi), n * sizeof(kv_A(*x, 0)));
+ kv_A(*x, xi) = kv_A(*y, yi);
+ xi++; // newly added element
+ yi++;
+ } else {
+ xi++;
+ }
+ }
+ if (yi < kv_size(*y)) {
+ size_t n = kv_size(*y) - yi; // at least one
+ kvi_ensure_more_space(*x, n);
+ memcpy(&kv_A(*x, kv_size(*x)), &kv_A(*y, yi), n * sizeof(kv_A(*x, 0)));
+ kv_size(*x) += n;
+ }
+}
+
+// inplace asymmetric difference: x &= ~y
+static void intersect_sub(Intersection *restrict x, Intersection *restrict y)
+{
+ size_t xi = 0, yi = 0;
+ size_t xn = 0;
+ while (xi < kv_size(*x) && yi < kv_size(*y)) {
+ if (kv_A(*x, xi) == kv_A(*y, yi)) {
+ xi++;
+ yi++;
+ } else if (kv_A(*x, xi) < kv_A(*y, yi)) {
+ kv_A(*x, xn++) = kv_A(*x, xi++);
+ } else {
+ yi++;
+ }
+ }
+ if (xi < kv_size(*x)) {
+ size_t n = kv_size(*x) - xi;
+ if (xn < xi) { // otherwise xn == xi
+ memmove(&kv_A(*x, xn), &kv_A(*x, xi), n * sizeof(kv_A(*x, 0)));
+ }
+ xn += n;
+ }
+ kv_size(*x) = xn;
+}
+
+/// x is a node which shrunk, or the half of a split
+///
+/// this means that intervals which previously intersected all the (current)
+/// child nodes, now instead intersects `x` itself.
+static void bubble_up(MTNode *x)
+{
+ Intersection xi;
+ kvi_init(xi);
+ // due to invariants, the largest subset of _all_ subnodes is the intersection
+ // between the first and the last
+ intersect_common(&xi, &x->ptr[0]->intersect, &x->ptr[x->n]->intersect);
+ if (kv_size(xi)) {
+ for (int i = 0; i < x->n + 1; i++) {
+ intersect_sub(&x->ptr[i]->intersect, &xi);
+ }
+ intersect_add(&x->intersect, &xi);
+ }
+ kvi_destroy(xi);
+}
+
+static MTNode *merge_node(MarkTree *b, MTNode *p, int i)
+{
+ MTNode *x = p->ptr[i], *y = p->ptr[i + 1];
+ Intersection m;
+ kvi_init(m);
+
+ intersect_merge(&m, &x->intersect, &y->intersect);
x->key[x->n] = p->key[i];
refkey(b, x, x->n);
@@ -462,35 +909,78 @@ static mtnode_t *merge_node(MarkTree *b, mtnode_t *p, int i)
relative(p->key[i - 1].pos, &x->key[x->n].pos);
}
- memmove(&x->key[x->n + 1], y->key, (size_t)y->n * sizeof(mtkey_t));
+ memmove(&x->key[x->n + 1], y->key, (size_t)y->n * sizeof(MTKey));
for (int k = 0; k < y->n; k++) {
refkey(b, x, x->n + 1 + k);
unrelative(x->key[x->n].pos, &x->key[x->n + 1 + k].pos);
}
if (x->level) {
- memmove(&x->ptr[x->n + 1], y->ptr, ((size_t)y->n + 1) * sizeof(mtnode_t *));
- for (int k = 0; k < y->n + 1; k++) {
- x->ptr[x->n + k + 1]->parent = x;
+ // bubble down: ranges that intersected old-x but not old-y or vice versa
+ // must be moved to their respective children
+ memmove(&x->ptr[x->n + 1], y->ptr, ((size_t)y->n + 1) * sizeof(MTNode *));
+ for (int k = 0; k < x->n + 1; k++) {
+ // TODO(bfredl): dedicated impl for "Z |= Y"
+ for (size_t idx = 0; idx < kv_size(x->intersect); idx++) {
+ intersect_node(b, x->ptr[k], kv_A(x->intersect, idx));
+ }
+ }
+ for (int ky = 0; ky < y->n + 1; ky++) {
+ int k = x->n + ky + 1;
+ // nodes that used to be in y, now the second half of x
+ x->ptr[k]->parent = x;
+ x->ptr[k]->p_idx = (int16_t)k;
+ // TODO(bfredl): dedicated impl for "Z |= X"
+ for (size_t idx = 0; idx < kv_size(y->intersect); idx++) {
+ intersect_node(b, x->ptr[k], kv_A(y->intersect, idx));
+ }
}
}
x->n += y->n + 1;
- memmove(&p->key[i], &p->key[i + 1], (size_t)(p->n - i - 1) * sizeof(mtkey_t));
+ memmove(&p->key[i], &p->key[i + 1], (size_t)(p->n - i - 1) * sizeof(MTKey));
memmove(&p->ptr[i + 1], &p->ptr[i + 2],
- (size_t)(p->n - i - 1) * sizeof(mtkey_t *));
+ (size_t)(p->n - i - 1) * sizeof(MTKey *));
+ for (int j = i + 1; j < p->n; j++) { // note: one has been deleted
+ p->ptr[j]->p_idx = (int16_t)j;
+ }
p->n--;
- xfree(y);
- b->n_nodes--;
+ marktree_free_node(b, y);
+
+ kvi_destroy(x->intersect);
+
+ // move of a kvec_withinit_t, messy!
+ // TODO(bfredl): special case version of intersect_merge(x_out, x_in_m_out, y) to avoid this
+ kvi_move(&x->intersect, &m);
+
return x;
}
+/// @param dest is overwritten (assumed to already been freed/moved)
+/// @param src consumed (don't free or use)
+void kvi_move(Intersection *dest, Intersection *src)
+{
+ dest->size = src->size;
+ dest->capacity = src->capacity;
+ if (src->items == src->init_array) {
+ memcpy(dest->init_array, src->init_array, src->size * sizeof(*src->init_array));
+ dest->items = dest->init_array;
+ } else {
+ dest->items = src->items;
+ }
+}
+
// TODO(bfredl): as a potential "micro" optimization, pivoting should balance
// the two nodes instead of stealing just one key
-static void pivot_right(MarkTree *b, mtnode_t *p, int i)
+// x_pos is the absolute position of the key just before x (or a dummy key strictly less than any
+// key inside x, if x is the first leaf)
+static void pivot_right(MarkTree *b, MTPos p_pos, MTNode *p, const int i)
{
- mtnode_t *x = p->ptr[i], *y = p->ptr[i + 1];
- memmove(&y->key[1], y->key, (size_t)y->n * sizeof(mtkey_t));
+ MTNode *x = p->ptr[i], *y = p->ptr[i + 1];
+ memmove(&y->key[1], y->key, (size_t)y->n * sizeof(MTKey));
if (y->level) {
- memmove(&y->ptr[1], y->ptr, ((size_t)y->n + 1) * sizeof(mtnode_t *));
+ memmove(&y->ptr[1], y->ptr, ((size_t)y->n + 1) * sizeof(MTNode *));
+ for (int j = 1; j < y->n + 2; j++) {
+ y->ptr[j]->p_idx = (int16_t)j;
+ }
}
y->key[0] = p->key[i];
refkey(b, y, 0);
@@ -499,6 +989,7 @@ static void pivot_right(MarkTree *b, mtnode_t *p, int i)
if (x->level) {
y->ptr[0] = x->ptr[x->n];
y->ptr[0]->parent = y;
+ y->ptr[0]->p_idx = 0;
}
x->n--;
y->n++;
@@ -509,11 +1000,46 @@ static void pivot_right(MarkTree *b, mtnode_t *p, int i)
for (int k = 1; k < y->n; k++) {
unrelative(y->key[0].pos, &y->key[k].pos);
}
+
+ // repair intersections of x
+ if (x->level) {
+ // handle y and first new y->ptr[0]
+ Intersection d;
+ kvi_init(d);
+ // y->ptr[0] was moved from x to y
+ // adjust y->ptr[0] for a difference between the parents
+ // in addition, this might cause some intersection of the old y
+ // to bubble down to the old children of y (if y->ptr[0] wasn't intersected)
+ intersect_mov(&x->intersect, &y->intersect, &y->ptr[0]->intersect, &d);
+ if (kv_size(d)) {
+ for (int yi = 1; yi < y->n + 1; yi++) {
+ intersect_add(&y->ptr[yi]->intersect, &d);
+ }
+ }
+ kvi_destroy(d);
+
+ bubble_up(x);
+ } else {
+ // if the last element of x used to be an end node, check if it now covers all of x
+ if (mt_end(p->key[i])) {
+ uint64_t pi = pseudo_index(x, 0); // note: sloppy pseudo-index
+ uint64_t start_id = mt_lookup_key_side(p->key[i], false);
+ uint64_t pi_start = pseudo_index_for_id(b, start_id, true);
+ if (pi_start > 0 && pi_start < pi) {
+ intersect_node(b, x, start_id);
+ }
+ }
+
+ if (mt_start(y->key[0])) {
+ // no need for a check, just delete it if it was there
+ unintersect_node(b, y, mt_lookup_key(y->key[0]), false);
+ }
+ }
}
-static void pivot_left(MarkTree *b, mtnode_t *p, int i)
+static void pivot_left(MarkTree *b, MTPos p_pos, MTNode *p, int i)
{
- mtnode_t *x = p->ptr[i], *y = p->ptr[i + 1];
+ MTNode *x = p->ptr[i], *y = p->ptr[i + 1];
// reverse from how we "always" do it. but pivot_left
// is just the inverse of pivot_right, so reverse it literally.
@@ -532,40 +1058,88 @@ static void pivot_left(MarkTree *b, mtnode_t *p, int i)
if (x->level) {
x->ptr[x->n + 1] = y->ptr[0];
x->ptr[x->n + 1]->parent = x;
+ x->ptr[x->n + 1]->p_idx = (int16_t)(x->n + 1);
}
- memmove(y->key, &y->key[1], (size_t)(y->n - 1) * sizeof(mtkey_t));
+ memmove(y->key, &y->key[1], (size_t)(y->n - 1) * sizeof(MTKey));
if (y->level) {
- memmove(y->ptr, &y->ptr[1], (size_t)y->n * sizeof(mtnode_t *));
+ memmove(y->ptr, &y->ptr[1], (size_t)y->n * sizeof(MTNode *));
+ for (int j = 0; j < y->n; j++) { // note: last item deleted
+ y->ptr[j]->p_idx = (int16_t)j;
+ }
}
x->n++;
y->n--;
+
+ // repair intersections of x,y
+ if (x->level) {
+ // handle y and first new y->ptr[0]
+ Intersection d;
+ kvi_init(d);
+ // x->ptr[x->n] was moved from y to x
+ // adjust x->ptr[x->n] for a difference between the parents
+ // in addition, this might cause some intersection of the old x
+ // to bubble down to the old children of x (if x->ptr[n] wasn't intersected)
+ intersect_mov(&y->intersect, &x->intersect, &x->ptr[x->n]->intersect, &d);
+ if (kv_size(d)) {
+ for (int xi = 0; xi < x->n; xi++) { // ptr[x->n] deliberately skipped
+ intersect_add(&x->ptr[xi]->intersect, &d);
+ }
+ }
+ kvi_destroy(d);
+
+ bubble_up(y);
+ } else {
+ // if the first element of y used to be an start node, check if it now covers all of y
+ if (mt_start(p->key[i])) {
+ uint64_t pi = pseudo_index(y, 0); // note: sloppy pseudo-index
+
+ uint64_t end_id = mt_lookup_key_side(p->key[i], true);
+ uint64_t pi_end = pseudo_index_for_id(b, end_id, true);
+
+ if (pi_end > pi) {
+ intersect_node(b, y, mt_lookup_key(p->key[i]));
+ }
+ }
+
+ if (mt_end(x->key[x->n - 1])) {
+ // no need for a check, just delet it if it was there
+ unintersect_node(b, x, mt_lookup_key_side(x->key[x->n - 1], false), false);
+ }
+ }
}
/// frees all mem, resets tree to valid empty state
void marktree_clear(MarkTree *b)
{
if (b->root) {
- marktree_free_node(b->root);
+ marktree_free_subtree(b, b->root);
b->root = NULL;
}
map_destroy(uint64_t, b->id2node);
*b->id2node = (PMap(uint64_t)) MAP_INIT;
b->n_keys = 0;
- b->n_nodes = 0;
+ assert(b->n_nodes == 0);
}
-void marktree_free_node(mtnode_t *x)
+void marktree_free_subtree(MarkTree *b, MTNode *x)
{
if (x->level) {
for (int i = 0; i < x->n + 1; i++) {
- marktree_free_node(x->ptr[i]);
+ marktree_free_subtree(b, x->ptr[i]);
}
}
+ marktree_free_node(b, x);
+}
+
+static void marktree_free_node(MarkTree *b, MTNode *x)
+{
+ kvi_destroy(x->intersect);
xfree(x);
+ b->n_nodes--;
}
/// NB: caller must check not pair!
-void marktree_revise(MarkTree *b, MarkTreeIter *itr, uint8_t decor_level, mtkey_t key)
+void marktree_revise(MarkTree *b, MarkTreeIter *itr, uint8_t decor_level, MTKey key)
{
// TODO(bfredl): clean up this mess and re-instantiate &= and |= forms
// once we upgrade to a non-broken version of gcc in functionaltest-lua CI
@@ -578,49 +1152,108 @@ void marktree_revise(MarkTree *b, MarkTreeIter *itr, uint8_t decor_level, mtkey_
rawkey(itr).priority = key.priority;
}
+/// @param itr iterator is invalid after call
void marktree_move(MarkTree *b, MarkTreeIter *itr, int row, int col)
{
- mtkey_t key = rawkey(itr);
- // TODO(bfredl): optimize when moving a mark within a leaf without moving it
- // across neighbours!
- marktree_del_itr(b, itr, false);
- key.pos = (mtpos_t){ row, col };
+ MTKey key = rawkey(itr);
+ MTNode *x = itr->x;
+ if (!x->level) {
+ bool internal = false;
+ MTPos newpos = MTPos(row, col);
+ if (x->parent != NULL) {
+ // strictly _after_ key before `x`
+ // (not optimal when x is very first leaf of the entire tree, but that's fine)
+ if (pos_less(itr->pos, newpos)) {
+ relative(itr->pos, &newpos);
+
+ // strictly before the end of x. (this could be made sharper by
+ // finding the internal key just after x, but meh)
+ if (pos_less(newpos, x->key[x->n - 1].pos)) {
+ internal = true;
+ }
+ }
+ } else {
+ // tree is one node. newpos thus is already "relative" itr->pos
+ internal = true;
+ }
+
+ if (internal) {
+ key.pos = newpos;
+ bool match;
+ // tricky: could minimize movement in either direction better
+ int new_i = marktree_getp_aux(x, key, &match);
+ if (!match) {
+ new_i++;
+ }
+ if (new_i == itr->i || key_cmp(key, x->key[new_i]) == 0) {
+ x->key[itr->i].pos = newpos;
+ } else if (new_i < itr->i) {
+ memmove(&x->key[new_i + 1], &x->key[new_i], sizeof(MTKey) * (size_t)(itr->i - new_i));
+ x->key[new_i] = key;
+ } else if (new_i > itr->i) {
+ memmove(&x->key[itr->i], &x->key[itr->i + 1], sizeof(MTKey) * (size_t)(new_i - itr->i));
+ x->key[new_i] = key;
+ }
+ return;
+ }
+ }
+ uint64_t other = marktree_del_itr(b, itr, false);
+ key.pos = (MTPos){ row, col };
marktree_put_key(b, key);
- itr->node = NULL; // itr might become invalid by put
+
+ if (other) {
+ marktree_restore_pair(b, key);
+ }
+ itr->x = NULL; // itr might become invalid by put
+}
+
+void marktree_restore_pair(MarkTree *b, MTKey key)
+{
+ MarkTreeIter itr[1];
+ MarkTreeIter end_itr[1];
+ marktree_lookup(b, mt_lookup_key_side(key, false), itr);
+ marktree_lookup(b, mt_lookup_key_side(key, true), end_itr);
+ if (!itr->x || !end_itr->x) {
+ // this could happen if the other end is waiting to be restored later
+ // this function will be called again for the other end.
+ return;
+ }
+ rawkey(itr).flags &= (uint16_t) ~MT_FLAG_ORPHANED;
+ rawkey(end_itr).flags &= (uint16_t) ~MT_FLAG_ORPHANED;
+
+ marktree_intersect_pair(b, mt_lookup_key_side(key, false), itr, end_itr, false);
}
// itr functions
-// TODO(bfredl): static inline?
bool marktree_itr_get(MarkTree *b, int32_t row, int col, MarkTreeIter *itr)
{
- return marktree_itr_get_ext(b, (mtpos_t){ row, col },
- itr, false, false, NULL);
+ return marktree_itr_get_ext(b, MTPos(row, col), itr, false, false, NULL);
}
-bool marktree_itr_get_ext(MarkTree *b, mtpos_t p, MarkTreeIter *itr, bool last, bool gravity,
- mtpos_t *oldbase)
+bool marktree_itr_get_ext(MarkTree *b, MTPos p, MarkTreeIter *itr, bool last, bool gravity,
+ MTPos *oldbase)
{
if (b->n_keys == 0) {
- itr->node = NULL;
+ itr->x = NULL;
return false;
}
- mtkey_t k = { .pos = p, .flags = gravity ? MT_FLAG_RIGHT_GRAVITY : 0 };
+ MTKey k = { .pos = p, .flags = gravity ? MT_FLAG_RIGHT_GRAVITY : 0 };
if (last && !gravity) {
k.flags = MT_FLAG_LAST;
}
- itr->pos = (mtpos_t){ 0, 0 };
- itr->node = b->root;
+ itr->pos = (MTPos){ 0, 0 };
+ itr->x = b->root;
itr->lvl = 0;
if (oldbase) {
oldbase[itr->lvl] = itr->pos;
}
while (true) {
- itr->i = marktree_getp_aux(itr->node, k, 0) + 1;
+ itr->i = marktree_getp_aux(itr->x, k, 0) + 1;
- if (itr->node->level == 0) {
+ if (itr->x->level == 0) {
break;
}
@@ -628,10 +1261,10 @@ bool marktree_itr_get_ext(MarkTree *b, mtpos_t p, MarkTreeIter *itr, bool last,
itr->s[itr->lvl].oldcol = itr->pos.col;
if (itr->i > 0) {
- compose(&itr->pos, itr->node->key[itr->i - 1].pos);
- relative(itr->node->key[itr->i - 1].pos, &k.pos);
+ compose(&itr->pos, itr->x->key[itr->i - 1].pos);
+ relative(itr->x->key[itr->i - 1].pos, &k.pos);
}
- itr->node = itr->node->ptr[itr->i];
+ itr->x = itr->x->ptr[itr->i];
itr->lvl++;
if (oldbase) {
oldbase[itr->lvl] = itr->pos;
@@ -640,7 +1273,7 @@ bool marktree_itr_get_ext(MarkTree *b, mtpos_t p, MarkTreeIter *itr, bool last,
if (last) {
return marktree_itr_prev(b, itr);
- } else if (itr->i >= itr->node->n) {
+ } else if (itr->i >= itr->x->n) {
return marktree_itr_next(b, itr);
}
return true;
@@ -648,19 +1281,20 @@ bool marktree_itr_get_ext(MarkTree *b, mtpos_t p, MarkTreeIter *itr, bool last,
bool marktree_itr_first(MarkTree *b, MarkTreeIter *itr)
{
- itr->node = b->root;
if (b->n_keys == 0) {
+ itr->x = NULL;
return false;
}
+ itr->x = b->root;
itr->i = 0;
itr->lvl = 0;
- itr->pos = (mtpos_t){ 0, 0 };
- while (itr->node->level > 0) {
+ itr->pos = MTPos(0, 0);
+ while (itr->x->level > 0) {
itr->s[itr->lvl].i = 0;
itr->s[itr->lvl].oldcol = 0;
itr->lvl++;
- itr->node = itr->node->ptr[0];
+ itr->x = itr->x->ptr[0];
}
return true;
}
@@ -669,16 +1303,16 @@ bool marktree_itr_first(MarkTree *b, MarkTreeIter *itr)
int marktree_itr_last(MarkTree *b, MarkTreeIter *itr)
{
if (b->n_keys == 0) {
- itr->node = NULL;
+ itr->x = NULL;
return false;
}
- itr->pos = (mtpos_t){ 0, 0 };
- itr->node = b->root;
+ itr->pos = MTPos(0, 0);
+ itr->x = b->root;
itr->lvl = 0;
while (true) {
- itr->i = itr->node->n;
+ itr->i = itr->x->n;
- if (itr->node->level == 0) {
+ if (itr->x->level == 0) {
break;
}
@@ -686,63 +1320,71 @@ int marktree_itr_last(MarkTree *b, MarkTreeIter *itr)
itr->s[itr->lvl].oldcol = itr->pos.col;
assert(itr->i > 0);
- compose(&itr->pos, itr->node->key[itr->i - 1].pos);
+ compose(&itr->pos, itr->x->key[itr->i - 1].pos);
- itr->node = itr->node->ptr[itr->i];
+ itr->x = itr->x->ptr[itr->i];
itr->lvl++;
}
itr->i--;
return true;
}
-// TODO(bfredl): static inline
bool marktree_itr_next(MarkTree *b, MarkTreeIter *itr)
{
- return marktree_itr_next_skip(b, itr, false, NULL);
+ return marktree_itr_next_skip(b, itr, false, false, NULL);
}
-static bool marktree_itr_next_skip(MarkTree *b, MarkTreeIter *itr, bool skip, mtpos_t oldbase[])
+static bool marktree_itr_next_skip(MarkTree *b, MarkTreeIter *itr, bool skip, bool preload,
+ MTPos oldbase[])
{
- if (!itr->node) {
+ if (!itr->x) {
return false;
}
itr->i++;
- if (itr->node->level == 0 || skip) {
- if (itr->i < itr->node->n) {
+ if (itr->x->level == 0 || skip) {
+ if (preload && itr->x->level == 0 && skip) {
+ // skip rest of this leaf node
+ itr->i = itr->x->n;
+ } else if (itr->i < itr->x->n) {
// TODO(bfredl): this is the common case,
// and could be handled by inline wrapper
return true;
}
// we ran out of non-internal keys. Go up until we find an internal key
- while (itr->i >= itr->node->n) {
- itr->node = itr->node->parent;
- if (itr->node == NULL) {
+ while (itr->i >= itr->x->n) {
+ itr->x = itr->x->parent;
+ if (itr->x == NULL) {
return false;
}
itr->lvl--;
itr->i = itr->s[itr->lvl].i;
if (itr->i > 0) {
- itr->pos.row -= itr->node->key[itr->i - 1].pos.row;
+ itr->pos.row -= itr->x->key[itr->i - 1].pos.row;
itr->pos.col = itr->s[itr->lvl].oldcol;
}
}
} else {
// we stood at an "internal" key. Go down to the first non-internal
// key after it.
- while (itr->node->level > 0) {
+ while (itr->x->level > 0) {
// internal key, there is always a child after
if (itr->i > 0) {
itr->s[itr->lvl].oldcol = itr->pos.col;
- compose(&itr->pos, itr->node->key[itr->i - 1].pos);
+ compose(&itr->pos, itr->x->key[itr->i - 1].pos);
}
if (oldbase && itr->i == 0) {
oldbase[itr->lvl + 1] = oldbase[itr->lvl];
}
itr->s[itr->lvl].i = itr->i;
- assert(itr->node->ptr[itr->i]->parent == itr->node);
- itr->node = itr->node->ptr[itr->i];
- itr->i = 0;
+ assert(itr->x->ptr[itr->i]->parent == itr->x);
itr->lvl++;
+ itr->x = itr->x->ptr[itr->i];
+ if (preload && itr->x->level) {
+ itr->i = -1;
+ break;
+ } else {
+ itr->i = 0;
+ }
}
}
return true;
@@ -750,10 +1392,10 @@ static bool marktree_itr_next_skip(MarkTree *b, MarkTreeIter *itr, bool skip, mt
bool marktree_itr_prev(MarkTree *b, MarkTreeIter *itr)
{
- if (!itr->node) {
+ if (!itr->x) {
return false;
}
- if (itr->node->level == 0) {
+ if (itr->x->level == 0) {
itr->i--;
if (itr->i >= 0) {
// TODO(bfredl): this is the common case,
@@ -762,30 +1404,30 @@ bool marktree_itr_prev(MarkTree *b, MarkTreeIter *itr)
}
// we ran out of non-internal keys. Go up until we find a non-internal key
while (itr->i < 0) {
- itr->node = itr->node->parent;
- if (itr->node == NULL) {
+ itr->x = itr->x->parent;
+ if (itr->x == NULL) {
return false;
}
itr->lvl--;
itr->i = itr->s[itr->lvl].i - 1;
if (itr->i >= 0) {
- itr->pos.row -= itr->node->key[itr->i].pos.row;
+ itr->pos.row -= itr->x->key[itr->i].pos.row;
itr->pos.col = itr->s[itr->lvl].oldcol;
}
}
} else {
// we stood at an "internal" key. Go down to the last non-internal
// key before it.
- while (itr->node->level > 0) {
+ while (itr->x->level > 0) {
// internal key, there is always a child before
if (itr->i > 0) {
itr->s[itr->lvl].oldcol = itr->pos.col;
- compose(&itr->pos, itr->node->key[itr->i - 1].pos);
+ compose(&itr->pos, itr->x->key[itr->i - 1].pos);
}
itr->s[itr->lvl].i = itr->i;
- assert(itr->node->ptr[itr->i]->parent == itr->node);
- itr->node = itr->node->ptr[itr->i];
- itr->i = itr->node->n;
+ assert(itr->x->ptr[itr->i]->parent == itr->x);
+ itr->x = itr->x->ptr[itr->i];
+ itr->i = itr->x->n;
itr->lvl++;
}
itr->i--;
@@ -793,33 +1435,22 @@ bool marktree_itr_prev(MarkTree *b, MarkTreeIter *itr)
return true;
}
-void marktree_itr_rewind(MarkTree *b, MarkTreeIter *itr)
-{
- if (!itr->node) {
- return;
- }
- if (itr->node->level) {
- marktree_itr_prev(b, itr);
- }
- itr->i = 0;
-}
-
bool marktree_itr_node_done(MarkTreeIter *itr)
{
- return !itr->node || itr->i == itr->node->n - 1;
+ return !itr->x || itr->i == itr->x->n - 1;
}
-mtpos_t marktree_itr_pos(MarkTreeIter *itr)
+MTPos marktree_itr_pos(MarkTreeIter *itr)
{
- mtpos_t pos = rawkey(itr).pos;
+ MTPos pos = rawkey(itr).pos;
unrelative(itr->pos, &pos);
return pos;
}
-mtkey_t marktree_itr_current(MarkTreeIter *itr)
+MTKey marktree_itr_current(MarkTreeIter *itr)
{
- if (itr->node) {
- mtkey_t key = rawkey(itr);
+ if (itr->x) {
+ MTKey key = rawkey(itr);
key.pos = marktree_itr_pos(itr);
return key;
}
@@ -831,47 +1462,198 @@ static bool itr_eq(MarkTreeIter *itr1, MarkTreeIter *itr2)
return (&rawkey(itr1) == &rawkey(itr2));
}
-static void itr_swap(MarkTreeIter *itr1, MarkTreeIter *itr2)
+/// Get all marks which overlaps the position (row,col)
+///
+/// After calling this function, use marktree_itr_step_overlap to step through
+/// one overlapping mark at a time, until it returns false
+///
+/// NOTE: It's possible to get all marks which overlaps a region (row,col) to (row_end,col_end)
+/// To do this, first call marktree_itr_get_overlap with the start position and
+/// keep calling marktree_itr_step_overlap until it returns false.
+/// After this, as a second loop, keep calling the marktree_itr_next() until
+/// the iterator is invalid or reaches past (row_end, col_end). In this loop,
+/// consider all "start" marks (and unpaired marks if relevant), but skip over
+/// all "end" marks, using mt_end(mark).
+///
+/// @return false if we already know no marks can be found
+/// even if "true" the first call to marktree_itr_step_overlap
+/// could return false
+bool marktree_itr_get_overlap(MarkTree *b, int row, int col, MarkTreeIter *itr)
+{
+ if (b->n_keys == 0) {
+ itr->x = NULL;
+ return false;
+ }
+
+ itr->x = b->root;
+ itr->i = -1;
+ itr->lvl = 0;
+ itr->pos = MTPos(0, 0);
+ itr->intersect_pos = MTPos(row, col);
+  // same as intersect_pos, but will be adjusted to be relative to itr->x
+ itr->intersect_pos_x = MTPos(row, col);
+ itr->intersect_idx = 0;
+ return true;
+}
+
+static inline MTPair pair_from(MTKey start, MTKey end)
+{
+ return (MTPair){ .start = start, .end_pos = end.pos, .end_right_gravity = mt_right(end) };
+}
+
+/// Step through all overlapping pairs at a position.
+///
+/// This function must only be used with an iterator from |marktree_itr_get_overlap|
+///
+/// @return true if a valid pair was found (returned as `pair`)
+/// When all overlapping mark pairs have been found, false will be returned. `itr`
+/// is then valid as an ordinary iterator at the (row, col) position specified in
+/// marktree_itr_get_overlap
+bool marktree_itr_step_overlap(MarkTree *b, MarkTreeIter *itr, MTPair *pair)
+{
+ // phase one: we start at the root node and step inwards towards itr->intersect_pos
+ // (the position queried in marktree_itr_get_overlap)
+ //
+ // For each node (ancestor node to the node containing the sought position)
+ // we return all intersecting intervals, one at a time
+ while (itr->i == -1) {
+ if (itr->intersect_idx < kv_size(itr->x->intersect)) {
+ uint64_t id = kv_A(itr->x->intersect, itr->intersect_idx++);
+ *pair = pair_from(marktree_lookup(b, id, NULL),
+ marktree_lookup(b, id|MARKTREE_END_FLAG, NULL));
+ return true;
+ }
+
+ if (itr->x->level == 0) {
+ itr->s[itr->lvl].i = itr->i = 0;
+ break;
+ }
+
+ MTKey k = { .pos = itr->intersect_pos_x, .flags = 0 };
+ itr->i = marktree_getp_aux(itr->x, k, 0) + 1;
+
+ itr->s[itr->lvl].i = itr->i;
+ itr->s[itr->lvl].oldcol = itr->pos.col;
+
+ if (itr->i > 0) {
+ compose(&itr->pos, itr->x->key[itr->i - 1].pos);
+ relative(itr->x->key[itr->i - 1].pos, &itr->intersect_pos_x);
+ }
+ itr->x = itr->x->ptr[itr->i];
+ itr->lvl++;
+ itr->i = -1;
+ itr->intersect_idx = 0;
+ }
+
+ // phase two: we now need to handle the node found at itr->intersect_pos
+ // first consider all start nodes in the node before this position.
+ while (itr->i < itr->x->n && pos_less(rawkey(itr).pos, itr->intersect_pos_x)) {
+ MTKey k = itr->x->key[itr->i++];
+ itr->s[itr->lvl].i = itr->i;
+ if (mt_start(k)) {
+ MTKey end = marktree_lookup(b, mt_lookup_id(k.ns, k.id, true), NULL);
+ if (pos_less(end.pos, itr->intersect_pos)) {
+ continue;
+ }
+
+ unrelative(itr->pos, &k.pos);
+ *pair = pair_from(k, end);
+ return true; // it's a start!
+ }
+ }
+
+ // phase 2B: We also need to step to the end of this node and consider all end marks, which
+ // might end an interval overlapping itr->intersect_pos
+ while (itr->i < itr->x->n) {
+ MTKey k = itr->x->key[itr->i++];
+ if (mt_end(k)) {
+ uint64_t id = mt_lookup_id(k.ns, k.id, false);
+ if (id2node(b, id) == itr->x) {
+ continue;
+ }
+ unrelative(itr->pos, &k.pos);
+ MTKey start = marktree_lookup(b, id, NULL);
+ if (pos_less(itr->intersect_pos, start.pos)) {
+ continue;
+ }
+ *pair = pair_from(start, k);
+ return true; // end of a range which began before us!
+ }
+ }
+
+ // when returning false, get back to the queried position, to ensure the caller
+ // can keep using it as an ordinary iterator at the queried position. The docstring
+ // for marktree_itr_get_overlap explains how this is useful.
+ itr->i = itr->s[itr->lvl].i;
+ assert(itr->i >= 0);
+ if (itr->i >= itr->x->n) {
+ marktree_itr_next(b, itr);
+ }
+
+ // either on or after the intersected position, bail out
+ return false;
+}
+
+static void swap_keys(MarkTree *b, MarkTreeIter *itr1, MarkTreeIter *itr2, DamageList *damage)
{
- mtkey_t key1 = rawkey(itr1);
- mtkey_t key2 = rawkey(itr2);
+ if (itr1->x != itr2->x) {
+ if (mt_paired(rawkey(itr1))) {
+ kvi_push(*damage, ((Damage){ mt_lookup_key(rawkey(itr1)), itr1->x, itr2->x,
+ itr1->i, itr2->i }));
+ }
+ if (mt_paired(rawkey(itr2))) {
+ kvi_push(*damage, ((Damage){ mt_lookup_key(rawkey(itr2)), itr2->x, itr1->x,
+ itr2->i, itr1->i }));
+ }
+ }
+
+ MTKey key1 = rawkey(itr1);
+ MTKey key2 = rawkey(itr2);
rawkey(itr1) = key2;
rawkey(itr1).pos = key1.pos;
rawkey(itr2) = key1;
rawkey(itr2).pos = key2.pos;
+ refkey(b, itr1->x, itr1->i);
+ refkey(b, itr2->x, itr2->i);
+}
+
+static int damage_cmp(const void *s1, const void *s2)
+{
+ Damage *d1 = (Damage *)s1, *d2 = (Damage *)s2;
+ assert(d1->id != d2->id);
+ return d1->id > d2->id;
}
bool marktree_splice(MarkTree *b, int32_t start_line, int start_col, int old_extent_line,
int old_extent_col, int new_extent_line, int new_extent_col)
{
- mtpos_t start = { start_line, start_col };
- mtpos_t old_extent = { old_extent_line, old_extent_col };
- mtpos_t new_extent = { new_extent_line, new_extent_col };
+ MTPos start = { start_line, start_col };
+ MTPos old_extent = { old_extent_line, old_extent_col };
+ MTPos new_extent = { new_extent_line, new_extent_col };
bool may_delete = (old_extent.row != 0 || old_extent.col != 0);
bool same_line = old_extent.row == 0 && new_extent.row == 0;
unrelative(start, &old_extent);
unrelative(start, &new_extent);
- MarkTreeIter itr[1] = { 0 };
- MarkTreeIter enditr[1] = { 0 };
+ MarkTreeIter itr[1] = { 0 }, enditr[1] = { 0 };
- mtpos_t oldbase[MT_MAX_DEPTH] = { 0 };
+ MTPos oldbase[MT_MAX_DEPTH] = { 0 };
marktree_itr_get_ext(b, start, itr, false, true, oldbase);
- if (!itr->node) {
+ if (!itr->x) {
// den e FÄRDIG
return false;
}
- mtpos_t delta = { new_extent.row - old_extent.row,
- new_extent.col - old_extent.col };
+ MTPos delta = { new_extent.row - old_extent.row,
+ new_extent.col - old_extent.col };
if (may_delete) {
- mtpos_t ipos = marktree_itr_pos(itr);
+ MTPos ipos = marktree_itr_pos(itr);
if (!pos_leq(old_extent, ipos)
|| (old_extent.row == ipos.row && old_extent.col == ipos.col
&& !mt_right(rawkey(itr)))) {
marktree_itr_get_ext(b, old_extent, enditr, true, true, NULL);
- assert(enditr->node);
+ assert(enditr->x);
// "assert" (itr <= enditr)
} else {
may_delete = false;
@@ -880,14 +1662,16 @@ bool marktree_splice(MarkTree *b, int32_t start_line, int start_col, int old_ext
bool past_right = false;
bool moved = false;
+ DamageList damage;
+ kvi_init(damage);
// Follow the general strategy of messing things up and fix them later
// "oldbase" carries the information needed to calculate old position of
// children.
if (may_delete) {
- while (itr->node && !past_right) {
- mtpos_t loc_start = start;
- mtpos_t loc_old = old_extent;
+ while (itr->x && !past_right) {
+ MTPos loc_start = start;
+ MTPos loc_old = old_extent;
relative(itr->pos, &loc_start);
relative(oldbase[itr->lvl], &loc_old);
@@ -905,9 +1689,7 @@ continue_same_node:
marktree_itr_prev(b, enditr);
}
if (!mt_right(rawkey(enditr))) {
- itr_swap(itr, enditr);
- refkey(b, itr->node, itr->i);
- refkey(b, enditr->node, enditr->i);
+ swap_keys(b, itr, enditr, &damage);
} else {
past_right = true; // NOLINT
(void)past_right;
@@ -921,14 +1703,14 @@ continue_same_node:
}
moved = true;
- if (itr->node->level) {
+ if (itr->x->level) {
oldbase[itr->lvl + 1] = rawkey(itr).pos;
unrelative(oldbase[itr->lvl], &oldbase[itr->lvl + 1]);
rawkey(itr).pos = loc_start;
- marktree_itr_next_skip(b, itr, false, oldbase);
+ marktree_itr_next_skip(b, itr, false, false, oldbase);
} else {
rawkey(itr).pos = loc_start;
- if (itr->i < itr->node->n - 1) {
+ if (itr->i < itr->x->n - 1) {
itr->i++;
if (!past_right) {
goto continue_same_node;
@@ -938,10 +1720,10 @@ continue_same_node:
}
}
}
- while (itr->node) {
- mtpos_t loc_new = new_extent;
+ while (itr->x) {
+ MTPos loc_new = new_extent;
relative(itr->pos, &loc_new);
- mtpos_t limit = old_extent;
+ MTPos limit = old_extent;
relative(oldbase[itr->lvl], &limit);
@@ -951,16 +1733,16 @@ past_continue_same_node:
break;
}
- mtpos_t oldpos = rawkey(itr).pos;
+ MTPos oldpos = rawkey(itr).pos;
rawkey(itr).pos = loc_new;
moved = true;
- if (itr->node->level) {
+ if (itr->x->level) {
oldbase[itr->lvl + 1] = oldpos;
unrelative(oldbase[itr->lvl], &oldbase[itr->lvl + 1]);
- marktree_itr_next_skip(b, itr, false, oldbase);
+ marktree_itr_next_skip(b, itr, false, false, oldbase);
} else {
- if (itr->i < itr->node->n - 1) {
+ if (itr->i < itr->x->n - 1) {
itr->i++;
goto past_continue_same_node;
} else {
@@ -970,7 +1752,7 @@ past_continue_same_node:
}
}
- while (itr->node) {
+ while (itr->x) {
unrelative(oldbase[itr->lvl], &rawkey(itr).pos);
int realrow = rawkey(itr).pos.row;
assert(realrow >= old_extent.row);
@@ -978,7 +1760,6 @@ past_continue_same_node:
if (realrow == old_extent.row) {
if (delta.col) {
rawkey(itr).pos.col += delta.col;
- moved = true;
}
} else {
if (same_line) {
@@ -994,22 +1775,78 @@ past_continue_same_node:
if (done) {
break;
}
- marktree_itr_next_skip(b, itr, true, NULL);
+ marktree_itr_next_skip(b, itr, true, false, NULL);
+ }
+
+ if (kv_size(damage)) {
+ // TODO(bfredl): a full sort is not really needed. we just need a "start" node to find
+ // its corresponding "end" node. Set up some dedicated hash for this later (c.f. the
+ // "grow only" variant of khash_t branch)
+ qsort((void *)&kv_A(damage, 0), kv_size(damage), sizeof(kv_A(damage, 0)),
+ damage_cmp);
+
+ for (size_t i = 0; i < kv_size(damage); i++) {
+ Damage d = kv_A(damage, i);
+ if (!(d.id & MARKTREE_END_FLAG)) { // start
+ if (i + 1 < kv_size(damage) && kv_A(damage, i + 1).id == (d.id | MARKTREE_END_FLAG)) {
+ Damage d2 = kv_A(damage, i + 1);
+
+ // pair
+ marktree_itr_set_node(b, itr, d.old, d.old_i);
+ marktree_itr_set_node(b, enditr, d2.old, d2.old_i);
+ marktree_intersect_pair(b, d.id, itr, enditr, true);
+ marktree_itr_set_node(b, itr, d.new, d.new_i);
+ marktree_itr_set_node(b, enditr, d2.new, d2.new_i);
+ marktree_intersect_pair(b, d.id, itr, enditr, false);
+
+ i++; // consume two items
+ continue;
+ }
+
+ // d is lone start, end didn't move
+ MarkTreeIter endpos[1];
+ marktree_lookup(b, d.id | MARKTREE_END_FLAG, endpos);
+ if (endpos->x) {
+ marktree_itr_set_node(b, itr, d.old, d.old_i);
+ *enditr = *endpos;
+ marktree_intersect_pair(b, d.id, itr, enditr, true);
+ marktree_itr_set_node(b, itr, d.new, d.new_i);
+ *enditr = *endpos;
+ marktree_intersect_pair(b, d.id, itr, enditr, false);
+ }
+ } else {
+ // d is lone end, start didn't move
+ MarkTreeIter startpos[1];
+ uint64_t start_id = d.id & ~MARKTREE_END_FLAG;
+
+ marktree_lookup(b, start_id, startpos);
+ if (startpos->x) {
+ *itr = *startpos;
+ marktree_itr_set_node(b, enditr, d.old, d.old_i);
+ marktree_intersect_pair(b, start_id, itr, enditr, true);
+ *itr = *startpos;
+ marktree_itr_set_node(b, enditr, d.new, d.new_i);
+ marktree_intersect_pair(b, start_id, itr, enditr, false);
+ }
+ }
+ }
}
+ kvi_destroy(damage);
+
return moved;
}
void marktree_move_region(MarkTree *b, int start_row, colnr_T start_col, int extent_row,
colnr_T extent_col, int new_row, colnr_T new_col)
{
- mtpos_t start = { start_row, start_col }, size = { extent_row, extent_col };
- mtpos_t end = size;
+ MTPos start = { start_row, start_col }, size = { extent_row, extent_col };
+ MTPos end = size;
unrelative(start, &end);
MarkTreeIter itr[1] = { 0 };
marktree_itr_get_ext(b, start, itr, false, true, NULL);
- kvec_t(mtkey_t) saved = KV_INITIAL_VALUE;
- while (itr->node) {
- mtkey_t k = marktree_itr_current(itr);
+ kvec_t(MTKey) saved = KV_INITIAL_VALUE;
+ while (itr->x) {
+ MTKey k = marktree_itr_current(itr);
if (!pos_leq(k.pos, end) || (k.pos.row == end.row && k.pos.col == end.col
&& mt_right(k))) {
break;
@@ -1020,57 +1857,101 @@ void marktree_move_region(MarkTree *b, int start_row, colnr_T start_col, int ext
}
marktree_splice(b, start.row, start.col, size.row, size.col, 0, 0);
- mtpos_t new = { new_row, new_col };
+ MTPos new = { new_row, new_col };
marktree_splice(b, new.row, new.col,
0, 0, size.row, size.col);
for (size_t i = 0; i < kv_size(saved); i++) {
- mtkey_t item = kv_A(saved, i);
+ MTKey item = kv_A(saved, i);
unrelative(new, &item.pos);
marktree_put_key(b, item);
+ if (mt_paired(item)) {
+ // other end might be later in `saved`, this will safely bail out then
+ marktree_restore_pair(b, item);
+ }
}
kv_destroy(saved);
}
/// @param itr OPTIONAL. set itr to pos.
-mtkey_t marktree_lookup_ns(MarkTree *b, uint32_t ns, uint32_t id, bool end, MarkTreeIter *itr)
+MTKey marktree_lookup_ns(MarkTree *b, uint32_t ns, uint32_t id, bool end, MarkTreeIter *itr)
{
return marktree_lookup(b, mt_lookup_id(ns, id, end), itr);
}
+static uint64_t pseudo_index(MTNode *x, int i)
+{
+ int off = MT_LOG2_BRANCH * x->level;
+ uint64_t index = 0;
+
+ while (x) {
+ index |= (uint64_t)(i + 1) << off;
+ off += MT_LOG2_BRANCH;
+ i = x->p_idx;
+ x = x->parent;
+ }
+
+ return index;
+}
+
+/// Get a pseudo-index for the mark with the given id.
+/// if sloppy, two keys at the same _leaf_ node have the same index
+static uint64_t pseudo_index_for_id(MarkTree *b, uint64_t id, bool sloppy)
+{
+ MTNode *n = id2node(b, id);
+ if (n == NULL) {
+ return 0; // a valid pseudo-index is never zero!
+ }
+
+ int i = 0;
+ if (n->level || !sloppy) {
+ for (i = 0; i < n->n; i++) {
+ if (mt_lookup_key(n->key[i]) == id) {
+ break;
+ }
+ }
+ assert(i < n->n);
+ if (n->level) {
+ i += 1; // internal key i comes after ptr[i]
+ }
+ }
+
+ return pseudo_index(n, i);
+}
+
/// @param itr OPTIONAL. set itr to pos.
-mtkey_t marktree_lookup(MarkTree *b, uint64_t id, MarkTreeIter *itr)
+MTKey marktree_lookup(MarkTree *b, uint64_t id, MarkTreeIter *itr)
{
- mtnode_t *n = pmap_get(uint64_t)(b->id2node, id);
+ MTNode *n = id2node(b, id);
if (n == NULL) {
if (itr) {
- itr->node = NULL;
+ itr->x = NULL;
}
return MT_INVALID_KEY;
}
int i = 0;
for (i = 0; i < n->n; i++) {
if (mt_lookup_key(n->key[i]) == id) {
- goto found;
+ return marktree_itr_set_node(b, itr, n, i);
}
}
+
abort();
-found: {}
- mtkey_t key = n->key[i];
+}
+
+MTKey marktree_itr_set_node(MarkTree *b, MarkTreeIter *itr, MTNode *n, int i)
+{
+ MTKey key = n->key[i];
if (itr) {
itr->i = i;
- itr->node = n;
+ itr->x = n;
itr->lvl = b->root->level - n->level;
}
while (n->parent != NULL) {
- mtnode_t *p = n->parent;
- for (i = 0; i < p->n + 1; i++) {
- if (p->ptr[i] == n) {
- goto found_node;
- }
- }
- abort();
-found_node:
+ MTNode *p = n->parent;
+ i = n->p_idx;
+ assert(p->ptr[i] == n);
+
if (itr) {
itr->s[b->root->level - p->level].i = i;
}
@@ -1085,14 +1966,14 @@ found_node:
return key;
}
-mtpos_t marktree_get_altpos(MarkTree *b, mtkey_t mark, MarkTreeIter *itr)
+MTPos marktree_get_altpos(MarkTree *b, MTKey mark, MarkTreeIter *itr)
{
return marktree_get_alt(b, mark, itr).pos;
}
-mtkey_t marktree_get_alt(MarkTree *b, mtkey_t mark, MarkTreeIter *itr)
+MTKey marktree_get_alt(MarkTree *b, MTKey mark, MarkTreeIter *itr)
{
- mtkey_t end = MT_INVALID_KEY;
+ MTKey end = MT_INVALID_KEY;
if (mt_paired(mark)) {
end = marktree_lookup_ns(b, mark.ns, mark.id, !mt_end(mark), itr);
}
@@ -1101,8 +1982,8 @@ mtkey_t marktree_get_alt(MarkTree *b, mtkey_t mark, MarkTreeIter *itr)
static void marktree_itr_fix_pos(MarkTree *b, MarkTreeIter *itr)
{
- itr->pos = (mtpos_t){ 0, 0 };
- mtnode_t *x = b->root;
+ itr->pos = (MTPos){ 0, 0 };
+ MTNode *x = b->root;
for (int lvl = 0; lvl < itr->lvl; lvl++) {
itr->s[lvl].oldcol = itr->pos.col;
int i = itr->s[lvl].i;
@@ -1112,23 +1993,36 @@ static void marktree_itr_fix_pos(MarkTree *b, MarkTreeIter *itr)
assert(x->level);
x = x->ptr[i];
}
- assert(x == itr->node);
+ assert(x == itr->x);
}
// for unit test
-void marktree_put_test(MarkTree *b, uint32_t id, int row, int col, bool right_gravity)
+void marktree_put_test(MarkTree *b, uint32_t ns, uint32_t id, int row, int col, bool right_gravity,
+ int end_row, int end_col, bool end_right)
{
- mtkey_t key = { { row, col }, UINT32_MAX, id, 0,
- mt_flags(right_gravity, 0), 0, NULL };
- marktree_put(b, key, -1, -1, false);
+ MTKey key = { { row, col }, ns, id, 0,
+ mt_flags(right_gravity, 0), 0, NULL };
+ marktree_put(b, key, end_row, end_col, end_right);
}
// for unit test
-bool mt_right_test(mtkey_t key)
+bool mt_right_test(MTKey key)
{
return mt_right(key);
}
+// for unit test
+void marktree_del_pair_test(MarkTree *b, uint32_t ns, uint32_t id)
+{
+ MarkTreeIter itr[1];
+ marktree_lookup_ns(b, ns, id, false, itr);
+
+ uint64_t other = marktree_del_itr(b, itr, false);
+ assert(other);
+ marktree_lookup(b, other, itr);
+ marktree_del_itr(b, itr, false);
+}
+
void marktree_check(MarkTree *b)
{
#ifndef NDEBUG
@@ -1139,7 +2033,7 @@ void marktree_check(MarkTree *b)
return;
}
- mtpos_t dummy;
+ MTPos dummy;
bool last_right = false;
size_t nkeys = marktree_check_node(b, b->root, &dummy, &last_right);
assert(b->n_keys == nkeys);
@@ -1151,7 +2045,7 @@ void marktree_check(MarkTree *b)
}
#ifndef NDEBUG
-size_t marktree_check_node(MarkTree *b, mtnode_t *x, mtpos_t *last, bool *last_right)
+size_t marktree_check_node(MarkTree *b, MTNode *x, MTPos *last, bool *last_right)
{
assert(x->n <= 2 * T - 1);
// TODO(bfredl): too strict if checking "in repair" post-delete tree.
@@ -1162,7 +2056,7 @@ size_t marktree_check_node(MarkTree *b, mtnode_t *x, mtpos_t *last, bool *last_r
if (x->level) {
n_keys += marktree_check_node(b, x->ptr[i], last, last_right);
} else {
- *last = (mtpos_t) { 0, 0 };
+ *last = (MTPos) { 0, 0 };
}
if (i > 0) {
unrelative(x->key[i - 1].pos, last);
@@ -1182,6 +2076,7 @@ size_t marktree_check_node(MarkTree *b, mtnode_t *x, mtpos_t *last, bool *last_r
for (int i = 0; i < x->n + 1; i++) {
assert(x->ptr[i]->parent == x);
+ assert(x->ptr[i]->p_idx == i);
assert(x->ptr[i]->level == x->level - 1);
// PARANOIA: check no double node ref
for (int j = 0; j < i; j++) {
@@ -1193,34 +2088,221 @@ size_t marktree_check_node(MarkTree *b, mtnode_t *x, mtpos_t *last, bool *last_r
}
return n_keys;
}
+
+bool marktree_check_intersections(MarkTree *b)
+{
+ if (!b->root) {
+ return true;
+ }
+ PMap(ptr_t) checked = MAP_INIT;
+
+ // 1. move x->intersect to checked[x] and reinit x->intersect
+ mt_recurse_nodes(b->root, &checked);
+
+ // 2. iterate over all marks. for each START mark of a pair,
+ // intersect the nodes between the pair
+ MarkTreeIter itr[1];
+ marktree_itr_first(b, itr);
+ while (true) {
+ MTKey mark = marktree_itr_current(itr);
+ if (mark.pos.row < 0) {
+ break;
+ }
+
+ if (mt_start(mark)) {
+ MarkTreeIter start_itr[1];
+ MarkTreeIter end_itr[1];
+ uint64_t end_id = mt_lookup_id(mark.ns, mark.id, true);
+ MTKey k = marktree_lookup(b, end_id, end_itr);
+ if (k.pos.row >= 0) {
+ *start_itr = *itr;
+ marktree_intersect_pair(b, mt_lookup_key(mark), start_itr, end_itr, false);
+ }
+ }
+
+ marktree_itr_next(b, itr);
+ }
+
+ // 3. for each node check if the recreated intersection
+ // matches the old checked[x] intersection.
+ bool status = mt_recurse_nodes_compare(b->root, &checked);
+
+ uint64_t *val;
+ map_foreach_value(&checked, val, {
+ xfree(val);
+ });
+ map_destroy(ptr_t, &checked);
+
+ return status;
+}
+
+void mt_recurse_nodes(MTNode *x, PMap(ptr_t) *checked)
+{
+ if (kv_size(x->intersect)) {
+ kvi_push(x->intersect, (uint64_t)-1); // sentinel
+ uint64_t *val;
+ if (x->intersect.items == x->intersect.init_array) {
+ val = xmemdup(x->intersect.items, x->intersect.size * sizeof(*x->intersect.items));
+ } else {
+ val = x->intersect.items;
+ }
+ pmap_put(ptr_t)(checked, x, val);
+ kvi_init(x->intersect);
+ }
+
+ if (x->level) {
+ for (int i = 0; i < x->n + 1; i++) {
+ mt_recurse_nodes(x->ptr[i], checked);
+ }
+ }
+}
+
+bool mt_recurse_nodes_compare(MTNode *x, PMap(ptr_t) *checked)
+{
+ uint64_t *ref = pmap_get(ptr_t)(checked, x);
+ if (ref != NULL) {
+ for (size_t i = 0;; i++) {
+ if (ref[i] == (uint64_t)-1) {
+ if (i != kv_size(x->intersect)) {
+ return false;
+ }
+
+ break;
+ } else {
+ if (kv_size(x->intersect) <= i || ref[i] != kv_A(x->intersect, i)) {
+ return false;
+ }
+ }
+ }
+ } else {
+ if (kv_size(x->intersect)) {
+ return false;
+ }
+ }
+
+ if (x->level) {
+ for (int i = 0; i < x->n + 1; i++) {
+ if (!mt_recurse_nodes_compare(x->ptr[i], checked)) {
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
+
#endif
-char *mt_inspect_rec(MarkTree *b)
+// TODO(bfredl): kv_print
+#define GA_PUT(x) ga_concat(ga, (char *)(x))
+#define GA_PRINT(fmt, ...) snprintf(buf, sizeof(buf), fmt, __VA_ARGS__); \
+ GA_PUT(buf);
+
+String mt_inspect(MarkTree *b, bool keys, bool dot)
{
- garray_T ga;
- ga_init(&ga, (int)sizeof(char), 80);
- mtpos_t p = { 0, 0 };
- mt_inspect_node(b, &ga, b->root, p);
- return ga.ga_data;
+ garray_T ga[1];
+ ga_init(ga, (int)sizeof(char), 80);
+ MTPos p = { 0, 0 };
+ if (b->root) {
+ if (dot) {
+ GA_PUT("digraph D {\n\n");
+ mt_inspect_dotfile_node(b, ga, b->root, p, NULL);
+ GA_PUT("\n}");
+ } else {
+ mt_inspect_node(b, ga, keys, b->root, p);
+ }
+ }
+ return ga_take_string(ga);
}
-void mt_inspect_node(MarkTree *b, garray_T *ga, mtnode_t *n, mtpos_t off)
+void mt_inspect_node(MarkTree *b, garray_T *ga, bool keys, MTNode *n, MTPos off)
{
static char buf[1024];
- ga_concat(ga, "[");
+ GA_PUT("[");
+ if (keys && kv_size(n->intersect)) {
+ for (size_t i = 0; i < kv_size(n->intersect); i++) {
+ GA_PUT(i == 0 ? "{" : ";");
+ // GA_PRINT("%"PRIu64, kv_A(n->intersect, i));
+ GA_PRINT("%" PRIu64, mt_dbg_id(kv_A(n->intersect, i)));
+ }
+ GA_PUT("},");
+ }
if (n->level) {
- mt_inspect_node(b, ga, n->ptr[0], off);
+ mt_inspect_node(b, ga, keys, n->ptr[0], off);
}
for (int i = 0; i < n->n; i++) {
- mtpos_t p = n->key[i].pos;
+ MTPos p = n->key[i].pos;
unrelative(off, &p);
- snprintf(buf, sizeof(buf), "%d/%d", p.row, p.col);
- ga_concat(ga, buf);
+ GA_PRINT("%d/%d", p.row, p.col);
+ if (keys) {
+ MTKey key = n->key[i];
+ GA_PUT(":");
+ if (mt_start(key)) {
+ GA_PUT("<");
+ }
+ // GA_PRINT("%"PRIu64, mt_lookup_id(key.ns, key.id, false));
+ GA_PRINT("%" PRIu32, key.id);
+ if (mt_end(key)) {
+ GA_PUT(">");
+ }
+ }
if (n->level) {
- mt_inspect_node(b, ga, n->ptr[i + 1], p);
+ mt_inspect_node(b, ga, keys, n->ptr[i + 1], p);
} else {
ga_concat(ga, ",");
}
}
ga_concat(ga, "]");
}
+
+void mt_inspect_dotfile_node(MarkTree *b, garray_T *ga, MTNode *n, MTPos off, char *parent)
+{
+ static char buf[1024];
+ char namebuf[64];
+ if (parent != NULL) {
+ snprintf(namebuf, sizeof namebuf, "%s_%c%d", parent, 'a' + n->level, n->p_idx);
+ } else {
+ snprintf(namebuf, sizeof namebuf, "Node");
+ }
+
+ GA_PRINT(" %s[shape=plaintext, label=<\n", namebuf);
+ GA_PUT(" <table border='0' cellborder='1' cellspacing='0'>\n");
+ if (kv_size(n->intersect)) {
+ GA_PUT(" <tr><td>");
+ for (size_t i = 0; i < kv_size(n->intersect); i++) {
+ if (i > 0) {
+ GA_PUT(", ");
+ }
+ GA_PRINT("%" PRIu64, mt_dbg_id(kv_A(n->intersect, i)));
+ }
+ GA_PUT("</td></tr>\n");
+ }
+
+ GA_PUT(" <tr><td>");
+ for (int i = 0; i < n->n; i++) {
+ MTKey k = n->key[i];
+ if (i > 0) {
+ GA_PUT(", ");
+ }
+ GA_PRINT("%d", k.id);
+ if (mt_paired(k)) {
+ GA_PUT(mt_end(k) ? "e" : "s");
+ }
+ }
+ GA_PUT("</td></tr>\n");
+ GA_PUT(" </table>\n");
+ GA_PUT(">];\n");
+ if (parent) {
+ GA_PRINT(" %s -> %s\n", parent, namebuf);
+ }
+ if (n->level) {
+ mt_inspect_dotfile_node(b, ga, n->ptr[0], off, namebuf);
+ }
+ for (int i = 0; i < n->n; i++) {
+ MTPos p = n->key[i].pos;
+ unrelative(off, &p);
+ if (n->level) {
+ mt_inspect_dotfile_node(b, ga, n->ptr[i + 1], p, namebuf);
+ }
+ }
+}
diff --git a/src/nvim/marktree.h b/src/nvim/marktree.h
index cd56115b58..f53a54f3cc 100644
--- a/src/nvim/marktree.h
+++ b/src/nvim/marktree.h
@@ -6,29 +6,37 @@
#include <stddef.h>
#include <stdint.h>
+#include "klib/kvec.h"
#include "nvim/assert.h"
#include "nvim/garray.h"
#include "nvim/map.h"
#include "nvim/pos.h"
#include "nvim/types.h"
+// only for debug functions:
+#include "api/private/defs.h"
+
struct mtnode_s;
#define MT_MAX_DEPTH 20
#define MT_BRANCH_FACTOR 10
+// note max branch is actually 2*MT_BRANCH_FACTOR
+// and strictly this is ceil(log2(2*MT_BRANCH_FACTOR + 1))
+// as we need a pseudo-index for "right before this node"
+#define MT_LOG2_BRANCH 5
typedef struct {
int32_t row;
int32_t col;
-} mtpos_t;
-#define mtpos_t(r, c) ((mtpos_t){ .row = (r), .col = (c) })
+} MTPos;
+#define MTPos(r, c) ((MTPos){ .row = (r), .col = (c) })
-typedef struct mtnode_s mtnode_t;
+typedef struct mtnode_s MTNode;
typedef struct {
- mtpos_t pos;
+ MTPos pos;
int lvl;
- mtnode_t *node;
+ MTNode *x;
int i;
struct {
int oldcol;
@@ -36,33 +44,43 @@ typedef struct {
} s[MT_MAX_DEPTH];
size_t intersect_idx;
- mtpos_t intersect_pos;
+ MTPos intersect_pos;
+ MTPos intersect_pos_x;
} MarkTreeIter;
-#define marktree_itr_valid(itr) ((itr)->node != NULL)
+#define marktree_itr_valid(itr) ((itr)->x != NULL)
// Internal storage
//
// NB: actual marks have flags > 0, so we can use (row,col,0) pseudo-key for
// "space before (row,col)"
typedef struct {
- mtpos_t pos;
+ MTPos pos;
uint32_t ns;
uint32_t id;
int32_t hl_id;
uint16_t flags;
uint16_t priority;
Decoration *decor_full;
-} mtkey_t;
-#define MT_INVALID_KEY (mtkey_t) { { -1, -1 }, 0, 0, 0, 0, 0, NULL }
+} MTKey;
+
+typedef struct {
+ MTKey start;
+ MTPos end_pos;
+ bool end_right_gravity;
+} MTPair;
+
+#define MT_INVALID_KEY (MTKey) { { -1, -1 }, 0, 0, 0, 0, 0, NULL }
#define MT_FLAG_REAL (((uint16_t)1) << 0)
#define MT_FLAG_END (((uint16_t)1) << 1)
#define MT_FLAG_PAIRED (((uint16_t)1) << 2)
-#define MT_FLAG_HL_EOL (((uint16_t)1) << 3)
+// orphaned: the other side of this paired mark was deleted. this mark must be deleted very soon!
+#define MT_FLAG_ORPHANED (((uint16_t)1) << 3)
+#define MT_FLAG_HL_EOL (((uint16_t)1) << 4)
#define DECOR_LEVELS 4
-#define MT_FLAG_DECOR_OFFSET 4
+#define MT_FLAG_DECOR_OFFSET 5
#define MT_FLAG_DECOR_MASK (((uint16_t)(DECOR_LEVELS - 1)) << MT_FLAG_DECOR_OFFSET)
// next flag is (((uint16_t)1) << 6)
@@ -73,39 +91,44 @@ typedef struct {
#define MT_FLAG_EXTERNAL_MASK (MT_FLAG_DECOR_MASK | MT_FLAG_RIGHT_GRAVITY | MT_FLAG_HL_EOL)
-#define MARKTREE_END_FLAG (((uint64_t)1) << 63)
+// this is defined so that start and end of the same range have adjacent ids
+#define MARKTREE_END_FLAG ((uint64_t)1)
static inline uint64_t mt_lookup_id(uint32_t ns, uint32_t id, bool enda)
{
- return (uint64_t)ns << 32 | id | (enda ? MARKTREE_END_FLAG : 0);
+ return (uint64_t)ns << 33 | (id <<1) | (enda ? MARKTREE_END_FLAG : 0);
}
-#undef MARKTREE_END_FLAG
-static inline uint64_t mt_lookup_key(mtkey_t key)
+static inline uint64_t mt_lookup_key_side(MTKey key, bool end)
+{
+ return mt_lookup_id(key.ns, key.id, end);
+}
+
+static inline uint64_t mt_lookup_key(MTKey key)
{
return mt_lookup_id(key.ns, key.id, key.flags & MT_FLAG_END);
}
-static inline bool mt_paired(mtkey_t key)
+static inline bool mt_paired(MTKey key)
{
return key.flags & MT_FLAG_PAIRED;
}
-static inline bool mt_end(mtkey_t key)
+static inline bool mt_end(MTKey key)
{
return key.flags & MT_FLAG_END;
}
-static inline bool mt_start(mtkey_t key)
+static inline bool mt_start(MTKey key)
{
return mt_paired(key) && !mt_end(key);
}
-static inline bool mt_right(mtkey_t key)
+static inline bool mt_right(MTKey key)
{
return key.flags & MT_FLAG_RIGHT_GRAVITY;
}
-static inline uint8_t marktree_decor_level(mtkey_t key)
+static inline uint8_t marktree_decor_level(MTKey key)
{
return (uint8_t)((key.flags&MT_FLAG_DECOR_MASK) >> MT_FLAG_DECOR_OFFSET);
}
@@ -117,18 +140,27 @@ static inline uint16_t mt_flags(bool right_gravity, uint8_t decor_level)
| (decor_level << MT_FLAG_DECOR_OFFSET));
}
+typedef kvec_withinit_t(uint64_t, 4) Intersection;
+
struct mtnode_s {
int32_t n;
- int32_t level;
+ int16_t level;
+ int16_t p_idx; // index in parent
+ Intersection intersect;
// TODO(bfredl): we could consider having a only-sometimes-valid
// index into parent for faster "cached" lookup.
- mtnode_t *parent;
- mtkey_t key[2 * MT_BRANCH_FACTOR - 1];
- mtnode_t *ptr[];
+ MTNode *parent;
+ MTKey key[2 * MT_BRANCH_FACTOR - 1];
+ MTNode *ptr[];
};
+static inline uint64_t mt_dbg_id(uint64_t id)
+{
+ return (id>>1)&0xffffffff;
+}
+
typedef struct {
- mtnode_t *root;
+ MTNode *root;
size_t n_keys, n_nodes;
// TODO(bfredl): the pointer to node could be part of the larger
// Map(uint64_t, ExtmarkItem) essentially;
diff --git a/src/nvim/mbyte.c b/src/nvim/mbyte.c
index fd9efb1387..6182646fe7 100644
--- a/src/nvim/mbyte.c
+++ b/src/nvim/mbyte.c
@@ -1648,7 +1648,7 @@ bool utf_allow_break_before(int cc)
0x2021, // ‡ double dagger
0x2026, // … horizontal ellipsis
0x2030, // ‰ per mille sign
- 0x2031, // ‱ per then thousand sign
+ 0x2031, // ‱ per ten thousand sign
0x203c, // ‼ double exclamation mark
0x2047, // ⁇ double question mark
0x2048, // ⁈ question exclamation mark
diff --git a/src/nvim/memfile.c b/src/nvim/memfile.c
index 333ff75f7b..6722d6bd8a 100644
--- a/src/nvim/memfile.c
+++ b/src/nvim/memfile.c
@@ -101,11 +101,9 @@ memfile_T *mf_open(char *fname, int flags)
}
mfp->mf_free_first = NULL; // free list is empty
- mfp->mf_used_first = NULL; // used list is empty
- mfp->mf_used_last = NULL;
mfp->mf_dirty = MF_DIRTY_NO;
- mf_hash_init(&mfp->mf_hash);
- mf_hash_init(&mfp->mf_trans);
+ mfp->mf_hash = (PMap(int64_t)) MAP_INIT;
+ mfp->mf_trans = (Map(int64_t, int64_t)) MAP_INIT;
mfp->mf_page_size = MEMFILE_PAGE_SIZE;
// Try to set the page size equal to device's block size. Speeds up I/O a lot.
@@ -182,15 +180,15 @@ void mf_close(memfile_T *mfp, bool del_file)
}
// free entries in used list
- for (bhdr_T *hp = mfp->mf_used_first, *nextp; hp != NULL; hp = nextp) {
- nextp = hp->bh_next;
+ bhdr_T *hp;
+ map_foreach_value(&mfp->mf_hash, hp, {
mf_free_bhdr(hp);
- }
+ })
while (mfp->mf_free_first != NULL) { // free entries in free list
xfree(mf_rem_free(mfp));
}
- mf_hash_free(&mfp->mf_hash);
- mf_hash_free_all(&mfp->mf_trans); // free hashtable and its items
+ map_destroy(int64_t, &mfp->mf_hash);
+ map_destroy(int64_t, &mfp->mf_trans); // free hashtable and its items
mf_free_fnames(mfp);
xfree(mfp);
}
@@ -271,8 +269,7 @@ bhdr_T *mf_new(memfile_T *mfp, bool negative, unsigned page_count)
hp->bh_flags = BH_LOCKED | BH_DIRTY; // new block is always dirty
mfp->mf_dirty = MF_DIRTY_YES;
hp->bh_page_count = page_count;
- mf_ins_used(mfp, hp);
- mf_ins_hash(mfp, hp);
+ pmap_put(int64_t)(&mfp->mf_hash, hp->bh_bnum, hp);
// Init the data to all zero, to avoid reading uninitialized data.
// This also avoids that the passwd file ends up in the swap file!
@@ -294,7 +291,7 @@ bhdr_T *mf_get(memfile_T *mfp, blocknr_T nr, unsigned page_count)
}
// see if it is in the cache
- bhdr_T *hp = mf_find_hash(mfp, nr);
+ bhdr_T *hp = pmap_get(int64_t)(&mfp->mf_hash, nr);
if (hp == NULL) { // not in the hash list
if (nr < 0 || nr >= mfp->mf_infile_count) { // can't be in the file
return NULL;
@@ -317,13 +314,11 @@ bhdr_T *mf_get(memfile_T *mfp, blocknr_T nr, unsigned page_count)
return NULL;
}
} else {
- mf_rem_used(mfp, hp); // remove from list, insert in front below
- mf_rem_hash(mfp, hp);
+ pmap_del(int64_t)(&mfp->mf_hash, hp->bh_bnum, NULL);
}
hp->bh_flags |= BH_LOCKED;
- mf_ins_used(mfp, hp); // put in front of used list
- mf_ins_hash(mfp, hp); // put in front of hash list
+ pmap_put(int64_t)(&mfp->mf_hash, hp->bh_bnum, hp); // put in front of hash table
return hp;
}
@@ -356,8 +351,7 @@ void mf_put(memfile_T *mfp, bhdr_T *hp, bool dirty, bool infile)
void mf_free(memfile_T *mfp, bhdr_T *hp)
{
xfree(hp->bh_data); // free data
- mf_rem_hash(mfp, hp); // get *hp out of the hash list
- mf_rem_used(mfp, hp); // get *hp out of the used list
+ pmap_del(int64_t)(&mfp->mf_hash, hp->bh_bnum, NULL); // get *hp out of the hash table
if (hp->bh_bnum < 0) {
xfree(hp); // don't want negative numbers in free list
mfp->mf_neg_count--;
@@ -399,7 +393,8 @@ int mf_sync(memfile_T *mfp, int flags)
// fails then we give up.
int status = OK;
bhdr_T *hp;
- for (hp = mfp->mf_used_last; hp != NULL; hp = hp->bh_prev) {
+ // note, "last" block is typically earlier in the hash list
+ map_foreach_value(&mfp->mf_hash, hp, {
if (((flags & MFS_ALL) || hp->bh_bnum >= 0)
&& (hp->bh_flags & BH_DIRTY)
&& (status == OK || (hp->bh_bnum >= 0
@@ -424,7 +419,7 @@ int mf_sync(memfile_T *mfp, int flags)
break;
}
}
- }
+ })
// If the whole list is flushed, the memfile is not dirty anymore.
// In case of an error, dirty flag is also set, to avoid trying all the time.
@@ -447,61 +442,15 @@ int mf_sync(memfile_T *mfp, int flags)
/// These are blocks that need to be written to a newly created swapfile.
void mf_set_dirty(memfile_T *mfp)
{
- for (bhdr_T *hp = mfp->mf_used_last; hp != NULL; hp = hp->bh_prev) {
+ bhdr_T *hp;
+ map_foreach_value(&mfp->mf_hash, hp, {
if (hp->bh_bnum > 0) {
hp->bh_flags |= BH_DIRTY;
}
- }
+ })
mfp->mf_dirty = MF_DIRTY_YES;
}
-/// Insert block in front of memfile's hash list.
-static void mf_ins_hash(memfile_T *mfp, bhdr_T *hp)
-{
- mf_hash_add_item(&mfp->mf_hash, (mf_hashitem_T *)hp);
-}
-
-/// Remove block from memfile's hash list.
-static void mf_rem_hash(memfile_T *mfp, bhdr_T *hp)
-{
- mf_hash_rem_item(&mfp->mf_hash, (mf_hashitem_T *)hp);
-}
-
-/// Lookup block with number "nr" in memfile's hash list.
-static bhdr_T *mf_find_hash(memfile_T *mfp, blocknr_T nr)
-{
- return (bhdr_T *)mf_hash_find(&mfp->mf_hash, nr);
-}
-
-/// Insert block at the front of memfile's used list.
-static void mf_ins_used(memfile_T *mfp, bhdr_T *hp)
-{
- hp->bh_next = mfp->mf_used_first;
- mfp->mf_used_first = hp;
- hp->bh_prev = NULL;
- if (hp->bh_next == NULL) { // list was empty, adjust last pointer
- mfp->mf_used_last = hp;
- } else {
- hp->bh_next->bh_prev = hp;
- }
-}
-
-/// Remove block from memfile's used list.
-static void mf_rem_used(memfile_T *mfp, bhdr_T *hp)
-{
- if (hp->bh_next == NULL) { // last block in used list
- mfp->mf_used_last = hp->bh_prev;
- } else {
- hp->bh_next->bh_prev = hp->bh_prev;
- }
-
- if (hp->bh_prev == NULL) { // first block in used list
- mfp->mf_used_first = hp->bh_next;
- } else {
- hp->bh_prev->bh_next = hp->bh_next;
- }
-}
-
/// Release as many blocks as possible.
///
/// Used in case of out of memory
@@ -520,17 +469,18 @@ bool mf_release_all(void)
// Flush as many blocks as possible, only if there is a swapfile.
if (mfp->mf_fd >= 0) {
- for (bhdr_T *hp = mfp->mf_used_last; hp != NULL;) {
+ for (int i = 0; i < (int)map_size(&mfp->mf_hash);) {
+ bhdr_T *hp = mfp->mf_hash.values[i];
if (!(hp->bh_flags & BH_LOCKED)
&& (!(hp->bh_flags & BH_DIRTY)
|| mf_write(mfp, hp) != FAIL)) {
- mf_rem_used(mfp, hp);
- mf_rem_hash(mfp, hp);
+ pmap_del(int64_t)(&mfp->mf_hash, hp->bh_bnum, NULL);
mf_free_bhdr(hp);
- hp = mfp->mf_used_last; // restart, list was changed
retval = true;
+ // Rerun with the same value of i. Another item will have taken
+ // its place (or it was the last).
} else {
- hp = hp->bh_prev;
+ i++;
}
}
}
@@ -558,7 +508,7 @@ static void mf_free_bhdr(bhdr_T *hp)
/// Insert a block in the free list.
static void mf_ins_free(memfile_T *mfp, bhdr_T *hp)
{
- hp->bh_next = mfp->mf_free_first;
+ hp->bh_data = mfp->mf_free_first;
mfp->mf_free_first = hp;
}
@@ -568,7 +518,7 @@ static void mf_ins_free(memfile_T *mfp, bhdr_T *hp)
static bhdr_T *mf_rem_free(memfile_T *mfp)
{
bhdr_T *hp = mfp->mf_free_first;
- mfp->mf_free_first = hp->bh_next;
+ mfp->mf_free_first = hp->bh_data;
return hp;
}
@@ -637,7 +587,7 @@ static int mf_write(memfile_T *mfp, bhdr_T *hp)
blocknr_T nr = hp->bh_bnum; // block nr which is being written
if (nr > mfp->mf_infile_count) { // beyond end of file
nr = mfp->mf_infile_count;
- hp2 = mf_find_hash(mfp, nr); // NULL caught below
+ hp2 = pmap_get(int64_t)(&mfp->mf_hash, nr); // NULL caught below
} else {
hp2 = hp;
}
@@ -690,8 +640,6 @@ static int mf_trans_add(memfile_T *mfp, bhdr_T *hp)
return OK;
}
- mf_blocknr_trans_item_T *np = xmalloc(sizeof(mf_blocknr_trans_item_T));
-
// Get a new number for the block.
// If the first item in the free list has sufficient pages, use its number.
// Otherwise use mf_blocknr_max.
@@ -714,15 +662,13 @@ static int mf_trans_add(memfile_T *mfp, bhdr_T *hp)
mfp->mf_blocknr_max += page_count;
}
- np->nt_old_bnum = hp->bh_bnum; // adjust number
- np->nt_new_bnum = new_bnum;
-
- mf_rem_hash(mfp, hp); // remove from old hash list
+ blocknr_T old_bnum = hp->bh_bnum; // adjust number
+ pmap_del(int64_t)(&mfp->mf_hash, hp->bh_bnum, NULL);
hp->bh_bnum = new_bnum;
- mf_ins_hash(mfp, hp); // insert in new hash list
+ pmap_put(int64_t)(&mfp->mf_hash, new_bnum, hp);
// Insert "np" into "mf_trans" hashtable with key "np->nt_old_bnum".
- mf_hash_add_item(&mfp->mf_trans, (mf_hashitem_T *)np);
+ map_put(int64_t, int64_t)(&mfp->mf_trans, old_bnum, new_bnum);
return OK;
}
@@ -733,20 +679,16 @@ static int mf_trans_add(memfile_T *mfp, bhdr_T *hp)
/// The old number When not found.
blocknr_T mf_trans_del(memfile_T *mfp, blocknr_T old_nr)
{
- mf_blocknr_trans_item_T *np =
- (mf_blocknr_trans_item_T *)mf_hash_find(&mfp->mf_trans, old_nr);
-
- if (np == NULL) { // not found
+ blocknr_T *num = map_ref(int64_t, int64_t)(&mfp->mf_trans, old_nr, false);
+ if (num == NULL) { // not found
return old_nr;
}
mfp->mf_neg_count--;
- blocknr_T new_bnum = np->nt_new_bnum;
+ blocknr_T new_bnum = *num;
// remove entry from the trans list
- mf_hash_rem_item(&mfp->mf_trans, (mf_hashitem_T *)np);
-
- xfree(np);
+ map_del(int64_t, int64_t)(&mfp->mf_trans, old_nr, NULL);
return new_bnum;
}
@@ -810,7 +752,7 @@ static bool mf_do_open(memfile_T *mfp, char *fname, int flags)
emsg(_("E300: Swap file already exists (symlink attack?)"));
} else {
// try to open the file
- mfp->mf_fd = MCH_OPEN_RW(mfp->mf_fname, flags | O_NOFOLLOW);
+ mfp->mf_fd = os_open(mfp->mf_fname, flags | O_NOFOLLOW, S_IREAD | S_IWRITE);
}
// If the file cannot be opened, use memory only
@@ -823,152 +765,3 @@ static bool mf_do_open(memfile_T *mfp, char *fname, int flags)
return true;
}
-
-//
-// Implementation of mf_hashtab_T.
-//
-
-/// The number of buckets in the hashtable is increased by a factor of
-/// MHT_GROWTH_FACTOR when the average number of items per bucket
-/// exceeds 2 ^ MHT_LOG_LOAD_FACTOR.
-enum {
- MHT_LOG_LOAD_FACTOR = 6,
- MHT_GROWTH_FACTOR = 2, // must be a power of two
-};
-
-/// Initialize an empty hash table.
-static void mf_hash_init(mf_hashtab_T *mht)
-{
- CLEAR_POINTER(mht);
- mht->mht_buckets = mht->mht_small_buckets;
- mht->mht_mask = MHT_INIT_SIZE - 1;
-}
-
-/// Free the array of a hash table. Does not free the items it contains!
-/// The hash table must not be used again without another mf_hash_init() call.
-static void mf_hash_free(mf_hashtab_T *mht)
-{
- if (mht->mht_buckets != mht->mht_small_buckets) {
- xfree(mht->mht_buckets);
- }
-}
-
-/// Free the array of a hash table and all the items it contains.
-static void mf_hash_free_all(mf_hashtab_T *mht)
-{
- for (size_t idx = 0; idx <= mht->mht_mask; idx++) {
- mf_hashitem_T *next;
- for (mf_hashitem_T *mhi = mht->mht_buckets[idx]; mhi != NULL; mhi = next) {
- next = mhi->mhi_next;
- xfree(mhi);
- }
- }
-
- mf_hash_free(mht);
-}
-
-/// Find by key.
-///
-/// @return A pointer to a mf_hashitem_T or NULL if the item was not found.
-static mf_hashitem_T *mf_hash_find(mf_hashtab_T *mht, blocknr_T key)
-{
- mf_hashitem_T *mhi = mht->mht_buckets[(size_t)key & mht->mht_mask];
- while (mhi != NULL && mhi->mhi_key != key) {
- mhi = mhi->mhi_next;
- }
- return mhi;
-}
-
-/// Add item to hashtable. Item must not be NULL.
-static void mf_hash_add_item(mf_hashtab_T *mht, mf_hashitem_T *mhi)
-{
- size_t idx = (size_t)mhi->mhi_key & mht->mht_mask;
- mhi->mhi_next = mht->mht_buckets[idx];
- mhi->mhi_prev = NULL;
- if (mhi->mhi_next != NULL) {
- mhi->mhi_next->mhi_prev = mhi;
- }
- mht->mht_buckets[idx] = mhi;
-
- mht->mht_count++;
-
- /// Grow hashtable when we have more thank 2^MHT_LOG_LOAD_FACTOR
- /// items per bucket on average.
- if ((mht->mht_count >> MHT_LOG_LOAD_FACTOR) > mht->mht_mask) {
- mf_hash_grow(mht);
- }
-}
-
-/// Remove item from hashtable. Item must be non NULL and within hashtable.
-static void mf_hash_rem_item(mf_hashtab_T *mht, mf_hashitem_T *mhi)
-{
- if (mhi->mhi_prev == NULL) {
- mht->mht_buckets[(size_t)mhi->mhi_key & mht->mht_mask] =
- mhi->mhi_next;
- } else {
- mhi->mhi_prev->mhi_next = mhi->mhi_next;
- }
-
- if (mhi->mhi_next != NULL) {
- mhi->mhi_next->mhi_prev = mhi->mhi_prev;
- }
-
- mht->mht_count--;
-
- // We could shrink the table here, but it typically takes little memory,
- // so why bother?
-}
-
-/// Increase number of buckets in the hashtable by MHT_GROWTH_FACTOR and
-/// rehash items.
-static void mf_hash_grow(mf_hashtab_T *mht)
-{
- size_t size = (mht->mht_mask + 1) * MHT_GROWTH_FACTOR * sizeof(void *);
- mf_hashitem_T **buckets = xcalloc(1, size);
-
- int shift = 0;
- while ((mht->mht_mask >> shift) != 0) {
- shift++;
- }
-
- for (size_t i = 0; i <= mht->mht_mask; i++) {
- /// Traverse the items in the i-th original bucket and move them into
- /// MHT_GROWTH_FACTOR new buckets, preserving their relative order
- /// within each new bucket. Preserving the order is important because
- /// mf_get() tries to keep most recently used items at the front of
- /// each bucket.
- ///
- /// Here we strongly rely on the fact that hashes are computed modulo
- /// a power of two.
-
- mf_hashitem_T *tails[MHT_GROWTH_FACTOR];
- CLEAR_FIELD(tails);
-
- for (mf_hashitem_T *mhi = mht->mht_buckets[i];
- mhi != NULL; mhi = mhi->mhi_next) {
- size_t j = (mhi->mhi_key >> shift) & (MHT_GROWTH_FACTOR - 1);
- if (tails[j] == NULL) {
- buckets[i + (j << shift)] = mhi;
- tails[j] = mhi;
- mhi->mhi_prev = NULL;
- } else {
- tails[j]->mhi_next = mhi;
- mhi->mhi_prev = tails[j];
- tails[j] = mhi;
- }
- }
-
- for (size_t j = 0; j < MHT_GROWTH_FACTOR; j++) {
- if (tails[j] != NULL) {
- tails[j]->mhi_next = NULL;
- }
- }
- }
-
- if (mht->mht_buckets != mht->mht_small_buckets) {
- xfree(mht->mht_buckets);
- }
-
- mht->mht_buckets = buckets;
- mht->mht_mask = (mht->mht_mask + 1) * MHT_GROWTH_FACTOR - 1;
-}
diff --git a/src/nvim/memfile_defs.h b/src/nvim/memfile_defs.h
index 917dd6a905..bf9bb208a4 100644
--- a/src/nvim/memfile_defs.h
+++ b/src/nvim/memfile_defs.h
@@ -5,6 +5,7 @@
#include <stdint.h>
#include <stdlib.h>
+#include "nvim/map.h"
#include "nvim/pos.h"
#include "nvim/types.h"
@@ -15,57 +16,20 @@
/// with negative numbers are currently in memory only.
typedef int64_t blocknr_T;
-/// A hash item.
-///
-/// Items' keys are block numbers.
-/// Items in the same bucket are organized into a doubly-linked list.
-///
-/// Therefore, items can be arbitrary data structures beginning with pointers
-/// for the list and and a block number key.
-typedef struct mf_hashitem {
- struct mf_hashitem *mhi_next;
- struct mf_hashitem *mhi_prev;
- blocknr_T mhi_key;
-} mf_hashitem_T;
-
-/// Initial size for a hashtable.
-#define MHT_INIT_SIZE 64
-
-/// A chained hashtable with block numbers as keys and arbitrary data structures
-/// as items.
-///
-/// This is an intrusive data structure: we require that items begin with
-/// mf_hashitem_T which contains the key and linked list pointers. List of items
-/// in each bucket is doubly-linked.
-typedef struct mf_hashtab {
- size_t mht_mask; ///< mask used to mod hash value to array index
- ///< (nr of items in array is 'mht_mask + 1')
- size_t mht_count; ///< number of items inserted
- mf_hashitem_T **mht_buckets; ///< points to the array of buckets (can be
- ///< mht_small_buckets or a newly allocated array
- ///< when mht_small_buckets becomes too small)
- mf_hashitem_T *mht_small_buckets[MHT_INIT_SIZE]; ///< initial buckets
-} mf_hashtab_T;
-
/// A block header.
///
/// There is a block header for each previously used block in the memfile.
///
/// The block may be linked in the used list OR in the free list.
-/// The used blocks are also kept in hash lists.
///
/// The used list is a doubly linked list, most recently used block first.
/// The blocks in the used list have a block of memory allocated.
-/// The hash lists are used to quickly find a block in the used list.
/// The free list is a single linked list, not sorted.
/// The blocks in the free list have no block of memory allocated and
/// the contents of the block in the file (if any) is irrelevant.
typedef struct bhdr {
- mf_hashitem_T bh_hashitem; ///< header for hash table and key
-#define bh_bnum bh_hashitem.mhi_key ///< block number, part of bh_hashitem
+ blocknr_T bh_bnum; ///< key used in hash table
- struct bhdr *bh_next; ///< next block header in free or used list
- struct bhdr *bh_prev; ///< previous block header in used list
void *bh_data; ///< pointer to memory (for used block)
unsigned bh_page_count; ///< number of pages in this block
@@ -74,18 +38,6 @@ typedef struct bhdr {
unsigned bh_flags; ///< BH_DIRTY or BH_LOCKED
} bhdr_T;
-/// A block number translation list item.
-///
-/// When a block with a negative number is flushed to the file, it gets
-/// a positive number. Because the reference to the block is still the negative
-/// number, we remember the translation to the new positive number in the
-/// double linked trans lists. The structure is the same as the hash lists.
-typedef struct mf_blocknr_trans_item {
- mf_hashitem_T nt_hashitem; ///< header for hash table and key
-#define nt_old_bnum nt_hashitem.mhi_key ///< old, negative, number
- blocknr_T nt_new_bnum; ///< new, positive, number
-} mf_blocknr_trans_item_T;
-
typedef enum {
MF_DIRTY_NO = 0, ///< no dirty blocks
MF_DIRTY_YES, ///< there are dirty blocks
@@ -98,10 +50,16 @@ typedef struct memfile {
char *mf_ffname; ///< idem, full path
int mf_fd; ///< file descriptor
bhdr_T *mf_free_first; ///< first block header in free list
- bhdr_T *mf_used_first; ///< mru block header in used list
- bhdr_T *mf_used_last; ///< lru block header in used list
- mf_hashtab_T mf_hash; ///< hash lists
- mf_hashtab_T mf_trans; ///< trans lists
+
+ /// The used blocks are kept in mf_hash.
+ /// mf_hash are used to quickly find a block in the used list.
+ PMap(int64_t) mf_hash;
+
+ /// When a block with a negative number is flushed to the file, it gets
+ /// a positive number. Because the reference to the block is still the negative
+ /// number, we remember the translation to the new positive number.
+ Map(int64_t, int64_t) mf_trans;
+
blocknr_T mf_blocknr_max; ///< highest positive block number + 1
blocknr_T mf_blocknr_min; ///< lowest negative block number - 1
blocknr_T mf_neg_count; ///< number of negative blocks numbers
diff --git a/src/nvim/memline.c b/src/nvim/memline.c
index ff5f621611..dc9173910e 100644
--- a/src/nvim/memline.c
+++ b/src/nvim/memline.c
@@ -92,11 +92,6 @@
# include <time.h>
#endif
-typedef struct block0 ZERO_BL; // contents of the first block
-typedef struct pointer_block PTR_BL; // contents of a pointer block
-typedef struct data_block DATA_BL; // contents of a data block
-typedef struct pointer_entry PTR_EN; // block/line-count pair
-
enum {
DATA_ID = (('d' << 8) + 'a'), // data block id
PTR_ID = (('p' << 8) + 't'), // pointer block id
@@ -105,32 +100,32 @@ enum {
};
// pointer to a block, used in a pointer block
-struct pointer_entry {
+typedef struct {
blocknr_T pe_bnum; // block number
linenr_T pe_line_count; // number of lines in this branch
linenr_T pe_old_lnum; // lnum for this block (for recovery)
int pe_page_count; // number of pages in block pe_bnum
-};
+} PointerEntry;
// A pointer block contains a list of branches in the tree.
-struct pointer_block {
+typedef struct {
uint16_t pb_id; // ID for pointer block: PTR_ID
uint16_t pb_count; // number of pointers in this block
uint16_t pb_count_max; // maximum value for pb_count
- PTR_EN pb_pointer[]; // list of pointers to blocks
+ PointerEntry pb_pointer[]; // list of pointers to blocks
// followed by empty space until end of page
-};
+} PointerBlock;
// Value for pb_count_max.
#define PB_COUNT_MAX(mfp) \
- (uint16_t)((mfp->mf_page_size - offsetof(PTR_BL, pb_pointer)) / sizeof(PTR_EN))
+ (uint16_t)((mfp->mf_page_size - offsetof(PointerBlock, pb_pointer)) / sizeof(PointerEntry))
// A data block is a leaf in the tree.
//
// The text of the lines is at the end of the block. The text of the first line
// in the block is put at the end, the text of the second line in front of it,
// etc. Thus the order of the lines is the opposite of the line number.
-struct data_block {
+typedef struct {
uint16_t db_id; // ID for data block: DATA_ID
unsigned db_free; // free space available
unsigned db_txt_start; // byte where text starts
@@ -141,7 +136,7 @@ struct data_block {
// followed by empty space up to db_txt_start
// followed by the text in the lines until
// end of page
-};
+} DataBlock;
// The low bits of db_index hold the actual index. The topmost bit is
// used for the global command to be able to mark a line.
@@ -153,7 +148,7 @@ struct data_block {
#define DB_INDEX_MASK (~DB_MARKED)
#define INDEX_SIZE (sizeof(unsigned)) // size of one db_index entry
-#define HEADER_SIZE (offsetof(DATA_BL, db_index)) // size of data block header
+#define HEADER_SIZE (offsetof(DataBlock, db_index)) // size of data block header
enum {
B0_FNAME_SIZE_ORG = 900, // what it was in older versions
@@ -172,7 +167,8 @@ enum {
B0_MAGIC_CHAR = 0x55,
};
-// Block zero holds all info about the swap file.
+// Block zero holds all info about the swap file. This is the first block in
+// the file.
//
// NOTE: DEFINITION OF BLOCK 0 SHOULD NOT CHANGE! It would make all existing
// swap files unusable!
@@ -182,7 +178,7 @@ enum {
// This block is built up of single bytes, to make it portable across
// different machines. b0_magic_* is used to check the byte order and size of
// variables, because the rest of the swap file is not portable.
-struct block0 {
+typedef struct {
char b0_id[2]; ///< ID for block 0: BLOCK0_ID0 and BLOCK0_ID1.
char b0_version[10]; // Vim version string
char b0_page_size[4]; // number of bytes per page
@@ -196,7 +192,7 @@ struct block0 {
int b0_magic_int; // check for byte order of int
int16_t b0_magic_short; // check for byte order of short
char b0_magic_char; // check for last char
-};
+} ZeroBlock;
// Note: b0_dirty and b0_flags are put at the end of the file name. For very
// long file names in older versions of Vim they are invalid.
@@ -315,7 +311,7 @@ int ml_open(buf_T *buf)
iemsg(_("E298: Didn't get block nr 0?"));
goto error;
}
- ZERO_BL *b0p = hp->bh_data;
+ ZeroBlock *b0p = hp->bh_data;
b0p->b0_id[0] = BLOCK0_ID0;
b0p->b0_id[1] = BLOCK0_ID1;
@@ -354,7 +350,7 @@ int ml_open(buf_T *buf)
iemsg(_("E298: Didn't get block nr 1?"));
goto error;
}
- PTR_BL *pp = hp->bh_data;
+ PointerBlock *pp = hp->bh_data;
pp->pb_count = 1;
pp->pb_pointer[0].pe_bnum = 2;
pp->pb_pointer[0].pe_page_count = 1;
@@ -369,7 +365,7 @@ int ml_open(buf_T *buf)
goto error;
}
- DATA_BL *dp = hp->bh_data;
+ DataBlock *dp = hp->bh_data;
dp->db_index[0] = --dp->db_txt_start; // at end of block
dp->db_free -= 1 + (unsigned)INDEX_SIZE;
dp->db_line_count = 1;
@@ -608,14 +604,14 @@ void ml_timestamp(buf_T *buf)
}
/// Checks whether the IDs in b0 are valid.
-static bool ml_check_b0_id(ZERO_BL *b0p)
+static bool ml_check_b0_id(ZeroBlock *b0p)
FUNC_ATTR_NONNULL_ALL
{
return b0p->b0_id[0] == BLOCK0_ID0 && b0p->b0_id[1] == BLOCK0_ID1;
}
/// Checks whether all strings in b0 are valid (i.e. nul-terminated).
-static bool ml_check_b0_strings(ZERO_BL *b0p)
+static bool ml_check_b0_strings(ZeroBlock *b0p)
FUNC_ATTR_NONNULL_ALL
{
return (memchr(b0p->b0_version, NUL, 10)
@@ -633,7 +629,7 @@ static void ml_upd_block0(buf_T *buf, upd_block0_T what)
if (mfp == NULL || (hp = mf_get(mfp, 0, 1)) == NULL) {
return;
}
- ZERO_BL *b0p = hp->bh_data;
+ ZeroBlock *b0p = hp->bh_data;
if (ml_check_b0_id(b0p) == FAIL) {
iemsg(_("E304: ml_upd_block0(): Didn't get block 0??"));
} else {
@@ -649,7 +645,7 @@ static void ml_upd_block0(buf_T *buf, upd_block0_T what)
/// Write file name and timestamp into block 0 of a swap file.
/// Also set buf->b_mtime.
/// Don't use NameBuff[]!!!
-static void set_b0_fname(ZERO_BL *b0p, buf_T *buf)
+static void set_b0_fname(ZeroBlock *b0p, buf_T *buf)
{
if (buf->b_ffname == NULL) {
b0p->b0_fname[0] = NUL;
@@ -702,7 +698,7 @@ static void set_b0_fname(ZERO_BL *b0p, buf_T *buf)
/// swapfile for "buf" are in the same directory.
/// This is fail safe: if we are not sure the directories are equal the flag is
/// not set.
-static void set_b0_dir_flag(ZERO_BL *b0p, buf_T *buf)
+static void set_b0_dir_flag(ZeroBlock *b0p, buf_T *buf)
{
if (same_directory(buf->b_ml.ml_mfp->mf_fname, buf->b_ffname)) {
b0p->b0_flags |= B0_SAME_DIR;
@@ -712,7 +708,7 @@ static void set_b0_dir_flag(ZERO_BL *b0p, buf_T *buf)
}
/// When there is room, add the 'fileencoding' to block zero.
-static void add_b0_fenc(ZERO_BL *b0p, buf_T *buf)
+static void add_b0_fenc(ZeroBlock *b0p, buf_T *buf)
{
const int size = B0_FNAME_SIZE_NOCRYPT;
@@ -730,7 +726,7 @@ static void add_b0_fenc(ZERO_BL *b0p, buf_T *buf)
/// Return true if the process with number "b0p->b0_pid" is still running.
/// "swap_fname" is the name of the swap file, if it's from before a reboot then
/// the result is false;
-static bool swapfile_process_running(const ZERO_BL *b0p, const char *swap_fname)
+static bool swapfile_process_running(const ZeroBlock *b0p, const char *swap_fname)
{
FileInfo st;
double uptime;
@@ -754,11 +750,11 @@ void ml_recover(bool checkext)
memfile_T *mfp = NULL;
char *fname_used = NULL;
bhdr_T *hp = NULL;
- ZERO_BL *b0p;
+ ZeroBlock *b0p;
int b0_ff;
char *b0_fenc = NULL;
- PTR_BL *pp;
- DATA_BL *dp;
+ PointerBlock *pp;
+ DataBlock *dp;
infoptr_T *ip;
bool directly;
char *p;
@@ -1467,7 +1463,7 @@ static bool process_still_running;
void get_b0_dict(const char *fname, dict_T *d)
{
int fd;
- struct block0 b0;
+ ZeroBlock b0;
if ((fd = os_open(fname, O_RDONLY, 0)) >= 0) {
if (read_eintr(fd, &b0, sizeof(b0)) == sizeof(b0)) {
@@ -1506,7 +1502,7 @@ static time_t swapfile_info(char *fname)
{
assert(fname != NULL);
int fd;
- struct block0 b0;
+ ZeroBlock b0;
time_t x = (time_t)0;
#ifdef UNIX
char uname[B0_UNAME_SIZE];
@@ -1596,7 +1592,7 @@ static time_t swapfile_info(char *fname)
/// can be safely deleted.
static bool swapfile_unchanged(char *fname)
{
- struct block0 b0;
+ ZeroBlock b0;
// Swap file must exist.
if (!os_path_exists(fname)) {
@@ -1905,7 +1901,7 @@ errorret:
goto errorret;
}
- DATA_BL *dp = hp->bh_data;
+ DataBlock *dp = hp->bh_data;
char *ptr = (char *)dp + (dp->db_index[lnum - buf->b_ml.ml_locked_low] & DB_INDEX_MASK);
buf->b_ml.ml_line_ptr = ptr;
@@ -2037,7 +2033,7 @@ static int ml_append_int(buf_T *buf, linenr_T lnum, char *line, colnr_T len, boo
// get line count (number of indexes in current block) before the insertion
int line_count = buf->b_ml.ml_locked_high - buf->b_ml.ml_locked_low;
- DATA_BL *dp = hp->bh_data;
+ DataBlock *dp = hp->bh_data;
// If
// - there is not enough room in the current block
@@ -2120,13 +2116,13 @@ static int ml_append_int(buf_T *buf, linenr_T lnum, char *line, colnr_T len, boo
int lines_moved;
int data_moved = 0; // init to shut up gcc
int total_moved = 0; // init to shut up gcc
- DATA_BL *dp_right, *dp_left;
+ DataBlock *dp_right, *dp_left;
int stack_idx;
bool in_left;
int lineadd;
blocknr_T bnum_left, bnum_right;
linenr_T lnum_left, lnum_right;
- PTR_BL *pp_new;
+ PointerBlock *pp_new;
// We are going to allocate a new data block. Depending on the
// situation it will be put to the left or right of the existing
@@ -2263,7 +2259,7 @@ static int ml_append_int(buf_T *buf, linenr_T lnum, char *line, colnr_T len, boo
if ((hp = mf_get(mfp, ip->ip_bnum, 1)) == NULL) {
return FAIL;
}
- PTR_BL *pp = hp->bh_data; // must be pointer block
+ PointerBlock *pp = hp->bh_data; // must be pointer block
if (pp->pb_id != PTR_ID) {
iemsg(_(e_pointer_block_id_wrong_three));
mf_put(mfp, hp, false, false);
@@ -2276,7 +2272,7 @@ static int ml_append_int(buf_T *buf, linenr_T lnum, char *line, colnr_T len, boo
if (pb_idx + 1 < (int)pp->pb_count) {
memmove(&pp->pb_pointer[pb_idx + 2],
&pp->pb_pointer[pb_idx + 1],
- (size_t)(pp->pb_count - pb_idx - 1) * sizeof(PTR_EN));
+ (size_t)(pp->pb_count - pb_idx - 1) * sizeof(PointerEntry));
}
pp->pb_count++;
pp->pb_pointer[pb_idx].pe_line_count = line_count_left;
@@ -2349,7 +2345,7 @@ static int ml_append_int(buf_T *buf, linenr_T lnum, char *line, colnr_T len, boo
if (total_moved) {
memmove(&pp_new->pb_pointer[0],
&pp->pb_pointer[pb_idx + 1],
- (size_t)(total_moved) * sizeof(PTR_EN));
+ (size_t)(total_moved) * sizeof(PointerEntry));
pp_new->pb_count = (uint16_t)total_moved;
pp->pb_count = (uint16_t)(pp->pb_count - (total_moved - 1));
pp->pb_pointer[pb_idx + 1].pe_bnum = bnum_right;
@@ -2542,7 +2538,7 @@ static int ml_delete_int(buf_T *buf, linenr_T lnum, bool message)
return FAIL;
}
- DATA_BL *dp = hp->bh_data;
+ DataBlock *dp = hp->bh_data;
// compute line count (number of entries in block) before the delete
int count = buf->b_ml.ml_locked_high - buf->b_ml.ml_locked_low + 2;
int idx = lnum - buf->b_ml.ml_locked_low;
@@ -2579,7 +2575,7 @@ static int ml_delete_int(buf_T *buf, linenr_T lnum, bool message)
if ((hp = mf_get(mfp, ip->ip_bnum, 1)) == NULL) {
return FAIL;
}
- PTR_BL *pp = hp->bh_data; // must be pointer block
+ PointerBlock *pp = hp->bh_data; // must be pointer block
if (pp->pb_id != PTR_ID) {
iemsg(_(e_pointer_block_id_wrong_four));
mf_put(mfp, hp, false, false);
@@ -2591,7 +2587,7 @@ static int ml_delete_int(buf_T *buf, linenr_T lnum, bool message)
} else {
if (count != idx) { // move entries after the deleted one
memmove(&pp->pb_pointer[idx], &pp->pb_pointer[idx + 1],
- (size_t)(count - idx) * sizeof(PTR_EN));
+ (size_t)(count - idx) * sizeof(PointerEntry));
}
mf_put(mfp, hp, true, false);
@@ -2651,7 +2647,7 @@ void ml_setmarked(linenr_T lnum)
if ((hp = ml_find_line(curbuf, lnum, ML_FIND)) == NULL) {
return; // give error message?
}
- DATA_BL *dp = hp->bh_data;
+ DataBlock *dp = hp->bh_data;
dp->db_index[lnum - curbuf->b_ml.ml_locked_low] |= DB_MARKED;
curbuf->b_ml.ml_flags |= ML_LOCKED_DIRTY;
}
@@ -2673,7 +2669,7 @@ linenr_T ml_firstmarked(void)
if ((hp = ml_find_line(curbuf, lnum, ML_FIND)) == NULL) {
return 0; // give error message?
}
- DATA_BL *dp = hp->bh_data;
+ DataBlock *dp = hp->bh_data;
for (int i = lnum - curbuf->b_ml.ml_locked_low;
lnum <= curbuf->b_ml.ml_locked_high; i++, lnum++) {
@@ -2705,7 +2701,7 @@ void ml_clearmarked(void)
if ((hp = ml_find_line(curbuf, lnum, ML_FIND)) == NULL) {
return; // give error message?
}
- DATA_BL *dp = hp->bh_data;
+ DataBlock *dp = hp->bh_data;
for (int i = lnum - curbuf->b_ml.ml_locked_low;
lnum <= curbuf->b_ml.ml_locked_high; i++, lnum++) {
@@ -2754,7 +2750,7 @@ static void ml_flush_line(buf_T *buf)
if (hp == NULL) {
siemsg(_("E320: Cannot find line %" PRId64), (int64_t)lnum);
} else {
- DATA_BL *dp = hp->bh_data;
+ DataBlock *dp = hp->bh_data;
int idx = lnum - buf->b_ml.ml_locked_low;
int start = ((dp->db_index[idx]) & DB_INDEX_MASK);
char *old_line = (char *)dp + start;
@@ -2822,7 +2818,7 @@ static bhdr_T *ml_new_data(memfile_T *mfp, bool negative, int page_count)
{
assert(page_count >= 0);
bhdr_T *hp = mf_new(mfp, negative, (unsigned)page_count);
- DATA_BL *dp = hp->bh_data;
+ DataBlock *dp = hp->bh_data;
dp->db_id = DATA_ID;
dp->db_txt_start = dp->db_txt_end = (unsigned)page_count * mfp->mf_page_size;
dp->db_free = dp->db_txt_start - (unsigned)HEADER_SIZE;
@@ -2835,7 +2831,7 @@ static bhdr_T *ml_new_data(memfile_T *mfp, bool negative, int page_count)
static bhdr_T *ml_new_ptr(memfile_T *mfp)
{
bhdr_T *hp = mf_new(mfp, false, 1);
- PTR_BL *pp = hp->bh_data;
+ PointerBlock *pp = hp->bh_data;
pp->pb_id = PTR_ID;
pp->pb_count = 0;
pp->pb_count_max = PB_COUNT_MAX(mfp);
@@ -2858,7 +2854,7 @@ static bhdr_T *ml_new_ptr(memfile_T *mfp)
/// @return NULL for failure, pointer to block header otherwise
static bhdr_T *ml_find_line(buf_T *buf, linenr_T lnum, int action)
{
- PTR_BL *pp;
+ PointerBlock *pp;
bhdr_T *hp;
int top;
@@ -2935,7 +2931,7 @@ static bhdr_T *ml_find_line(buf_T *buf, linenr_T lnum, int action)
high--;
}
- DATA_BL *dp = hp->bh_data;
+ DataBlock *dp = hp->bh_data;
if (dp->db_id == DATA_ID) { // data block
buf->b_ml.ml_locked = hp;
buf->b_ml.ml_locked_low = low;
@@ -2945,7 +2941,7 @@ static bhdr_T *ml_find_line(buf_T *buf, linenr_T lnum, int action)
return hp;
}
- pp = (PTR_BL *)(dp); // must be pointer block
+ pp = (PointerBlock *)(dp); // must be pointer block
if (pp->pb_id != PTR_ID) {
iemsg(_(e_pointer_block_id_wrong));
goto error_block;
@@ -3055,7 +3051,7 @@ static void ml_lineadd(buf_T *buf, int count)
if ((hp = mf_get(mfp, ip->ip_bnum, 1)) == NULL) {
break;
}
- PTR_BL *pp = hp->bh_data; // must be pointer block
+ PointerBlock *pp = hp->bh_data; // must be pointer block
if (pp->pb_id != PTR_ID) {
mf_put(mfp, hp, false, false);
iemsg(_(e_pointer_block_id_wrong_two));
@@ -3371,7 +3367,7 @@ static char *findswapname(buf_T *buf, char **dirp, char *old_fname, bool *found_
if (!recoverymode && buf_fname != NULL
&& !buf->b_help && !(buf->b_flags & BF_DUMMY)) {
int fd;
- struct block0 b0;
+ ZeroBlock b0;
int differ = false;
// Try to read block 0 from the swap file to get the original
@@ -3548,7 +3544,7 @@ static char *findswapname(buf_T *buf, char **dirp, char *old_fname, bool *found_
return fname;
}
-static int b0_magic_wrong(ZERO_BL *b0p)
+static int b0_magic_wrong(ZeroBlock *b0p)
{
return b0p->b0_magic_long != B0_MAGIC_LONG
|| b0p->b0_magic_int != B0_MAGIC_INT
@@ -3684,22 +3680,19 @@ static long char_to_long(const char *s_in)
/// - 'fileencoding'
void ml_setflags(buf_T *buf)
{
- bhdr_T *hp;
- ZERO_BL *b0p;
+ ZeroBlock *b0p;
if (!buf->b_ml.ml_mfp) {
return;
}
- for (hp = buf->b_ml.ml_mfp->mf_used_last; hp != NULL; hp = hp->bh_prev) {
- if (hp->bh_bnum == 0) {
- b0p = hp->bh_data;
- b0p->b0_dirty = buf->b_changed ? B0_DIRTY : 0;
- b0p->b0_flags = (char)((b0p->b0_flags & ~B0_FF_MASK) | (uint8_t)(get_fileformat(buf) + 1));
- add_b0_fenc(b0p, buf);
- hp->bh_flags |= BH_DIRTY;
- mf_sync(buf->b_ml.ml_mfp, MFS_ZERO);
- break;
- }
+ bhdr_T *hp = pmap_get(int64_t)(&buf->b_ml.ml_mfp->mf_hash, 0);
+ if (hp) {
+ b0p = hp->bh_data;
+ b0p->b0_dirty = buf->b_changed ? B0_DIRTY : 0;
+ b0p->b0_flags = (char)((b0p->b0_flags & ~B0_FF_MASK) | (uint8_t)(get_fileformat(buf) + 1));
+ add_b0_fenc(b0p, buf);
+ hp->bh_flags |= BH_DIRTY;
+ mf_sync(buf->b_ml.ml_mfp, MFS_ZERO);
}
}
@@ -3772,7 +3765,7 @@ static void ml_updatechunk(buf_T *buf, linenr_T line, long len, int updtype)
curchnk->mlcs_totalsize += len;
if (updtype == ML_CHNK_ADDLINE) {
int rest;
- DATA_BL *dp;
+ DataBlock *dp;
curchnk->mlcs_numlines++;
// May resize here so we don't have to do it in both cases below
@@ -3975,7 +3968,7 @@ long ml_find_line_or_offset(buf_T *buf, linenr_T lnum, long *offp, bool no_ff)
|| (hp = ml_find_line(buf, curline, ML_FIND)) == NULL) {
return -1;
}
- DATA_BL *dp = hp->bh_data;
+ DataBlock *dp = hp->bh_data;
int count
= buf->b_ml.ml_locked_high - buf->b_ml.ml_locked_low + 1; // number of entries in block
int idx;
diff --git a/src/nvim/menu.c b/src/nvim/menu.c
index 1d35c97b39..3b2e45e2a4 100644
--- a/src/nvim/menu.c
+++ b/src/nvim/menu.c
@@ -618,8 +618,6 @@ static void free_menu(vimmenu_T **menup)
{
vimmenu_T *menu = *menup;
- // Don't change *menup until after calling gui_mch_destroy_menu(). The
- // MacOS code needs the original structure to properly delete the menu.
*menup = menu->next;
xfree(menu->name);
xfree(menu->dname);
diff --git a/src/nvim/mouse.c b/src/nvim/mouse.c
index 76ac191a68..b8c80cadf5 100644
--- a/src/nvim/mouse.c
+++ b/src/nvim/mouse.c
@@ -220,7 +220,10 @@ static int get_fpos_of_mouse(pos_T *mpos)
// compute the position in the buffer line from the posn on the screen
bool below_buffer = mouse_comp_pos(wp, &row, &col, &mpos->lnum);
- if (!below_buffer && *wp->w_p_stc != NUL && mouse_col < win_col_off(wp)) {
+ if (!below_buffer && *wp->w_p_stc != NUL
+ && (wp->w_p_rl
+ ? wincol >= wp->w_width_inner - win_col_off(wp)
+ : wincol < win_col_off(wp))) {
return MOUSE_STATUSCOL;
}
@@ -675,6 +678,10 @@ popupexit:
click_col = mouse_col;
}
+ if (in_statuscol && wp->w_p_rl) {
+ click_col = wp->w_width_inner - click_col - 1;
+ }
+
if (click_defs != NULL) {
switch (click_defs[click_col].type) {
case kStlClickDisabled:
@@ -1254,7 +1261,10 @@ retnomove:
on_sep_line = grid == DEFAULT_GRID_HANDLE && col >= wp->w_width && col - wp->w_width + 1 == 1;
on_winbar = row == -1 && wp->w_winbar_height != 0;
on_statuscol = !below_window && !on_status_line && !on_sep_line && !on_winbar
- && *wp->w_p_stc != NUL && col < win_col_off(wp);
+ && *wp->w_p_stc != NUL
+ && (wp->w_p_rl
+ ? col >= wp->w_width_inner - win_col_off(wp)
+ : col < win_col_off(wp));
// The rightmost character of the status line might be a vertical
// separator character if there is no connecting window to the right.
diff --git a/src/nvim/msgpack_rpc/channel.c b/src/nvim/msgpack_rpc/channel.c
index 3a9b36a914..b753d46d64 100644
--- a/src/nvim/msgpack_rpc/channel.c
+++ b/src/nvim/msgpack_rpc/channel.c
@@ -207,9 +207,15 @@ Object rpc_send_call(uint64_t id, const char *method_name, Array args, ArenaMem
// Push the frame
ChannelCallFrame frame = { request_id, false, false, NIL, NULL };
kv_push(rpc->call_stack, &frame);
- LOOP_PROCESS_EVENTS_UNTIL(&main_loop, channel->events, -1, frame.returned);
+ LOOP_PROCESS_EVENTS_UNTIL(&main_loop, channel->events, -1, frame.returned || rpc->closed);
(void)kv_pop(rpc->call_stack);
+ if (rpc->closed) {
+ api_set_error(err, kErrorTypeException, "Invalid channel: %" PRIu64, id);
+ channel_decref(channel);
+ return NIL;
+ }
+
if (frame.errored) {
if (frame.result.type == kObjectTypeString) {
api_set_error(err, kErrorTypeException, "%s",
diff --git a/src/nvim/option_defs.h b/src/nvim/option_defs.h
index 1007925ccb..14f29682e1 100644
--- a/src/nvim/option_defs.h
+++ b/src/nvim/option_defs.h
@@ -630,6 +630,7 @@ EXTERN unsigned rdb_flags;
#define RDB_NODELTA 0x008
#define RDB_LINE 0x010
#define RDB_FLUSH 0x020
+#define RDB_INTERSECT 0x040
EXTERN long p_rdt; // 'redrawtime'
EXTERN long p_re; // 'regexpengine'
diff --git a/src/nvim/options.lua b/src/nvim/options.lua
index b774df476f..3221e5b6e9 100644
--- a/src/nvim/options.lua
+++ b/src/nvim/options.lua
@@ -566,7 +566,7 @@ return {
backups if you don't care about losing the file.
Note that environment variables are not expanded. If you want to use
- $HOME you must expand it explicitly, e.g.: >
+ $HOME you must expand it explicitly, e.g.: >vim
:let &backupskip = escape(expand('$HOME'), '\') .. '/tmp/*'
< Note that the default also makes sure that "crontab -e" works (when a
@@ -3441,10 +3441,10 @@ return {
n-v-c-sm:block,i-ci-ve:ver25-Cursor,r-cr-o:hor20
In Normal et al. modes, use a block cursor
with the default colors defined by the host
- terminal. In Insert-likes modes, use
+ terminal. In Insert-like modes, use
a vertical bar cursor with colors from
- "Cursor" highlight group. In Replace-likes
- modes, use a underline cursor with
+ "Cursor" highlight group. In Replace-like
+ modes, use an underline cursor with
default colors.
i-ci:ver30-iCursor-blinkwait300-blinkon200-blinkoff150
In Insert and Command-line Insert mode, use a
diff --git a/src/nvim/os/shell.c b/src/nvim/os/shell.c
index 979c6153aa..81e15bf841 100644
--- a/src/nvim/os/shell.c
+++ b/src/nvim/os/shell.c
@@ -1026,7 +1026,7 @@ static bool out_data_decide_throttle(size_t size)
started = os_hrtime();
} else {
uint64_t since = os_hrtime() - started;
- if (since < (visit * 0.1L * NS_1_SECOND)) {
+ if (since < (visit * (NS_1_SECOND / 10))) {
return true;
}
if (since > (3 * NS_1_SECOND)) {
diff --git a/src/nvim/plines.c b/src/nvim/plines.c
index 82554c7785..99f666ef3f 100644
--- a/src/nvim/plines.c
+++ b/src/nvim/plines.c
@@ -133,7 +133,7 @@ void init_chartabsize_arg(chartabsize_T *cts, win_T *wp, linenr_T lnum, colnr_T
if (cts->cts_row >= 0 && wp->w_buffer->b_virt_text_inline > 0) {
marktree_itr_get(wp->w_buffer->b_marktree, cts->cts_row, 0, cts->cts_iter);
- mtkey_t mark = marktree_itr_current(cts->cts_iter);
+ MTKey mark = marktree_itr_current(cts->cts_iter);
if (mark.pos.row == cts->cts_row) {
cts->cts_has_virt_text = true;
}
@@ -222,7 +222,7 @@ int win_lbr_chartabsize(chartabsize_T *cts, int *headp)
int tab_size = size;
int col = (int)(s - line);
while (true) {
- mtkey_t mark = marktree_itr_current(cts->cts_iter);
+ MTKey mark = marktree_itr_current(cts->cts_iter);
if (mark.pos.row != cts->cts_row || mark.pos.col > col) {
break;
} else if (mark.pos.col == col) {
diff --git a/src/nvim/spell.c b/src/nvim/spell.c
index 72e21a9130..38e045a08b 100644
--- a/src/nvim/spell.c
+++ b/src/nvim/spell.c
@@ -1280,7 +1280,7 @@ static TriState decor_spell_nav_col(win_T *wp, linenr_T lnum, linenr_T *decor_ln
decor_redraw_line(wp, lnum - 1, &decor_state);
*decor_lnum = lnum;
}
- decor_redraw_col(wp, col, col, false, &decor_state);
+ decor_redraw_col(wp, col, 0, false, &decor_state);
return decor_state.spell;
}
diff --git a/src/nvim/usercmd.c b/src/nvim/usercmd.c
index e585818e75..7ccd13c3a7 100644
--- a/src/nvim/usercmd.c
+++ b/src/nvim/usercmd.c
@@ -130,23 +130,21 @@ static struct {
char *find_ucmd(exarg_T *eap, char *p, int *full, expand_T *xp, int *complp)
{
int len = (int)(p - eap->cmd);
- int j, k, matchlen = 0;
- ucmd_T *uc;
+ int matchlen = 0;
bool found = false;
bool possible = false;
- char *cp, *np; // Point into typed cmd and test name
- garray_T *gap;
bool amb_local = false; // Found ambiguous buffer-local command,
// only full match global is accepted.
// Look for buffer-local user commands first, then global ones.
- gap = &prevwin_curwin()->w_buffer->b_ucmds;
+ garray_T *gap = &prevwin_curwin()->w_buffer->b_ucmds;
while (true) {
+ int j;
for (j = 0; j < gap->ga_len; j++) {
- uc = USER_CMD_GA(gap, j);
- cp = eap->cmd;
- np = uc->uc_name;
- k = 0;
+ ucmd_T *uc = USER_CMD_GA(gap, j);
+ char *cp = eap->cmd;
+ char *np = uc->uc_name;
+ int k = 0;
while (k < len && *np != NUL && *cp++ == *np++) {
k++;
}
@@ -447,17 +445,15 @@ int cmdcomplete_str_to_type(const char *complete_str)
static void uc_list(char *name, size_t name_len)
{
- int i, j;
bool found = false;
- ucmd_T *cmd;
- uint32_t a;
// In cmdwin, the alternative buffer should be used.
const garray_T *gap = &prevwin_curwin()->w_buffer->b_ucmds;
while (true) {
+ int i;
for (i = 0; i < gap->ga_len; i++) {
- cmd = USER_CMD_GA(gap, i);
- a = cmd->uc_argt;
+ ucmd_T *cmd = USER_CMD_GA(gap, i);
+ uint32_t a = cmd->uc_argt;
// Skip commands which don't match the requested prefix and
// commands filtered out.
@@ -559,7 +555,7 @@ static void uc_list(char *name, size_t name_len)
} while ((int64_t)len < 8 - over);
// Address Type
- for (j = 0; addr_type_complete[j].expand != ADDR_NONE; j++) {
+ for (int j = 0; addr_type_complete[j].expand != ADDR_NONE; j++) {
if (addr_type_complete[j].expand != ADDR_LINES
&& addr_type_complete[j].expand == cmd->uc_addr_type) {
int rc = snprintf(IObuff + len, IOSIZE - len, "%s", addr_type_complete[j].shortname);
@@ -623,11 +619,11 @@ static void uc_list(char *name, size_t name_len)
int parse_addr_type_arg(char *value, int vallen, cmd_addr_T *addr_type_arg)
FUNC_ATTR_NONNULL_ALL
{
- int i, a, b;
+ int i;
for (i = 0; addr_type_complete[i].expand != ADDR_NONE; i++) {
- a = (int)strlen(addr_type_complete[i].name) == vallen;
- b = strncmp(value, addr_type_complete[i].name, (size_t)vallen) == 0;
+ int a = (int)strlen(addr_type_complete[i].name) == vallen;
+ int b = strncmp(value, addr_type_complete[i].name, (size_t)vallen) == 0;
if (a && b) {
*addr_type_arg = addr_type_complete[i].expand;
break;
@@ -657,11 +653,10 @@ int parse_compl_arg(const char *value, int vallen, int *complp, uint32_t *argt,
{
const char *arg = NULL;
size_t arglen = 0;
- int i;
int valend = vallen;
// Look for any argument part - which is the part after any ','
- for (i = 0; i < vallen; i++) {
+ for (int i = 0; i < vallen; i++) {
if (value[i] == ',') {
arg = (char *)&value[i + 1];
arglen = (size_t)(vallen - i - 1);
@@ -670,6 +665,7 @@ int parse_compl_arg(const char *value, int vallen, int *complp, uint32_t *argt,
}
}
+ int i;
for (i = 0; i < (int)ARRAY_SIZE(command_complete); i++) {
if (get_command_complete(i) == NULL) {
continue;
@@ -713,8 +709,6 @@ static int uc_scan_attr(char *attr, size_t len, uint32_t *argt, long *def, int *
char **compl_arg, cmd_addr_T *addr_type_arg)
FUNC_ATTR_NONNULL_ALL
{
- char *p;
-
if (len == 0) {
emsg(_("E175: No attribute specified"));
return FAIL;
@@ -732,13 +726,12 @@ static int uc_scan_attr(char *attr, size_t len, uint32_t *argt, long *def, int *
} else if (STRNICMP(attr, "bar", len) == 0) {
*argt |= EX_TRLBAR;
} else {
- int i;
char *val = NULL;
size_t vallen = 0;
size_t attrlen = len;
// Look for the attribute name - which is the part before any '='
- for (i = 0; i < (int)len; i++) {
+ for (int i = 0; i < (int)len; i++) {
if (attr[i] == '=') {
val = &attr[i + 1];
vallen = len - (size_t)i - 1;
@@ -772,7 +765,7 @@ wrong_nargs:
if (vallen == 1 && *val == '%') {
*argt |= EX_DFLALL;
} else if (val != NULL) {
- p = val;
+ char *p = val;
if (*def >= 0) {
two_count:
emsg(_("E177: Count cannot be specified twice"));
@@ -800,7 +793,7 @@ invalid_count:
}
if (val != NULL) {
- p = val;
+ char *p = val;
if (*def >= 0) {
goto two_count;
}
@@ -878,7 +871,6 @@ int uc_add_command(char *name, size_t name_len, const char *rep, uint32_t argt,
FUNC_ATTR_NONNULL_ARG(1, 3)
{
ucmd_T *cmd = NULL;
- int i;
int cmp = 1;
char *rep_buf = NULL;
garray_T *gap;
@@ -899,12 +891,12 @@ int uc_add_command(char *name, size_t name_len, const char *rep, uint32_t argt,
gap = &ucmds;
}
+ int i;
+
// Search for the command in the already defined commands.
for (i = 0; i < gap->ga_len; i++) {
- size_t len;
-
cmd = USER_CMD_GA(gap, i);
- len = strlen(cmd->uc_name);
+ size_t len = strlen(cmd->uc_name);
cmp = strncmp(name, cmd->uc_name, name_len);
if (cmp == 0) {
if (name_len < len) {
@@ -980,9 +972,7 @@ fail:
/// ":command ..."
void ex_command(exarg_T *eap)
{
- char *name;
char *end;
- char *p;
uint32_t argt = 0;
long def = -1;
int flags = 0;
@@ -990,9 +980,8 @@ void ex_command(exarg_T *eap)
char *compl_arg = NULL;
cmd_addr_T addr_type_arg = ADDR_NONE;
int has_attr = (eap->arg[0] == '-');
- size_t name_len;
- p = eap->arg;
+ char *p = eap->arg;
// Check for attributes
while (*p == '-') {
@@ -1006,13 +995,13 @@ void ex_command(exarg_T *eap)
}
// Get the name (if any) and skip to the following argument.
- name = p;
+ char *name = p;
end = uc_validate_name(name);
if (!end) {
emsg(_("E182: Invalid command name"));
goto theend;
}
- name_len = (size_t)(end - name);
+ size_t name_len = (size_t)(end - name);
// If there is nothing after the name, and no attributes were specified,
// we are listing commands
@@ -1065,7 +1054,6 @@ void ex_delcommand(exarg_T *eap)
int i = 0;
ucmd_T *cmd = NULL;
int res = -1;
- garray_T *gap;
const char *arg = eap->arg;
bool buffer_only = false;
@@ -1074,7 +1062,7 @@ void ex_delcommand(exarg_T *eap)
arg = skipwhite(arg + 7);
}
- gap = &curbuf->b_ucmds;
+ garray_T *gap = &curbuf->b_ucmds;
while (true) {
for (i = 0; i < gap->ga_len; i++) {
cmd = USER_CMD_GA(gap, i);
@@ -1153,15 +1141,10 @@ bool uc_split_args_iter(const char *arg, size_t arglen, size_t *end, char *buf,
static char *uc_split_args(const char *arg, char **args, const size_t *arglens, size_t argc,
size_t *lenp)
{
- char *buf;
- const char *p;
- char *q;
- int len;
-
// Precalculate length
- len = 2; // Initial and final quotes
+ int len = 2; // Initial and final quotes
if (args == NULL) {
- p = arg;
+ const char *p = arg;
while (*p) {
if (p[0] == '\\' && p[1] == '\\') {
@@ -1188,7 +1171,7 @@ static char *uc_split_args(const char *arg, char **args, const size_t *arglens,
}
} else {
for (size_t i = 0; i < argc; i++) {
- p = args[i];
+ const char *p = args[i];
const char *arg_end = args[i] + arglens[i];
while (p < arg_end) {
@@ -1209,13 +1192,13 @@ static char *uc_split_args(const char *arg, char **args, const size_t *arglens,
}
}
- buf = xmalloc((size_t)len + 1);
+ char *buf = xmalloc((size_t)len + 1);
- q = buf;
+ char *q = buf;
*q++ = '"';
if (args == NULL) {
- p = arg;
+ const char *p = arg;
while (*p) {
if (p[0] == '\\' && p[1] == '\\') {
*q++ = '\\';
@@ -1242,7 +1225,7 @@ static char *uc_split_args(const char *arg, char **args, const size_t *arglens,
}
} else {
for (size_t i = 0; i < argc; i++) {
- p = args[i];
+ const char *p = args[i];
const char *arg_end = args[i] + arglens[i];
while (p < arg_end) {
@@ -1622,14 +1605,7 @@ static size_t uc_check_code(char *code, size_t len, char *buf, ucmd_T *cmd, exar
int do_ucmd(exarg_T *eap, bool preview)
{
- char *buf;
- char *p;
- char *q;
-
- char *start;
char *end = NULL;
- char *ksp;
- size_t len, totlen;
size_t split_len = 0;
char *split_buf = NULL;
@@ -1654,18 +1630,19 @@ int do_ucmd(exarg_T *eap, bool preview)
// Replace <> in the command by the arguments.
// First round: "buf" is NULL, compute length, allocate "buf".
// Second round: copy result into "buf".
- buf = NULL;
+ char *buf = NULL;
while (true) {
- p = cmd->uc_rep; // source
- q = buf; // destination
- totlen = 0;
+ char *p = cmd->uc_rep; // source
+ char *q = buf; // destination
+ size_t totlen = 0;
while (true) {
- start = vim_strchr(p, '<');
+ char *start = vim_strchr(p, '<');
if (start != NULL) {
end = vim_strchr(start + 1, '>');
}
if (buf != NULL) {
+ char *ksp;
for (ksp = p; *ksp != NUL && (uint8_t)(*ksp) != K_SPECIAL; ksp++) {}
if ((uint8_t)(*ksp) == K_SPECIAL
&& (start == NULL || ksp < start || end == NULL)
@@ -1673,7 +1650,7 @@ int do_ucmd(exarg_T *eap, bool preview)
// K_SPECIAL has been put in the buffer as K_SPECIAL
// KS_SPECIAL KE_FILLER, like for mappings, but
// do_cmdline() doesn't handle that, so convert it back.
- len = (size_t)(ksp - p);
+ size_t len = (size_t)(ksp - p);
if (len > 0) {
memmove(q, p, len);
q += len;
@@ -1693,7 +1670,7 @@ int do_ucmd(exarg_T *eap, bool preview)
end++;
// Take everything up to the '<'
- len = (size_t)(start - p);
+ size_t len = (size_t)(start - p);
if (buf == NULL) {
totlen += len;
} else {
diff --git a/src/nvim/version.h b/src/nvim/version.h
index 484350edee..e0c7b76700 100644
--- a/src/nvim/version.h
+++ b/src/nvim/version.h
@@ -7,6 +7,9 @@
// defined in version.c
extern char *Version;
extern char *longVersion;
+#ifndef NDEBUG
+extern char *version_cflags;
+#endif
//
// Vim version number, name, etc. Patchlevel is defined in version.c.
diff --git a/src/nvim/window.c b/src/nvim/window.c
index e72c32700d..c0d399c4c4 100644
--- a/src/nvim/window.c
+++ b/src/nvim/window.c
@@ -965,7 +965,13 @@ void ui_ext_win_position(win_T *wp, bool validate)
if (c.relative == kFloatRelativeWindow) {
Error dummy = ERROR_INIT;
win_T *win = find_window_by_handle(c.window, &dummy);
- if (win) {
+ api_clear_error(&dummy);
+ if (win != NULL) {
+ // When a floating window is anchored to another window,
+ // update the position of its anchored window first.
+ if (win->w_pos_changed && win->w_grid_alloc.chars != NULL && win_valid(win)) {
+ ui_ext_win_position(win, validate);
+ }
grid = &win->w_grid;
int row_off = 0, col_off = 0;
grid_adjust(&grid, &row_off, &col_off);
@@ -979,7 +985,6 @@ void ui_ext_win_position(win_T *wp, bool validate)
col += tcol - 1;
}
}
- api_clear_error(&dummy);
}
wp->w_grid_alloc.zindex = wp->w_float_config.zindex;
@@ -1884,6 +1889,10 @@ static void win_exchange(int Prenum)
beep_flush();
return;
}
+ if (text_or_buf_locked()) {
+ beep_flush();
+ return;
+ }
frame_T *frp;
@@ -5167,7 +5176,6 @@ static void win_free(win_T *wp, tabpage_T *tp)
alist_unlink(wp->w_alist);
// Don't execute autocommands while the window is halfway being deleted.
- // gui_mch_destroy_scrollbar() may trigger a FocusGained event.
block_autocmds();
clear_winopt(&wp->w_onebuf_opt);