 clint.py         | 257 +-
 src/nvim/shada.c | 303 +-
 2 files changed, 371 insertions(+), 189 deletions(-)
diff --git a/clint.py b/clint.py
index 0eaab6e3f6..d507837e58 100755
--- a/clint.py
+++ b/clint.py
@@ -198,6 +198,8 @@ _ERROR_CATEGORIES = [
'runtime/printf',
'runtime/printf_format',
'runtime/threadsafe_fn',
+ 'syntax/parenthesis',
+ 'whitespace/alignment',
'whitespace/blank_line',
'whitespace/braces',
'whitespace/comma',
@@ -213,7 +215,7 @@ _ERROR_CATEGORIES = [
'whitespace/parens',
'whitespace/semicolon',
'whitespace/tab',
- 'whitespace/todo'
+ 'whitespace/todo',
]
# The default state of the category filter. This is overridden by the --filter=
@@ -826,9 +828,9 @@ def Error(filename, linenum, category, confidence, message):
_RE_PATTERN_CLEANSE_LINE_ESCAPES = re.compile(
r'\\([abfnrtv?"\\\']|\d+|x[0-9a-fA-F]+)')
# Matches strings. Escape codes should already be removed by ESCAPES.
-_RE_PATTERN_CLEANSE_LINE_DOUBLE_QUOTES = re.compile(r'"[^"]*"')
+_RE_PATTERN_CLEANSE_LINE_DOUBLE_QUOTES = re.compile(r'"([^"]*)"')
# Matches characters. Escape codes should already be removed by ESCAPES.
-_RE_PATTERN_CLEANSE_LINE_SINGLE_QUOTES = re.compile(r"'.'")
+_RE_PATTERN_CLEANSE_LINE_SINGLE_QUOTES = re.compile(r"'(.)'")
# Matches multi-line C++ comments.
# This RE is a little bit more complicated than one might expect, because we
# have to take care of space removals tools so we can handle comments inside
@@ -923,39 +925,48 @@ def CleanseComments(line):
class CleansedLines(object):
- """Holds 3 copies of all lines with different preprocessing applied to them.
+ """Holds 5 copies of all lines with different preprocessing applied to them.
1) elided member contains lines without strings and comments,
2) lines member contains lines without comments, and
- 3) raw_lines member contains all the lines without processing.
+ 3) raw_lines member contains all the lines with multiline comments replaced.
+ 4) init_lines member contains all the lines without processing.
+ 5) elided_with_space_strings is like elided, but with string literal
+ contents replaced by same-length runs of spaces, so that column
+ positions are preserved.
All these five members are of <type 'list'>, and of the same length.
"""
- def __init__(self, lines):
+ def __init__(self, lines, init_lines):
self.elided = []
self.lines = []
self.raw_lines = lines
self.num_lines = len(lines)
+ self.init_lines = init_lines
self.lines_without_raw_strings = lines
+ self.elided_with_space_strings = []
for linenum in range(len(self.lines_without_raw_strings)):
self.lines.append(CleanseComments(
self.lines_without_raw_strings[linenum]))
elided = self._CollapseStrings(
self.lines_without_raw_strings[linenum])
self.elided.append(CleanseComments(elided))
+ elided = CleanseComments(self._CollapseStrings(
+ self.lines_without_raw_strings[linenum], True))
+ self.elided_with_space_strings.append(elided)
def NumLines(self):
"""Returns the number of lines represented."""
return self.num_lines
@staticmethod
- def _CollapseStrings(elided):
+ def _CollapseStrings(elided, keep_spaces=False):
"""Collapses strings and chars on a line to simple "" or '' blocks.
We nix strings first so we're not fooled by text like '"http://"'
Args:
elided: The line being processed.
+ keep_spaces: If true, collapse to strings and chars of the same
+ length, with the contents replaced by spaces, preserving
+ column positions.
Returns:
The line with collapsed strings.
@@ -964,12 +975,75 @@ class CleansedLines(object):
# Remove escaped characters first to make quote/single quote
# collapsing basic. Things that look like escaped characters
# shouldn't occur outside of strings and chars.
- elided = _RE_PATTERN_CLEANSE_LINE_ESCAPES.sub('', elided)
- elided = _RE_PATTERN_CLEANSE_LINE_SINGLE_QUOTES.sub("''", elided)
- elided = _RE_PATTERN_CLEANSE_LINE_DOUBLE_QUOTES.sub('""', elided)
+ elided = _RE_PATTERN_CLEANSE_LINE_ESCAPES.sub(
+ '' if not keep_spaces else lambda m: ' ' * len(m.group(0)),
+ elided)
+ elided = _RE_PATTERN_CLEANSE_LINE_SINGLE_QUOTES.sub(
+ "''" if not keep_spaces
+ else lambda m: "'" + (' ' * len(m.group(1))) + "'",
+ elided)
+ elided = _RE_PATTERN_CLEANSE_LINE_DOUBLE_QUOTES.sub(
+ '""' if not keep_spaces
+ else lambda m: '"' + (' ' * len(m.group(1))) + '"',
+ elided)
return elided
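A sketch of the assumed effect (not part of the patch): with keep_spaces the
literal contents collapse to same-length runs of spaces, so every later
character keeps its column. For one C input line:

const char *s = "hello";  /* raw line                                  */
/* elided:                    const char *s = "";                      */
/* elided_with_space_strings: const char *s = "     ";                 */
/* The space-padded form preserves column positions, which the         */
/* alignment checks added below rely on.                               */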
+BRACES = {
+ '(': ')',
+ '{': '}',
+ '[': ']',
+ # '<': '>', C++-specific pair removed
+}
+
+
+CLOSING_BRACES = dict(((v, k) for k, v in BRACES.items()))
+
+
+def GetExprBracesPosition(clean_lines, linenum, pos):
+ """List positions of all kinds of braces
+
+ If input points to ( or { or [ then function proceeds until finding the
+ position which closes it.
+
+ Args:
+ clean_lines: A CleansedLines instance containing the file.
+ linenum: Current line number.
+ pos: A position on the line.
+
+ Yields:
+ A tuple (linenum, pos, brace, depth) for each brace found. Additionally,
+ (linenum, pos, 's', depth) is yielded at the first non-space position of
+ every continuation line, (linenum, pos, 'e', depth) at every line end,
+ and (linenum, pos, None, None) once the outermost brace is closed.
+ """
+ depth = 0
+ yielded_line_start = True
+ startpos = pos
+ while linenum < clean_lines.NumLines() - 1:
+ line = clean_lines.elided_with_space_strings[linenum]
+ if not line.startswith('#') or yielded_line_start:
+ # Skip preprocessor directives, except when the tracked expression
+ # itself starts on one (i.e. a macro is being checked)
+ for i, brace in enumerate(line[startpos:]):
+ pos = i + startpos
+ if brace != ' ' and not yielded_line_start:
+ yield (linenum, pos, 's', depth)
+ yielded_line_start = True
+ if brace in BRACES:
+ depth += 1
+ yield (linenum, pos, brace, depth)
+ elif brace in CLOSING_BRACES:
+ yield (linenum, pos, brace, depth)
+ depth -= 1
+ if depth == 0:
+ yield (linenum, pos, None, None)
+ return
+ yield (linenum, len(line) - 1, 'e', depth)
+ yielded_line_start = False
+ startpos = 0
+ linenum += 1
+
+
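As a hand-worked example (the exact tuples are derived by hand from the
generator above; treat them as an assumption, not tool output), scanning this
C fragment starting from the opening parenthesis:

int x = foo(a,
            b);
/* Starting at the '(' (line 0, column 11) the generator yields roughly:
 *   (0, 11, '(', 1)      -- opening brace, depth becomes 1
 *   (0, 13, 'e', 1)      -- end of the first line
 *   (1, 12, 's', 1)      -- first non-space column of the next line
 *   (1, 13, ')', 1)      -- closing brace
 *   (1, 13, None, None)  -- outermost brace closed; generator returns
 */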
def FindEndOfExpressionInLine(line, startpos, depth, startchar, endchar):
"""Find the position just after the matching endchar.
@@ -995,9 +1069,9 @@ def FindEndOfExpressionInLine(line, startpos, depth, startchar, endchar):
def CloseExpression(clean_lines, linenum, pos):
- """If input points to ( or { or [ or <, finds the position that closes it.
+ """If input points to ( or { or [, finds the position that closes it.
- If lines[linenum][pos] points to a '(' or '{' or '[' or '<', finds the
+ If lines[linenum][pos] points to a '(' or '{' or '[', finds the
linenum/pos that correspond to the closing of the expression.
Args:
@@ -1014,16 +1088,9 @@ def CloseExpression(clean_lines, linenum, pos):
line = clean_lines.elided[linenum]
startchar = line[pos]
- if startchar not in '({[<':
+ if startchar not in BRACES:
return (line, clean_lines.NumLines(), -1)
- if startchar == '(':
- endchar = ')'
- if startchar == '[':
- endchar = ']'
- if startchar == '{':
- endchar = '}'
- if startchar == '<':
- endchar = '>'
+ endchar = BRACES[startchar]
# Check first line
(end_pos, num_open) = FindEndOfExpressionInLine(
@@ -1300,6 +1367,23 @@ def CheckForMultilineCommentsAndStrings(filename, clean_lines, linenum, error):
'Use C++11 raw strings or concatenation instead.')
+def CheckForOldStyleComments(filename, line, linenum, error):
+ """Logs an error if we see /*-style comment
+
+ Args:
+ filename: The name of the current file.
+ line: The text of the line to check.
+ linenum: The number of the line to check.
+ error: The function to call with any errors found.
+ """
+ if line.find('/*') >= 0 and line[-1] != '\\':
+ error(filename, linenum, 'readability/old_style_comment', 5,
+ '/*-style comment found, it should be replaced with //-style. '
+ '/*-style comments are only allowed inside macros. '
+ 'Note that you should not use /*-style comments to document '
+ 'macros themselves; use doxygen-style comments for that.')
+
+
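For instance, the check above would classify these hypothetical C lines as
follows (a sketch, not recorded lint output):

int a;  /* flagged: readability/old_style_comment */
int b;  // OK: //-style comment
#define INIT(x) /* allowed: the line ends with a backslash */ \
  do { (x) = 0; } while (0)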
threading_list = (
('asctime(', 'os_asctime_r('),
('ctime(', 'os_ctime_r('),
@@ -1968,6 +2052,92 @@ def FindPreviousMatchingAngleBracket(clean_lines, linenum, init_prefix):
return False
+def CheckExpressionAlignment(filename, clean_lines, linenum, error, startpos=0):
+ """Checks for the correctness of alignment inside expressions
+
+ Args:
+ filename: The name of the current file.
+ clean_lines: A CleansedLines instance containing the file.
+ linenum: The number of the line to check.
+ error: The function to call with any errors found.
+ startpos: Position where to start searching for expression start.
+ """
+ level_starts = {}
+ line = clean_lines.elided_with_space_strings[linenum]
+ prev_line_start = Search(r'\S', line).start()
+ depth_line_starts = {}
+ pos = min([
+ idx
+ for idx in (
+ line.find(k, startpos)
+ for k in BRACES
+ if k != '{'
+ )
+ if idx >= 0
+ ] + [len(line) + 1])
+ if pos == len(line) + 1:
+ return
+ ignore_error_levels = set()
+ firstlinenum = linenum
+ for linenum, pos, brace, depth in GetExprBracesPosition(
+ clean_lines, linenum, pos
+ ):
+ line = clean_lines.elided_with_space_strings[linenum]
+ if depth is None:
+ if pos < len(line) - 1:
+ CheckExpressionAlignment(filename, clean_lines, linenum, error,
+ pos + 1)
+ return
+ elif depth <= 0:
+ error(filename, linenum, 'syntax/parenthesis', 4,
+ 'Unbalanced parenthesis')
+ return
+ if brace == 's':
+ assert firstlinenum != linenum
+ if level_starts[depth][1]:
+ if line[pos] == BRACES[depth_line_starts[depth][1]]:
+ if pos != depth_line_starts[depth][0]:
+ if depth not in ignore_error_levels:
+ error(filename, linenum, 'whitespace/indent', 2,
+ 'End of the inner expression should have '
+ 'the same indent as start')
+ else:
+ if (pos != depth_line_starts[depth][0] + 4
+ and not (depth_line_starts[depth][1] == '{'
+ and pos == depth_line_starts[depth][0] + 2)):
+ if depth not in ignore_error_levels:
+ error(filename, linenum, 'whitespace/indent', 2,
+ 'Inner expression indentation should be 4')
+ else:
+ if (pos != level_starts[depth][0] + 1
+ + (level_starts[depth][2] == '{')):
+ if depth not in ignore_error_levels:
+ error(filename, linenum, 'whitespace/alignment', 2,
+ 'Inner expression should be aligned '
+ 'as opening brace + 1 (+ 2 in case of {)')
+ prev_line_start = pos
+ elif brace == 'e':
+ pass
+ else:
+ opening = brace in BRACES
+ if opening:
+ # Only treat {} as part of the expression if it is preceded by
+ # "=" (brace initializer) or "(type)" (construct like (struct
+ # foo) { ... }).
+ if brace == '{' and not (Search(
+ r'(?:= *|\((?:struct )?\w+(\s*\[\w*\])?\)) *$',
+ line[:pos])
+ ):
+ ignore_error_levels.add(depth)
+ line_ended_with_opening = (
+ pos == len(line) - 2 * (line.endswith(' \\')) - 1)
+ level_starts[depth] = (pos, line_ended_with_opening, brace)
+ if line_ended_with_opening:
+ depth_line_starts[depth] = (prev_line_start, brace)
+ else:
+ del level_starts[depth]
+
+
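Reading the rules off the code above, these layouts should pass or fail as
follows (hand-derived sketch; foo stands for any function):

int ok1 = foo(alpha,
              beta);  /* aligned to opening '(' + 1: accepted            */
int ok2 = foo(
    alpha, beta);     /* line ended with '(': indent of 4: accepted      */
int bad = foo(alpha,
    beta);            /* neither rule satisfied: whitespace/alignment    */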
def CheckSpacing(filename, clean_lines, linenum, nesting_state, error):
"""Checks for the correctness of various spacing issues in the code.
@@ -1975,7 +2145,8 @@ def CheckSpacing(filename, clean_lines, linenum, nesting_state, error):
if/for/while/switch, no spaces around parens in function calls, two
spaces between code and comment, don't start a block with a blank
line, don't end a function with a blank line, don't add a blank line
- after public/protected/private, don't have too many blank lines in a row.
+ after public/protected/private, don't have too many blank lines in a row,
+ spaces after {, spaces before }.
Args:
filename: The name of the current file.
@@ -2236,6 +2407,10 @@ def CheckSpacing(filename, clean_lines, linenum, nesting_state, error):
# Next we will look for issues with function calls.
CheckSpacingForFunctionCall(filename, line, linenum, error)
+ # Check whether everything inside expressions is aligned correctly
+ if any((line.find(k) >= 0 for k in BRACES if k != '{')):
+ CheckExpressionAlignment(filename, clean_lines, linenum, error)
+
# Except after an opening paren, or after another opening brace (in case of
# an initializer list, for instance), you should have spaces before your
# braces. And since you should never have braces at the beginning of a line,
@@ -2292,8 +2467,6 @@ def CheckSpacing(filename, clean_lines, linenum, nesting_state, error):
'Extra space before [')
# You shouldn't have a space before a semicolon at the end of the line.
- # There's a special case for "for" since the style guide allows space before
- # the semicolon there.
if Search(r':\s*;\s*$', line):
error(filename, linenum, 'whitespace/semicolon', 5,
'Semicolon defining empty statement. Use {} instead.')
@@ -2301,12 +2474,18 @@ def CheckSpacing(filename, clean_lines, linenum, nesting_state, error):
error(filename, linenum, 'whitespace/semicolon', 5,
'Line contains only semicolon. If this should be an empty'
' statement, use {} instead.')
- elif (Search(r'\s+;\s*$', line) and
- not Search(r'\bfor\b', line)):
+ elif Search(r'\s+;\s*$', line):
error(filename, linenum, 'whitespace/semicolon', 5,
'Extra space before last semicolon. If this should be an empty '
'statement, use {} instead.')
+ if Search(r'\{(?!\})\S', line):
+ error(filename, linenum, 'whitespace/braces', 5,
+ 'Missing space after {')
+ if Search(r'\S(?<!\{)\}', line):
+ error(filename, linenum, 'whitespace/braces', 5,
+ 'Missing space before }')
+
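These two checks are what the DEFAULT_POS change in src/nvim/shada.c below
satisfies; an illustrative pair:

static const int ok[]  = { 1, 0, 0 };  /* accepted                         */
static const int bad[] = {1, 0, 0};    /* flagged twice: whitespace/braces */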
def GetPreviousNonBlankLine(clean_lines, linenum):
"""Return the most recent non-blank line and its line number.
@@ -2361,11 +2540,27 @@ def CheckBraces(filename, clean_lines, linenum, error):
' of the previous line')
# An else clause should be on the same line as the preceding closing brace.
+ # If there is no preceding closing brace, there should be one.
if Match(r'\s*else\s*', line):
prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
if Match(r'\s*}\s*$', prevline):
error(filename, linenum, 'whitespace/newline', 4,
'An else should appear on the same line as the preceding }')
+ else:
+ error(filename, linenum, 'readability/braces', 5,
+ 'An else should always have braces before it')
+
+ # if/while/for should always use braces
+ for blockstart in ('if', 'while', 'for'):
+ if Match(r'\s*{0}[^{{]*$'.format(blockstart), line):
+ pos = line.find(blockstart)
+ pos = line.find('(', pos)
+ if pos > 0:
+ (endline, _, endpos) = CloseExpression(
+ clean_lines, linenum, pos)
+ if endline[endpos:].find('{') == -1:
+ error(filename, linenum, 'readability/braces', 5,
+ '{0} should always use braces'.format(blockstart))
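Taken together with the else check above, a hand-derived sketch of what this
function now accepts and flags:

if (cond)
  foo();      /* flagged: readability/braces, 'if should always use braces' */
if (cond) {
  foo();
}
else {        /* flagged: whitespace/newline, else belongs after the '}'    */
  bar();
}
if (cond) {
  foo();
} else {      /* accepted                                                   */
  bar();
}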
# If braces come on one side of an else, they should be on both.
# However, we have to worry about "else if" that spans multiple lines!
@@ -3026,12 +3221,14 @@ def ProcessLine(filename, file_extension, clean_lines, line,
arguments : filename, clean_lines, line, error
"""
raw_lines = clean_lines.raw_lines
+ init_lines = clean_lines.init_lines
ParseNolintSuppressions(filename, raw_lines[line], line, error)
nesting_state.Update(filename, clean_lines, line, error)
if nesting_state.stack and nesting_state.stack[-1].inline_asm != _NO_ASM:
return
CheckForFunctionLengths(filename, clean_lines, line, function_state, error)
CheckForMultilineCommentsAndStrings(filename, clean_lines, line, error)
+ CheckForOldStyleComments(filename, init_lines[line], line, error)
CheckStyle(
filename, clean_lines, line, file_extension, nesting_state, error)
CheckLanguage(filename, clean_lines, line, file_extension, include_state,
@@ -3072,12 +3269,12 @@ def ProcessFileData(filename, file_extension, lines, error,
for line in range(1, len(lines)):
ParseKnownErrorSuppressions(filename, lines, line)
- if _cpplint_state.record_errors_file:
- raw_lines = lines[:]
+ init_lines = lines[:]
+ if _cpplint_state.record_errors_file:
def RecordedError(filename, linenum, category, confidence, message):
if not IsErrorSuppressedByNolint(category, linenum):
- key = raw_lines[linenum - 1 if linenum else 0:linenum + 2]
+ key = init_lines[linenum - 1 if linenum else 0:linenum + 2]
err = [filename, key, category]
json.dump(err, _cpplint_state.record_errors_file)
_cpplint_state.record_errors_file.write('\n')
@@ -3089,7 +3286,7 @@ def ProcessFileData(filename, file_extension, lines, error,
CheckForHeaderGuard(filename, lines, error)
RemoveMultiLineComments(filename, lines, error)
- clean_lines = CleansedLines(lines)
+ clean_lines = CleansedLines(lines, init_lines)
for line in range(clean_lines.NumLines()):
ProcessLine(filename, file_extension, clean_lines, line,
include_state, function_state, nesting_state, error,
diff --git a/src/nvim/shada.c b/src/nvim/shada.c
index 20fe27c370..340c14066a 100644
--- a/src/nvim/shada.c
+++ b/src/nvim/shada.c
@@ -205,11 +205,11 @@ enum SRNIFlags {
kSDReadHeader = (1 << kSDItemHeader), ///< Determines whether header should
///< be read (it is usually ignored).
kSDReadUndisableableData = (
- (1 << kSDItemSearchPattern)
- | (1 << kSDItemSubString)
- | (1 << kSDItemJump)), ///< Data reading which cannot be disabled by &shada
- ///< or other options except for disabling reading
- ///< ShaDa as a whole.
+ (1 << kSDItemSearchPattern)
+ | (1 << kSDItemSubString)
+ | (1 << kSDItemJump)), ///< Data reading which cannot be disabled by
+ ///< &shada or other options except for disabling
+ ///< reading ShaDa as a whole.
kSDReadRegisters = (1 << kSDItemRegister), ///< Determines whether registers
///< should be read (may only be
///< disabled when writing, but
@@ -446,7 +446,7 @@ typedef struct sd_write_def {
.attr = { __VA_ARGS__ } \
} \
}
-#define DEFAULT_POS {1, 0, 0}
+#define DEFAULT_POS { 1, 0, 0 }
static const pos_T default_pos = DEFAULT_POS;
static const ShadaEntry sd_default_values[] = {
[kSDItemMissing] = { .type = kSDItemMissing, .timestamp = 0 },
@@ -533,11 +533,14 @@ static inline void hmll_init(HMLList *const hmll, const size_t size)
///
/// @param hmll Pointer to the list.
/// @param cur_entry Name of the variable to iterate over.
+/// @param code Code to execute on each iteration.
///
/// @return Complete `for` cycle (use `HMLL_FORALL(hmll, cur_entry, {code})`).
-#define HMLL_FORALL(hmll, cur_entry) \
+#define HMLL_FORALL(hmll, cur_entry, code) \
for (HMLListEntry *cur_entry = (hmll)->first; cur_entry != NULL; \
- cur_entry = cur_entry->next)
+ cur_entry = cur_entry->next) { \
+ code \
+ }
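A self-contained sketch of the new calling convention (minimal stand-in
types, not the real HMLList):

#include <stdio.h>

typedef struct Entry { int data; struct Entry *next; } Entry;
typedef struct { Entry *first; } List;

/* Stand-in for the reworked macro: the body travels as the `code`
 * argument, so every use expands to one complete statement. */
#define HMLL_FORALL(list, cur_entry, code) \
  for (Entry *cur_entry = (list)->first; cur_entry != NULL; \
       cur_entry = cur_entry->next) { \
    code \
  }

int main(void)
{
  Entry c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
  List list = { &a };
  HMLL_FORALL(&list, cur, {
    printf("%d\n", cur->data);
  })
  return 0;
}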
/// Remove entry from the linked list
///
@@ -633,11 +636,14 @@ static inline void hmll_insert(HMLList *const hmll,
/// @param hmll Pointer to the list.
/// @param cur_entry Name of the variable to iterate over, must be already
/// defined.
+/// @param code Code to execute on each iteration.
///
/// @return Complete `for` cycle (use `HMLL_ITER_BACK(hmll, cur_entry, {code})`).
-#define HMLL_ITER_BACK(hmll, cur_entry) \
+#define HMLL_ITER_BACK(hmll, cur_entry, code) \
for (cur_entry = (hmll)->last; cur_entry != NULL; \
- cur_entry = cur_entry->prev)
+ cur_entry = cur_entry->prev) { \
+ code \
+ }
/// Free linked list
///
@@ -959,11 +965,11 @@ static int shada_read_file(const char *const file, const int flags)
if (p_verbose > 0) {
verbose_enter();
smsg(_("Reading ShaDa file \"%s\"%s%s%s"),
- fname,
- (flags & kShaDaWantInfo) ? _(" info") : "",
- (flags & kShaDaWantMarks) ? _(" marks") : "",
- (flags & kShaDaGetOldfiles) ? _(" oldfiles") : "",
- of_ret != 0 ? _(" FAILED") : "");
+ fname,
+ (flags & kShaDaWantInfo) ? _(" info") : "",
+ (flags & kShaDaWantMarks) ? _(" marks") : "",
+ (flags & kShaDaGetOldfiles) ? _(" oldfiles") : "",
+ of_ret != 0 ? _(" FAILED") : "");
verbose_leave();
}
@@ -1011,8 +1017,8 @@ static const void *shada_hist_iter(const void *const iter,
.histtype = history_type,
.string = (char *) hist_he.hisstr,
.sep = (char) (history_type == HIST_SEARCH
- ? (char) hist_he.hisstr[STRLEN(hist_he.hisstr) + 1]
- : 0),
+ ? (char) hist_he.hisstr[STRLEN(hist_he.hisstr) + 1]
+ : 0),
.additional_elements = hist_he.additional_elements,
}
}
@@ -1074,11 +1080,11 @@ static void hms_insert(HistoryMergerState *const hms_p, const ShadaEntry entry,
}
}
HMLListEntry *insert_after;
- HMLL_ITER_BACK(hmll, insert_after) {
+ HMLL_ITER_BACK(hmll, insert_after, {
if (insert_after->data.timestamp <= entry.timestamp) {
break;
}
- }
+ })
hmll_insert(hmll, insert_after, entry, can_free_entry);
}
@@ -1136,14 +1142,14 @@ static inline void hms_to_he_array(const HistoryMergerState *const hms_p,
FUNC_ATTR_NONNULL_ALL
{
histentry_T *hist = hist_array;
- HMLL_FORALL(&hms_p->hmll, cur_entry) {
+ HMLL_FORALL(&hms_p->hmll, cur_entry, {
hist->timestamp = cur_entry->data.timestamp;
hist->hisnum = (int) (hist - hist_array) + 1;
hist->hisstr = (char_u *) cur_entry->data.data.history_item.string;
hist->additional_elements =
cur_entry->data.data.history_item.additional_elements;
hist++;
- }
+ })
*new_hisnum = (int) (hist - hist_array);
*new_hisidx = *new_hisnum - 1;
}
@@ -1161,10 +1167,11 @@ static inline void hms_dealloc(HistoryMergerState *const hms_p)
///
/// @param[in] hms_p Merger structure to iterate over.
/// @param[out] cur_entry Name of the iterator variable.
+/// @param code Code to execute on each iteration.
///
/// @return Complete for cycle. Use `HMS_ITER(hms_p, cur_entry, {code})`.
-#define HMS_ITER(hms_p, cur_entry) \
- HMLL_FORALL(&((hms_p)->hmll), cur_entry)
+#define HMS_ITER(hms_p, cur_entry, code) \
+ HMLL_FORALL(&((hms_p)->hmll), cur_entry, code)
/// Find buffer for given buffer name (cached)
///
@@ -1341,18 +1348,18 @@ static void shada_read(ShaDaReadDef *const sd_reader, const int flags)
(cur_entry.data.search_pattern.is_substitute_pattern
? &set_substitute_pattern
: &set_search_pattern)((SearchPattern) {
- .magic = cur_entry.data.search_pattern.magic,
- .no_scs = !cur_entry.data.search_pattern.smartcase,
- .off = {
- .dir = cur_entry.data.search_pattern.search_backward ? '?' : '/',
- .line = cur_entry.data.search_pattern.has_line_offset,
- .end = cur_entry.data.search_pattern.place_cursor_at_end,
- .off = cur_entry.data.search_pattern.offset,
- },
- .pat = (char_u *) cur_entry.data.search_pattern.pat,
- .additional_data = cur_entry.data.search_pattern.additional_data,
- .timestamp = cur_entry.timestamp,
- });
+ .magic = cur_entry.data.search_pattern.magic,
+ .no_scs = !cur_entry.data.search_pattern.smartcase,
+ .off = {
+ .dir = cur_entry.data.search_pattern.search_backward ? '?' : '/',
+ .line = cur_entry.data.search_pattern.has_line_offset,
+ .end = cur_entry.data.search_pattern.place_cursor_at_end,
+ .off = cur_entry.data.search_pattern.offset,
+ },
+ .pat = (char_u *) cur_entry.data.search_pattern.pat,
+ .additional_data = cur_entry.data.search_pattern.additional_data,
+ .timestamp = cur_entry.timestamp,
+ });
if (cur_entry.data.search_pattern.is_last_used) {
set_last_used_pattern(
cur_entry.data.search_pattern.is_substitute_pattern);
@@ -2430,13 +2437,13 @@ static ShaDaWriteResult shada_write(ShaDaWriteDef *const sd_writer,
}
const unsigned srni_flags = (unsigned) (
- kSDReadUndisableableData
- | kSDReadUnknown
- | (dump_history ? kSDReadHistory : 0)
- | (dump_registers ? kSDReadRegisters : 0)
- | (dump_global_vars ? kSDReadVariables : 0)
- | (dump_global_marks ? kSDReadGlobalMarks : 0)
- | (num_marked_files ? kSDReadLocalMarks | kSDReadChanges : 0));
+ kSDReadUndisableableData
+ | kSDReadUnknown
+ | (dump_history ? kSDReadHistory : 0)
+ | (dump_registers ? kSDReadRegisters : 0)
+ | (dump_global_vars ? kSDReadVariables : 0)
+ | (dump_global_marks ? kSDReadGlobalMarks : 0)
+ | (num_marked_files ? kSDReadLocalMarks | kSDReadChanges : 0));
msgpack_packer *const packer = msgpack_packer_new(sd_writer,
&msgpack_sd_writer_write);
@@ -2895,16 +2902,16 @@ static ShaDaWriteResult shada_write(ShaDaWriteDef *const sd_writer,
for (size_t i = 0; i < HIST_COUNT; i++) {
if (dump_one_history[i]) {
hms_insert_whole_neovim_history(&wms->hms[i]);
- HMS_ITER(&wms->hms[i], cur_entry) {
+ HMS_ITER(&wms->hms[i], cur_entry, {
if (!shada_pack_encoded_entry(
- packer, &sd_writer->sd_conv, (PossiblyFreedShadaEntry) {
- .data = cur_entry->data,
- .can_free_entry = cur_entry->can_free_entry,
- }, max_kbyte)) {
+ packer, &sd_writer->sd_conv, (PossiblyFreedShadaEntry) {
+ .data = cur_entry->data,
+ .can_free_entry = cur_entry->can_free_entry,
+ }, max_kbyte)) {
ret = kSDWriteFailed;
break;
}
- }
+ })
hms_dealloc(&wms->hms[i]);
if (ret == kSDWriteFailed) {
goto shada_write_exit;
@@ -3355,8 +3362,8 @@ static inline char *get_converted_string(const vimconv_T *const sd_conv,
entry_name " entry at position %" PRIu64 " " \
error_desc
#define CHECK_KEY(key, expected) ( \
- key.via.str.size == sizeof(expected) - 1 \
- && STRNCMP(key.via.str.ptr, expected, sizeof(expected) - 1) == 0)
+ key.via.str.size == sizeof(expected) - 1 \
+ && STRNCMP(key.via.str.ptr, expected, sizeof(expected) - 1) == 0)
#define CLEAR_GA_AND_ERROR_OUT(ga) \
do { \
ga_clear(&ga); \
@@ -3379,18 +3386,17 @@ static inline char *get_converted_string(const vimconv_T *const sd_conv,
tgt = proc(obj.via.attr); \
} while (0)
#define CHECK_KEY_IS_STR(entry_name) \
- do { \
- if (unpacked.data.via.map.ptr[i].key.type != MSGPACK_OBJECT_STR) { \
- emsgu(_(READERR(entry_name, "has key which is not a string")), \
- initial_fpos); \
- CLEAR_GA_AND_ERROR_OUT(ad_ga); \
- } else if (unpacked.data.via.map.ptr[i].key.via.str.size == 0) { \
- emsgu(_(READERR(entry_name, "has empty key")), initial_fpos); \
- CLEAR_GA_AND_ERROR_OUT(ad_ga); \
- } \
- } while (0)
+ if (unpacked.data.via.map.ptr[i].key.type != MSGPACK_OBJECT_STR) { \
+ emsgu(_(READERR(entry_name, "has key which is not a string")), \
+ initial_fpos); \
+ CLEAR_GA_AND_ERROR_OUT(ad_ga); \
+ } else if (unpacked.data.via.map.ptr[i].key.via.str.size == 0) { \
+ emsgu(_(READERR(entry_name, "has empty key")), initial_fpos); \
+ CLEAR_GA_AND_ERROR_OUT(ad_ga); \
+ }
#define CHECKED_KEY(entry_name, name, error_desc, tgt, condition, attr, proc) \
- if (CHECK_KEY(unpacked.data.via.map.ptr[i].key, name)) { \
+ else if (CHECK_KEY( /* NOLINT(readability/braces) */ \
+ unpacked.data.via.map.ptr[i].key, name)) { \
CHECKED_ENTRY( \
condition, "has " name " key value " error_desc, \
entry_name, unpacked.data.via.map.ptr[i].val, \
@@ -3410,17 +3416,17 @@ static inline char *get_converted_string(const vimconv_T *const sd_conv,
#define INT_KEY(entry_name, name, tgt, proc) \
CHECKED_KEY( \
entry_name, name, "which is not an integer", tgt, \
- (unpacked.data.via.map.ptr[i].val.type \
- == MSGPACK_OBJECT_POSITIVE_INTEGER \
- || unpacked.data.via.map.ptr[i].val.type \
- == MSGPACK_OBJECT_NEGATIVE_INTEGER), \
+ ((unpacked.data.via.map.ptr[i].val.type \
+ == MSGPACK_OBJECT_POSITIVE_INTEGER) \
+ || (unpacked.data.via.map.ptr[i].val.type \
+ == MSGPACK_OBJECT_NEGATIVE_INTEGER)), \
i64, proc)
#define INTEGER_KEY(entry_name, name, tgt) \
INT_KEY(entry_name, name, tgt, TOINT)
#define LONG_KEY(entry_name, name, tgt) \
INT_KEY(entry_name, name, tgt, TOLONG)
#define ADDITIONAL_KEY \
- { \
+ else { /* NOLINT(readability/braces) */ \
ga_grow(&ad_ga, 1); \
memcpy(((char *)ad_ga.ga_data) + ((size_t) ad_ga.ga_len \
* sizeof(*unpacked.data.via.map.ptr)), \
@@ -3429,9 +3435,9 @@ static inline char *get_converted_string(const vimconv_T *const sd_conv,
ad_ga.ga_len++; \
}
#define CONVERTED(str, len) ( \
- sd_reader->sd_conv.vc_type != CONV_NONE \
- ? get_converted_string(&sd_reader->sd_conv, (str), (len)) \
- : xmemdupz((str), (len)))
+ sd_reader->sd_conv.vc_type != CONV_NONE \
+ ? get_converted_string(&sd_reader->sd_conv, (str), (len)) \
+ : xmemdupz((str), (len)))
#define BIN_CONVERTED(b) CONVERTED(b.ptr, b.size)
#define SET_ADDITIONAL_DATA(tgt, name) \
do { \
@@ -3625,38 +3631,28 @@ shada_read_next_item_start:
garray_T ad_ga;
ga_init(&ad_ga, sizeof(*(unpacked.data.via.map.ptr)), 1);
for (size_t i = 0; i < unpacked.data.via.map.size; i++) {
- CHECK_KEY_IS_STR("search pattern");
+ CHECK_KEY_IS_STR("search pattern")
BOOLEAN_KEY("search pattern", SEARCH_KEY_MAGIC,
entry->data.search_pattern.magic)
- else
- BOOLEAN_KEY("search pattern", SEARCH_KEY_SMARTCASE,
- entry->data.search_pattern.smartcase)
- else
- BOOLEAN_KEY("search pattern", SEARCH_KEY_HAS_LINE_OFFSET,
- entry->data.search_pattern.has_line_offset)
- else
- BOOLEAN_KEY("search pattern", SEARCH_KEY_PLACE_CURSOR_AT_END,
- entry->data.search_pattern.place_cursor_at_end)
- else
- BOOLEAN_KEY("search pattern", SEARCH_KEY_IS_LAST_USED,
- entry->data.search_pattern.is_last_used)
- else
- BOOLEAN_KEY("search pattern", SEARCH_KEY_IS_SUBSTITUTE_PATTERN,
- entry->data.search_pattern.is_substitute_pattern)
- else
- BOOLEAN_KEY("search pattern", SEARCH_KEY_HIGHLIGHTED,
- entry->data.search_pattern.highlighted)
- else
- BOOLEAN_KEY("search pattern", SEARCH_KEY_BACKWARD,
- entry->data.search_pattern.search_backward)
- else
- INTEGER_KEY("search pattern", SEARCH_KEY_OFFSET,
- entry->data.search_pattern.offset)
- else
- CONVERTED_STRING_KEY("search pattern", SEARCH_KEY_PAT,
- entry->data.search_pattern.pat)
- else
- ADDITIONAL_KEY
+ BOOLEAN_KEY("search pattern", SEARCH_KEY_SMARTCASE,
+ entry->data.search_pattern.smartcase)
+ BOOLEAN_KEY("search pattern", SEARCH_KEY_HAS_LINE_OFFSET,
+ entry->data.search_pattern.has_line_offset)
+ BOOLEAN_KEY("search pattern", SEARCH_KEY_PLACE_CURSOR_AT_END,
+ entry->data.search_pattern.place_cursor_at_end)
+ BOOLEAN_KEY("search pattern", SEARCH_KEY_IS_LAST_USED,
+ entry->data.search_pattern.is_last_used)
+ BOOLEAN_KEY("search pattern", SEARCH_KEY_IS_SUBSTITUTE_PATTERN,
+ entry->data.search_pattern.is_substitute_pattern)
+ BOOLEAN_KEY("search pattern", SEARCH_KEY_HIGHLIGHTED,
+ entry->data.search_pattern.highlighted)
+ BOOLEAN_KEY("search pattern", SEARCH_KEY_BACKWARD,
+ entry->data.search_pattern.search_backward)
+ INTEGER_KEY("search pattern", SEARCH_KEY_OFFSET,
+ entry->data.search_pattern.offset)
+ CONVERTED_STRING_KEY("search pattern", SEARCH_KEY_PAT,
+ entry->data.search_pattern.pat)
+ ADDITIONAL_KEY
}
if (entry->data.search_pattern.pat == NULL) {
emsgu(_(READERR("search pattern", "has no pattern")), initial_fpos);
@@ -3677,7 +3673,7 @@ shada_read_next_item_start:
garray_T ad_ga;
ga_init(&ad_ga, sizeof(*(unpacked.data.via.map.ptr)), 1);
for (size_t i = 0; i < unpacked.data.via.map.size; i++) {
- CHECK_KEY_IS_STR("mark");
+ CHECK_KEY_IS_STR("mark")
if (CHECK_KEY(unpacked.data.via.map.ptr[i].key, KEY_NAME_CHAR)) {
if (type_u64 == kSDItemJump || type_u64 == kSDItemChange) {
emsgu(_(READERR("mark", "has n key which is only valid for "
@@ -3690,15 +3686,11 @@ shada_read_next_item_start:
"has n key value which is not an unsigned integer",
"mark", unpacked.data.via.map.ptr[i].val,
entry->data.filemark.name, u64, TOCHAR);
- } else {
- LONG_KEY("mark", KEY_LNUM, entry->data.filemark.mark.lnum)
- else
- INTEGER_KEY("mark", KEY_COL, entry->data.filemark.mark.col)
- else
- STRING_KEY("mark", KEY_FILE, entry->data.filemark.fname)
- else
- ADDITIONAL_KEY
}
+ LONG_KEY("mark", KEY_LNUM, entry->data.filemark.mark.lnum)
+ INTEGER_KEY("mark", KEY_COL, entry->data.filemark.mark.col)
+ STRING_KEY("mark", KEY_FILE, entry->data.filemark.fname)
+ ADDITIONAL_KEY
}
if (entry->data.filemark.fname == NULL) {
emsgu(_(READERR("mark", "is missing file name")), initial_fpos);
@@ -3723,48 +3715,44 @@ shada_read_next_item_start:
garray_T ad_ga;
ga_init(&ad_ga, sizeof(*(unpacked.data.via.map.ptr)), 1);
for (size_t i = 0; i < unpacked.data.via.map.size; i++) {
- CHECK_KEY_IS_STR("register");
- TYPED_KEY("register", REG_KEY_TYPE, "an unsigned integer",
- entry->data.reg.type, POSITIVE_INTEGER, u64, TOU8)
- else
- TYPED_KEY("register", KEY_NAME_CHAR, "an unsigned integer",
- entry->data.reg.name, POSITIVE_INTEGER, u64, TOCHAR)
- else
- TYPED_KEY("register", REG_KEY_WIDTH, "an unsigned integer",
- entry->data.reg.width, POSITIVE_INTEGER, u64, TOSIZE)
- else
- if (CHECK_KEY(unpacked.data.via.map.ptr[i].key,
- REG_KEY_CONTENTS)) {
- if (unpacked.data.via.map.ptr[i].val.type != MSGPACK_OBJECT_ARRAY) {
- emsgu(_(READERR(
- "register",
- "has " REG_KEY_CONTENTS " key with non-array value")),
- initial_fpos);
- CLEAR_GA_AND_ERROR_OUT(ad_ga);
- }
- if (unpacked.data.via.map.ptr[i].val.via.array.size == 0) {
- emsgu(_(READERR("register",
- "has " REG_KEY_CONTENTS " key with empty array")),
- initial_fpos);
+ CHECK_KEY_IS_STR("register")
+ if (CHECK_KEY(unpacked.data.via.map.ptr[i].key,
+ REG_KEY_CONTENTS)) {
+ if (unpacked.data.via.map.ptr[i].val.type != MSGPACK_OBJECT_ARRAY) {
+ emsgu(_(READERR("register",
+ "has " REG_KEY_CONTENTS
+ " key with non-array value")),
+ initial_fpos);
+ CLEAR_GA_AND_ERROR_OUT(ad_ga);
+ }
+ if (unpacked.data.via.map.ptr[i].val.via.array.size == 0) {
+ emsgu(_(READERR("register",
+ "has " REG_KEY_CONTENTS " key with empty array")),
+ initial_fpos);
+ CLEAR_GA_AND_ERROR_OUT(ad_ga);
+ }
+ const msgpack_object_array arr =
+ unpacked.data.via.map.ptr[i].val.via.array;
+ for (size_t i = 0; i < arr.size; i++) {
+ if (arr.ptr[i].type != MSGPACK_OBJECT_BIN) {
+ emsgu(_(READERR("register", "has " REG_KEY_CONTENTS " array "
+ "with non-binary value")), initial_fpos);
CLEAR_GA_AND_ERROR_OUT(ad_ga);
}
- const msgpack_object_array arr =
- unpacked.data.via.map.ptr[i].val.via.array;
- for (size_t i = 0; i < arr.size; i++) {
- if (arr.ptr[i].type != MSGPACK_OBJECT_BIN) {
- emsgu(_(READERR("register", "has " REG_KEY_CONTENTS " array "
- "with non-binary value")), initial_fpos);
- CLEAR_GA_AND_ERROR_OUT(ad_ga);
- }
- }
- entry->data.reg.contents_size = arr.size;
- entry->data.reg.contents = xmalloc(arr.size * sizeof(char *));
- for (size_t i = 0; i < arr.size; i++) {
- entry->data.reg.contents[i] = BIN_CONVERTED(arr.ptr[i].via.bin);
- }
- } else {
- ADDITIONAL_KEY
}
+ entry->data.reg.contents_size = arr.size;
+ entry->data.reg.contents = xmalloc(arr.size * sizeof(char *));
+ for (size_t i = 0; i < arr.size; i++) {
+ entry->data.reg.contents[i] = BIN_CONVERTED(arr.ptr[i].via.bin);
+ }
+ }
+ TYPED_KEY("register", REG_KEY_TYPE, "an unsigned integer",
+ entry->data.reg.type, POSITIVE_INTEGER, u64, TOU8)
+ TYPED_KEY("register", KEY_NAME_CHAR, "an unsigned integer",
+ entry->data.reg.name, POSITIVE_INTEGER, u64, TOCHAR)
+ TYPED_KEY("register", REG_KEY_WIDTH, "an unsigned integer",
+ entry->data.reg.width, POSITIVE_INTEGER, u64, TOSIZE)
+ ADDITIONAL_KEY
}
if (entry->data.reg.contents == NULL) {
emsgu(_(READERR("register", "has missing " REG_KEY_CONTENTS " array")),
@@ -3832,8 +3820,8 @@ shada_read_next_item_hist_no_conv:
+ 1); // Separator character
entry->data.history_item.string = xmalloc(strsize);
memcpy(entry->data.history_item.string,
- unpacked.data.via.array.ptr[1].via.bin.ptr,
- unpacked.data.via.array.ptr[1].via.bin.size);
+ unpacked.data.via.array.ptr[1].via.bin.ptr,
+ unpacked.data.via.array.ptr[1].via.bin.size);
} else {
size_t len = unpacked.data.via.array.ptr[1].via.bin.size;
char *const converted = string_convert(
@@ -3953,17 +3941,14 @@ shada_read_next_item_hist_no_conv:
const size_t j = i;
{
for (size_t i = 0; i < unpacked.data.via.map.size; i++) {
- CHECK_KEY_IS_STR("buffer list entry");
+ CHECK_KEY_IS_STR("buffer list entry")
LONG_KEY("buffer list entry", KEY_LNUM,
- entry->data.buffer_list.buffers[j].pos.lnum)
- else
- INTEGER_KEY("buffer list entry", KEY_COL,
- entry->data.buffer_list.buffers[j].pos.col)
- else
- STRING_KEY("buffer list entry", KEY_FILE,
- entry->data.buffer_list.buffers[j].fname)
- else
- ADDITIONAL_KEY
+ entry->data.buffer_list.buffers[j].pos.lnum)
+ INTEGER_KEY("buffer list entry", KEY_COL,
+ entry->data.buffer_list.buffers[j].pos.col)
+ STRING_KEY("buffer list entry", KEY_FILE,
+ entry->data.buffer_list.buffers[j].fname)
+ ADDITIONAL_KEY
}
}
}