Diffstat (limited to 'src')
-rw-r--r--src/.asan-blacklist3
-rw-r--r--src/.valgrind.supp16
-rw-r--r--src/Doxyfile1890
-rwxr-xr-xsrc/clint.py3544
-rw-r--r--src/nvim/CMakeLists.txt5
-rw-r--r--src/nvim/api/private/defs.h4
-rw-r--r--src/nvim/api/private/handle.c36
-rw-r--r--src/nvim/api/private/handle.h6
-rw-r--r--src/nvim/api/private/helpers.h94
-rw-r--r--src/nvim/api/vim.c24
-rw-r--r--src/nvim/buffer.c3
-rw-r--r--src/nvim/buffer.h16
-rw-r--r--src/nvim/buffer_defs.h20
-rw-r--r--src/nvim/charset.c4
-rw-r--r--src/nvim/edit.c2
-rw-r--r--src/nvim/eval.c347
-rw-r--r--src/nvim/eval/decode.c18
-rw-r--r--src/nvim/eval/encode.c622
-rw-r--r--src/nvim/eval/typval_encode.h563
-rw-r--r--src/nvim/event/defs.h26
-rw-r--r--src/nvim/event/libuv_process.c2
-rw-r--r--src/nvim/event/loop.h64
-rw-r--r--src/nvim/event/process.c12
-rw-r--r--src/nvim/event/process.h2
-rw-r--r--src/nvim/ex_docmd.c28
-rw-r--r--src/nvim/ex_docmd.h1
-rw-r--r--src/nvim/ex_getln.c2
-rw-r--r--src/nvim/file_search.c2
-rw-r--r--src/nvim/func_attr.h251
-rw-r--r--src/nvim/garray.h28
-rw-r--r--src/nvim/getchar.c7
-rw-r--r--src/nvim/globals.h1
-rw-r--r--src/nvim/lib/klist.h154
-rw-r--r--src/nvim/lib/kvec.h257
-rw-r--r--src/nvim/lib/queue.h158
-rw-r--r--src/nvim/lib/ringbuf.h388
-rw-r--r--src/nvim/log.c63
-rw-r--r--src/nvim/main.c13
-rw-r--r--src/nvim/main.h3
-rw-r--r--src/nvim/map.c160
-rw-r--r--src/nvim/map.h28
-rw-r--r--src/nvim/mbyte.c2
-rw-r--r--src/nvim/msgpack_rpc/channel.c30
-rw-r--r--src/nvim/msgpack_rpc/helpers.c66
-rw-r--r--src/nvim/msgpack_rpc/server.c3
-rw-r--r--src/nvim/normal.c4
-rw-r--r--src/nvim/option.c2
-rw-r--r--src/nvim/os/fs.c1
-rw-r--r--src/nvim/os/input.c12
-rw-r--r--src/nvim/os/pty_process.h9
-rw-r--r--src/nvim/os/pty_process_unix.c (renamed from src/nvim/event/pty_process.c)18
-rw-r--r--src/nvim/os/pty_process_unix.h (renamed from src/nvim/event/pty_process.h)9
-rw-r--r--src/nvim/os/pty_process_win.h28
-rw-r--r--src/nvim/os/shell.c7
-rw-r--r--src/nvim/os/signal.c11
-rw-r--r--src/nvim/os/time.c3
-rw-r--r--src/nvim/po/eo.po62
-rw-r--r--src/nvim/po/it.po106
-rw-r--r--src/nvim/po/ja.euc-jp.po180
-rw-r--r--src/nvim/po/ja.po179
-rw-r--r--src/nvim/po/ja.sjis.po186
-rw-r--r--src/nvim/rbuffer.h42
-rw-r--r--src/nvim/regexp_nfa.c34
-rw-r--r--src/nvim/spell.c111
-rw-r--r--src/nvim/state.c5
-rw-r--r--src/nvim/syntax.c130
-rw-r--r--src/nvim/terminal.c25
-rw-r--r--src/nvim/testdir/Makefile9
-rw-r--r--src/nvim/testdir/runtest.vim3
-rw-r--r--src/nvim/testdir/test10.in110
-rw-r--r--src/nvim/testdir/test10.ok23
-rw-r--r--src/nvim/testdir/test10a.in72
-rw-r--r--src/nvim/testdir/test10a.ok23
-rw-r--r--src/nvim/testdir/test34.in86
-rw-r--r--src/nvim/testdir/test34.ok10
-rw-r--r--src/nvim/testdir/test55.in600
-rw-r--r--src/nvim/testdir/test55.ok199
-rw-r--r--src/nvim/testdir/test_hardcopy.vim58
-rw-r--r--src/nvim/testdir/test_langmap.vim24
-rw-r--r--src/nvim/testdir/test_syntax.vim63
-rw-r--r--src/nvim/testdir/test_unlet.vim26
-rw-r--r--src/nvim/tui/input.c13
-rw-r--r--src/nvim/tui/tui.c5
-rw-r--r--src/nvim/ugrid.h20
-rw-r--r--src/nvim/ui.c28
-rw-r--r--src/nvim/ui_bridge.c9
-rw-r--r--src/nvim/ui_bridge.h14
-rw-r--r--src/nvim/version.c36
88 files changed, 8505 insertions, 3058 deletions
diff --git a/src/.asan-blacklist b/src/.asan-blacklist
new file mode 100644
index 0000000000..63558170b3
--- /dev/null
+++ b/src/.asan-blacklist
@@ -0,0 +1,3 @@
+# libuv queue.h pointer arithmetic is not accepted by asan
+fun:queue_node_data
+fun:dictwatcher_node_data
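The functions blacklisted above perform the libuv queue.h style of pointer
arithmetic the comment refers to: a pointer to an embedded list node is
converted back to the enclosing struct by subtracting the member offset.
A minimal C sketch of that pattern (illustrative only; the type and helper
names below are hypothetical, not Neovim's actual definitions):

    #include <stddef.h>

    typedef struct queue_s { struct queue_s *next, *prev; } queue_t;
    typedef struct item_s  { int value; queue_t node; } item_t;

    /* Recover the enclosing item from a pointer to its embedded node.
     * The (char *) cast plus offsetof subtraction is the arithmetic that
     * ASan objects to, so the real helpers are exempted by name (fun:). */
    static inline item_t *item_node_data(queue_t *q)
    {
      return (item_t *)((char *)q - offsetof(item_t, node));
    }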
diff --git a/src/.valgrind.supp b/src/.valgrind.supp
new file mode 100644
index 0000000000..8b630fcaaf
--- /dev/null
+++ b/src/.valgrind.supp
@@ -0,0 +1,16 @@
+{
+ nss_parse_service_list
+ Memcheck:Leak
+ fun:malloc
+ fun:nss_parse_service_list
+ fun:__nss_database_lookup
+}
+{
+ uv_spawn_with_optimizations
+ Memcheck:Leak
+ fun:malloc
+ fun:uv_spawn
+ fun:pipe_process_spawn
+ fun:process_spawn
+ fun:job_start
+}
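Each suppression above names a Memcheck error kind (Leak) and a partial call
stack of fun: frames; leaks whose stacks match are hidden. A usage sketch
(valgrind's --suppressions option is standard; the nvim binary path shown is
an assumption, not taken from this change):

    valgrind --suppressions=src/.valgrind.supp build/bin/nvim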
diff --git a/src/Doxyfile b/src/Doxyfile
new file mode 100644
index 0000000000..de31c8355f
--- /dev/null
+++ b/src/Doxyfile
@@ -0,0 +1,1890 @@
+# Doxyfile 1.8.4
+
+# This file describes the settings to be used by the documentation system
+# doxygen (www.doxygen.org) for a project.
+#
+# All text after a double hash (##) is considered a comment and is placed
+# in front of the TAG it precedes.
+# All text after a hash (#) is considered a comment and will be ignored.
+# The format is:
+# TAG = value [value, ...]
+# For lists, items can also be appended using:
+# TAG += value [value, ...]
+# Values that contain spaces should be placed between quotes (" ").
+
+#---------------------------------------------------------------------------
+# Project related configuration options
+#---------------------------------------------------------------------------
+
+# This tag specifies the encoding used for all characters in the config file
+# that follow. The default is UTF-8 which is also the encoding used for all
+# text before the first occurrence of this tag. Doxygen uses libiconv (or the
+# iconv built into libc) for the transcoding. See
+# http://www.gnu.org/software/libiconv for the list of possible encodings.
+
+DOXYFILE_ENCODING = UTF-8
+
+# The PROJECT_NAME tag is a single word (or sequence of words) that should
+# identify the project. Note that if you do not use Doxywizard you need
+# to put quotes around the project name if it contains spaces.
+
+PROJECT_NAME = "Neovim"
+
+# The PROJECT_NUMBER tag can be used to enter a project or revision number.
+# This could be handy for archiving the generated documentation or
+# if some version control system is used.
+
+PROJECT_NUMBER =
+
+# Using the PROJECT_BRIEF tag one can provide an optional one line description
+# for a project that appears at the top of each page and should give the viewer
+# a quick idea about the purpose of the project. Keep the description short.
+
+PROJECT_BRIEF =
+
+# With the PROJECT_LOGO tag one can specify a logo or icon that is
+# included in the documentation. The maximum height of the logo should not
+# exceed 55 pixels and the maximum width should not exceed 200 pixels.
+# Doxygen will copy the logo to the output directory.
+
+PROJECT_LOGO = contrib/doxygen/logo-devdoc.png
+
+# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute)
+# base path where the generated documentation will be put.
+# If a relative path is entered, it will be relative to the location
+# where doxygen was started. If left blank the current directory will be used.
+
+OUTPUT_DIRECTORY = build/doxygen
+
+# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create
+# 4096 sub-directories (in 2 levels) under the output directory of each output
+# format and will distribute the generated files over these directories.
+# Enabling this option can be useful when feeding doxygen a huge amount of
+# source files, where putting all generated files in the same directory would
+# otherwise cause performance problems for the file system.
+
+CREATE_SUBDIRS = NO
+
+# The OUTPUT_LANGUAGE tag is used to specify the language in which all
+# documentation generated by doxygen is written. Doxygen will use this
+# information to generate all constant output in the proper language.
+# The default language is English, other supported languages are:
+# Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional,
+# Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German,
+# Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English
+# messages), Korean, Korean-en, Latvian, Lithuanian, Norwegian, Macedonian,
+# Persian, Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrillic,
+# Slovak, Slovene, Spanish, Swedish, Ukrainian, and Vietnamese.
+
+OUTPUT_LANGUAGE = English
+
+# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will
+# include brief member descriptions after the members that are listed in
+# the file and class documentation (similar to JavaDoc).
+# Set to NO to disable this.
+
+BRIEF_MEMBER_DESC = YES
+
+# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend
+# the brief description of a member or function before the detailed description.
+# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
+# brief descriptions will be completely suppressed.
+
+REPEAT_BRIEF = YES
+
+# This tag implements a quasi-intelligent brief description abbreviator
+# that is used to form the text in various listings. Each string
+# in this list, if found as the leading text of the brief description, will be
+# stripped from the text, and the result after processing the whole list is
+# used as the annotated text. Otherwise, the brief description is used as-is.
+# If left blank, the following values are used ("$name" is automatically
+# replaced with the name of the entity): "The $name class" "The $name widget"
+# "The $name file" "is" "provides" "specifies" "contains"
+# "represents" "a" "an" "the"
+
+ABBREVIATE_BRIEF =
+
+# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
+# Doxygen will generate a detailed section even if there is only a brief
+# description.
+
+ALWAYS_DETAILED_SEC = NO
+
+# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all
+# inherited members of a class in the documentation of that class as if those
+# members were ordinary class members. Constructors, destructors and assignment
+# operators of the base classes will not be shown.
+
+INLINE_INHERITED_MEMB = NO
+
+# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full
+# path before file names in the file list and in the header files. If set
+# to NO the shortest path that makes the file name unique will be used.
+
+FULL_PATH_NAMES = YES
+
+# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag
+# can be used to strip a user-defined part of the path. Stripping is
+# only done if one of the specified strings matches the left-hand part of
+# the path. The tag can be used to show relative paths in the file list.
+# If left blank the directory from which doxygen is run is used as the
+# path to strip. Note that you specify absolute paths here, but also
+# relative paths, which will be relative from the directory where doxygen is
+# started.
+
+STRIP_FROM_PATH =
+
+# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of
+# the path mentioned in the documentation of a class, which tells
+# the reader which header file to include in order to use a class.
+# If left blank only the name of the header file containing the class
+# definition is used. Otherwise one should specify the include paths that
+# are normally passed to the compiler using the -I flag.
+
+STRIP_FROM_INC_PATH =
+
+# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter
+# (but less readable) file names. This can be useful if your file system
+# doesn't support long names like on DOS, Mac, or CD-ROM.
+
+SHORT_NAMES = NO
+
+# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen
+# will interpret the first line (until the first dot) of a JavaDoc-style
+# comment as the brief description. If set to NO, the JavaDoc
+# comments will behave just like regular Qt-style comments
+# (thus requiring an explicit @brief command for a brief description.)
+
+JAVADOC_AUTOBRIEF = NO
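+# Illustrative example (editorial addition, not part of the stock template):
+# with this tag left at NO, a JavaDoc-style block needs an explicit @brief
+# command to get a brief description, e.g.
+#   /** @brief Reads one keystroke from the input queue. */
+# With the tag set to YES, the first line (up to the first dot) of such a
+# block would be taken as the brief description automatically.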
+
+# If the QT_AUTOBRIEF tag is set to YES then Doxygen will
+# interpret the first line (until the first dot) of a Qt-style
+# comment as the brief description. If set to NO, the comments
+# will behave just like regular Qt-style comments (thus requiring
+# an explicit \brief command for a brief description.)
+
+QT_AUTOBRIEF = NO
+
+# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen
+# treat a multi-line C++ special comment block (i.e. a block of //! or ///
+# comments) as a brief description. This used to be the default behaviour.
+# The new default is to treat a multi-line C++ comment block as a detailed
+# description. Set this tag to YES if you prefer the old behaviour instead.
+
+MULTILINE_CPP_IS_BRIEF = NO
+
+# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented
+# member inherits the documentation from any documented member that it
+# re-implements.
+
+INHERIT_DOCS = YES
+
+# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce
+# a new page for each member. If set to NO, the documentation of a member will
+# be part of the file/class/namespace that contains it.
+
+SEPARATE_MEMBER_PAGES = NO
+
+# The TAB_SIZE tag can be used to set the number of spaces in a tab.
+# Doxygen uses this value to replace tabs by spaces in code fragments.
+
+TAB_SIZE = 4
+
+# This tag can be used to specify a number of aliases that act
+# as commands in the documentation. An alias has the form "name=value".
+# For example adding "sideeffect=\par Side Effects:\n" will allow you to
+# put the command \sideeffect (or @sideeffect) in the documentation, which
+# will result in a user-defined paragraph with heading "Side Effects:".
+# You can put \n's in the value part of an alias to insert newlines.
+
+ALIASES =
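+# Illustrative example (editorial addition, not part of the stock template):
+# defining the alias mentioned above,
+#   ALIASES = "sideeffect=\par Side Effects:\n"
+# lets a source comment contain
+#   /** @sideeffect May move the cursor. */
+# which is rendered as a paragraph with the heading "Side Effects:".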
+
+# This tag can be used to specify a number of word-keyword mappings (TCL only).
+# A mapping has the form "name=value". For example adding
+# "class=itcl::class" will allow you to use the command class in the
+# itcl::class meaning.
+
+TCL_SUBST =
+
+# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C
+# sources only. Doxygen will then generate output that is more tailored for C.
+# For instance, some of the names that are used will be different. The list
+# of all members will be omitted, etc.
+
+OPTIMIZE_OUTPUT_FOR_C = YES
+
+# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java
+# sources only. Doxygen will then generate output that is more tailored for
+# Java. For instance, namespaces will be presented as packages, qualified
+# scopes will look different, etc.
+
+OPTIMIZE_OUTPUT_JAVA = NO
+
+# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran
+# sources only. Doxygen will then generate output that is more tailored for
+# Fortran.
+
+OPTIMIZE_FOR_FORTRAN = NO
+
+# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL
+# sources. Doxygen will then generate output that is tailored for
+# VHDL.
+
+OPTIMIZE_OUTPUT_VHDL = NO
+
+# Doxygen selects the parser to use depending on the extension of the files it
+# parses. With this tag you can assign which parser to use for a given
+# extension. Doxygen has a built-in mapping, but you can override or extend it
+# using this tag. The format is ext=language, where ext is a file extension,
+# and language is one of the parsers supported by doxygen: IDL, Java,
+# Javascript, CSharp, C, C++, D, PHP, Objective-C, Python, Fortran, and VHDL.
+# For instance, to make doxygen treat .inc files as Fortran files (default
+# is PHP), and .f files as C (default is Fortran), use: inc=Fortran f=C. Note
+# that for custom extensions you also need to set FILE_PATTERNS otherwise the
+# files are not read by doxygen.
+
+EXTENSION_MAPPING =
+
+# If MARKDOWN_SUPPORT is enabled (the default) then doxygen pre-processes all
+# comments according to the Markdown format, which allows for more readable
+# documentation. See http://daringfireball.net/projects/markdown/ for details.
+# The output of markdown processing is further processed by doxygen, so you
+# can mix doxygen, HTML, and XML commands with Markdown formatting.
+# Disable only in case of backward compatibility issues.
+
+MARKDOWN_SUPPORT = YES
+
+# When enabled doxygen tries to link words that correspond to documented
+# classes, or namespaces to their corresponding documentation. Such a link can
+# be prevented in individual cases by putting a % sign in front of the word
+# or globally by setting AUTOLINK_SUPPORT to NO.
+
+AUTOLINK_SUPPORT = YES
+
+# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
+# to include (a tag file for) the STL sources as input, then you should
+# set this tag to YES in order to let doxygen match function declarations and
+# definitions whose arguments contain STL classes (e.g. func(std::string); vs.
+# func(std::string) {}). This also makes the inheritance and collaboration
+# diagrams that involve STL classes more complete and accurate.
+
+BUILTIN_STL_SUPPORT = NO
+
+# If you use Microsoft's C++/CLI language, you should set this option to YES to
+# enable parsing support.
+
+CPP_CLI_SUPPORT = NO
+
+# Set the SIP_SUPPORT tag to YES if your project consists of sip sources only.
+# Doxygen will parse them like normal C++ but will assume all classes use public
+# instead of private inheritance when no explicit protection keyword is present.
+
+SIP_SUPPORT = NO
+
+# For Microsoft's IDL there are propget and propput attributes to indicate
+# getter and setter methods for a property. Setting this option to YES (the
+# default) will make doxygen replace the get and set methods by a property in
+# the documentation. This will only work if the methods are indeed getting or
+# setting a simple type. If this is not the case, or you want to show the
+# methods anyway, you should set this option to NO.
+
+IDL_PROPERTY_SUPPORT = YES
+
+# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
+# tag is set to YES, then doxygen will reuse the documentation of the first
+# member in the group (if any) for the other members of the group. By default
+# all members of a group must be documented explicitly.
+
+DISTRIBUTE_GROUP_DOC = NO
+
+# Set the SUBGROUPING tag to YES (the default) to allow class member groups of
+# the same type (for instance a group of public functions) to be put as a
+# subgroup of that type (e.g. under the Public Functions section). Set it to
+# NO to prevent subgrouping. Alternatively, this can be done per class using
+# the \nosubgrouping command.
+
+SUBGROUPING = YES
+
+# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and
+# unions are shown inside the group in which they are included (e.g. using
+# @ingroup) instead of on a separate page (for HTML and Man pages) or
+# section (for LaTeX and RTF).
+
+INLINE_GROUPED_CLASSES = NO
+
+# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and
+# unions with only public data fields or simple typedef fields will be shown
+# inline in the documentation of the scope in which they are defined (i.e. file,
+# namespace, or group documentation), provided this scope is documented. If set
+# to NO (the default), structs, classes, and unions are shown on a separate
+# page (for HTML and Man pages) or section (for LaTeX and RTF).
+
+INLINE_SIMPLE_STRUCTS = NO
+
+# When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum
+# is documented as struct, union, or enum with the name of the typedef. So
+# typedef struct TypeS {} TypeT, will appear in the documentation as a struct
+# with name TypeT. When disabled the typedef will appear as a member of a file,
+# namespace, or class. And the struct will be named TypeS. This can typically
+# be useful for C code in case the coding convention dictates that all compound
+# types are typedef'ed and only the typedef is referenced, never the tag name.
+
+TYPEDEF_HIDES_STRUCT = NO
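+# Illustrative C example of the case described above (TypeS and TypeT are the
+# placeholder names from the comment, not real types):
+#   typedef struct TypeS { int field; } TypeT;
+# With the tag enabled the documentation shows a struct named TypeT; with it
+# disabled, as here, the struct keeps the name TypeS and TypeT appears as a
+# typedef member of the enclosing file.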
+
+# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This
+# cache is used to resolve symbols given their name and scope. Since this can
+# be an expensive process and often the same symbol appears multiple times in
+# the code, doxygen keeps a cache of pre-resolved symbols. If the cache is too
+# small doxygen will become slower. If the cache is too large, memory is wasted.
+# The cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid
+# range is 0..9, the default is 0, corresponding to a cache size of 2^16 = 65536
+# symbols.
+
+LOOKUP_CACHE_SIZE = 0
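+# Worked example (editorial addition): a value of 2 gives a cache of
+# 2^(16+2) = 2^18 = 262144 symbols; the default of 0 keeps the 2^16 = 65536
+# mentioned above.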
+
+#---------------------------------------------------------------------------
+# Build related configuration options
+#---------------------------------------------------------------------------
+
+# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in
+# documentation are documented, even if no documentation was available.
+# Private class members and static file members will be hidden unless
+# the EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES
+
+EXTRACT_ALL = YES
+
+# If the EXTRACT_PRIVATE tag is set to YES all private members of a class
+# will be included in the documentation.
+
+EXTRACT_PRIVATE = NO
+
+# If the EXTRACT_PACKAGE tag is set to YES all members with package or internal
+# scope will be included in the documentation.
+
+EXTRACT_PACKAGE = NO
+
+# If the EXTRACT_STATIC tag is set to YES all static members of a file
+# will be included in the documentation.
+
+EXTRACT_STATIC = NO
+
+# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs)
+# defined locally in source files will be included in the documentation.
+# If set to NO only classes defined in header files are included.
+
+EXTRACT_LOCAL_CLASSES = YES
+
+# This flag is only useful for Objective-C code. When set to YES local
+# methods, which are defined in the implementation section but not in
+# the interface, are included in the documentation.
+# If set to NO (the default) only methods in the interface are included.
+
+EXTRACT_LOCAL_METHODS = NO
+
+# If this flag is set to YES, the members of anonymous namespaces will be
+# extracted and appear in the documentation as a namespace called
+# 'anonymous_namespace{file}', where file will be replaced with the base
+# name of the file that contains the anonymous namespace. By default
+# anonymous namespaces are hidden.
+
+EXTRACT_ANON_NSPACES = NO
+
+# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all
+# undocumented members of documented classes, files or namespaces.
+# If set to NO (the default) these members will be included in the
+# various overviews, but no documentation section is generated.
+# This option has no effect if EXTRACT_ALL is enabled.
+
+HIDE_UNDOC_MEMBERS = NO
+
+# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all
+# undocumented classes that are normally visible in the class hierarchy.
+# If set to NO (the default) these classes will be included in the various
+# overviews. This option has no effect if EXTRACT_ALL is enabled.
+
+HIDE_UNDOC_CLASSES = NO
+
+# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all
+# friend (class|struct|union) declarations.
+# If set to NO (the default) these declarations will be included in the
+# documentation.
+
+HIDE_FRIEND_COMPOUNDS = NO
+
+# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any
+# documentation blocks found inside the body of a function.
+# If set to NO (the default) these blocks will be appended to the
+# function's detailed documentation block.
+
+HIDE_IN_BODY_DOCS = NO
+
+# The INTERNAL_DOCS tag determines if documentation
+# that is typed after a \internal command is included. If the tag is set
+# to NO (the default) then the documentation will be excluded.
+# Set it to YES to include the internal documentation.
+
+INTERNAL_DOCS = NO
+
+# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate
+# file names in lower-case letters. If set to YES upper-case letters are also
+# allowed. This is useful if you have classes or files whose names only differ
+# in case and if your file system supports case sensitive file names. Windows
+# and Mac users are advised to set this option to NO.
+
+CASE_SENSE_NAMES = YES
+
+# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen
+# will show members with their full class and namespace scopes in the
+# documentation. If set to YES the scope will be hidden.
+
+HIDE_SCOPE_NAMES = NO
+
+# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen
+# will put a list of the files that are included by a file in the documentation
+# of that file.
+
+SHOW_INCLUDE_FILES = YES
+
+# If the FORCE_LOCAL_INCLUDES tag is set to YES then Doxygen
+# will list include files with double quotes in the documentation
+# rather than with sharp brackets.
+
+FORCE_LOCAL_INCLUDES = NO
+
+# If the INLINE_INFO tag is set to YES (the default) then a tag [inline]
+# is inserted in the documentation for inline members.
+
+INLINE_INFO = YES
+
+# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen
+# will sort the (detailed) documentation of file and class members
+# alphabetically by member name. If set to NO the members will appear in
+# declaration order.
+
+SORT_MEMBER_DOCS = YES
+
+# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the
+# brief documentation of file, namespace and class members alphabetically
+# by member name. If set to NO (the default) the members will appear in
+# declaration order.
+
+SORT_BRIEF_DOCS = NO
+
+# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen
+# will sort the (brief and detailed) documentation of class members so that
+# constructors and destructors are listed first. If set to NO (the default)
+# the constructors will appear in the respective orders defined by
+# SORT_MEMBER_DOCS and SORT_BRIEF_DOCS.
+# This tag will be ignored for brief docs if SORT_BRIEF_DOCS is set to NO
+# and ignored for detailed docs if SORT_MEMBER_DOCS is set to NO.
+
+SORT_MEMBERS_CTORS_1ST = NO
+
+# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the
+# hierarchy of group names into alphabetical order. If set to NO (the default)
+# the group names will appear in their defined order.
+
+SORT_GROUP_NAMES = NO
+
+# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be
+# sorted by fully-qualified names, including namespaces. If set to
+# NO (the default), the class list will be sorted only by class name,
+# not including the namespace part.
+# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
+# Note: This option applies only to the class list, not to the
+# alphabetical list.
+
+SORT_BY_SCOPE_NAME = NO
+
+# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to
+# do proper type resolution of all parameters of a function it will reject a
+# match between the prototype and the implementation of a member function even
+# if there is only one candidate or it is obvious which candidate to choose
+# by doing a simple string match. By disabling STRICT_PROTO_MATCHING doxygen
+# will still accept a match between prototype and implementation in such cases.
+
+STRICT_PROTO_MATCHING = NO
+
+# The GENERATE_TODOLIST tag can be used to enable (YES) or
+# disable (NO) the todo list. This list is created by putting \todo
+# commands in the documentation.
+
+GENERATE_TODOLIST = YES
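+# Illustrative example (editorial addition): a source comment such as
+#   /** \todo handle the multibyte case here */
+# adds an entry to the generated todo list.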
+
+# The GENERATE_TESTLIST tag can be used to enable (YES) or
+# disable (NO) the test list. This list is created by putting \test
+# commands in the documentation.
+
+GENERATE_TESTLIST = YES
+
+# The GENERATE_BUGLIST tag can be used to enable (YES) or
+# disable (NO) the bug list. This list is created by putting \bug
+# commands in the documentation.
+
+GENERATE_BUGLIST = YES
+
+# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or
+# disable (NO) the deprecated list. This list is created by putting
+# \deprecated commands in the documentation.
+
+GENERATE_DEPRECATEDLIST= YES
+
+# The ENABLED_SECTIONS tag can be used to enable conditional
+# documentation sections, marked by \if section-label ... \endif
+# and \cond section-label ... \endcond blocks.
+
+ENABLED_SECTIONS =
+
+# The MAX_INITIALIZER_LINES tag determines the maximum number of lines
+# the initial value of a variable or macro consists of for it to appear in
+# the documentation. If the initializer consists of more lines than specified
+# here it will be hidden. Use a value of 0 to hide initializers completely.
+# The appearance of the initializer of individual variables and macros in the
+# documentation can be controlled using \showinitializer or \hideinitializer
+# command in the documentation regardless of this setting.
+
+MAX_INITIALIZER_LINES = 30
+
+# Set the SHOW_USED_FILES tag to NO to disable the list of files generated
+# at the bottom of the documentation of classes and structs. If set to YES the
+# list will mention the files that were used to generate the documentation.
+
+SHOW_USED_FILES = YES
+
+# Set the SHOW_FILES tag to NO to disable the generation of the Files page.
+# This will remove the Files entry from the Quick Index and from the
+# Folder Tree View (if specified). The default is YES.
+
+SHOW_FILES = YES
+
+# Set the SHOW_NAMESPACES tag to NO to disable the generation of the
+# Namespaces page.
+# This will remove the Namespaces entry from the Quick Index
+# and from the Folder Tree View (if specified). The default is YES.
+
+SHOW_NAMESPACES = YES
+
+# The FILE_VERSION_FILTER tag can be used to specify a program or script that
+# doxygen should invoke to get the current version for each file (typically from
+# the version control system). Doxygen will invoke the program by executing (via
+# popen()) the command <command> <input-file>, where <command> is the value of
+# the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file
+# provided by doxygen. Whatever the program writes to standard output
+# is used as the file version. See the manual for examples.
+
+FILE_VERSION_FILTER =
+
+# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed
+# by doxygen. The layout file controls the global structure of the generated
+# output files in an output format independent way. To create the layout file
+# that represents doxygen's defaults, run doxygen with the -l option.
+# You can optionally specify a file name after the option, if omitted
+# DoxygenLayout.xml will be used as the name of the layout file.
+
+LAYOUT_FILE =
+
+# The CITE_BIB_FILES tag can be used to specify one or more bib files
+# containing the references data. This must be a list of .bib files. The
+# .bib extension is automatically appended if omitted. Using this command
+# requires the bibtex tool to be installed. See also
+# http://en.wikipedia.org/wiki/BibTeX for more info. For LaTeX the style
+# of the bibliography can be controlled using LATEX_BIB_STYLE. To use this
+# feature you need bibtex and perl available in the search path. Do not use
+# file names with spaces, bibtex cannot handle them.
+
+CITE_BIB_FILES =
+
+#---------------------------------------------------------------------------
+# configuration options related to warning and progress messages
+#---------------------------------------------------------------------------
+
+# The QUIET tag can be used to turn on/off the messages that are generated
+# by doxygen. Possible values are YES and NO. If left blank NO is used.
+
+QUIET = NO
+
+# The WARNINGS tag can be used to turn on/off the warning messages that are
+# generated by doxygen. Possible values are YES and NO. If left blank
+# NO is used.
+
+WARNINGS = YES
+
+# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings
+# for undocumented members. If EXTRACT_ALL is set to YES then this flag will
+# automatically be disabled.
+
+WARN_IF_UNDOCUMENTED = YES
+
+# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for
+# potential errors in the documentation, such as not documenting some
+# parameters in a documented function, or documenting parameters that
+# don't exist or using markup commands wrongly.
+
+WARN_IF_DOC_ERROR = YES
+
+# The WARN_NO_PARAMDOC option can be enabled to get warnings for
+# functions that are documented, but have no documentation for their parameters
+# or return value. If set to NO (the default) doxygen will only warn about
+# wrong or incomplete parameter documentation, but not about the absence of
+# documentation.
+
+WARN_NO_PARAMDOC = NO
+
+# The WARN_FORMAT tag determines the format of the warning messages that
+# doxygen can produce. The string should contain the $file, $line, and $text
+# tags, which will be replaced by the file and line number from which the
+# warning originated and the warning text. Optionally the format may contain
+# $version, which will be replaced by the version of the file (if it could
+# be obtained via FILE_VERSION_FILTER)
+
+WARN_FORMAT = "$file:$line: $text"
+
+# The WARN_LOGFILE tag can be used to specify a file to which warning
+# and error messages should be written. If left blank the output is written
+# to stderr.
+
+WARN_LOGFILE =
+
+#---------------------------------------------------------------------------
+# configuration options related to the input files
+#---------------------------------------------------------------------------
+
+# The INPUT tag can be used to specify the files and/or directories that contain
+# documented source files. You may enter file names like "myfile.cpp" or
+# directories like "/usr/src/myproject". Separate the files or directories
+# with spaces.
+
+INPUT = src/
+
+# This tag can be used to specify the character encoding of the source files
+# that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is
+# also the default input encoding. Doxygen uses libiconv (or the iconv built
+# into libc) for the transcoding. See http://www.gnu.org/software/libiconv for
+# the list of possible encodings.
+
+INPUT_ENCODING = UTF-8
+
+# If the value of the INPUT tag contains directories, you can use the
+# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
+# and *.h) to filter out the source-files in the directories. If left
+# blank the following patterns are tested:
+# *.c *.cc *.cxx *.cpp *.c++ *.d *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh
+# *.hxx *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.dox *.py
+# *.f90 *.f *.for *.vhd *.vhdl
+
+FILE_PATTERNS = *.h *.c
+
+# The RECURSIVE tag can be used to specify whether or not subdirectories
+# should be searched for input files as well. Possible values are YES and NO.
+# If left blank NO is used.
+
+RECURSIVE = YES
+
+# The EXCLUDE tag can be used to specify files and/or directories that should be
+# excluded from the INPUT source files. This way you can easily exclude a
+# subdirectory from a directory tree whose root is specified with the INPUT tag.
+# Note that relative paths are relative to the directory from which doxygen is
+# run.
+
+EXCLUDE =
+
+# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
+# directories that are symbolic links (a Unix file system feature) are excluded
+# from the input.
+
+EXCLUDE_SYMLINKS = NO
+
+# If the value of the INPUT tag contains directories, you can use the
+# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
+# certain files from those directories. Note that the wildcards are matched
+# against the file with absolute path, so to exclude all test directories
+# for example use the pattern */test/*
+
+EXCLUDE_PATTERNS =
+
+# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
+# (namespaces, classes, functions, etc.) that should be excluded from the
+# output. The symbol name can be a fully qualified name, a word, or if the
+# wildcard * is used, a substring. Examples: ANamespace, AClass,
+# AClass::ANamespace, ANamespace::*Test
+
+EXCLUDE_SYMBOLS =
+
+# The EXAMPLE_PATH tag can be used to specify one or more files or
+# directories that contain example code fragments that are included (see
+# the \include command).
+
+EXAMPLE_PATH =
+
+# If the value of the EXAMPLE_PATH tag contains directories, you can use the
+# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
+# and *.h) to filter out the source-files in the directories. If left
+# blank all files are included.
+
+EXAMPLE_PATTERNS =
+
+# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
+# searched for input files to be used with the \include or \dontinclude
+# commands irrespective of the value of the RECURSIVE tag.
+# Possible values are YES and NO. If left blank NO is used.
+
+EXAMPLE_RECURSIVE = NO
+
+# The IMAGE_PATH tag can be used to specify one or more files or
+# directories that contain images that are included in the documentation (see
+# the \image command).
+
+IMAGE_PATH =
+
+# The INPUT_FILTER tag can be used to specify a program that doxygen should
+# invoke to filter for each input file. Doxygen will invoke the filter program
+# by executing (via popen()) the command <filter> <input-file>, where <filter>
+# is the value of the INPUT_FILTER tag, and <input-file> is the name of an
+# input file. Doxygen will then use the output that the filter program writes
+# to standard output.
+# If FILTER_PATTERNS is specified, this tag will be ignored.
+# Note that the filter must not add or remove lines; it is applied before the
+# code is scanned, but not when the output code is generated. If lines are added
+# or removed, the anchors will not be placed correctly.
+
+INPUT_FILTER =
+
+# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
+# basis.
+# Doxygen will compare the file name with each pattern and apply the
+# filter if there is a match.
+# The filters are a list of the form:
+# pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further
+# info on how filters are used. If FILTER_PATTERNS is empty or if
+# none of the patterns match the file name, INPUT_FILTER is applied.
+
+FILTER_PATTERNS =
+
+# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
+# INPUT_FILTER) will be used to filter the input files when producing source
+# files to browse (i.e. when SOURCE_BROWSER is set to YES).
+
+FILTER_SOURCE_FILES = NO
+
+# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file
+# pattern. A pattern will override the setting for FILTER_PATTERN (if any)
+# and it is also possible to disable source filtering for a specific pattern
+# using *.ext= (so without naming a filter). This option only has effect when
+# FILTER_SOURCE_FILES is enabled.
+
+FILTER_SOURCE_PATTERNS =
+
+# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that
+# is part of the input, its contents will be placed on the main page
+# (index.html). This can be useful if you have a project on, for instance,
+# GitHub and want to reuse the introduction page also for the doxygen output.
+
+USE_MDFILE_AS_MAINPAGE =
+
+#---------------------------------------------------------------------------
+# configuration options related to source browsing
+#---------------------------------------------------------------------------
+
+# If the SOURCE_BROWSER tag is set to YES then a list of source files will
+# be generated. Documented entities will be cross-referenced with these sources.
+# Note: To get rid of all source code in the generated output, make sure also
+# VERBATIM_HEADERS is set to NO.
+
+SOURCE_BROWSER = NO
+
+# Setting the INLINE_SOURCES tag to YES will include the body
+# of functions and classes directly in the documentation.
+
+INLINE_SOURCES = NO
+
+# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct
+# doxygen to hide any special comment blocks from generated source code
+# fragments. Normal C, C++ and Fortran comments will always remain visible.
+
+STRIP_CODE_COMMENTS = YES
+
+# If the REFERENCED_BY_RELATION tag is set to YES
+# then for each documented function all documented
+# functions referencing it will be listed.
+
+REFERENCED_BY_RELATION = NO
+
+# If the REFERENCES_RELATION tag is set to YES
+# then for each documented function all documented entities
+# called/used by that function will be listed.
+
+REFERENCES_RELATION = NO
+
+# If the REFERENCES_LINK_SOURCE tag is set to YES (the default)
+# and SOURCE_BROWSER tag is set to YES, then the hyperlinks from
+# functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will
+# link to the source code.
+# Otherwise they will link to the documentation.
+
+REFERENCES_LINK_SOURCE = YES
+
+# If the USE_HTAGS tag is set to YES then the references to source code
+# will point to the HTML generated by the htags(1) tool instead of doxygen's
+# built-in source browser. The htags tool is part of GNU's global source
+# tagging system (see http://www.gnu.org/software/global/global.html). You
+# will need version 4.8.6 or higher.
+
+USE_HTAGS = NO
+
+# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen
+# will generate a verbatim copy of the header file for each class for
+# which an include is specified. Set to NO to disable this.
+
+VERBATIM_HEADERS = YES
+
+#---------------------------------------------------------------------------
+# configuration options related to the alphabetical class index
+#---------------------------------------------------------------------------
+
+# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index
+# of all compounds will be generated. Enable this if the project
+# contains a lot of classes, structs, unions or interfaces.
+
+ALPHABETICAL_INDEX = YES
+
+# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then
+# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns
+# in which this list will be split (can be a number in the range [1..20])
+
+COLS_IN_ALPHA_INDEX = 5
+
+# In case all classes in a project start with a common prefix, all
+# classes will be put under the same header in the alphabetical index.
+# The IGNORE_PREFIX tag can be used to specify one or more prefixes that
+# should be ignored while generating the index headers.
+
+IGNORE_PREFIX =
+
+#---------------------------------------------------------------------------
+# configuration options related to the HTML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_HTML tag is set to YES (the default) Doxygen will
+# generate HTML output.
+
+GENERATE_HTML = YES
+
+# The HTML_OUTPUT tag is used to specify where the HTML docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `html' will be used as the default path.
+
+HTML_OUTPUT = html
+
+# The HTML_FILE_EXTENSION tag can be used to specify the file extension for
+# each generated HTML page (for example: .htm,.php,.asp). If it is left blank
+# doxygen will generate files with .html extension.
+
+HTML_FILE_EXTENSION = .html
+
+# The HTML_HEADER tag can be used to specify a personal HTML header for
+# each generated HTML page. If it is left blank doxygen will generate a
+# standard header. Note that when using a custom header you are responsible
+# for the proper inclusion of any scripts and style sheets that doxygen
+# needs, which is dependent on the configuration options used.
+# It is advised to generate a default header using "doxygen -w html
+# header.html footer.html stylesheet.css YourConfigFile" and then modify
+# that header. Note that the header is subject to change so you typically
+# have to redo this when upgrading to a newer version of doxygen or when
+# changing the value of configuration settings such as GENERATE_TREEVIEW!
+
+HTML_HEADER = contrib/doxygen/header.html
+
+# The HTML_FOOTER tag can be used to specify a personal HTML footer for
+# each generated HTML page. If it is left blank doxygen will generate a
+# standard footer.
+
+HTML_FOOTER = contrib/doxygen/footer.html
+
+# The HTML_STYLESHEET tag can be used to specify a user-defined cascading
+# style sheet that is used by each HTML page. It can be used to
+# fine-tune the look of the HTML output. If left blank doxygen will
+# generate a default style sheet. Note that it is recommended to use
+# HTML_EXTRA_STYLESHEET instead of this one, as it is more robust and this
+# tag will in the future become obsolete.
+
+HTML_STYLESHEET = contrib/doxygen/customdoxygen.css
+
+# The HTML_EXTRA_STYLESHEET tag can be used to specify an additional
+# user-defined cascading style sheet that is included after the standard
+# style sheets created by doxygen. Using this option one can overrule
+# certain style aspects. This is preferred over using HTML_STYLESHEET
+# since it does not replace the standard style sheet and is therefore more
+# robust against future updates. Doxygen will copy the style sheet file to
+# the output directory.
+
+HTML_EXTRA_STYLESHEET = contrib/doxygen/extra.css
+
+# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or
+# other source files which should be copied to the HTML output directory. Note
+# that these files will be copied to the base HTML output directory. Use the
+# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these
+# files. In the HTML_STYLESHEET file, use the file name only. Also note that
+# the files will be copied as-is; there are no commands or markers available.
+
+HTML_EXTRA_FILES =
+
+# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output.
+# Doxygen will adjust the colors in the style sheet and background images
+# according to this color. Hue is specified as an angle on a colorwheel,
+# see http://en.wikipedia.org/wiki/Hue for more information.
+# For instance the value 0 represents red, 60 is yellow, 120 is green,
+# 180 is cyan, 240 is blue, 300 purple, and 360 is red again.
+# The allowed range is 0 to 359.
+
+HTML_COLORSTYLE_HUE = 220
+
+# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of
+# the colors in the HTML output. For a value of 0 the output will use
+# grayscales only. A value of 255 will produce the most vivid colors.
+
+HTML_COLORSTYLE_SAT = 100
+
+# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to
+# the luminance component of the colors in the HTML output. Values below
+# 100 gradually make the output lighter, whereas values above 100 make
+# the output darker. The value divided by 100 is the actual gamma applied,
+# so 80 represents a gamma of 0.8, the value 220 represents a gamma of 2.2,
+# and 100 does not change the gamma.
+
+HTML_COLORSTYLE_GAMMA = 80
+
+# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
+# page will contain the date and time when the page was generated. Setting
+# this to NO can help when comparing the output of multiple runs.
+
+HTML_TIMESTAMP = YES
+
+# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
+# documentation will contain sections that can be hidden and shown after the
+# page has loaded.
+
+HTML_DYNAMIC_SECTIONS = NO
+
+# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of
+# entries shown in the various tree structured indices initially; the user
+# can expand and collapse entries dynamically later on. Doxygen will expand
+# the tree to such a level that at most the specified number of entries are
+# visible (unless a fully collapsed tree already exceeds this amount).
+# So setting the number of entries to 1 will produce a fully collapsed tree by
+# default. 0 is a special value representing an infinite number of entries
+# and will result in a fully expanded tree by default.
+
+HTML_INDEX_NUM_ENTRIES = 100
+
+# If the GENERATE_DOCSET tag is set to YES, additional index files
+# will be generated that can be used as input for Apple's Xcode 3
+# integrated development environment, introduced with OSX 10.5 (Leopard).
+# To create a documentation set, doxygen will generate a Makefile in the
+# HTML output directory. Running make will produce the docset in that
+# directory and running "make install" will install the docset in
+# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find
+# it at startup.
+# See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html
+# for more information.
+
+GENERATE_DOCSET = NO
+
+# When GENERATE_DOCSET tag is set to YES, this tag determines the name of the
+# feed. A documentation feed provides an umbrella under which multiple
+# documentation sets from a single provider (such as a company or product suite)
+# can be grouped.
+
+DOCSET_FEEDNAME = "Doxygen generated docs"
+
+# When GENERATE_DOCSET tag is set to YES, this tag specifies a string that
+# should uniquely identify the documentation set bundle. This should be a
+# reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen
+# will append .docset to the name.
+
+DOCSET_BUNDLE_ID = org.doxygen.Project
+
+# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely
+# identify the documentation publisher. This should be a reverse domain-name
+# style string, e.g. com.mycompany.MyDocSet.documentation.
+
+DOCSET_PUBLISHER_ID = org.doxygen.Publisher
+
+# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher.
+
+DOCSET_PUBLISHER_NAME = Publisher
+
+# If the GENERATE_HTMLHELP tag is set to YES, additional index files
+# will be generated that can be used as input for tools like the
+# Microsoft HTML help workshop to generate a compiled HTML help file (.chm)
+# of the generated HTML documentation.
+
+GENERATE_HTMLHELP = NO
+
+# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can
+# be used to specify the file name of the resulting .chm file. You
+# can add a path in front of the file if the result should not be
+# written to the html output directory.
+
+CHM_FILE =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can
+# be used to specify the location (absolute path including file name) of
+# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run
+# the HTML help compiler on the generated index.hhp.
+
+HHC_LOCATION =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag
+# controls whether a separate .chi index file is generated (YES) or
+# included in the master .chm file (NO).
+
+GENERATE_CHI = NO
+
+# If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING
+# is used to encode HtmlHelp index (hhk), content (hhc) and project file
+# content.
+
+CHM_INDEX_ENCODING =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag
+# controls whether a binary table of contents is generated (YES) or a
+# normal table of contents (NO) in the .chm file.
+
+BINARY_TOC = NO
+
+# The TOC_EXPAND flag can be set to YES to add extra items for group members
+# to the contents of the HTML help documentation and to the tree view.
+
+TOC_EXPAND = NO
+
+# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and
+# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated
+# that can be used as input for Qt's qhelpgenerator to generate a
+# Qt Compressed Help (.qch) of the generated HTML documentation.
+
+GENERATE_QHP = NO
+
+# If the QHG_LOCATION tag is specified, the QCH_FILE tag can
+# be used to specify the file name of the resulting .qch file.
+# The path specified is relative to the HTML output folder.
+
+QCH_FILE =
+
+# The QHP_NAMESPACE tag specifies the namespace to use when generating
+# Qt Help Project output. For more information please see
+# http://doc.trolltech.com/qthelpproject.html#namespace
+
+QHP_NAMESPACE = org.doxygen.Project
+
+# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating
+# Qt Help Project output. For more information please see
+# http://doc.trolltech.com/qthelpproject.html#virtual-folders
+
+QHP_VIRTUAL_FOLDER = doc
+
+# If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to
+# add. For more information please see
+# http://doc.trolltech.com/qthelpproject.html#custom-filters
+
+QHP_CUST_FILTER_NAME =
+
+# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the
+# custom filter to add. For more information please see
+# <a href="http://doc.trolltech.com/qthelpproject.html#custom-filters">
+# Qt Help Project / Custom Filters</a>.
+
+QHP_CUST_FILTER_ATTRS =
+
+# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
+# project's filter section matches.
+# <a href="http://doc.trolltech.com/qthelpproject.html#filter-attributes">
+# Qt Help Project / Filter Attributes</a>.
+
+QHP_SECT_FILTER_ATTRS =
+
+# If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can
+# be used to specify the location of Qt's qhelpgenerator.
+# If non-empty doxygen will try to run qhelpgenerator on the generated
+# .qhp file.
+
+QHG_LOCATION =
+
+# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files
+# will be generated, which together with the HTML files, form an Eclipse help
+# plugin. To install this plugin and make it available under the help contents
+# menu in Eclipse, the contents of the directory containing the HTML and XML
+# files needs to be copied into the plugins directory of eclipse. The name of
+# the directory within the plugins directory should be the same as
+# the ECLIPSE_DOC_ID value. After copying Eclipse needs to be restarted before
+# the help appears.
+
+GENERATE_ECLIPSEHELP = NO
+
+# A unique identifier for the eclipse help plugin. When installing the plugin
+# the directory name containing the HTML and XML files should also have
+# this name.
+
+ECLIPSE_DOC_ID = org.doxygen.Project
+
+# The DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs)
+# at top of each HTML page. The value NO (the default) enables the index and
+# the value YES disables it. Since the tabs have the same information as the
+# navigation tree you can set this option to NO if you already set
+# GENERATE_TREEVIEW to YES.
+
+DISABLE_INDEX = NO
+
+# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
+# structure should be generated to display hierarchical information.
+# If the tag value is set to YES, a side panel will be generated
+# containing a tree-like index structure (just like the one that
+# is generated for HTML Help). For this to work a browser that supports
+# JavaScript, DHTML, CSS and frames is required (i.e. any modern browser).
+# Windows users are probably better off using the HTML help feature.
+# Since the tree basically has the same information as the tab index you
+# could consider to set DISABLE_INDEX to NO when enabling this option.
+
+GENERATE_TREEVIEW = NO
+
+# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values
+# (range [0,1..20]) that doxygen will group on one line in the generated HTML
+# documentation. Note that a value of 0 will completely suppress the enum
+# values from appearing in the overview section.
+
+ENUM_VALUES_PER_LINE = 4
+
+# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be
+# used to set the initial width (in pixels) of the frame in which the tree
+# is shown.
+
+TREEVIEW_WIDTH = 250
+
+# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open
+# links to external symbols imported via tag files in a separate window.
+
+EXT_LINKS_IN_WINDOW = NO
+
+# Use this tag to change the font size of Latex formulas included
+# as images in the HTML documentation. The default is 10. Note that
+# when you change the font size after a successful doxygen run you need
+# to manually remove any form_*.png images from the HTML output directory
+# to force them to be regenerated.
+
+FORMULA_FONTSIZE = 10
+
+# Use the FORMULA_TRANSPARENT tag to determine whether or not the images
+# generated for formulas are transparent PNGs. Transparent PNGs are
+# not supported properly for IE 6.0, but are supported on all modern browsers.
+# Note that when changing this option you need to delete any form_*.png files
+# in the HTML output before the changes have effect.
+
+FORMULA_TRANSPARENT = YES
+
+# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax
+# (see http://www.mathjax.org) which uses client side Javascript for the
+# rendering instead of using prerendered bitmaps. Use this if you do not
+# have LaTeX installed or if you want the formulas to look prettier in the HTML
+# output. When enabled you may also need to install MathJax separately and
+# configure the path to it using the MATHJAX_RELPATH option.
+
+USE_MATHJAX = NO
+
+# When MathJax is enabled you can set the default output format to be used for
+# the MathJax output. Supported types are HTML-CSS, NativeMML (i.e. MathML) and
+# SVG. The default value is HTML-CSS, which is slower, but has the best
+# compatibility.
+
+MATHJAX_FORMAT = HTML-CSS
+
+# When MathJax is enabled you need to specify the location relative to the
+# HTML output directory using the MATHJAX_RELPATH option. The destination
+# directory should contain the MathJax.js script. For instance, if the mathjax
+# directory is located at the same level as the HTML output directory, then
+# MATHJAX_RELPATH should be ../mathjax. The default value points to
+# the MathJax Content Delivery Network so you can quickly see the result without
+# installing MathJax.
+# However, it is strongly recommended to install a local
+# copy of MathJax from http://www.mathjax.org before deployment.
+
+MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest
+
+# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax extension
+# names that should be enabled during MathJax rendering.
+
+MATHJAX_EXTENSIONS =
+
+# The MATHJAX_CODEFILE tag can be used to specify a file with javascript
+# pieces of code that will be used on startup of the MathJax code.
+
+MATHJAX_CODEFILE =
+
+# When the SEARCHENGINE tag is enabled doxygen will generate a search box
+# for the HTML output. The underlying search engine uses javascript
+# and DHTML and should work on any modern browser. Note that when using
+# HTML help (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets
+# (GENERATE_DOCSET) there is already a search function so this one should
+# typically be disabled. For large projects the javascript based search engine
+# can be slow; in that case enabling SERVER_BASED_SEARCH may provide a better solution.
+
+SEARCHENGINE = YES
+
+# When the SERVER_BASED_SEARCH tag is enabled the search engine will be
+# implemented using a web server instead of a web client using Javascript.
+# There are two flavours of web server based search depending on the
+# EXTERNAL_SEARCH setting. When disabled, doxygen will generate a PHP script for
+# searching and an index file used by the script. When EXTERNAL_SEARCH is
+# enabled the indexing and searching needs to be provided by external tools.
+# See the manual for details.
+
+SERVER_BASED_SEARCH = NO
+
+# When EXTERNAL_SEARCH is enabled doxygen will no longer generate the PHP
+# script for searching. Instead the search results are written to an XML file
+# which needs to be processed by an external indexer. Doxygen will invoke an
+# external search engine pointed to by the SEARCHENGINE_URL option to obtain
+# the search results. Doxygen ships with an example indexer (doxyindexer) and
+# search engine (doxysearch.cgi) which are based on the open source search
+# engine library Xapian. See the manual for configuration details.
+
+EXTERNAL_SEARCH = NO
+
+# The SEARCHENGINE_URL should point to a search engine hosted by a web server
+# which will return the search results when EXTERNAL_SEARCH is enabled.
+# Doxygen ships with an example search engine (doxysearch) which is based on
+# the open source search engine library Xapian. See the manual for configuration
+# details.
+
+SEARCHENGINE_URL =
+
+# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the unindexed
+# search data is written to a file for indexing by an external tool. With the
+# SEARCHDATA_FILE tag the name of this file can be specified.
+
+SEARCHDATA_FILE = searchdata.xml
+
+# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the
+# EXTERNAL_SEARCH_ID tag can be used as an identifier for the project. This is
+# useful in combination with EXTRA_SEARCH_MAPPINGS to search through multiple
+# projects and redirect the results back to the right project.
+
+EXTERNAL_SEARCH_ID =
+
+# The EXTRA_SEARCH_MAPPINGS tag can be used to enable searching through doxygen
+# projects other than the one defined by this configuration file, but that are
+# all added to the same external search index. Each project needs to have a
+# unique id set via EXTERNAL_SEARCH_ID. The search mapping then maps the id
+# to a relative location where the documentation can be found.
+# The format is: EXTRA_SEARCH_MAPPINGS = id1=loc1 id2=loc2 ...
+
+EXTRA_SEARCH_MAPPINGS =
+
+#---------------------------------------------------------------------------
+# configuration options related to the LaTeX output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will
+# generate Latex output.
+
+GENERATE_LATEX = YES
+
+# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `latex' will be used as the default path.
+
+LATEX_OUTPUT = latex
+
+# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
+# invoked. If left blank `latex' will be used as the default command name.
+# Note that when enabling USE_PDFLATEX this option is only used for
+# generating bitmaps for formulas in the HTML output, but not in the
+# Makefile that is written to the output directory.
+
+LATEX_CMD_NAME = latex
+
+# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to
+# generate index for LaTeX. If left blank `makeindex' will be used as the
+# default command name.
+
+MAKEINDEX_CMD_NAME = makeindex
+
+# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact
+# LaTeX documents. This may be useful for small projects and may help to
+# save some trees in general.
+
+COMPACT_LATEX = NO
+
+# The PAPER_TYPE tag can be used to set the paper type that is used
+# by the printer. Possible values are: a4, letter, legal and
+# executive. If left blank a4 will be used.
+
+PAPER_TYPE = a4
+
+# The EXTRA_PACKAGES tag can be used to specify one or more names of LaTeX
+# packages that should be included in the LaTeX output.
+
+EXTRA_PACKAGES =
+
+# The LATEX_HEADER tag can be used to specify a personal LaTeX header for
+# the generated latex document. The header should contain everything until
+# the first chapter. If it is left blank doxygen will generate a
+# standard header. Notice: only use this tag if you know what you are doing!
+
+LATEX_HEADER =
+
+# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for
+# the generated latex document. The footer should contain everything after
+# the last chapter. If it is left blank doxygen will generate a
+# standard footer. Notice: only use this tag if you know what you are doing!
+
+LATEX_FOOTER =
+
+# The LATEX_EXTRA_FILES tag can be used to specify one or more extra images
+# or other source files which should be copied to the LaTeX output directory.
+# Note that the files will be copied as-is; there are no commands or markers
+# available.
+
+LATEX_EXTRA_FILES =
+
+# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated
+# is prepared for conversion to pdf (using ps2pdf). The pdf file will
+# contain links (just like the HTML output) instead of page references.
+# This makes the output suitable for online browsing using a pdf viewer.
+
+PDF_HYPERLINKS = YES
+
+# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of
+# plain latex in the generated Makefile. Set this option to YES to get a
+# higher quality PDF documentation.
+
+USE_PDFLATEX = YES
+
+# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode
+# command to the generated LaTeX files. This will instruct LaTeX to keep
+# running if errors occur, instead of asking the user for help.
+# This option is also used when generating formulas in HTML.
+
+LATEX_BATCHMODE = NO
+
+# If LATEX_HIDE_INDICES is set to YES then doxygen will not
+# include the index chapters (such as File Index, Compound Index, etc.)
+# in the output.
+
+LATEX_HIDE_INDICES = NO
+
+# If LATEX_SOURCE_CODE is set to YES then doxygen will include
+# source code with syntax highlighting in the LaTeX output.
+# Note that which sources are shown also depends on other settings
+# such as SOURCE_BROWSER.
+
+LATEX_SOURCE_CODE = NO
+
+# The LATEX_BIB_STYLE tag can be used to specify the style to use for the
+# bibliography, e.g. plainnat, or ieeetr. The default style is "plain". See
+# http://en.wikipedia.org/wiki/BibTeX for more info.
+
+LATEX_BIB_STYLE = plain
+
+#---------------------------------------------------------------------------
+# configuration options related to the RTF output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output
+# The RTF output is optimized for Word 97 and may not look very pretty with
+# other RTF readers or editors.
+
+GENERATE_RTF = NO
+
+# The RTF_OUTPUT tag is used to specify where the RTF docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `rtf' will be used as the default path.
+
+RTF_OUTPUT = rtf
+
+# If the COMPACT_RTF tag is set to YES Doxygen generates more compact
+# RTF documents. This may be useful for small projects and may help to
+# save some trees in general.
+
+COMPACT_RTF = NO
+
+# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated
+# will contain hyperlink fields. The RTF file will
+# contain links (just like the HTML output) instead of page references.
+# This makes the output suitable for online browsing using WORD or other
+# programs which support those fields.
+# Note: wordpad (write) and others do not support links.
+
+RTF_HYPERLINKS = NO
+
+# Load style sheet definitions from file. Syntax is similar to doxygen's
+# config file, i.e. a series of assignments. You only have to provide
+# replacements, missing definitions are set to their default value.
+
+RTF_STYLESHEET_FILE =
+
+# Set optional variables used in the generation of an rtf document.
+# Syntax is similar to doxygen's config file.
+
+RTF_EXTENSIONS_FILE =
+
+#---------------------------------------------------------------------------
+# configuration options related to the man page output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_MAN tag is set to YES (the default) Doxygen will
+# generate man pages
+
+GENERATE_MAN = NO
+
+# The MAN_OUTPUT tag is used to specify where the man pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `man' will be used as the default path.
+
+MAN_OUTPUT = man
+
+# The MAN_EXTENSION tag determines the extension that is added to
+# the generated man pages (default is the subroutine's section .3)
+
+MAN_EXTENSION = .3
+
+# If the MAN_LINKS tag is set to YES and Doxygen generates man output,
+# then it will generate one additional man file for each entity
+# documented in the real man page(s). These additional files
+# only source the real man page, but without them the man command
+# would be unable to find the correct page. The default is NO.
+
+MAN_LINKS = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the XML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_XML tag is set to YES Doxygen will
+# generate an XML file that captures the structure of
+# the code including all documentation.
+
+GENERATE_XML = NO
+
+# The XML_OUTPUT tag is used to specify where the XML pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `xml' will be used as the default path.
+
+XML_OUTPUT = xml
+
+# The XML_SCHEMA tag can be used to specify an XML schema,
+# which can be used by a validating XML parser to check the
+# syntax of the XML files.
+
+XML_SCHEMA =
+
+# The XML_DTD tag can be used to specify an XML DTD,
+# which can be used by a validating XML parser to check the
+# syntax of the XML files.
+
+XML_DTD =
+
+# If the XML_PROGRAMLISTING tag is set to YES Doxygen will
+# dump the program listings (including syntax highlighting
+# and cross-referencing information) to the XML output. Note that
+# enabling this will significantly increase the size of the XML output.
+
+XML_PROGRAMLISTING = YES
+
+#---------------------------------------------------------------------------
+# configuration options related to the DOCBOOK output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_DOCBOOK tag is set to YES Doxygen will generate DOCBOOK files
+# that can be used to generate PDF.
+
+GENERATE_DOCBOOK = NO
+
+# The DOCBOOK_OUTPUT tag is used to specify where the DOCBOOK pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be put in
+# front of it. If left blank docbook will be used as the default path.
+
+DOCBOOK_OUTPUT = docbook
+
+#---------------------------------------------------------------------------
+# configuration options for the AutoGen Definitions output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will
+# generate an AutoGen Definitions (see autogen.sf.net) file
+# that captures the structure of the code including all
+# documentation. Note that this feature is still experimental
+# and incomplete at the moment.
+
+GENERATE_AUTOGEN_DEF = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the Perl module output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_PERLMOD tag is set to YES Doxygen will
+# generate a Perl module file that captures the structure of
+# the code including all documentation. Note that this
+# feature is still experimental and incomplete at the
+# moment.
+
+GENERATE_PERLMOD = NO
+
+# If the PERLMOD_LATEX tag is set to YES Doxygen will generate
+# the necessary Makefile rules, Perl scripts and LaTeX code to be able
+# to generate PDF and DVI output from the Perl module output.
+
+PERLMOD_LATEX = NO
+
+# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be
+# nicely formatted so it can be parsed by a human reader. This is useful if
+# you want to understand what is going on. On the other hand, if this tag is
+# set to NO the size of the Perl module output will be much smaller and Perl
+# will parse it just the same.
+
+PERLMOD_PRETTY = YES
+
+# The names of the make variables in the generated doxyrules.make file
+# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX.
+# This is useful so different doxyrules.make files included by the same
+# Makefile don't overwrite each other's variables.
+
+PERLMOD_MAKEVAR_PREFIX =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the preprocessor
+#---------------------------------------------------------------------------
+
+# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will
+# evaluate all C-preprocessor directives found in the sources and include
+# files.
+
+ENABLE_PREPROCESSING = YES
+
+# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro
+# names in the source code. If set to NO (the default) only conditional
+# compilation will be performed. Macro expansion can be done in a controlled
+# way by setting EXPAND_ONLY_PREDEF to YES.
+
+MACRO_EXPANSION = NO
+
+# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES
+# then the macro expansion is limited to the macros specified with the
+# PREDEFINED and EXPAND_AS_DEFINED tags.
+
+EXPAND_ONLY_PREDEF = NO
+
+# If the SEARCH_INCLUDES tag is set to YES (the default) the include files
+# pointed to by INCLUDE_PATH will be searched when a #include is found.
+
+SEARCH_INCLUDES = YES
+
+# The INCLUDE_PATH tag can be used to specify one or more directories that
+# contain include files that are not input files but should be processed by
+# the preprocessor.
+
+INCLUDE_PATH =
+
+# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
+# patterns (like *.h and *.hpp) to filter out the header-files in the
+# directories. If left blank, the patterns specified with FILE_PATTERNS will
+# be used.
+
+INCLUDE_FILE_PATTERNS =
+
+# The PREDEFINED tag can be used to specify one or more macro names that
+# are defined before the preprocessor is started (similar to the -D option of
+# gcc). The argument of the tag is a list of macros of the form: name
+# or name=definition (no spaces). If the definition and the = are
+# omitted =1 is assumed. To prevent a macro definition from being
+# undefined via #undef or recursively expanded use the := operator
+# instead of the = operator.
+
+PREDEFINED =
+
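+# As an illustration only (not part of this configuration), a hypothetical
+# attribute macro could be hidden from the generated documentation and pinned
+# with the := operator so it can neither be #undef'd nor recursively expanded:
+#
+# PREDEFINED = EXAMPLE_ATTR_UNUSED:=
+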
+# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then
+# this tag can be used to specify a list of macro names that should be expanded.
+# The macro definition that is found in the sources will be used.
+# Use the PREDEFINED tag if you want to use a different macro definition that
+# overrules the definition found in the source code.
+
+EXPAND_AS_DEFINED =
+
+# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then
+# doxygen's preprocessor will remove all references to function-like macros
+# that are alone on a line, have an all uppercase name, and do not end with a
+# semicolon, because these will confuse the parser if not removed.
+
+SKIP_FUNCTION_MACROS = YES
+
+#---------------------------------------------------------------------------
+# Configuration::additions related to external references
+#---------------------------------------------------------------------------
+
+# The TAGFILES option can be used to specify one or more tagfiles. For each
+# tag file the location of the external documentation should be added. The
+# format of a tag file without this location is as follows:
+#
+# TAGFILES = file1 file2 ...
+# Adding location for the tag files is done as follows:
+#
+# TAGFILES = file1=loc1 "file2 = loc2" ...
+# where "loc1" and "loc2" can be relative or absolute paths
+# or URLs. Note that each tag file must have a unique name (where the name does
+# NOT include the path). If a tag file is not located in the directory in which
+# doxygen is run, you must also specify the path to the tagfile here.
+
+TAGFILES =
+
+# When a file name is specified after GENERATE_TAGFILE, doxygen will create
+# a tag file that is based on the input files it reads.
+
+GENERATE_TAGFILE =
+
+# If the ALLEXTERNALS tag is set to YES all external classes will be listed
+# in the class index. If set to NO only the inherited external classes
+# will be listed.
+
+ALLEXTERNALS = NO
+
+# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed
+# in the modules index. If set to NO, only the current project's groups will
+# be listed.
+
+EXTERNAL_GROUPS = YES
+
+# If the EXTERNAL_PAGES tag is set to YES all external pages will be listed
+# in the related pages index. If set to NO, only the current project's
+# pages will be listed.
+
+EXTERNAL_PAGES = YES
+
+# The PERL_PATH should be the absolute path and name of the perl script
+# interpreter (i.e. the result of `which perl').
+
+PERL_PATH = /usr/bin/perl
+
+#---------------------------------------------------------------------------
+# Configuration options related to the dot tool
+#---------------------------------------------------------------------------
+
+# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will
+# generate an inheritance diagram (in HTML, RTF and LaTeX) for classes with base
+# or super classes. Setting the tag to NO turns the diagrams off. Note that
+# this option also works with HAVE_DOT disabled, but it is recommended to
+# install and use dot, since it yields more powerful graphs.
+
+CLASS_DIAGRAMS = YES
+
+# You can define message sequence charts within doxygen comments using the \msc
+# command. Doxygen will then run the mscgen tool (see
+# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the
+# documentation. The MSCGEN_PATH tag allows you to specify the directory where
+# the mscgen tool resides. If left empty the tool is assumed to be found in the
+# default search path.
+
+MSCGEN_PATH =
+
+# If set to YES, the inheritance and collaboration graphs will hide
+# inheritance and usage relations if the target is undocumented
+# or is not a class.
+
+HIDE_UNDOC_RELATIONS = YES
+
+# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
+# available from the path. This tool is part of Graphviz, a graph visualization
+# toolkit from AT&T and Lucent Bell Labs. The other options in this section
+# have no effect if this option is set to NO (the default)
+
+HAVE_DOT = NO
+
+# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is
+# allowed to run in parallel. When set to 0 (the default) doxygen will
+# base this on the number of processors available in the system. You can set it
+# explicitly to a value larger than 0 to get control over the balance
+# between CPU load and processing speed.
+
+DOT_NUM_THREADS = 0
+
+# By default doxygen will use the Helvetica font for all dot files that
+# doxygen generates. When you want a differently looking font you can specify
+# the font name using DOT_FONTNAME. You need to make sure dot is able to find
+# the font, which can be done by putting it in a standard location or by setting
+# the DOTFONTPATH environment variable or by setting DOT_FONTPATH to the
+# directory containing the font.
+
+DOT_FONTNAME = Helvetica
+
+# The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs.
+# The default size is 10pt.
+
+DOT_FONTSIZE = 10
+
+# By default doxygen will tell dot to use the Helvetica font.
+# If you specify a different font using DOT_FONTNAME you can use DOT_FONTPATH to
+# set the path where dot can find it.
+
+DOT_FONTPATH =
+
+# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for each documented class showing the direct and
+# indirect inheritance relations. Setting this tag to YES will force the
+# CLASS_DIAGRAMS tag to NO.
+
+CLASS_GRAPH = YES
+
+# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for each documented class showing the direct and
+# indirect implementation dependencies (inheritance, containment, and
+# class references variables) of the class with other documented classes.
+
+COLLABORATION_GRAPH = YES
+
+# If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for groups, showing the direct groups dependencies
+
+GROUP_GRAPHS = YES
+
+# If the UML_LOOK tag is set to YES doxygen will generate inheritance and
+# collaboration diagrams in a style similar to the OMG's Unified Modeling
+# Language.
+
+UML_LOOK = NO
+
+# If the UML_LOOK tag is enabled, the fields and methods are shown inside
+# the class node. If there are many fields or methods and many nodes the
+# graph may become too big to be useful. The UML_LIMIT_NUM_FIELDS
+# threshold limits the number of items for each type to make the size more
+# manageable. Set this to 0 for no limit. Note that the threshold may be
+# exceeded by 50% before the limit is enforced.
+
+UML_LIMIT_NUM_FIELDS = 10
+
+# If set to YES, the inheritance and collaboration graphs will show the
+# relations between templates and their instances.
+
+TEMPLATE_RELATIONS = NO
+
+# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT
+# tags are set to YES then doxygen will generate a graph for each documented
+# file showing the direct and indirect include dependencies of the file with
+# other documented files.
+
+INCLUDE_GRAPH = YES
+
+# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and
+# HAVE_DOT tags are set to YES then doxygen will generate a graph for each
+# documented header file showing the documented files that directly or
+# indirectly include this file.
+
+INCLUDED_BY_GRAPH = YES
+
+# If the CALL_GRAPH and HAVE_DOT options are set to YES then
+# doxygen will generate a call dependency graph for every global function
+# or class method. Note that enabling this option will significantly increase
+# the time of a run. So in most cases it will be better to enable call graphs
+# for selected functions only using the \callgraph command.
+
+CALL_GRAPH = NO
+
+# If the CALLER_GRAPH and HAVE_DOT tags are set to YES then
+# doxygen will generate a caller dependency graph for every global function
+# or class method. Note that enabling this option will significantly increase
+# the time of a run. So in most cases it will be better to enable caller
+# graphs for selected functions only using the \callergraph command.
+
+CALLER_GRAPH = NO
+
+# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen
+# will generate a graphical hierarchy of all classes instead of a textual one.
+
+GRAPHICAL_HIERARCHY = YES
+
+# If the DIRECTORY_GRAPH and HAVE_DOT tags are set to YES
+# then doxygen will show the dependencies a directory has on other directories
+# in a graphical way. The dependency relations are determined by the #include
+# relations between the files in the directories.
+
+DIRECTORY_GRAPH = YES
+
+# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
+# generated by dot. Possible values are svg, png, jpg, or gif.
+# If left blank png will be used. If you choose svg you need to set
+# HTML_FILE_EXTENSION to xhtml in order to make the SVG files
+# visible in IE 9+ (other browsers do not have this requirement).
+
+DOT_IMAGE_FORMAT = png
+
+# If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to
+# enable generation of interactive SVG images that allow zooming and panning.
+# Note that this requires a modern browser other than Internet Explorer.
+# Tested and working are Firefox, Chrome, Safari, and Opera. For IE 9+ you
+# need to set HTML_FILE_EXTENSION to xhtml in order to make the SVG files
+# visible. Older versions of IE do not have SVG support.
+
+INTERACTIVE_SVG = NO
+
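+# As an illustration only (these values are not enabled here), zoomable and
+# pannable graphs would be requested by combining the two options above, with
+# HTML_FILE_EXTENSION set to .xhtml so IE 9+ can display the SVG files:
+#
+# DOT_IMAGE_FORMAT = svg
+# INTERACTIVE_SVG = YES
+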
+# The tag DOT_PATH can be used to specify the path where the dot tool can be
+# found. If left blank, it is assumed the dot tool can be found in the path.
+
+DOT_PATH =
+
+# The DOTFILE_DIRS tag can be used to specify one or more directories that
+# contain dot files that are included in the documentation (see the
+# \dotfile command).
+
+DOTFILE_DIRS =
+
+# The MSCFILE_DIRS tag can be used to specify one or more directories that
+# contain msc files that are included in the documentation (see the
+# \mscfile command).
+
+MSCFILE_DIRS =
+
+# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of
+# nodes that will be shown in the graph. If the number of nodes in a graph
+# becomes larger than this value, doxygen will truncate the graph, which is
+# visualized by representing a node as a red box. Note that doxygen if the
+# number of direct children of the root node in a graph is already larger than
+# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note
+# that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH.
+
+DOT_GRAPH_MAX_NODES = 50
+
+# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the
+# graphs generated by dot. A depth value of 3 means that only nodes reachable
+# from the root by following a path via at most 3 edges will be shown. Nodes
+# that lie further from the root node will be omitted. Note that setting this
+# option to 1 or 2 may greatly reduce the computation time needed for large
+# code bases. Also note that the size of a graph can be further restricted by
+# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction.
+
+MAX_DOT_GRAPH_DEPTH = 0
+
+# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
+# background. This is disabled by default, because dot on Windows does not
+# seem to support this out of the box. Warning: Depending on the platform used,
+# enabling this option may lead to badly anti-aliased labels on the edges of
+# a graph (i.e. they become hard to read).
+
+DOT_TRANSPARENT = NO
+
+# Set the DOT_MULTI_TARGETS tag to YES to allow dot to generate multiple output
+# files in one run (i.e. multiple -o and -T options on the command line). This
+# makes dot run faster, but since only newer versions of dot (>1.8.10)
+# support this, this feature is disabled by default.
+
+DOT_MULTI_TARGETS = YES
+
+# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will
+# generate a legend page explaining the meaning of the various boxes and
+# arrows in the dot generated graphs.
+
+GENERATE_LEGEND = YES
+
+# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will
+# remove the intermediate dot files that are used to generate
+# the various graphs.
+
+DOT_CLEANUP = YES
diff --git a/src/clint.py b/src/clint.py
new file mode 100755
index 0000000000..80e4c4ac39
--- /dev/null
+++ b/src/clint.py
@@ -0,0 +1,3544 @@
+#!/usr/bin/env python
+# vim: set fileencoding=utf-8
+#
+# Copyright (c) 2009 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Does neovim-lint on c files.
+
+The goal of this script is to identify places in the code that *may*
+be in non-compliance with neovim style. It does not attempt to fix
+up these problems -- the point is to educate. It also does not
+attempt to find all problems, or to ensure that everything it does
+find is legitimately a problem.
+
+In particular, we can get very confused by /* and // inside strings!
+We do a small hack, which is to ignore //'s with "'s after them on the
+same line, but it is far from perfect (in either direction).
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import codecs
+import copy
+import getopt
+import math # for log
+import os
+import re
+import sre_compile
+import string
+import sys
+import unicodedata
+import json
+import collections # for defaultdict
+
+
+_USAGE = """
+Syntax: clint.py [--verbose=#] [--output=vs7] [--filter=-x,+y,...]
+ [--counting=total|toplevel|detailed] [--root=subdir]
+ [--linelength=digits] [--record-errors=file]
+ [--suppress-errors=file]
+ <file> [file] ...
+
+ The style guidelines this tries to follow are those in
+ http://neovim.io/development-wiki/style-guide/style-guide.xml
+
+ Note: This is Google's cpplint.py modified for use with the Neovim project,
+ which follows the Google C++ coding convention except with the following
+ modifications:
+
+ * Function names are lower_case.
+ * Struct and enum names that are not typedef-ed are struct lower_case and
+ enum lower_case.
+ * The opening brace for functions appears on the next line.
+ * All control structures must always use braces.
+
+ Neovim is a C project. As a result, for .c and .h files, the following rules
+ are suppressed:
+
+ * [whitespace/braces] { should almost always be at the end of the previous
+ line
+ * [build/include] Include the directory when naming .h files
+ * [runtime/int] Use int16/int64/etc, rather than the C type.
+
+ Every problem is given a confidence score from 1-5, with 5 meaning we are
+ certain of the problem, and 1 meaning it could be a legitimate construct.
+ This will miss some errors, and is not a substitute for a code review.
+
+ To suppress false-positive errors of a certain category, add a
+ 'NOLINT(category)' comment to the line. NOLINT or NOLINT(*)
+ suppresses errors of all categories on that line.
+
+ The files passed in will be linted; at least one file must be provided.
+ Default linted extensions are .c and .h. Change the
+ extensions with the --extensions flag.
+
+ Flags:
+
+ output=vs7
+ By default, the output is formatted to ease emacs parsing. Visual Studio
+ compatible output (vs7) may also be used. Other formats are unsupported.
+
+ verbose=#
+ Specify a number 0-5 to restrict errors to certain verbosity levels.
+
+ filter=-x,+y,...
+ Specify a comma-separated list of category-filters to apply: only
+ error messages whose category names pass the filters will be printed.
+ (Category names are printed with the message and look like
+ "[whitespace/indent]".) Filters are evaluated left to right.
+ "-FOO" and "FOO" mean "do not print categories that start with FOO".
+ "+FOO" means "do print categories that start with FOO".
+
+ Examples: --filter=-whitespace,+whitespace/braces
+ --filter=whitespace,runtime/printf,+runtime/printf_format
+ --filter=-,+build/include_what_you_use
+
+ To see a list of all the categories used in cpplint, pass no arg:
+ --filter=
+
+ counting=total|toplevel|detailed
+ The total number of errors found is always printed. If
+ 'toplevel' is provided, then the count of errors in each of
+ the top-level categories like 'build' and 'whitespace' will
+ also be printed. If 'detailed' is provided, then a count
+ is provided for each category.
+
+ root=subdir
+ The root directory used for deriving header guard CPP variable.
+ By default, the header guard CPP variable is calculated as the relative
+ path to the directory that contains .git, .hg, or .svn. When this flag
+ is specified, the relative path is calculated from the specified
+ directory. If the specified directory does not exist, this flag is
+ ignored.
+
+ Examples:
+ Assuming that src/.git exists, the header guard CPP variables for
+ src/chrome/browser/ui/browser.h are:
+
+ No flag => CHROME_BROWSER_UI_BROWSER_H_
+ --root=chrome => BROWSER_UI_BROWSER_H_
+ --root=chrome/browser => UI_BROWSER_H_
+
+ linelength=digits
+ This is the allowed line length for the project. The default value is
+ 80 characters.
+
+ Examples:
+ --linelength=120
+
+ extensions=extension,extension,...
+ The allowed file extensions that cpplint will check
+
+ Examples:
+ --extensions=hpp,cpp
+
+ record-errors=file
+ Record errors to the given location. This file may later be used for error
+ suppression using suppress-errors flag.
+
+ suppress-errors=file
+ Errors listed in the given file will not be reported.
+"""
+
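+# An illustrative invocation (the file name below is hypothetical, not taken
+# from this tree):
+#
+#   ./clint.py --filter=-whitespace/braces --linelength=100 \
+#       --record-errors=errors.json src/nvim/example.c
+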
+# We categorize each error message we print. Here are the categories.
+# We want an explicit list so we can list them all in cpplint --filter=.
+# If you add a new error message with a new category, add it to the list
+# here! cpplint_unittest.py should tell you if you forget to do this.
+_ERROR_CATEGORIES = [
+ 'build/deprecated',
+ 'build/endif_comment',
+ 'build/header_guard',
+ 'build/include',
+ 'build/include_alpha',
+ 'build/include_order',
+ 'build/printf_format',
+ 'build/storage_class',
+ 'readability/alt_tokens',
+ 'readability/bool',
+ 'readability/braces',
+ 'readability/fn_size',
+ 'readability/multiline_comment',
+ 'readability/multiline_string',
+ 'readability/nolint',
+ 'readability/nul',
+ 'readability/todo',
+ 'readability/utf8',
+ 'readability/increment',
+ 'runtime/arrays',
+ 'runtime/int',
+ 'runtime/invalid_increment',
+ 'runtime/memset',
+ 'runtime/printf',
+ 'runtime/printf_format',
+ 'runtime/threadsafe_fn',
+ 'syntax/parenthesis',
+ 'whitespace/alignment',
+ 'whitespace/blank_line',
+ 'whitespace/braces',
+ 'whitespace/comma',
+ 'whitespace/comments',
+ 'whitespace/empty_conditional_body',
+ 'whitespace/empty_loop_body',
+ 'whitespace/end_of_line',
+ 'whitespace/ending_newline',
+ 'whitespace/indent',
+ 'whitespace/line_length',
+ 'whitespace/newline',
+ 'whitespace/operators',
+ 'whitespace/parens',
+ 'whitespace/semicolon',
+ 'whitespace/tab',
+ 'whitespace/todo',
+ 'whitespace/line_continuation',
+ 'whitespace/cast',
+]
+
+# The default state of the category filter. This is overridden by the --filter=
+# flag. By default all errors are on, so only add here categories that should be
+# off by default (i.e., categories that must be enabled by the --filter= flags).
+# All entries here should start with a '-' or '+', as in the --filter= flag.
+_DEFAULT_FILTERS = ['-build/include_alpha']
+
+# We used to check for high-bit characters, but after much discussion we
+# decided those were OK, as long as they were in UTF-8 and didn't represent
+# hard-coded international strings, which belong in a separate i18n file.
+
+# Alternative tokens and their replacements. For full list, see section 2.5
+# Alternative tokens [lex.digraph] in the C++ standard.
+#
+# Digraphs (such as '%:') are not included here since it's a mess to
+# match those on a word boundary.
+_ALT_TOKEN_REPLACEMENT = {
+ 'and': '&&',
+ 'bitor': '|',
+ 'or': '||',
+ 'xor': '^',
+ 'compl': '~',
+ 'bitand': '&',
+ 'and_eq': '&=',
+ 'or_eq': '|=',
+ 'xor_eq': '^=',
+ 'not': '!',
+ 'not_eq': '!='
+}
+
+# Compile regular expression that matches all the above keywords. The "[ =()]"
+# bit is meant to avoid matching these keywords outside of boolean expressions.
+#
+# False positives include C-style multi-line comments and multi-line strings
+# but those have always been troublesome for cpplint.
+_ALT_TOKEN_REPLACEMENT_PATTERN = re.compile(
+ r'[ =()](' + ('|'.join(_ALT_TOKEN_REPLACEMENT.keys())) + r')(?=[ (]|$)')
+
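+# Illustrative behaviour (these sample lines are not from any real source):
+#
+#   _ALT_TOKEN_REPLACEMENT_PATTERN.search('if (a and b)')     # matches 'and'
+#   _ALT_TOKEN_REPLACEMENT_PATTERN.search('int operand = 1')  # no match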
+
+# These constants define types of headers for use with
+# _IncludeState.CheckNextIncludeOrder().
+_C_SYS_HEADER = 1
+_OTHER_HEADER = 5
+
+# These constants define the current inline assembly state
+_NO_ASM = 0 # Outside of inline assembly block
+_INSIDE_ASM = 1 # Inside inline assembly block
+_END_ASM = 2 # Last line of inline assembly block
+_BLOCK_ASM = 3 # The whole block is an inline assembly block
+
+# Match start of assembly blocks
+_MATCH_ASM = re.compile(r'^\s*(?:asm|_asm|__asm|__asm__)'
+ r'(?:\s+(volatile|__volatile__))?'
+ r'\s*[{(]')
+
+
+_regexp_compile_cache = {}
+
+# Finds occurrences of NOLINT or NOLINT(...).
+_RE_SUPPRESSION = re.compile(r'\bNOLINT\b(\([^)]*\))?')
+
+# {str, set(int)}: a map from error categories to sets of linenumbers
+# on which those errors are expected and should be suppressed.
+_error_suppressions = {}
+
+# {(str, int)}: a set of error categories and line numbers which are expected to
+# be suppressed
+_error_suppressions_2 = set()
+
+# The allowed line length of files.
+# This is set by --linelength flag.
+_line_length = 80
+
+# The allowed extensions for file names
+# This is set by --extensions flag.
+_valid_extensions = set(['c', 'h'])
+
+
+def ParseNolintSuppressions(filename, raw_line, linenum, error):
+ """Updates the global list of error-suppressions.
+
+ Parses any NOLINT comments on the current line, updating the global
+ error_suppressions store. Reports an error if the NOLINT comment
+ was malformed.
+
+ Args:
+ filename: str, the name of the input file.
+ raw_line: str, the line of input text, with comments.
+ linenum: int, the number of the current line.
+ error: function, an error handler.
+ """
+ # FIXME(adonovan): "NOLINT(" is misparsed as NOLINT(*).
+ matched = _RE_SUPPRESSION.search(raw_line)
+ if matched:
+ category = matched.group(1)
+ if category in (None, '(*)'): # => "suppress all"
+ _error_suppressions.setdefault(None, set()).add(linenum)
+ else:
+ if category.startswith('(') and category.endswith(')'):
+ category = category[1:-1]
+ if category in _ERROR_CATEGORIES:
+ _error_suppressions.setdefault(
+ category, set()).add(linenum)
+ else:
+ error(filename, linenum, 'readability/nolint', 5,
+ 'Unknown NOLINT error category: %s' % category)
+
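+# For instance (an illustrative line, not taken from the sources), a line such
+# as
+#
+#   uint16_t len;  // NOLINT(runtime/int)
+#
+# adds its line number to _error_suppressions['runtime/int'], while a bare
+# "// NOLINT" suppresses every category on that line.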
+
+def ParseKnownErrorSuppressions(filename, raw_lines, linenum):
+ """Updates the global list of error-suppressions from suppress-file.
+
+ Args:
+ filename: str, the name of the input file.
+ raw_lines: list, all file lines
+ linenum: int, the number of the current line.
+ """
+ key = tuple(raw_lines[linenum - 1 if linenum else 0:linenum + 2])
+ if key in _cpplint_state.suppressed_errors[filename]:
+ for category in _cpplint_state.suppressed_errors[filename][key]:
+ _error_suppressions_2.add((category, linenum))
+
+
+def ResetNolintSuppressions():
+ "Resets the set of NOLINT suppressions to empty."
+ _error_suppressions.clear()
+
+
+def ResetKnownErrorSuppressions():
+ "Resets the set of suppress-errors=file suppressions to empty."
+ _error_suppressions_2.clear()
+
+
+def IsErrorSuppressedByNolint(category, linenum):
+ """Returns true if the specified error category is suppressed on this line.
+
+ Consults the global error_suppressions map populated by
+ ParseNolintSuppressions/ResetNolintSuppressions.
+
+ Args:
+ category: str, the category of the error.
+ linenum: int, the current line number.
+ Returns:
+ bool, True iff the error should be suppressed due to a NOLINT comment.
+ """
+ return (linenum in _error_suppressions.get(category, set()) or
+ linenum in _error_suppressions.get(None, set()))
+
+
+def IsErrorInSuppressedErrorsList(category, linenum):
+ """Returns true if the specified error is suppressed by suppress-errors=file
+
+ Args:
+ category: str, the category of the error.
+ linenum: int, the current line number.
+ Returns:
+      bool, True iff the error should be suppressed due to presence in
+ suppressions file.
+ """
+ return (category, linenum) in _error_suppressions_2
+
+
+def Match(pattern, s):
+ """Matches the string with the pattern, caching the compiled regexp."""
+ # The regexp compilation caching is inlined in both Match and Search for
+ # performance reasons; factoring it out into a separate function turns out
+ # to be noticeably expensive.
+ if pattern not in _regexp_compile_cache:
+ _regexp_compile_cache[pattern] = sre_compile.compile(pattern)
+ return _regexp_compile_cache[pattern].match(s)
+
+
+def Search(pattern, s):
+ """Searches the string for the pattern, caching the compiled regexp."""
+ if pattern not in _regexp_compile_cache:
+ _regexp_compile_cache[pattern] = sre_compile.compile(pattern)
+ return _regexp_compile_cache[pattern].search(s)
+
+
+class _IncludeState(dict):
+
+ """Tracks line numbers for includes, and the order in which includes appear.
+
+ As a dict, an _IncludeState object serves as a mapping between include
+ filename and line number on which that file was included.
+
+ Call CheckNextIncludeOrder() once for each header in the file, passing
+ in the type constants defined above.
+
+ """
+ # self._section will move monotonically through this set. If it ever
+ # needs to move backwards, CheckNextIncludeOrder will raise an error.
+ _INITIAL_SECTION = 0
+ _C_SECTION = 2
+ _OTHER_H_SECTION = 4
+
+ _TYPE_NAMES = {
+ _C_SYS_HEADER: 'C system header',
+ _OTHER_HEADER: 'other header',
+ }
+ _SECTION_NAMES = {
+ _INITIAL_SECTION: "... nothing. (This can't be an error.)",
+ _C_SECTION: 'C system header',
+ _OTHER_H_SECTION: 'other header',
+ }
+
+ def __init__(self):
+ dict.__init__(self)
+ self.ResetSection()
+
+ def ResetSection(self):
+ # The name of the current section.
+ self._section = self._INITIAL_SECTION
+ # The path of last found header.
+ self._last_header = ''
+
+ def SetLastHeader(self, header_path):
+ self._last_header = header_path
+
+ def CanonicalizeAlphabeticalOrder(self, header_path):
+ """Returns a path canonicalized for alphabetical comparison.
+
+ - replaces "-" with "_" so they both cmp the same.
+ - lowercase everything, just in case.
+
+ Args:
+ header_path: Path to be canonicalized.
+
+ Returns:
+ Canonicalized path.
+ """
+ return header_path.replace('-', '_').lower()
+
+ def CheckNextIncludeOrder(self, header_type):
+ """Returns a non-empty error message if the next header is out of order.
+
+ This function also updates the internal state to be ready to check
+ the next include.
+
+ Args:
+ header_type: One of the _XXX_HEADER constants defined above.
+
+ Returns:
+ The empty string if the header is in the right order, or an
+ error message describing what's wrong.
+
+ """
+ error_message = ('Found %s after %s' %
+ (self._TYPE_NAMES[header_type],
+ self._SECTION_NAMES[self._section]))
+
+ last_section = self._section
+
+ if header_type == _C_SYS_HEADER:
+ if self._section <= self._C_SECTION:
+ self._section = self._C_SECTION
+ else:
+ self._last_header = ''
+ return error_message
+ else:
+ assert header_type == _OTHER_HEADER
+ self._section = self._OTHER_H_SECTION
+
+ if last_section != self._section:
+ self._last_header = ''
+
+ return ''
+
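+# Illustrative walk-through (not part of the original script):
+#
+#   state = _IncludeState()
+#   state.CheckNextIncludeOrder(_C_SYS_HEADER)  # '' - C system headers first
+#   state.CheckNextIncludeOrder(_OTHER_HEADER)  # '' - other headers may follow
+#   state.CheckNextIncludeOrder(_C_SYS_HEADER)  # 'Found C system header after
+#                                               #  other header'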
+
+class _CppLintState(object):
+
+ """Maintains module-wide state.."""
+
+ def __init__(self):
+ self.verbose_level = 1 # global setting.
+ self.error_count = 0 # global count of reported errors
+ # filters to apply when emitting error messages
+ self.filters = _DEFAULT_FILTERS[:]
+ self.counting = 'total' # In what way are we counting errors?
+ self.errors_by_category = {} # string to int dict storing error counts
+
+ # output format:
+ # "emacs" - format that emacs can parse (default)
+ # "vs7" - format that Microsoft Visual Studio 7 can parse
+ self.output_format = 'emacs'
+
+ self.record_errors_file = None
+ self.suppressed_errors = collections.defaultdict(
+ lambda: collections.defaultdict(set))
+
+ def SetOutputFormat(self, output_format):
+ """Sets the output format for errors."""
+ self.output_format = output_format
+
+ def SetVerboseLevel(self, level):
+ """Sets the module's verbosity, and returns the previous setting."""
+ last_verbose_level = self.verbose_level
+ self.verbose_level = level
+ return last_verbose_level
+
+ def SetCountingStyle(self, counting_style):
+ """Sets the module's counting options."""
+ self.counting = counting_style
+
+ def SetFilters(self, filters):
+ """Sets the error-message filters.
+
+ These filters are applied when deciding whether to emit a given
+ error message.
+
+ Args:
+ filters: A string of comma-separated filters.
+ E.g. "+whitespace/indent".
+ Each filter should start with + or -; else we die.
+
+ Raises:
+ ValueError: The comma-separated filters did not all start with
+ '+' or '-'.
+ E.g. "-,+whitespace,-whitespace/indent,whitespace/bad"
+ """
+ # Default filters always have less priority than the flag ones.
+ self.filters = _DEFAULT_FILTERS[:]
+ for filt in filters.split(','):
+ clean_filt = filt.strip()
+ if clean_filt:
+ self.filters.append(clean_filt)
+ for filt in self.filters:
+ if not (filt.startswith('+') or filt.startswith('-')):
+ raise ValueError('Every filter in --filters must start with '
+ '+ or - (%s does not)' % filt)
+
+ def ResetErrorCounts(self):
+ """Sets the module's error statistic back to zero."""
+ self.error_count = 0
+ self.errors_by_category = {}
+
+ def IncrementErrorCount(self, category):
+ """Bumps the module's error statistic."""
+ self.error_count += 1
+ if self.counting in ('toplevel', 'detailed'):
+ if self.counting != 'detailed':
+ category = category.split('/')[0]
+ if category not in self.errors_by_category:
+ self.errors_by_category[category] = 0
+ self.errors_by_category[category] += 1
+
+ def PrintErrorCounts(self):
+ """Print a summary of errors by category, and the total."""
+ for category, count in self.errors_by_category.items():
+ sys.stderr.write('Category \'%s\' errors found: %d\n' %
+ (category, count))
+ sys.stderr.write('Total errors found: %d\n' % self.error_count)
+
+ def SuppressErrorsFrom(self, fname):
+ """Open file and read a list of suppressed errors from it"""
+ if fname is None:
+ return
+ try:
+ with open(fname) as fp:
+ for line in fp:
+ fname, lines, category = json.loads(line)
+ lines = tuple(lines)
+ self.suppressed_errors[fname][lines].add(category)
+ except IOError:
+ pass
+
+ def RecordErrorsTo(self, fname):
+ """Open file with suppressed errors for writing"""
+ if fname is None:
+ return
+ self.record_errors_file = open(fname, 'w')
+
+_cpplint_state = _CppLintState()
+
+
+def _OutputFormat():
+ """Gets the module's output format."""
+ return _cpplint_state.output_format
+
+
+def _SetOutputFormat(output_format):
+ """Sets the module's output format."""
+ _cpplint_state.SetOutputFormat(output_format)
+
+
+def _VerboseLevel():
+ """Returns the module's verbosity setting."""
+ return _cpplint_state.verbose_level
+
+
+def _SetVerboseLevel(level):
+ """Sets the module's verbosity, and returns the previous setting."""
+ return _cpplint_state.SetVerboseLevel(level)
+
+
+def _SetCountingStyle(level):
+ """Sets the module's counting options."""
+ _cpplint_state.SetCountingStyle(level)
+
+
+def _SuppressErrorsFrom(fname):
+ """Sets the file containing suppressed errors."""
+ _cpplint_state.SuppressErrorsFrom(fname)
+
+
+def _RecordErrorsTo(fname):
+ """Sets the file containing suppressed errors to write to."""
+ _cpplint_state.RecordErrorsTo(fname)
+
+
+def _Filters():
+ """Returns the module's list of output filters, as a list."""
+ return _cpplint_state.filters
+
+
+def _SetFilters(filters):
+ """Sets the module's error-message filters.
+
+ These filters are applied when deciding whether to emit a given
+ error message.
+
+ Args:
+ filters: A string of comma-separated filters (eg "whitespace/indent").
+ Each filter should start with + or -; else we die.
+ """
+ _cpplint_state.SetFilters(filters)
+
+
+class _FunctionState(object):
+
+ """Tracks current function name and the number of lines in its body."""
+
+ _NORMAL_TRIGGER = 250 # for --v=0, 500 for --v=1, etc.
+    _TEST_TRIGGER = 400     # about 60% more than _NORMAL_TRIGGER.
+
+ def __init__(self):
+ self.in_a_function = False
+ self.lines_in_function = 0
+ self.current_function = ''
+
+ def Begin(self, function_name):
+ """Start analyzing function body.
+
+ Args:
+ function_name: The name of the function being tracked.
+ """
+ self.in_a_function = True
+ self.lines_in_function = 0
+ self.current_function = function_name
+
+ def Count(self):
+ """Count line in current function body."""
+ if self.in_a_function:
+ self.lines_in_function += 1
+
+ def Check(self, error, filename, linenum):
+ """Report if too many lines in function body.
+
+ Args:
+ error: The function to call with any errors found.
+ filename: The name of the current file.
+ linenum: The number of the line to check.
+ """
+ if Match(r'T(EST|est)', self.current_function):
+ base_trigger = self._TEST_TRIGGER
+ else:
+ base_trigger = self._NORMAL_TRIGGER
+ trigger = base_trigger * 2**_VerboseLevel()
+
+ if self.lines_in_function > trigger:
+ error_level = int(
+ math.log(self.lines_in_function / base_trigger, 2))
+            # e.g. for base_trigger 250: 500 => 1, 1000 => 2, 2000 => 3, ...
+ if error_level > 5:
+ error_level = 5
+ error(filename, linenum, 'readability/fn_size', error_level,
+ 'Small and focused functions are preferred:'
+ ' %s has %d non-comment lines'
+ ' (error triggered by exceeding %d lines).' % (
+ self.current_function, self.lines_in_function, trigger))
+
+ def End(self):
+ """Stop analyzing function body."""
+ self.in_a_function = False
+
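+# Illustrative numbers for _FunctionState.Check (not taken from a real run):
+# at --v=0 a normal function triggers at 250 non-comment lines, so a 600-line
+# body is reported with confidence int(log2(600 / 250)) == 1, while a test
+# function (_TEST_TRIGGER = 400) is not reported until it exceeds 400 lines.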
+
+class FileInfo:
+
+ """Provides utility functions for filenames.
+
+ FileInfo provides easy access to the components of a file's path
+ relative to the project root.
+ """
+
+ def __init__(self, filename):
+ self._filename = filename
+
+ def FullName(self):
+ """Make Windows paths like Unix."""
+ return os.path.abspath(self._filename).replace('\\', '/')
+
+ def RelativePath(self):
+ """FullName with <prefix>/src/nvim/ chopped off."""
+ fullname = self.FullName()
+
+ if os.path.exists(fullname):
+ project_dir = os.path.dirname(fullname)
+
+ root_dir = os.path.dirname(fullname)
+ while (root_dir != os.path.dirname(root_dir) and
+ not os.path.exists(os.path.join(root_dir, ".git"))):
+ root_dir = os.path.dirname(root_dir)
+
+ if os.path.exists(os.path.join(root_dir, ".git")):
+ root_dir = os.path.join(root_dir, "src", "nvim")
+ prefix = os.path.commonprefix([root_dir, project_dir])
+ return fullname[len(prefix) + 1:]
+
+ # Don't know what to do; header guard warnings may be wrong...
+ return fullname
+
+ def Split(self):
+ """Splits the file into the directory, basename, and extension.
+
+ For 'chrome/browser/browser.cc', Split() would
+ return ('chrome/browser', 'browser', '.cc')
+
+ Returns:
+ A tuple of (directory, basename, extension).
+ """
+
+ googlename = self.RelativePath()
+ project, rest = os.path.split(googlename)
+ return (project,) + os.path.splitext(rest)
+
+ def BaseName(self):
+ """File base name - text after the final slash, before final period."""
+ return self.Split()[1]
+
+ def Extension(self):
+ """File extension - text following the final period."""
+ return self.Split()[2]
+
+
+def _ShouldPrintError(category, confidence, linenum):
+ """If confidence >= verbose, category passes filter and isn't suppressed."""
+
+ # There are three ways we might decide not to print an error message:
+ # a "NOLINT(category)" comment appears in the source,
+ # the verbosity level isn't high enough, or the filters filter it out.
+ if IsErrorSuppressedByNolint(category, linenum):
+ return False
+ if IsErrorInSuppressedErrorsList(category, linenum):
+ return False
+ if confidence < _cpplint_state.verbose_level:
+ return False
+
+ is_filtered = False
+ for one_filter in _Filters():
+ if one_filter.startswith('-'):
+ if category.startswith(one_filter[1:]):
+ is_filtered = True
+ elif one_filter.startswith('+'):
+ if category.startswith(one_filter[1:]):
+ is_filtered = False
+ else:
+ assert False # should have been checked for in SetFilter.
+ if is_filtered:
+ return False
+
+ return True
+
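+# Illustrative filter evaluation (not part of the original script): with
+# --filter=-whitespace,+whitespace/braces an error in category
+# 'whitespace/braces' is first filtered out by '-whitespace' and then
+# re-enabled by '+whitespace/braces', so it is still printed.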
+
+def Error(filename, linenum, category, confidence, message):
+ """Logs the fact we've found a lint error.
+
+ We log where the error was found, and also our confidence in the error,
+ that is, how certain we are this is a legitimate style regression, and
+ not a misidentification or a use that's sometimes justified.
+
+ False positives can be suppressed by the use of
+ "cpplint(category)" comments on the offending line. These are
+ parsed into _error_suppressions.
+
+ Args:
+ filename: The name of the file containing the error.
+ linenum: The number of the line containing the error.
+ category: A string used to describe the "category" this bug
+ falls under: "whitespace", say, or "runtime". Categories
+ may have a hierarchy separated by slashes: "whitespace/indent".
+ confidence: A number from 1-5 representing a confidence score for
+ the error, with 5 meaning that we are certain of the problem,
+ and 1 meaning that it could be a legitimate construct.
+ message: The error message.
+ """
+ if _ShouldPrintError(category, confidence, linenum):
+ _cpplint_state.IncrementErrorCount(category)
+ if _cpplint_state.output_format == 'vs7':
+ sys.stderr.write('%s(%s): %s [%s] [%d]\n' % (
+ filename, linenum, message, category, confidence))
+ elif _cpplint_state.output_format == 'eclipse':
+ sys.stderr.write('%s:%s: warning: %s [%s] [%d]\n' % (
+ filename, linenum, message, category, confidence))
+ else:
+ sys.stderr.write('%s:%s: %s [%s] [%d]\n' % (
+ filename, linenum, message, category, confidence))
+
+
+# Matches standard C++ escape sequences per 2.13.2.3 of the C++ standard.
+_RE_PATTERN_CLEANSE_LINE_ESCAPES = re.compile(
+ r'\\([abfnrtv?"\\\']|\d+|x[0-9a-fA-F]+)')
+# Matches strings. Escape codes should already be removed by ESCAPES.
+_RE_PATTERN_CLEANSE_LINE_DOUBLE_QUOTES = re.compile(r'"([^"]*)"')
+# Matches characters. Escape codes should already be removed by ESCAPES.
+_RE_PATTERN_CLEANSE_LINE_SINGLE_QUOTES = re.compile(r"'(.)'")
+# Matches multi-line C++ comments.
+# This RE is a little bit more complicated than one might expect, because we
+# have to take care of space removal so we can handle comments inside
+# statements better.
+# The current rule is: We only clear spaces from both sides when we're at the
+# end of the line. Otherwise, we try to remove spaces from the right side,
+# if this doesn't work we try on the left side but only if there's a
+# non-word character on the right.
+_RE_PATTERN_CLEANSE_LINE_C_COMMENTS = re.compile(
+ r"""(\s*/\*.*\*/\s*$|
+ /\*.*\*/\s+|
+ \s+/\*.*\*/(?=\W)|
+ /\*.*\*/)""", re.VERBOSE)
+
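+# Illustrative example (the statement below is hypothetical, not from any
+# source file):
+#
+#   _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', 'a = b /* mid */ + c;')
+#   # -> 'a = b + c;'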
+
+def IsCppString(line):
+ """Does line terminate so, that the next symbol is in string constant.
+
+ This function does not consider single-line nor multi-line comments.
+
+ Args:
+      line: a partial line of code (characters 0..n of the full line).
+
+ Returns:
+ True, if next character appended to 'line' is inside a
+ string constant.
+ """
+
+ line = line.replace(r'\\', 'XX') # after this, \\" does not match to \"
+ return ((line.count('"') - line.count(r'\"') - line.count("'\"'")) & 1) == 1
+
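+# For example (illustrative calls, not present in the original script):
+#
+#   IsCppString('printf("%s: ')       # True - one unescaped double quote seen
+#   IsCppString('printf("%s: ", s)')  # False - quotes are balanced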
+
+def FindNextMultiLineCommentStart(lines, lineix):
+ """Find the beginning marker for a multiline comment."""
+ while lineix < len(lines):
+ if lines[lineix].strip().startswith('/*'):
+ # Only return this marker if the comment goes beyond this line
+ if lines[lineix].strip().find('*/', 2) < 0:
+ return lineix
+ lineix += 1
+ return len(lines)
+
+
+def FindNextMultiLineCommentEnd(lines, lineix):
+ """We are inside a comment, find the end marker."""
+ while lineix < len(lines):
+ if lines[lineix].strip().endswith('*/'):
+ return lineix
+ lineix += 1
+ return len(lines)
+
+
+def RemoveMultiLineCommentsFromRange(lines, begin, end):
+ """Clears a range of lines for multi-line comments."""
+ # Having // dummy comments makes the lines non-empty, so we will not get
+ # unnecessary blank line warnings later in the code.
+ for i in range(begin, end):
+ lines[i] = '// dummy'
+
+
+def RemoveMultiLineComments(filename, lines, error):
+ """Removes multiline (c-style) comments from lines."""
+ lineix = 0
+ while lineix < len(lines):
+ lineix_begin = FindNextMultiLineCommentStart(lines, lineix)
+ if lineix_begin >= len(lines):
+ return
+ lineix_end = FindNextMultiLineCommentEnd(lines, lineix_begin)
+ if lineix_end >= len(lines):
+ error(filename, lineix_begin + 1, 'readability/multiline_comment',
+ 5, 'Could not find end of multi-line comment')
+ return
+ RemoveMultiLineCommentsFromRange(lines, lineix_begin, lineix_end + 1)
+ lineix = lineix_end + 1
+
+
+def CleanseComments(line):
+ """Removes //-comments and single-line C-style /* */ comments.
+
+ Args:
+ line: A line of C++ source.
+
+ Returns:
+ The line with single-line comments removed.
+ """
+ commentpos = line.find('//')
+ if commentpos != -1 and not IsCppString(line[:commentpos]):
+ line = line[:commentpos].rstrip()
+ # get rid of /* ... */
+ return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line)
+
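+# Doctest-style sketch (hypothetical inputs, not collected by a test runner):
+#   >>> CleanseComments('a = b;  // set a')
+#   'a = b;'
+#   >>> CleanseComments('x = 1; /* tmp */ y = 2;')
+#   'x = 1; y = 2;'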
+
+class CleansedLines(object):
+
+ """Holds 5 copies of all lines with different preprocessing applied to them.
+
+    1) elided member contains lines without strings and comments,
+    2) lines member contains lines without comments,
+    3) raw_lines member contains all the lines with multiline comments replaced,
+    4) init_lines member contains all the lines without processing, and
+    5) elided_with_space_strings is like elided, but with string literals
+       looking like `" "`.
+    All these five members are of <type 'list'>, and of the same length.
+ """
+
+ def __init__(self, lines, init_lines):
+ self.elided = []
+ self.lines = []
+ self.raw_lines = lines
+ self.num_lines = len(lines)
+ self.init_lines = init_lines
+ self.lines_without_raw_strings = lines
+ self.elided_with_space_strings = []
+ for linenum in range(len(self.lines_without_raw_strings)):
+ self.lines.append(CleanseComments(
+ self.lines_without_raw_strings[linenum]))
+ elided = self._CollapseStrings(
+ self.lines_without_raw_strings[linenum])
+ self.elided.append(CleanseComments(elided))
+ elided = CleanseComments(self._CollapseStrings(
+ self.lines_without_raw_strings[linenum], True))
+ self.elided_with_space_strings.append(elided)
+
+ def NumLines(self):
+ """Returns the number of lines represented."""
+ return self.num_lines
+
+ @staticmethod
+ def _CollapseStrings(elided, keep_spaces=False):
+ """Collapses strings and chars on a line to simple "" or '' blocks.
+
+ We nix strings first so we're not fooled by text like '"http://"'
+
+ Args:
+ elided: The line being processed.
+            keep_spaces: If true, replace the collapsed string and character
+                contents with spaces of the same length instead of dropping
+                them entirely.
+
+ Returns:
+ The line with collapsed strings.
+ """
+ if not _RE_PATTERN_INCLUDE.match(elided):
+ # Remove escaped characters first to make quote/single quote
+ # collapsing basic. Things that look like escaped characters
+ # shouldn't occur outside of strings and chars.
+ elided = _RE_PATTERN_CLEANSE_LINE_ESCAPES.sub(
+ '' if not keep_spaces else lambda m: ' ' * len(m.group(0)),
+ elided)
+ elided = _RE_PATTERN_CLEANSE_LINE_SINGLE_QUOTES.sub(
+ "''" if not keep_spaces
+ else lambda m: "'" + (' ' * len(m.group(1))) + "'",
+ elided)
+ elided = _RE_PATTERN_CLEANSE_LINE_DOUBLE_QUOTES.sub(
+ '""' if not keep_spaces
+ else lambda m: '"' + (' ' * len(m.group(1))) + '"',
+ elided)
+ return elided
+
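+# Small sketch of what _CollapseStrings produces (illustrative line, assuming
+# it is not an #include): 'printf("%s", s);' collapses to 'printf("", s);',
+# or to 'printf("  ", s);' when keep_spaces is true, so later checks see
+# string contents as empty or as same-length blanks.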
+
+BRACES = {
+ '(': ')',
+ '{': '}',
+ '[': ']',
+ # '<': '>', C++-specific pair removed
+}
+
+
+CLOSING_BRACES = dict(((v, k) for k, v in BRACES.items()))
+
+
+def GetExprBracesPosition(clean_lines, linenum, pos):
+ """List positions of all kinds of braces
+
+ If input points to ( or { or [ then function proceeds until finding the
+ position which closes it.
+
+ Args:
+ clean_lines: A CleansedLines instance containing the file.
+ linenum: Current line number.
+ pos: A position on the line.
+
+ Yields:
+      A tuple (linenum, pos, brace, depth) that points to each brace.
+      Additionally, (linenum, pos, 's', depth) is yielded at the start of each
+      new line, (linenum, pos, 'e', depth) at each line end, and
+      (linenum, pos, None, None) at the very end.
+ """
+ depth = 0
+ yielded_line_start = True
+ startpos = pos
+ while linenum < clean_lines.NumLines() - 1:
+ line = clean_lines.elided_with_space_strings[linenum]
+ if not line.startswith('#') or yielded_line_start:
+            # Ignore #ifdefs, but not if it is a macro that is being checked
+ for i, brace in enumerate(line[startpos:]):
+ pos = i + startpos
+ if brace != ' ' and not yielded_line_start:
+ yield (linenum, pos, 's', depth)
+ yielded_line_start = True
+ if brace in BRACES:
+ depth += 1
+ yield (linenum, pos, brace, depth)
+ elif brace in CLOSING_BRACES:
+ yield (linenum, pos, brace, depth)
+ depth -= 1
+ if depth == 0:
+ yield (linenum, pos, None, None)
+ return
+ yield (linenum, len(line) - 1, 'e', depth)
+ yielded_line_start = False
+ startpos = 0
+ linenum += 1
+
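+# Illustrative trace of the generator above (hypothetical line, not the last
+# one in the file): for 'f(a, (b))' with pos at the first '(', it yields
+# (linenum, 1, '(', 1), (linenum, 5, '(', 2), (linenum, 7, ')', 2),
+# (linenum, 8, ')', 1) and finally (linenum, 8, None, None).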
+
+def FindEndOfExpressionInLine(line, startpos, depth, startchar, endchar):
+ """Find the position just after the matching endchar.
+
+ Args:
+ line: a CleansedLines line.
+ startpos: start searching at this position.
+ depth: nesting level at startpos.
+ startchar: expression opening character.
+ endchar: expression closing character.
+
+ Returns:
+ On finding matching endchar: (index just after matching endchar, 0)
+ Otherwise: (-1, new depth at end of this line)
+ """
+ for i in range(startpos, len(line)):
+ if line[i] == startchar:
+ depth += 1
+ elif line[i] == endchar:
+ depth -= 1
+ if depth == 0:
+ return (i + 1, 0)
+ return (-1, depth)
+
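+# Doctest-style sketch (values are illustrative only):
+#   >>> FindEndOfExpressionInLine('f(a, b)', 1, 0, '(', ')')
+#   (7, 0)
+#   >>> FindEndOfExpressionInLine('f(a,', 1, 0, '(', ')')
+#   (-1, 1)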
+
+def CloseExpression(clean_lines, linenum, pos):
+ """If input points to ( or { or [, finds the position that closes it.
+
+ If lines[linenum][pos] points to a '(' or '{' or '[', finds the
+ linenum/pos that correspond to the closing of the expression.
+
+ Args:
+ clean_lines: A CleansedLines instance containing the file.
+ linenum: The number of the line to check.
+ pos: A position on the line.
+
+ Returns:
+ A tuple (line, linenum, pos) pointer *past* the closing brace, or
+ (line, len(lines), -1) if we never find a close. Note we ignore
+ strings and comments when matching; and the line we return is the
+ 'cleansed' line at linenum.
+ """
+
+ line = clean_lines.elided[linenum]
+ startchar = line[pos]
+ if startchar not in BRACES:
+ return (line, clean_lines.NumLines(), -1)
+ endchar = BRACES[startchar]
+
+ # Check first line
+ (end_pos, num_open) = FindEndOfExpressionInLine(
+ line, pos, 0, startchar, endchar)
+ if end_pos > -1:
+ return (line, linenum, end_pos)
+
+ # Continue scanning forward
+ while linenum < clean_lines.NumLines() - 1:
+ linenum += 1
+ line = clean_lines.elided[linenum]
+ (end_pos, num_open) = FindEndOfExpressionInLine(
+ line, 0, num_open, startchar, endchar)
+ if end_pos > -1:
+ return (line, linenum, end_pos)
+
+ # Did not find endchar before end of file, give up
+ return (line, clean_lines.NumLines(), -1)
+
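+# Rough usage sketch (hypothetical elided line): if the line is 'call(a, b) {'
+# and pos indexes the '(', CloseExpression returns the same line, the same
+# linenum, and 10 -- the position just past the matching ')'.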
+
+def FindStartOfExpressionInLine(line, endpos, depth, startchar, endchar):
+ """Find position at the matching startchar.
+
+ This is almost the reverse of FindEndOfExpressionInLine, but note
+    that the input position and returned position differ by 1.
+
+ Args:
+ line: a CleansedLines line.
+ endpos: start searching at this position.
+ depth: nesting level at endpos.
+ startchar: expression opening character.
+ endchar: expression closing character.
+
+ Returns:
+ On finding matching startchar: (index at matching startchar, 0)
+ Otherwise: (-1, new depth at beginning of this line)
+ """
+ for i in range(endpos, -1, -1):
+ if line[i] == endchar:
+ depth += 1
+ elif line[i] == startchar:
+ depth -= 1
+ if depth == 0:
+ return (i, 0)
+ return (-1, depth)
+
+
+def ReverseCloseExpression(clean_lines, linenum, pos):
+ """If input points to ) or } or ] or >, finds the position that opens it.
+
+ If lines[linenum][pos] points to a ')' or '}' or ']' or '>', finds the
+ linenum/pos that correspond to the opening of the expression.
+
+ Args:
+ clean_lines: A CleansedLines instance containing the file.
+ linenum: The number of the line to check.
+ pos: A position on the line.
+
+ Returns:
+ A tuple (line, linenum, pos) pointer *at* the opening brace, or
+ (line, 0, -1) if we never find the matching opening brace. Note
+ we ignore strings and comments when matching; and the line we
+ return is the 'cleansed' line at linenum.
+ """
+ line = clean_lines.elided[linenum]
+ endchar = line[pos]
+ if endchar not in ')}]>':
+ return (line, 0, -1)
+ if endchar == ')':
+ startchar = '('
+ if endchar == ']':
+ startchar = '['
+ if endchar == '}':
+ startchar = '{'
+ if endchar == '>':
+ startchar = '<'
+
+ # Check last line
+ (start_pos, num_open) = FindStartOfExpressionInLine(
+ line, pos, 0, startchar, endchar)
+ if start_pos > -1:
+ return (line, linenum, start_pos)
+
+ # Continue scanning backward
+ while linenum > 0:
+ linenum -= 1
+ line = clean_lines.elided[linenum]
+ (start_pos, num_open) = FindStartOfExpressionInLine(
+ line, len(line) - 1, num_open, startchar, endchar)
+ if start_pos > -1:
+ return (line, linenum, start_pos)
+
+ # Did not find startchar before beginning of file, give up
+ return (line, 0, -1)
+
+
+def GetHeaderGuardCPPVariable(filename):
+ """Returns the CPP variable that should be used as a header guard.
+
+ Args:
+ filename: The name of a C++ header file.
+
+ Returns:
+ The CPP variable that should be used as a header guard in the
+ named file.
+
+ """
+
+ # Restores original filename in case that cpplint is invoked from Emacs's
+ # flymake.
+ filename = re.sub(r'_flymake\.h$', '.h', filename)
+ filename = re.sub(r'/\.flymake/([^/]*)$', r'/\1', filename)
+
+ fileinfo = FileInfo(filename)
+ file_path_from_root = fileinfo.RelativePath()
+ return 'NVIM_' + re.sub(r'[-./\s]', '_', file_path_from_root).upper()
+
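+# For example: if FileInfo.RelativePath() yields 'api/vim.h' for some header,
+# this function returns 'NVIM_API_VIM_H' -- the path upper-cased with '-',
+# '.', '/' and whitespace turned into underscores (example path assumed).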
+
+def CheckForHeaderGuard(filename, lines, error):
+ """Checks that the file contains a header guard.
+
+    Logs an error if no #ifndef header guard is present, or if the guard
+    does not use the CPP variable derived from the file's full pathname.
+
+ Args:
+ filename: The name of the C++ header file.
+ lines: An array of strings, each representing a line of the file.
+ error: The function to call with any errors found.
+ """
+
+ cppvar = GetHeaderGuardCPPVariable(filename)
+
+ ifndef = None
+ ifndef_linenum = 0
+ define = None
+ endif = None
+ endif_linenum = 0
+ for linenum, line in enumerate(lines):
+ linesplit = line.split()
+ if len(linesplit) >= 2:
+ # find the first occurrence of #ifndef and #define, save arg
+ if not ifndef and linesplit[0] == '#ifndef':
+ # set ifndef to the header guard presented on the #ifndef line.
+ ifndef = linesplit[1]
+ ifndef_linenum = linenum
+ if not define and linesplit[0] == '#define':
+ define = linesplit[1]
+ # find the last occurrence of #endif, save entire line
+ if line.startswith('#endif'):
+ endif = line
+ endif_linenum = linenum
+
+ if not ifndef:
+ error(filename, 0, 'build/header_guard', 5,
+ 'No #ifndef header guard found, suggested CPP variable is: %s' %
+ cppvar)
+ return
+
+ if not define:
+ error(filename, 0, 'build/header_guard', 5,
+ 'No #define header guard found, suggested CPP variable is: %s' %
+ cppvar)
+ return
+
+ # The guard should be PATH_FILE_H_, but we also allow PATH_FILE_H__
+ # for backward compatibility.
+ if ifndef != cppvar:
+ error_level = 0
+ if ifndef != cppvar + '_':
+ error_level = 5
+
+ ParseNolintSuppressions(filename, lines[ifndef_linenum], ifndef_linenum,
+ error)
+ error(filename, ifndef_linenum, 'build/header_guard', error_level,
+ '#ifndef header guard has wrong style, please use: %s' % cppvar)
+
+ if define != ifndef:
+ error(filename, 0, 'build/header_guard', 5,
+ '#ifndef and #define don\'t match, suggested CPP variable is: %s'
+ % cppvar)
+ return
+
+ if endif != ('#endif // %s' % cppvar):
+ error_level = 0
+ if endif != ('#endif // %s' % (cppvar + '_')):
+ error_level = 5
+
+ ParseNolintSuppressions(filename, lines[endif_linenum], endif_linenum,
+ error)
+ error(filename, endif_linenum, 'build/header_guard', error_level,
+ '#endif line should be "#endif // %s"' % cppvar)
+
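+# Sketch of a guard this check accepts (file name hypothetical): '#ifndef
+# NVIM_FOO_H' followed by '#define NVIM_FOO_H', closed by an '#endif' whose
+# trailing '//' comment repeats the same cppvar, compared literally above.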
+
+def CheckForBadCharacters(filename, lines, error):
+ """Logs an error for each line containing bad characters.
+
+ Two kinds of bad characters:
+
+ 1. Unicode replacement characters: These indicate that either the file
+ contained invalid UTF-8 (likely) or Unicode replacement characters (which
+ it shouldn't). Note that it's possible for this to throw off line
+ numbering if the invalid UTF-8 occurred adjacent to a newline.
+
+ 2. NUL bytes. These are problematic for some tools.
+
+ Args:
+ filename: The name of the current file.
+ lines: An array of strings, each representing a line of the file.
+ error: The function to call with any errors found.
+ """
+ for linenum, line in enumerate(lines):
+ if u'\ufffd' in line:
+ error(filename, linenum, 'readability/utf8', 5,
+ 'Line contains invalid UTF-8'
+ ' (or Unicode replacement character).')
+ if '\0' in line:
+ error(filename, linenum, 'readability/nul',
+ 5, 'Line contains NUL byte.')
+
+
+def CheckForNewlineAtEOF(filename, lines, error):
+ """Logs an error if there is no newline char at the end of the file.
+
+ Args:
+ filename: The name of the current file.
+ lines: An array of strings, each representing a line of the file.
+ error: The function to call with any errors found.
+ """
+
+ # The array lines() was created by adding two newlines to the
+ # original file (go figure), then splitting on \n.
+ # To verify that the file ends in \n, we just have to make sure the
+ # last-but-two element of lines() exists and is empty.
+ if len(lines) < 3 or lines[-2]:
+ error(filename, len(lines) - 2, 'whitespace/ending_newline', 5,
+ 'Could not find a newline character at the end of the file.')
+
+
+def CheckForMultilineCommentsAndStrings(filename, clean_lines, linenum, error):
+ """Logs an error if we see /* ... */ or "..." that extend past one line.
+
+ /* ... */ comments are legit inside macros, for one line.
+ Otherwise, we prefer // comments, so it's ok to warn about the
+ other. Likewise, it's ok for strings to extend across multiple
+ lines, as long as a line continuation character (backslash)
+ terminates each line. Although not currently prohibited by the C++
+ style guide, it's ugly and unnecessary. We don't do well with either
+ in this lint program, so we warn about both.
+
+ Args:
+ filename: The name of the current file.
+ clean_lines: A CleansedLines instance containing the file.
+ linenum: The number of the line to check.
+ error: The function to call with any errors found.
+ """
+ line = clean_lines.elided[linenum]
+
+ # Remove all \\ (escaped backslashes) from the line. They are OK, and the
+ # second (escaped) slash may trigger later \" detection erroneously.
+ line = line.replace('\\\\', '')
+
+ if line.count('/*') > line.count('*/'):
+ error(filename, linenum, 'readability/multiline_comment', 5,
+ 'Complex multi-line /*...*/-style comment found. '
+ 'Lint may give bogus warnings. '
+ 'Consider replacing these with //-style comments, '
+ 'with #if 0...#endif, '
+ 'or with more clearly structured multi-line comments.')
+
+ if (line.count('"') - line.count('\\"')) % 2:
+ error(filename, linenum, 'readability/multiline_string', 5,
+ 'Multi-line string ("...") found. This lint script doesn\'t '
+ 'do well with such strings, and may give bogus warnings. '
+ 'Use C++11 raw strings or concatenation instead.')
+
+
+def CheckForOldStyleComments(filename, line, linenum, error):
+ """Logs an error if we see /*-style comment
+
+ Args:
+ filename: The name of the current file.
+ line: The text of the line to check.
+ linenum: The number of the line to check.
+ error: The function to call with any errors found.
+ """
+ if line.find('/*') >= 0 and line[-1] != '\\':
+ error(filename, linenum, 'readability/old_style_comment', 5,
+ '/*-style comment found, it should be replaced with //-style. '
+ '/*-style comments are only allowed inside macros. '
+ 'Note that you should not use /*-style comments to document '
+              'macros themselves, use doxygen-style comments for this.')
+
+
+threading_list = (
+ ('asctime(', 'os_asctime_r('),
+ ('ctime(', 'os_ctime_r('),
+ ('getgrgid(', 'os_getgrgid_r('),
+ ('getgrnam(', 'os_getgrnam_r('),
+ ('getlogin(', 'os_getlogin_r('),
+ ('getpwnam(', 'os_getpwnam_r('),
+ ('getpwuid(', 'os_getpwuid_r('),
+ ('gmtime(', 'os_gmtime_r('),
+ ('localtime(', 'os_localtime_r('),
+ ('strtok(', 'os_strtok_r('),
+ ('ttyname(', 'os_ttyname_r('),
+ ('asctime_r(', 'os_asctime_r('),
+ ('ctime_r(', 'os_ctime_r('),
+ ('getgrgid_r(', 'os_getgrgid_r('),
+ ('getgrnam_r(', 'os_getgrnam_r('),
+ ('getlogin_r(', 'os_getlogin_r('),
+ ('getpwnam_r(', 'os_getpwnam_r('),
+ ('getpwuid_r(', 'os_getpwuid_r('),
+ ('gmtime_r(', 'os_gmtime_r('),
+ ('localtime_r(', 'os_localtime_r('),
+ ('strtok_r(', 'os_strtok_r('),
+ ('ttyname_r(', 'os_ttyname_r('),
+)
+
+
+def CheckPosixThreading(filename, clean_lines, linenum, error):
+ """Checks for calls to thread-unsafe functions.
+
+    Much code was originally written without consideration of
+    multi-threading. Engineers also tend to rely on their old experience;
+    they learned POSIX before the threading extensions were added. These
+    tests guide engineers to use thread-safe functions (when using
+    POSIX directly).
+
+ Args:
+ filename: The name of the current file.
+ clean_lines: A CleansedLines instance containing the file.
+ linenum: The number of the line to check.
+ error: The function to call with any errors found.
+ """
+ line = clean_lines.elided[linenum]
+ for single_thread_function, multithread_safe_function in threading_list:
+ ix = line.find(single_thread_function)
+ # Comparisons made explicit for clarity -- pylint:
+ # disable=g-explicit-bool-comparison
+ if ix >= 0 and (ix == 0 or (not line[ix - 1].isalnum() and
+ line[ix - 1] not in ('_', '.', '>'))):
+ error(filename, linenum, 'runtime/threadsafe_fn', 2,
+ 'Use ' + multithread_safe_function +
+ '...) instead of ' + single_thread_function +
+ '...). If it is missing, consider implementing it;' +
+ ' see os_localtime_r for an example.')
+
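+# Hedged example (hypothetical source line): 'tm = localtime(&t);' is flagged
+# as runtime/threadsafe_fn with the suggestion os_localtime_r(...), whereas
+# 'os_localtime_r(&t, &res)' and 'foo.localtime(x)' are skipped because the
+# character before the call rules them out.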
+
+memory_functions = (
+ ('malloc(', 'xmalloc('),
+ ('calloc(', 'xcalloc('),
+ ('realloc(', 'xrealloc('),
+ ('strdup(', 'xstrdup('),
+ ('free(', 'xfree('),
+)
+memory_ignore_pattern = re.compile(r'src/nvim/memory.c$')
+
+
+def CheckMemoryFunctions(filename, clean_lines, linenum, error):
+ """Checks for calls to invalid functions.
+
+ Args:
+ filename: The name of the current file.
+ clean_lines: A CleansedLines instance containing the file.
+ linenum: The number of the line to check.
+ error: The function to call with any errors found.
+ """
+ if memory_ignore_pattern.search(filename):
+ return
+ line = clean_lines.elided[linenum]
+ for function, suggested_function in memory_functions:
+ ix = line.find(function)
+ # Comparisons made explicit for clarity -- pylint:
+ # disable=g-explicit-bool-comparison
+ if ix >= 0 and (ix == 0 or (not line[ix - 1].isalnum() and
+ line[ix - 1] not in ('_', '.', '>'))):
+ error(filename, linenum, 'runtime/memory_fn', 2,
+ 'Use ' + suggested_function +
+ '...) instead of ' + function + '...).')
+
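+# Sketch of the intent (hypothetical line): 'p = malloc(n);' is reported as
+# runtime/memory_fn with the suggestion xmalloc(...), while src/nvim/memory.c
+# itself is exempt via memory_ignore_pattern, and 'mem.free(p)' is skipped
+# because of the '.' right before the call.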
+
+# Matches invalid increment: *count++, which moves pointer instead of
+# incrementing a value.
+_RE_PATTERN_INVALID_INCREMENT = re.compile(
+ r'^\s*\*\w+(\+\+|--);')
+
+
+class _BlockInfo(object):
+
+ """Stores information about a generic block of code."""
+
+ def __init__(self, seen_open_brace):
+ self.seen_open_brace = seen_open_brace
+ self.open_parentheses = 0
+ self.inline_asm = _NO_ASM
+
+
+class _PreprocessorInfo(object):
+
+ """Stores checkpoints of nesting stacks when #if/#else is seen."""
+
+ def __init__(self, stack_before_if):
+ # The entire nesting stack before #if
+ self.stack_before_if = stack_before_if
+
+ # The entire nesting stack up to #else
+ self.stack_before_else = []
+
+ # Whether we have already seen #else or #elif
+ self.seen_else = False
+
+
+class _NestingState(object):
+
+ """Holds states related to parsing braces."""
+
+ def __init__(self):
+ # Stack for tracking all braces. An object is pushed whenever we
+ # see a "{", and popped when we see a "}". Only 1 type of
+ # object is possible:
+ # - _BlockInfo: some type of block.
+ self.stack = []
+
+ # Stack of _PreprocessorInfo objects.
+ self.pp_stack = []
+
+ def SeenOpenBrace(self):
+ """Check if we have seen the opening brace for the innermost block.
+
+ Returns:
+ True if we have seen the opening brace, False if the innermost
+ block is still expecting an opening brace.
+ """
+ return (not self.stack) or self.stack[-1].seen_open_brace
+
+ def UpdatePreprocessor(self, line):
+ """Update preprocessor stack.
+
+ We need to handle preprocessors due to classes like this:
+ #ifdef SWIG
+ struct ResultDetailsPageElementExtensionPoint {
+ #else
+ struct ResultDetailsPageElementExtensionPoint : public Extension {
+ #endif
+
+ We make the following assumptions (good enough for most files):
+ - Preprocessor condition evaluates to true from #if up to first
+ #else/#elif/#endif.
+
+ - Preprocessor condition evaluates to false from #else/#elif up
+ to #endif. We still perform lint checks on these lines, but
+ these do not affect nesting stack.
+
+ Args:
+ line: current line to check.
+ """
+ if Match(r'^\s*#\s*(if|ifdef|ifndef)\b', line):
+ # Beginning of #if block, save the nesting stack here. The saved
+ # stack will allow us to restore the parsing state in the #else
+ # case.
+ self.pp_stack.append(_PreprocessorInfo(copy.deepcopy(self.stack)))
+ elif Match(r'^\s*#\s*(else|elif)\b', line):
+ # Beginning of #else block
+ if self.pp_stack:
+ if not self.pp_stack[-1].seen_else:
+ # This is the first #else or #elif block. Remember the
+ # whole nesting stack up to this point. This is what we
+ # keep after the #endif.
+ self.pp_stack[-1].seen_else = True
+ self.pp_stack[-1].stack_before_else = copy.deepcopy(
+ self.stack)
+
+ # Restore the stack to how it was before the #if
+ self.stack = copy.deepcopy(self.pp_stack[-1].stack_before_if)
+ else:
+ # TODO(unknown): unexpected #else, issue warning?
+ pass
+ elif Match(r'^\s*#\s*endif\b', line):
+ # End of #if or #else blocks.
+ if self.pp_stack:
+ # If we saw an #else, we will need to restore the nesting
+ # stack to its former state before the #else, otherwise we
+ # will just continue from where we left off.
+ if self.pp_stack[-1].seen_else:
+ # Here we can just use a shallow copy since we are the last
+ # reference to it.
+ self.stack = self.pp_stack[-1].stack_before_else
+ # Drop the corresponding #if
+ self.pp_stack.pop()
+ else:
+ # TODO(unknown): unexpected #endif, issue warning?
+ pass
+
+ def Update(self, filename, clean_lines, linenum, error):
+ """Update nesting state with current line.
+
+ Args:
+ filename: The name of the current file.
+ clean_lines: A CleansedLines instance containing the file.
+ linenum: The number of the line to check.
+ error: The function to call with any errors found.
+ """
+ line = clean_lines.elided[linenum]
+
+ # Update pp_stack first
+ self.UpdatePreprocessor(line)
+
+ # Count parentheses. This is to avoid adding struct arguments to
+ # the nesting stack.
+ if self.stack:
+ inner_block = self.stack[-1]
+ depth_change = line.count('(') - line.count(')')
+ inner_block.open_parentheses += depth_change
+
+ # Also check if we are starting or ending an inline assembly block.
+ if inner_block.inline_asm in (_NO_ASM, _END_ASM):
+ if (depth_change != 0 and
+ inner_block.open_parentheses == 1 and
+ _MATCH_ASM.match(line)):
+ # Enter assembly block
+ inner_block.inline_asm = _INSIDE_ASM
+ else:
+ # Not entering assembly block. If previous line was
+ # _END_ASM, we will now shift to _NO_ASM state.
+ inner_block.inline_asm = _NO_ASM
+ elif (inner_block.inline_asm == _INSIDE_ASM and
+ inner_block.open_parentheses == 0):
+ # Exit assembly block
+ inner_block.inline_asm = _END_ASM
+
+ # Consume braces or semicolons from what's left of the line
+ while True:
+ # Match first brace, semicolon, or closed parenthesis.
+ matched = Match(r'^[^{;)}]*([{;)}])(.*)$', line)
+ if not matched:
+ break
+
+ token = matched.group(1)
+ if token == '{':
+ # If namespace or class hasn't seen an opening brace yet, mark
+ # namespace/class head as complete. Push a new block onto the
+ # stack otherwise.
+ if not self.SeenOpenBrace():
+ self.stack[-1].seen_open_brace = True
+ else:
+ self.stack.append(_BlockInfo(True))
+ if _MATCH_ASM.match(line):
+ self.stack[-1].inline_asm = _BLOCK_ASM
+ elif token == ';' or token == ')':
+ # If we haven't seen an opening brace yet, but we already saw
+ # a semicolon, this is probably a forward declaration. Pop
+ # the stack for these.
+ #
+ # Similarly, if we haven't seen an opening brace yet, but we
+ # already saw a closing parenthesis, then these are probably
+ # function arguments with extra "class" or "struct" keywords.
+                    # Also pop the stack for these.
+ if not self.SeenOpenBrace():
+ self.stack.pop()
+ else: # token == '}'
+ # Perform end of block checks and pop the stack.
+ if self.stack:
+ self.stack.pop()
+ line = matched.group(2)
+
+
+def CheckForNonStandardConstructs(filename, clean_lines, linenum,
+ nesting_state, error):
+ r"""Logs an error if we see certain non-ANSI constructs ignored by gcc-2.
+
+ Complain about several constructs which gcc-2 accepts, but which are
+ not standard C++. Warning about these in lint is one way to ease the
+ transition to new compilers.
+ - put storage class first (e.g. "static const" instead of "const static").
+ - "%" PRId64 instead of %qd" in printf-type functions.
+ - "%1$d" is non-standard in printf-type functions.
+ - "\%" is an undefined character escape sequence.
+ - text after #endif is not allowed.
+ - invalid inner-style forward declaration.
+ - >? and <? operators, and their >?= and <?= cousins.
+
+ Additionally, check for constructor/destructor style violations and
+ reference members, as it is very convenient to do so while checking for
+ gcc-2 compliance.
+
+ Args:
+ filename: The name of the current file.
+ clean_lines: A CleansedLines instance containing the file.
+ linenum: The number of the line to check.
+ nesting_state: A _NestingState instance which maintains information about
+ the current stack of nested blocks being parsed.
+ error: A callable to which errors are reported, which takes 4 arguments:
+ filename, line number, error level, and message
+ """
+
+ # Remove comments from the line, but leave in strings for now.
+ line = clean_lines.lines[linenum]
+
+ if Search(r'printf\s*\(.*".*%[-+ ]?\d*q', line):
+ error(filename, linenum, 'runtime/printf_format', 3,
+ '"%q" in format strings is deprecated. Use "%" PRId64 instead.')
+
+ if Search(r'printf\s*\(.*".*%\d+\$', line):
+ error(filename, linenum, 'runtime/printf_format', 2,
+ '%N$ formats are unconventional. Try rewriting to avoid them.')
+
+ # Remove escaped backslashes before looking for undefined escapes.
+ line = line.replace('\\\\', '')
+
+ if Search(r'("|\').*\\(%|\[|\(|{)', line):
+ error(filename, linenum, 'build/printf_format', 3,
+ '%, [, (, and { are undefined character escapes. Unescape them.')
+
+ # For the rest, work with both comments and strings removed.
+ line = clean_lines.elided[linenum]
+
+ if Search(r'\b(const|volatile|void|char|short|int|long'
+ r'|float|double|signed|unsigned'
+ r'|u?int8_t|u?int16_t|u?int32_t|u?int64_t'
+ r'|u?int_least8_t|u?int_least16_t|u?int_least32_t'
+ r'|u?int_least64_t'
+ r'|u?int_fast8_t|u?int_fast16_t|u?int_fast32_t'
+ r'|u?int_fast64_t'
+ r'|u?intptr_t|u?intmax_t)'
+ r'\s+(register|static|extern|typedef)\b',
+ line):
+ error(filename, linenum, 'build/storage_class', 5,
+ 'Storage class (static, extern, typedef, etc) should be first.')
+
+ if Match(r'\s*#\s*endif\s*[^/\s]+', line):
+ error(filename, linenum, 'build/endif_comment', 5,
+ 'Uncommented text after #endif is non-standard. Use a comment.')
+
+ if Search(r'(\w+|[+-]?\d+(\.\d*)?)\s*(<|>)\?=?\s*(\w+|[+-]?\d+)(\.\d*)?',
+ line):
+ error(filename, linenum, 'build/deprecated', 3,
+ '>? and <? (max and min) operators are'
+ ' non-standard and deprecated.')
+
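+# Hypothetical lines this function would flag: 'printf("%qd", n);' (use
+# "%" PRId64 instead), 'printf("%1$d", n);' (positional formats) and
+# '#endif FOO' (uncommented text after #endif); '#endif  // FOO' is fine.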
+
+def CheckSpacingForFunctionCall(filename, line, linenum, error):
+ """Checks for the correctness of various spacing around function calls.
+
+ Args:
+ filename: The name of the current file.
+ line: The text of the line to check.
+ linenum: The number of the line to check.
+ error: The function to call with any errors found.
+ """
+
+ # Since function calls often occur inside if/for/while/switch
+ # expressions - which have their own, more liberal conventions - we
+ # first see if we should be looking inside such an expression for a
+ # function call, to which we can apply more strict standards.
+ fncall = line # if there's no control flow construct, look at whole line
+ for pattern in (r'\bif\s*\((.*)\)\s*{',
+ r'\bfor\s*\((.*)\)\s*{',
+ r'\bwhile\s*\((.*)\)\s*[{;]',
+ r'\bswitch\s*\((.*)\)\s*{'):
+ match = Search(pattern, line)
+ if match:
+ # look inside the parens for function calls
+ fncall = match.group(1)
+ break
+
+ # Except in if/for/while/switch, there should never be space
+ # immediately inside parens (eg "f( 3, 4 )"). We make an exception
+ # for nested parens ( (a+b) + c ). Likewise, there should never be
+ # a space before a ( when it's a function argument. I assume it's a
+ # function argument when the char before the whitespace is legal in
+ # a function name (alnum + _) and we're not starting a macro. Also ignore
+    # pointers and references to arrays and functions because they're tricky:
+ # we use a very simple way to recognize these:
+ # " (something)(maybe-something)" or
+ # " (something)(maybe-something," or
+ # " (something)[something]"
+ # Note that we assume the contents of [] to be short enough that
+ # they'll never need to wrap.
+ if ( # Ignore control structures.
+ not Search(r'\b(if|for|while|switch|return|sizeof)\b', fncall) and
+ # Ignore pointers/references to functions.
+ not Search(r' \([^)]+\)\([^)]*(\)|,$)', fncall) and
+ # Ignore pointers/references to arrays.
+ not Search(r' \([^)]+\)\[[^\]]+\]', fncall)):
+ # a ( used for a fn call
+ if Search(r'\w\s*\(\s(?!\s*\\$)', fncall):
+ error(filename, linenum, 'whitespace/parens', 4,
+ 'Extra space after ( in function call')
+ elif Search(r'\(\s+(?!(\s*\\)|\()', fncall):
+ error(filename, linenum, 'whitespace/parens', 2,
+ 'Extra space after (')
+ if (Search(r'\w\s+\(', fncall) and
+ not Search(r'#\s*define|typedef', fncall) and
+ not Search(r'\w\s+\((\w+::)*\*\w+\)\(', fncall)):
+ error(filename, linenum, 'whitespace/parens', 4,
+ 'Extra space before ( in function call')
+ # If the ) is followed only by a newline or a { + newline, assume it's
+ # part of a control statement (if/while/etc), and don't complain
+ if Search(r'[^)]\s+\)\s*[^{\s]', fncall):
+ # If the closing parenthesis is preceded by only whitespaces,
+ # try to give a more descriptive error message.
+ if Search(r'^\s+\)', fncall):
+ error(filename, linenum, 'whitespace/parens', 2,
+ 'Closing ) should be moved to the previous line')
+ else:
+ error(filename, linenum, 'whitespace/parens', 2,
+ 'Extra space before )')
+
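+# Hedged examples of what this flags (code snippets are made up): 'foo( x)'
+# and 'foo (x)' draw whitespace/parens errors, while control statements like
+# '  if(x) {' and function pointers such as 'int (*fn)(void);' are
+# deliberately not examined here.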
+
+def IsBlankLine(line):
+ """Returns true if the given line is blank.
+
+ We consider a line to be blank if the line is empty or consists of
+ only white spaces.
+
+ Args:
+ line: A line of a string.
+
+ Returns:
+ True, if the given line is blank.
+ """
+ return not line or line.isspace()
+
+
+def CheckForFunctionLengths(filename, clean_lines, linenum,
+ function_state, error):
+ """Reports for long function bodies.
+
+ For an overview why this is done, see:
+ http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Write_Short_Functions
+
+ Uses a simplistic algorithm assuming other style guidelines
+ (especially spacing) are followed.
+ Only checks unindented functions, so class members are unchecked.
+ Trivial bodies are unchecked, so constructors with huge initializer lists
+ may be missed.
+ Blank/comment lines are not counted so as to avoid encouraging the removal
+ of vertical space and comments just to get through a lint check.
+ NOLINT *on the last line of a function* disables this check.
+
+ Args:
+ filename: The name of the current file.
+ clean_lines: A CleansedLines instance containing the file.
+ linenum: The number of the line to check.
+ function_state: Current function name and lines in body so far.
+ error: The function to call with any errors found.
+ """
+ lines = clean_lines.lines
+ line = lines[linenum]
+ joined_line = ''
+
+ starting_func = False
+ regexp = r'(\w(\w|::|\*|\&|\s)*)\(' # decls * & space::name( ...
+ match_result = Match(regexp, line)
+ if match_result:
+ # If the name is all caps and underscores, figure it's a macro and
+ # ignore it, unless it's TEST or TEST_F.
+ function_name = match_result.group(1).split()[-1]
+ if function_name == 'TEST' or function_name == 'TEST_F' or (
+ not Match(r'[A-Z_]+$', function_name)):
+ starting_func = True
+
+ if starting_func:
+ body_found = False
+ for start_linenum in range(linenum, clean_lines.NumLines()):
+ start_line = lines[start_linenum]
+ joined_line += ' ' + start_line.lstrip()
+ # Declarations and trivial functions
+ if Search(r'(;|})', start_line):
+ body_found = True
+ break # ... ignore
+ elif Search(r'{', start_line):
+ body_found = True
+ function = Search(r'((\w|:)*)\(', line).group(1)
+ if Match(r'TEST', function): # Handle TEST... macros
+ parameter_regexp = Search(r'(\(.*\))', joined_line)
+ if parameter_regexp: # Ignore bad syntax
+ function += parameter_regexp.group(1)
+ else:
+ function += '()'
+ function_state.Begin(function)
+ break
+ if not body_found:
+ # No body for the function (or evidence of a non-function) was
+ # found.
+ error(filename, linenum, 'readability/fn_size', 5,
+ 'Lint failed to find start of function body.')
+ elif Match(r'^\}\s*$', line): # function end
+ function_state.Check(error, filename, linenum)
+ function_state.End()
+ elif not Match(r'^\s*$', line):
+ function_state.Count() # Count non-blank/non-comment lines.
+
+
+_RE_PATTERN_TODO = re.compile(r'^//(\s*)TODO(\(.+?\))?(:?)(\s|$)?')
+
+
+def CheckComment(comment, filename, linenum, error):
+ """Checks for common mistakes in TODO comments.
+
+ Args:
+ comment: The text of the comment from the line in question.
+ filename: The name of the current file.
+ linenum: The number of the line to check.
+ error: The function to call with any errors found.
+ """
+ match = _RE_PATTERN_TODO.match(comment)
+ if match:
+ # One whitespace is correct; zero whitespace is handled elsewhere.
+ leading_whitespace = match.group(1)
+ if len(leading_whitespace) > 1:
+ error(filename, linenum, 'whitespace/todo', 2,
+ 'Too many spaces before TODO')
+
+ username = match.group(2)
+ if not username:
+ error(filename, linenum, 'readability/todo', 2,
+ 'Missing username in TODO; it should look like '
+ '"// TODO(my_username): Stuff."')
+
+ colon = match.group(3)
+ if not colon:
+ error(filename, linenum, 'readability/todo', 2,
+ 'Missing colon in TODO; it should look like '
+ '"// TODO(my_username): Stuff."')
+
+ middle_whitespace = match.group(4)
+ # Comparisons made explicit for correctness -- pylint:
+ # disable=g-explicit-bool-comparison
+ if middle_whitespace != ' ' and middle_whitespace != '':
+ error(filename, linenum, 'whitespace/todo', 2,
+ 'TODO(my_username): should be followed by a space')
+
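+# Illustrative TODO comments (assumed, not quoted from the style guide):
+# '// TODO(jdoe): refactor' passes, '//  TODO(jdoe): x' draws whitespace/todo,
+# '// TODO: x' draws readability/todo (missing username), and
+# '// TODO(jdoe) x' draws readability/todo (missing colon).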
+
+def FindNextMatchingAngleBracket(clean_lines, linenum, init_suffix):
+ """Find the corresponding > to close a template.
+
+ Args:
+ clean_lines: A CleansedLines instance containing the file.
+ linenum: Current line number.
+ init_suffix: Remainder of the current line after the initial <.
+
+ Returns:
+ True if a matching bracket exists.
+ """
+ line = init_suffix
+ nesting_stack = ['<']
+ while True:
+ # Find the next operator that can tell us whether < is used as an
+ # opening bracket or as a less-than operator. We only want to
+ # warn on the latter case.
+ #
+ # We could also check all other operators and terminate the search
+ # early, e.g. if we got something like this "a<b+c", the "<" is
+ # most likely a less-than operator, but then we will get false
+ # positives for default arguments and other template expressions.
+ match = Search(r'^[^<>(),;\[\]]*([<>(),;\[\]])(.*)$', line)
+ if match:
+ # Found an operator, update nesting stack
+ operator = match.group(1)
+ line = match.group(2)
+
+ if nesting_stack[-1] == '<':
+ # Expecting closing angle bracket
+ if operator in ('<', '(', '['):
+ nesting_stack.append(operator)
+ elif operator == '>':
+ nesting_stack.pop()
+ if not nesting_stack:
+ # Found matching angle bracket
+ return True
+ elif operator == ',':
+ # Got a comma after a bracket, this is most likely a
+ # template argument. We have not seen a closing angle
+ # bracket yet, but it's probably a few lines later if we
+ # look for it, so just return early here.
+ return True
+ else:
+ # Got some other operator.
+ return False
+
+ else:
+ # Expecting closing parenthesis or closing bracket
+ if operator in ('<', '(', '['):
+ nesting_stack.append(operator)
+ elif operator in (')', ']'):
+ # We don't bother checking for matching () or []. If we got
+ # something like (] or [), it would have been a syntax
+ # error.
+ nesting_stack.pop()
+
+ else:
+ # Scan the next line
+ linenum += 1
+ if linenum >= len(clean_lines.elided):
+ break
+ line = clean_lines.elided[linenum]
+
+ # Exhausted all remaining lines and still no matching angle bracket.
+ # Most likely the input was incomplete, otherwise we should have
+ # seen a semicolon and returned early.
+ return True
+
+
+def FindPreviousMatchingAngleBracket(clean_lines, linenum, init_prefix):
+ """Find the corresponding < that started a template.
+
+ Args:
+ clean_lines: A CleansedLines instance containing the file.
+ linenum: Current line number.
+ init_prefix: Part of the current line before the initial >.
+
+ Returns:
+ True if a matching bracket exists.
+ """
+ line = init_prefix
+ nesting_stack = ['>']
+ while True:
+ # Find the previous operator
+ match = Search(r'^(.*)([<>(),;\[\]])[^<>(),;\[\]]*$', line)
+ if match:
+ # Found an operator, update nesting stack
+ operator = match.group(2)
+ line = match.group(1)
+
+ if nesting_stack[-1] == '>':
+ # Expecting opening angle bracket
+ if operator in ('>', ')', ']'):
+ nesting_stack.append(operator)
+ elif operator == '<':
+ nesting_stack.pop()
+ if not nesting_stack:
+ # Found matching angle bracket
+ return True
+ elif operator == ',':
+ # Got a comma before a bracket, this is most likely a
+ # template argument. The opening angle bracket is probably
+ # there if we look for it, so just return early here.
+ return True
+ else:
+ # Got some other operator.
+ return False
+
+ else:
+ # Expecting opening parenthesis or opening bracket
+ if operator in ('>', ')', ']'):
+ nesting_stack.append(operator)
+ elif operator in ('(', '['):
+ nesting_stack.pop()
+
+ else:
+ # Scan the previous line
+ linenum -= 1
+ if linenum < 0:
+ break
+ line = clean_lines.elided[linenum]
+
+ # Exhausted all earlier lines and still no matching angle bracket.
+ return False
+
+
+def CheckExpressionAlignment(filename, clean_lines, linenum, error, startpos=0):
+ """Checks for the correctness of alignment inside expressions
+
+ Args:
+ filename: The name of the current file.
+ clean_lines: A CleansedLines instance containing the file.
+ linenum: The number of the line to check.
+ error: The function to call with any errors found.
+ startpos: Position where to start searching for expression start.
+ """
+ level_starts = {}
+ line = clean_lines.elided_with_space_strings[linenum]
+ prev_line_start = Search(r'\S', line).start()
+ depth_line_starts = {}
+ pos = min([
+ idx
+ for idx in (
+ line.find(k, startpos)
+ for k in BRACES
+ if k != '{'
+ )
+ if idx >= 0
+ ] + [len(line) + 1])
+ if pos == len(line) + 1:
+ return
+ ignore_error_levels = set()
+ firstlinenum = linenum
+ for linenum, pos, brace, depth in GetExprBracesPosition(
+ clean_lines, linenum, pos
+ ):
+ line = clean_lines.elided_with_space_strings[linenum]
+ if depth is None:
+ if pos < len(line) - 1:
+ CheckExpressionAlignment(filename, clean_lines, linenum, error,
+ pos + 1)
+ return
+ elif depth <= 0:
+ error(filename, linenum, 'syntax/parenthesis', 4,
+ 'Unbalanced parenthesis')
+ return
+ if brace == 's':
+ assert firstlinenum != linenum
+ if level_starts[depth][1]:
+ if line[pos] == BRACES[depth_line_starts[depth][1]]:
+ if pos != depth_line_starts[depth][0]:
+ if depth not in ignore_error_levels:
+ error(filename, linenum, 'whitespace/indent', 2,
+ 'End of the inner expression should have '
+ 'the same indent as start')
+ else:
+ if (pos != depth_line_starts[depth][0] + 4
+ and not (depth_line_starts[depth][1] == '{'
+ and pos == depth_line_starts[depth][0] + 2)):
+ if depth not in ignore_error_levels:
+ error(filename, linenum, 'whitespace/indent', 2,
+ 'Inner expression indentation should be 4')
+ else:
+ if (pos != level_starts[depth][0] + 1
+ + (level_starts[depth][2] == '{')):
+ if depth not in ignore_error_levels:
+ error(filename, linenum, 'whitespace/alignment', 2,
+ 'Inner expression should be aligned '
+ 'as opening brace + 1 (+ 2 in case of {)')
+ prev_line_start = pos
+ elif brace == 'e':
+ pass
+ else:
+ opening = brace in BRACES
+ if opening:
+ # Only treat {} as part of the expression if it is preceded by
+ # "=" (brace initializer) or "(type)" (construct like (struct
+ # foo) { ... }).
+ if brace == '{' and not (Search(
+ r'(?:= *|\((?:struct )?\w+(\s*\[\w*\])?\)) *$',
+ line[:pos])
+ ):
+ ignore_error_levels.add(depth)
+ line_ended_with_opening = (
+ pos == len(line) - 2 * (line.endswith(' \\')) - 1)
+ level_starts[depth] = (pos, line_ended_with_opening, brace)
+ if line_ended_with_opening:
+ depth_line_starts[depth] = (prev_line_start, brace)
+ else:
+ del level_starts[depth]
+
+
+def CheckSpacing(filename, clean_lines, linenum, nesting_state, error):
+ """Checks for the correctness of various spacing issues in the code.
+
+ Things we check for: spaces around operators, spaces after
+ if/for/while/switch, no spaces around parens in function calls, two
+ spaces between code and comment, don't start a block with a blank
+ line, don't end a function with a blank line, don't add a blank line
+ after public/protected/private, don't have too many blank lines in a row,
+ spaces after {, spaces before }.
+
+ Args:
+ filename: The name of the current file.
+ clean_lines: A CleansedLines instance containing the file.
+ linenum: The number of the line to check.
+ nesting_state: A _NestingState instance which maintains information about
+ the current stack of nested blocks being parsed.
+ error: The function to call with any errors found.
+ """
+
+ # Don't use "elided" lines here, otherwise we can't check commented lines.
+ # Don't want to use "raw" either, because we don't want to check inside
+    # C++11 raw strings.
+ raw = clean_lines.lines_without_raw_strings
+ line = raw[linenum]
+
+ # Before nixing comments, check if the line is blank for no good
+ # reason. This includes the first line after a block is opened, and
+    # blank lines at the end of a function (ie, right before a line like '}')
+ #
+ # Skip all the blank line checks if we are immediately inside a
+ # namespace body. In other words, don't issue blank line warnings
+ # for this block:
+ # namespace {
+ #
+ # }
+ #
+ # A warning about missing end of namespace comments will be issued instead.
+ if IsBlankLine(line):
+ elided = clean_lines.elided
+ prev_line = elided[linenum - 1]
+ prevbrace = prev_line.rfind('{')
+ # TODO(unknown): Don't complain if line before blank line, and line
+        #                after, both start with alnums and are indented the same
+ # amount. This ignores whitespace at the start of a
+ # namespace block because those are not usually indented.
+ if prevbrace != -1 and prev_line[prevbrace:].find('}') == -1:
+ # OK, we have a blank line at the start of a code block. Before we
+ # complain, we check if it is an exception to the rule: The previous
+ # non-empty line has the parameters of a function header that are
+            # indented 4 spaces (because they did not fit in an 80 column line
+ # when placed on the same line as the function name). We also check
+ # for the case where the previous line is indented 6 spaces, which
+ # may happen when the initializers of a constructor do not fit into
+            # an 80 column line.
+ exception = False
+ if Match(r' {6}\w', prev_line): # Initializer list?
+ # We are looking for the opening column of initializer list,
+ # which should be indented 4 spaces to cause 6 space indentation
+ # afterwards.
+ search_position = linenum - 2
+ while (search_position >= 0
+ and Match(r' {6}\w', elided[search_position])):
+ search_position -= 1
+ exception = (search_position >= 0
+ and elided[search_position][:5] == ' :')
+ else:
+ # Search for the function arguments or an initializer list. We
+ # use a simple heuristic here: If the line is indented 4 spaces;
+ # and we have a closing paren, without the opening paren,
+ # followed by an opening brace or colon (for initializer lists)
+ # we assume that it is the last line of a function header. If
+ # we have a colon indented 4 spaces, it is an initializer list.
+ exception = (Match(r' {4}\w[^\(]*\)\s*(const\s*)?(\{\s*$|:)',
+ prev_line)
+ or Match(r' {4}:', prev_line))
+
+ if not exception:
+ error(filename, linenum, 'whitespace/blank_line', 2,
+ 'Redundant blank line at the start of a code block '
+ 'should be deleted.')
+ # Ignore blank lines at the end of a block in a long if-else
+ # chain, like this:
+ # if (condition1) {
+ # // Something followed by a blank line
+ #
+ # } else if (condition2) {
+ # // Something else
+ # }
+ if linenum + 1 < clean_lines.NumLines():
+ next_line = raw[linenum + 1]
+ if (next_line
+ and Match(r'\s*}', next_line)
+ and next_line.find('} else ') == -1):
+ error(filename, linenum, 'whitespace/blank_line', 3,
+ 'Redundant blank line at the end of a code block '
+ 'should be deleted.')
+
+ # Next, we complain if there's a comment too near the text
+ commentpos = line.find('//')
+ if commentpos != -1:
+ # Check if the // may be in quotes. If so, ignore it
+ # Comparisons made explicit for clarity -- pylint:
+ # disable=g-explicit-bool-comparison
+ if (line.count('"', 0, commentpos) -
+ line.count('\\"', 0, commentpos)) % 2 == 0: # not in quotes
+ # Allow one space for new scopes, two spaces otherwise:
+ if (not Match(r'^\s*{ //', line) and
+ ((commentpos >= 1 and
+ line[commentpos - 1] not in string.whitespace) or
+ (commentpos >= 2 and
+ line[commentpos - 2] not in string.whitespace))):
+ error(filename, linenum, 'whitespace/comments', 2,
+ 'At least two spaces is best between code and comments')
+ # There should always be a space between the // and the comment
+ commentend = commentpos + 2
+ if commentend < len(line) and not line[commentend] == ' ':
+ # but some lines are exceptions -- e.g. if they're big
+ # comment delimiters like:
+ # //----------------------------------------------------------
+ # or are an empty C++ style Doxygen comment, like:
+ # ///
+ # or C++ style Doxygen comments placed after the variable:
+ # ///< Header comment
+ # //!< Header comment
+ # or they begin with multiple slashes followed by a space:
+ # //////// Header comment
+ match = (Search(r'[=/-]{4,}\s*$', line[commentend:]) or
+ Search(r'^/$', line[commentend:]) or
+ Search(r'^!< ', line[commentend:]) or
+ Search(r'^/< ', line[commentend:]) or
+ Search(r'^/+ ', line[commentend:]))
+ if not match:
+ error(filename, linenum, 'whitespace/comments', 4,
+ 'Should have a space between // and comment')
+ CheckComment(line[commentpos:], filename, linenum, error)
+
+ line = clean_lines.elided[linenum] # get rid of comments and strings
+
+ # Don't try to do spacing checks for operator methods
+ line = re.sub(r'operator(==|!=|<|<<|<=|>=|>>|>)\(', 'operator\(', line)
+
+ # We allow no-spaces around = within an if: "if ( (a=Foo()) == 0 )".
+ # Otherwise not. Note we only check for non-spaces on *both* sides;
+ # sometimes people put non-spaces on one side when aligning ='s among
+ # many lines (not that this is behavior that I approve of...)
+ if Search(r'[\w.]=[\w.]', line) and not Search(r'\b(if|while) ', line):
+ error(filename, linenum, 'whitespace/operators', 4,
+ 'Missing spaces around =')
+
+ # It's ok not to have spaces around binary operators like + - * /, but if
+ # there's too little whitespace, we get concerned. It's hard to tell,
+ # though, so we punt on this one for now. TODO.
+
+ match = Search(r'(?:[^ (*/![])+(?<!\+\+|--)\*', line)
+ if match:
+ error(filename, linenum, 'whitespace/operators', 2,
+ 'Missing space before asterisk in %s' % match.group(0))
+
+ # You should always have whitespace around binary operators.
+ #
+ # Check <= and >= first to avoid false positives with < and >, then
+ # check non-include lines for spacing around < and >.
+ match = Search(r'[^<>=!\s](==|!=|<=|>=)[^<>=!\s]', line)
+ if match:
+ error(filename, linenum, 'whitespace/operators', 3,
+ 'Missing spaces around %s' % match.group(1))
+
+ # Boolean operators should be placed on the next line.
+ if Search(r'(?:&&|\|\|)$', line):
+ error(filename, linenum, 'whitespace/operators', 4,
+ 'Boolean operator should be placed on the same line as the start '
+ 'of its right operand')
+
+ # We allow no-spaces around << when used like this: 10<<20, but
+ # not otherwise (particularly, not when used as streams)
+ # Also ignore using ns::operator<<;
+ match = Search(r'(operator|\S)(?:L|UL|ULL|l|ul|ull)?<<(\S)', line)
+ if (match and
+ not (match.group(1).isdigit() and match.group(2).isdigit()) and
+ not (match.group(1) == 'operator' and match.group(2) == ';')):
+ error(filename, linenum, 'whitespace/operators', 3,
+ 'Missing spaces around <<')
+ elif not Match(r'#.*include', line):
+ # Avoid false positives on ->
+ reduced_line = line.replace('->', '')
+
+ # Look for < that is not surrounded by spaces. This is only
+ # triggered if both sides are missing spaces, even though
+        # technically we should flag if at least one side is missing a
+ # space. This is done to avoid some false positives with shifts.
+ match = Search(r'[^\s<]<([^\s=<].*)', reduced_line)
+ if (match and not FindNextMatchingAngleBracket(clean_lines, linenum,
+ match.group(1))):
+ error(filename, linenum, 'whitespace/operators', 3,
+ 'Missing spaces around <')
+
+ # Look for > that is not surrounded by spaces. Similar to the
+ # above, we only trigger if both sides are missing spaces to avoid
+ # false positives with shifts.
+ match = Search(r'^(.*[^\s>])>[^\s=>]', reduced_line)
+ if (match and
+ not FindPreviousMatchingAngleBracket(clean_lines, linenum,
+ match.group(1))):
+ error(filename, linenum, 'whitespace/operators', 3,
+ 'Missing spaces around >')
+
+ # We allow no-spaces around >> for almost anything. This is because
+ # C++11 allows ">>" to close nested templates, which accounts for
+ # most cases when ">>" is not followed by a space.
+ #
+ # We still warn on ">>" followed by alpha character, because that is
+ # likely due to ">>" being used for right shifts, e.g.:
+ # value >> alpha
+ #
+ # When ">>" is used to close templates, the alphanumeric letter that
+ # follows would be part of an identifier, and there should still be
+ # a space separating the template type and the identifier.
+ # type<type<type>> alpha
+ match = Search(r'>>[a-zA-Z_]', line)
+ if match:
+ error(filename, linenum, 'whitespace/operators', 3,
+ 'Missing spaces around >>')
+
+ # There shouldn't be space around unary operators
+ match = Search(r'(!\s|~\s|[\s]--[\s;]|[\s]\+\+[\s;])', line)
+ if match:
+ error(filename, linenum, 'whitespace/operators', 4,
+ 'Extra space for operator %s' % match.group(1))
+
+ # A pet peeve of mine: no spaces after an if, while, switch, or for
+ match = Search(r' (if\(|for\(|while\(|switch\()', line)
+ if match:
+ error(filename, linenum, 'whitespace/parens', 5,
+ 'Missing space before ( in %s' % match.group(1))
+
+ # For if/for/while/switch, the left and right parens should be
+ # consistent about how many spaces are inside the parens, and
+ # there should either be zero or one spaces inside the parens.
+ # We don't want: "if ( foo)" or "if ( foo )".
+ # Exception: "for ( ; foo; bar)" and "for (foo; bar; )" are allowed.
+ match = Search(r'\b(if|for|while|switch)\s*'
+ r'\(([ ]*)(.).*[^ ]+([ ]*)\)\s*{\s*$',
+ line)
+ if match:
+ if len(match.group(2)) != len(match.group(4)):
+ if not (match.group(3) == ';' and
+ len(match.group(2)) == 1 + len(match.group(4)) or
+ not match.group(2) and Search(r'\bfor\s*\(.*; \)', line)):
+ error(filename, linenum, 'whitespace/parens', 5,
+ 'Mismatching spaces inside () in %s' % match.group(1))
+ if len(match.group(2)) not in [0, 1]:
+ error(filename, linenum, 'whitespace/parens', 5,
+ 'Should have zero or one spaces inside ( and ) in %s' %
+ match.group(1))
+
+ # You should always have a space after a comma (either as fn arg or
+ # operator).
+ #
+ # This does not apply when the non-space character following the
+ # comma is another comma, since the only time when that happens is
+ # for empty macro arguments.
+ #
+ # We run this check in two passes: first pass on elided lines to
+ # verify that lines contain missing whitespaces, second pass on raw
+ # lines to confirm that those missing whitespaces are not due to
+ # elided comments.
+ if Search(r',[^,\s]', line) and Search(r',[^,\s]', raw[linenum]):
+ error(filename, linenum, 'whitespace/comma', 3,
+ 'Missing space after ,')
+
+ # You should always have a space after a semicolon
+ # except for few corner cases
+    # TODO(unknown): clarify if 'if (1) { return 1;}' requires one more
+ # space after ;
+ if Search(r';[^\s};\\)/]', line):
+ error(filename, linenum, 'whitespace/semicolon', 3,
+ 'Missing space after ;')
+
+ # Next we will look for issues with function calls.
+ CheckSpacingForFunctionCall(filename, line, linenum, error)
+
+ # Check whether everything inside expressions is aligned correctly
+ if any((line.find(k) >= 0 for k in BRACES if k != '{')):
+ CheckExpressionAlignment(filename, clean_lines, linenum, error)
+
+ # Except after an opening paren, or after another opening brace (in case of
+ # an initializer list, for instance), you should have spaces before your
+ # braces. And since you should never have braces at the beginning of a line,
+ # this is an easy test.
+ match = Match(r'^(.*[^ ({]){', line)
+ if match:
+ # Try a bit harder to check for brace initialization. This
+ # happens in one of the following forms:
+ # Constructor() : initializer_list_{} { ... }
+ # Constructor{}.MemberFunction()
+ # Type variable{};
+ # FunctionCall(type{}, ...);
+ # LastArgument(..., type{});
+ # LOG(INFO) << type{} << " ...";
+ # map_of_type[{...}] = ...;
+ #
+ # We check for the character following the closing brace, and
+ # silence the warning if it's one of those listed above, i.e.
+ # "{.;,)<]".
+ #
+ # To account for nested initializer list, we allow any number of
+ # closing braces up to "{;,)<". We can't simply silence the
+ # warning on first sight of closing brace, because that would
+ # cause false negatives for things that are not initializer lists.
+ # Silence this: But not this:
+ # Outer{ if (...) {
+ # Inner{...} if (...){ // Missing space before {
+ # }; }
+ #
+ # There is a false negative with this approach if people inserted
+ # spurious semicolons, e.g. "if (cond){};", but we will catch the
+ # spurious semicolon with a separate check.
+ (endline, endlinenum, endpos) = CloseExpression(
+ clean_lines, linenum, len(match.group(1)))
+ trailing_text = ''
+ if endpos > -1:
+ trailing_text = endline[endpos:]
+ for offset in range(endlinenum + 1,
+ min(endlinenum + 3, clean_lines.NumLines() - 1)):
+ trailing_text += clean_lines.elided[offset]
+ if not Match(r'^[\s}]*[{.;,)<\]]', trailing_text):
+ error(filename, linenum, 'whitespace/braces', 5,
+ 'Missing space before {')
+
+ # Make sure '} else {' has spaces.
+ if Search(r'}else', line):
+ error(filename, linenum, 'whitespace/braces', 5,
+ 'Missing space before else')
+
+ # You shouldn't have spaces before your brackets, except maybe after
+ # 'delete []' or 'new char * []'.
+ if Search(r'\w\s+\[', line):
+ error(filename, linenum, 'whitespace/braces', 5,
+ 'Extra space before [')
+
+ # You shouldn't have a space before a semicolon at the end of the line.
+ if Search(r':\s*;\s*$', line):
+ error(filename, linenum, 'whitespace/semicolon', 5,
+ 'Semicolon defining empty statement. Use {} instead.')
+ elif Search(r'^\s*;\s*$', line):
+ error(filename, linenum, 'whitespace/semicolon', 5,
+ 'Line contains only semicolon. If this should be an empty'
+ ' statement, use {} instead.')
+ elif Search(r'\s+;\s*$', line):
+ error(filename, linenum, 'whitespace/semicolon', 5,
+ 'Extra space before last semicolon. If this should be an empty '
+ 'statement, use {} instead.')
+
+ if Search(r'\{(?!\})\S', line):
+ error(filename, linenum, 'whitespace/braces', 5,
+ 'Missing space after {')
+ if Search(r'\S(?<!\{)\}', line):
+ error(filename, linenum, 'whitespace/braces', 5,
+ 'Missing space before }')
+
+ if Search(r'\S {2,}\\$', line):
+ error(filename, linenum, 'whitespace/line_continuation', 5,
+ 'Too many spaces before \\, line continuation character must be '
+ 'preceded by exactly one space. For “blank lines” '
+ 'it is preferred to use the same amount of spaces as preceding '
+ 'indent')
+
+ if Match(r'^ +#', line):
+ error(filename, linenum, 'whitespace/indent', 5,
+ 'Must not indent preprocessor directives, use 1-space indent '
+ 'after the hash')
+
+ cast_line = re.sub(r'^# *define +\w+\([^)]*\)', '', line)
+ match = Search(r'\((?:const )?(?:struct )?[a-zA-Z_]\w*(?: *\*(?:const)?)*\)'
+ r' +'
+ r'-?(?:\*+|&)?(?:\w+|\+\+|--|\()', cast_line)
+ if match and line[0] == ' ':
+ error(filename, linenum, 'whitespace/cast', 2,
+ 'Should leave no spaces after a cast: {!r}'.format(
+ match.group(0)))
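+ # For example, an indented line such as "  s = (char_u *) p;" would be
+ # reported here, while "  s = (char_u *)p;" would not.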
+
+
+def GetPreviousNonBlankLine(clean_lines, linenum):
+ """Return the most recent non-blank line and its line number.
+
+ Args:
+ clean_lines: A CleansedLines instance containing the file contents.
+ linenum: The number of the line to check.
+
+ Returns:
+ A tuple with two elements. The first element is the contents of the last
+ non-blank line before the current line, or the empty string if this is the
+ first non-blank line. The second is the line number of that line, or -1
+ if this is the first non-blank line.
+ """
+
+ prevlinenum = linenum - 1
+ while prevlinenum >= 0:
+ prevline = clean_lines.elided[prevlinenum]
+ if not IsBlankLine(prevline): # if not a blank line...
+ return (prevline, prevlinenum)
+ prevlinenum -= 1
+ return ('', -1)
+
+
+def CheckBraces(filename, clean_lines, linenum, error):
+ """Looks for misplaced braces (e.g. at the end of line).
+
+ Args:
+ filename: The name of the current file.
+ clean_lines: A CleansedLines instance containing the file.
+ linenum: The number of the line to check.
+ error: The function to call with any errors found.
+ """
+
+ line = clean_lines.elided[linenum] # get rid of comments and strings
+
+ if not (filename.endswith('.c') or filename.endswith('.h')):
+ if Match(r'\s*{\s*$', line):
+ # We allow an open brace to start a line in the case where someone
+ # is using braces in a block to explicitly create a new scope, which
+ # is commonly used to control the lifetime of stack-allocated
+ # variables. Braces are also used for brace initializers inside
+ # function calls. We don't detect this perfectly: we just don't
+ # complain if the last non-whitespace character on the previous
+ # non-blank line is ',', ';', ':', '(', '{', or '}', or if the
+ # previous line starts a preprocessor block.
+ prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
+ if (not Search(r'[,;:}{(]\s*$', prevline) and
+ not Match(r'\s*#', prevline)):
+ error(filename, linenum, 'whitespace/braces', 4,
+ '{ should almost always be at the end'
+ ' of the previous line')
+
+ # An else clause should be on the same line as the preceding closing brace.
+ # If there is no preceding closing brace, there should be one.
+ if Match(r'\s*else\s*', line):
+ prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
+ if Match(r'\s*}\s*$', prevline):
+ error(filename, linenum, 'whitespace/newline', 4,
+ 'An else should appear on the same line as the preceding }')
+ else:
+ error(filename, linenum, 'readability/braces', 5,
+ 'An else should always have braces before it')
+
+ # An if/while/for statement should always have braces
+ for blockstart in ('if', 'while', 'for'):
+ if Match(r'\s*{0}(?!\w)[^{{]*$'.format(blockstart), line):
+ pos = line.find(blockstart)
+ pos = line.find('(', pos)
+ if pos > 0:
+ (endline, _, endpos) = CloseExpression(
+ clean_lines, linenum, pos)
+ if endline[endpos:].find('{') == -1:
+ error(filename, linenum, 'readability/braces', 5,
+ '{0} should always use braces'.format(blockstart))
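+ # For example, a single-line "if (cond) do_thing();" with no opening brace
+ # after the closing parenthesis would be reported as
+ # "if should always use braces".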
+
+ # If braces come on one side of an else, they should be on both.
+ # However, we have to worry about "else if" that spans multiple lines!
+ if Search(r'}\s*else[^{]*$', line) or Match(r'[^}]*else\s*{', line):
+ if Search(r'}\s*else if([^{]*)$', line): # could be multi-line if
+ # find the ( after the if
+ pos = line.find('else if')
+ pos = line.find('(', pos)
+ if pos > 0:
+ (endline, _, endpos) = CloseExpression(
+ clean_lines, linenum, pos)
+ # must be brace after if
+ if endline[endpos:].find('{') == -1:
+ error(filename, linenum, 'readability/braces', 5,
+ 'If an else has a brace on one side,'
+ ' it should have it on both')
+ else: # common case: else not followed by a multi-line if
+ error(filename, linenum, 'readability/braces', 5,
+ 'If an else has a brace on one side,'
+ ' it should have it on both')
+
+ # Likewise, the body of an else should never be on the same line as the else
+ if Search(r'\belse [^\s{]', line) and not Search(r'\belse if\b', line):
+ error(filename, linenum, 'whitespace/newline', 4,
+ 'Else clause should never be on same line as else (use 2 lines)')
+
+ # In the same way, a do/while should never be on one line
+ if Match(r'\s*do [^\s{]', line):
+ error(filename, linenum, 'whitespace/newline', 4,
+ 'do/while clauses should not be on a single line')
+
+ # Block bodies should not be followed by a semicolon. Due to C++11
+ # brace initialization, there are more places where semicolons are
+ # required than not, so we use a whitelist approach to check these
+ # rather than a blacklist. These are the places where "};" should
+ # be replaced by just "}":
+ # 1. Some flavor of block following closing parenthesis:
+ # for (;;) {};
+ # while (...) {};
+ # switch (...) {};
+ # Function(...) {};
+ # if (...) {};
+ # if (...) else if (...) {};
+ #
+ # 2. else block:
+ # if (...) else {};
+ #
+ # 3. const member function:
+ # Function(...) const {};
+ #
+ # 4. Block following some statement:
+ # x = 42;
+ # {};
+ #
+ # 5. Block at the beginning of a function:
+ # Function(...) {
+ # {};
+ # }
+ #
+ # Note that naively checking for the preceding "{" will also match
+ # braces inside multi-dimensional arrays, but this is fine since
+ # that expression will not contain semicolons.
+ #
+ # 6. Block following another block:
+ # while (true) {}
+ # {};
+ #
+ # 7. End of namespaces:
+ # namespace {};
+ #
+ # These semicolons seem far more common than other kinds of
+ # redundant semicolons, possibly due to people converting classes
+ # to namespaces. For now we do not warn for this case.
+ #
+ # Try matching case 1 first.
+ match = Match(r'^(.*\)\s*)\{', line)
+ if match:
+ # Matched closing parenthesis (case 1). Check the token before the
+ # matching opening parenthesis, and don't warn if it looks like a
+ # macro. This avoids these false positives:
+ # - macro that defines a base class
+ # - multi-line macro that defines a base class
+ # - macro that defines the whole class-head
+ #
+ # But we still issue warnings for macros that we know are safe to
+ # warn, specifically:
+ # - TEST, TEST_F, TEST_P, MATCHER, MATCHER_P
+ # - TYPED_TEST
+ # - INTERFACE_DEF
+ # - EXCLUSIVE_LOCKS_REQUIRED, SHARED_LOCKS_REQUIRED, LOCKS_EXCLUDED:
+ #
+ # We implement a whitelist of safe macros instead of a blacklist of
+ # unsafe macros, even though the latter appears less frequently in
+ # google code and would have been easier to implement. This is because
+ # the downside for getting the whitelist wrong means some extra
+ # semicolons, while the downside for getting the blacklist wrong
+ # would result in compile errors.
+ #
+ # In addition to macros, we also don't want to warn on compound
+ # literals.
+ closing_brace_pos = match.group(1).rfind(')')
+ opening_parenthesis = ReverseCloseExpression(
+ clean_lines, linenum, closing_brace_pos)
+ if opening_parenthesis[2] > -1:
+ line_prefix = opening_parenthesis[0][0:opening_parenthesis[2]]
+ macro = Search(r'\b([A-Z_]+)\s*$', line_prefix)
+ if ((macro and
+ macro.group(1) not in (
+ 'TEST', 'TEST_F', 'MATCHER', 'MATCHER_P', 'TYPED_TEST',
+ 'EXCLUSIVE_LOCKS_REQUIRED', 'SHARED_LOCKS_REQUIRED',
+ 'LOCKS_EXCLUDED', 'INTERFACE_DEF')) or
+ Search(r'\s+=\s*$', line_prefix) or
+ Search(r'^\s*return\s*$', line_prefix)):
+ match = None
+
+ else:
+ # Try matching cases 2-3.
+ match = Match(r'^(.*(?:else|\)\s*const)\s*)\{', line)
+ if not match:
+ # Try matching cases 4-6. These are always matched on separate
+ # lines.
+ #
+ # Note that we can't simply concatenate the previous line to the
+ # current line and do a single match, otherwise we may output
+ # duplicate warnings for the blank line case:
+ # if (cond) {
+ # // blank line
+ # }
+ prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
+ if prevline and Search(r'[;{}]\s*$', prevline):
+ match = Match(r'^(\s*)\{', line)
+
+ # Check matching closing brace
+ if match:
+ (endline, endlinenum, endpos) = CloseExpression(
+ clean_lines, linenum, len(match.group(1)))
+ if endpos > -1 and Match(r'^\s*;', endline[endpos:]):
+ # Current {} pair is eligible for semicolon check, and we have found
+ # the redundant semicolon, output warning here.
+ #
+ # Note: because we are scanning forward for opening braces, and
+ # outputting warnings for the matching closing brace, if there are
+ # nested blocks with trailing semicolons, we will get the error
+ # messages in reversed order.
+ error(filename, endlinenum, 'readability/braces', 4,
+ "You don't need a ; after a }")
+
+
+def CheckEmptyBlockBody(filename, clean_lines, linenum, error):
+ """Look for empty loop/conditional body with only a single semicolon.
+
+ Args:
+ filename: The name of the current file.
+ clean_lines: A CleansedLines instance containing the file.
+ linenum: The number of the line to check.
+ error: The function to call with any errors found.
+ """
+
+ # Search for loop keywords at the beginning of the line. Because only
+ # whitespace is allowed before the keywords, this will also ignore most
+ # do-while loops, since those lines should start with a closing brace.
+ #
+ # We also check "if" blocks here, since an empty conditional block
+ # is likely an error.
+ line = clean_lines.elided[linenum]
+ matched = Match(r'\s*(for|while|if)\s*\(', line)
+ if matched:
+ # Find the end of the conditional expression
+ (end_line, end_linenum, end_pos) = CloseExpression(
+ clean_lines, linenum, line.find('('))
+
+ # Output warning if what follows the condition expression is a
+ # semicolon. No warning for all other cases, including whitespace or
+ # newline, since we have a separate check for semicolons preceded by
+ # whitespace.
+ if end_pos >= 0 and Match(r';', end_line[end_pos:]):
+ if matched.group(1) == 'if':
+ error(filename, end_linenum,
+ 'whitespace/empty_conditional_body', 5,
+ 'Empty conditional bodies should use {}')
+ else:
+ error(filename, end_linenum, 'whitespace/empty_loop_body', 5,
+ 'Empty loop bodies should use {} or continue')
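+ # For example, "while (!done);" is reported as an empty loop body, and
+ # "if (ready);" is reported as an empty conditional body.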
+
+
+def CheckAltTokens(filename, clean_lines, linenum, error):
+ """Check alternative keywords being used in boolean expressions.
+
+ Args:
+ filename: The name of the current file.
+ clean_lines: A CleansedLines instance containing the file.
+ linenum: The number of the line to check.
+ error: The function to call with any errors found.
+ """
+ line = clean_lines.elided[linenum]
+
+ # Avoid preprocessor lines
+ if Match(r'^\s*#', line):
+ return
+
+ # Last ditch effort to avoid multi-line comments. This will not help
+ # if the comment started before the current line or ended after the
+ # current line, but it catches most of the false positives. At least,
+ # it provides a way to work around this warning for people who use
+ # multi-line comments in preprocessor macros.
+ #
+ # TODO(unknown): remove this once cpplint has better support for
+ # multi-line comments.
+ if line.find('/*') >= 0 or line.find('*/') >= 0:
+ return
+
+ for match in _ALT_TOKEN_REPLACEMENT_PATTERN.finditer(line):
+ error(filename, linenum, 'readability/alt_tokens', 2,
+ 'Use operator %s instead of %s' % (
+ _ALT_TOKEN_REPLACEMENT[match.group(1)], match.group(1)))
+
+
+def GetLineWidth(line):
+ """Determines the width of the line in column positions.
+
+ Args:
+ line: A string, which may be a Unicode string.
+
+ Returns:
+ The width of the line in column positions, accounting for Unicode
+ combining characters and wide characters.
+ """
+ if isinstance(line, str):
+ width = 0
+ for uc in unicodedata.normalize('NFC', line):
+ if unicodedata.east_asian_width(uc) in ('W', 'F'):
+ width += 2
+ elif not unicodedata.combining(uc):
+ width += 1
+ return width
+ else:
+ return len(line)
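+ # For example, GetLineWidth('abc') returns 3, while a string of two
+ # fullwidth (East Asian 'W') characters returns 4, since each such
+ # character occupies two columns.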
+
+
+def CheckStyle(filename, clean_lines, linenum, file_extension, nesting_state,
+ error):
+ """Checks rules from the 'C++ style rules' section of cppguide.html.
+
+ Most of these rules are hard to test (naming, comment style), but we
+ do what we can. In particular we check for 2-space indents, line lengths,
+ tab usage, spaces inside code, etc.
+
+ Args:
+ filename: The name of the current file.
+ clean_lines: A CleansedLines instance containing the file.
+ linenum: The number of the line to check.
+ file_extension: The extension (without the dot) of the filename.
+ nesting_state: A _NestingState instance which maintains information about
+ the current stack of nested blocks being parsed.
+ error: The function to call with any errors found.
+ """
+
+ # Don't use "elided" lines here, otherwise we can't check commented lines.
+ # Don't want to use "raw" either, because we don't want to check inside
+ # C++11 raw strings.
+ raw_lines = clean_lines.lines_without_raw_strings
+ line = raw_lines[linenum]
+
+ if line.find('\t') != -1:
+ error(filename, linenum, 'whitespace/tab', 1,
+ 'Tab found; better to use spaces')
+
+ # One or three blank spaces at the beginning of the line is weird; it's
+ # hard to reconcile that with 2-space indents.
+ # NOTE: here are the conditions rob pike used for his tests. Mine aren't
+ # as sophisticated, but it may be worth becoming so:
+ # RLENGTH==initial_spaces
+ # if(RLENGTH > 20) complain = 0;
+ # if(match($0, " +(error|private|public|protected):")) complain = 0;
+ # if(match(prev, "&& *$")) complain = 0;
+ # if(match(prev, "\\|\\| *$")) complain = 0;
+ # if(match(prev, "[\",=><] *$")) complain = 0;
+ # if(match($0, " <<")) complain = 0;
+ # if(match(prev, " +for \\(")) complain = 0;
+ # if(prevodd && match(prevprev, " +for \\(")) complain = 0;
+ initial_spaces = 0
+ cleansed_line = clean_lines.elided[linenum]
+ while initial_spaces < len(line) and line[initial_spaces] == ' ':
+ initial_spaces += 1
+ if line and line[-1].isspace():
+ error(filename, linenum, 'whitespace/end_of_line', 4,
+ 'Line ends in whitespace. Consider deleting these extra spaces.')
+ # There are certain situations where we allow one space, notably for
+ # section labels.
+ elif ((initial_spaces == 1 or initial_spaces == 3) and
+ not Match(r'\s*\w+\s*:\s*$', cleansed_line)):
+ error(filename, linenum, 'whitespace/indent', 3,
+ 'Weird number of spaces at line-start. '
+ 'Are you using a 2-space indent?')
+
+ # Check if the line is a header guard.
+ is_header_guard = False
+ if file_extension == 'h':
+ cppvar = GetHeaderGuardCPPVariable(filename)
+ if (line.startswith('#ifndef %s' % cppvar) or
+ line.startswith('#define %s' % cppvar) or
+ line.startswith('#endif // %s' % cppvar)):
+ is_header_guard = True
+ # #include lines and header guards can be long, since there's no clean way
+ # to split them.
+ #
+ # URLs can be long too. It's possible to split these, but it makes them
+ # harder to cut&paste.
+ #
+ # The "$Id:...$" comment may also get very long without it being the
+ # developer's fault.
+ if (not line.startswith('#include') and not is_header_guard and
+ not Match(r'^\s*//.*http(s?)://\S*$', line) and
+ not Match(r'^// \$Id:.*#[0-9]+ \$$', line)):
+ line_width = GetLineWidth(line)
+ extended_length = int((_line_length * 1.25))
+ if line_width > extended_length:
+ error(filename, linenum, 'whitespace/line_length', 4,
+ 'Lines should very rarely be longer than %i characters' %
+ extended_length)
+ elif line_width > _line_length:
+ error(filename, linenum, 'whitespace/line_length', 2,
+ 'Lines should be <= %i characters long' % _line_length)
+
+ if (cleansed_line.count(';') > 1 and
+ # for loops are allowed two ;'s (and may run over two lines).
+ cleansed_line.find('for') == -1 and
+ (GetPreviousNonBlankLine(clean_lines, linenum)[0].find('for') == -1 or
+ GetPreviousNonBlankLine(clean_lines, linenum)[0].find(';') != -1) and
+ # It's ok to have many commands in a switch case that fits in 1 line
+ not ((cleansed_line.find('case ') != -1 or
+ cleansed_line.find('default:') != -1) and
+ cleansed_line.find('break;') != -1)):
+ error(filename, linenum, 'whitespace/newline', 0,
+ 'More than one command on the same line')
+
+ # Some more style checks
+ CheckBraces(filename, clean_lines, linenum, error)
+ CheckEmptyBlockBody(filename, clean_lines, linenum, error)
+ CheckSpacing(filename, clean_lines, linenum, nesting_state, error)
+ CheckAltTokens(filename, clean_lines, linenum, error)
+
+
+_RE_PATTERN_INCLUDE_NEW_STYLE = re.compile(r'#include +"[^/]+\.h"')
+_RE_PATTERN_INCLUDE = re.compile(r'^\s*#\s*include\s*([<"])([^>"]*)[>"].*$')
+# Matches the first component of a filename delimited by -s and _s. That is:
+# _RE_FIRST_COMPONENT.match('foo').group(0) == 'foo'
+# _RE_FIRST_COMPONENT.match('foo.cc').group(0) == 'foo'
+# _RE_FIRST_COMPONENT.match('foo-bar_baz.cc').group(0) == 'foo'
+# _RE_FIRST_COMPONENT.match('foo_bar-baz.cc').group(0) == 'foo'
+_RE_FIRST_COMPONENT = re.compile(r'^[^-_.]+')
+
+
+def _ClassifyInclude(fileinfo, include, is_system):
+ """Figures out what kind of header 'include' is.
+
+ Args:
+ fileinfo: The current file cpplint is running over. A FileInfo instance.
+ include: The path to a #included file.
+ is_system: True if the #include used <> rather than "".
+
+ Returns:
+ One of the _XXX_HEADER constants.
+ """
+ if is_system:
+ return _C_SYS_HEADER
+ return _OTHER_HEADER
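+ # For example, '#include <stdio.h>' (is_system is True) is classified as
+ # _C_SYS_HEADER, while '#include "nvim/buffer.h"' is classified as
+ # _OTHER_HEADER.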
+
+
+def CheckIncludeLine(filename, clean_lines, linenum, include_state, error):
+ """Check rules that are applicable to #include lines.
+
+ Strings on #include lines are NOT removed from the elided line, to make
+ certain tasks easier. However, to prevent false positives, checks
+ applicable to #include lines in CheckLanguage must be put here.
+
+ Args:
+ filename : The name of the current file.
+ clean_lines : A CleansedLines instance containing the file.
+ linenum : The number of the line to check.
+ include_state : An _IncludeState instance in which the headers are
+ inserted.
+ error : The function to call with any errors found.
+ """
+ fileinfo = FileInfo(filename)
+
+ line = clean_lines.lines[linenum]
+
+ # "include" should use the new style "foo/bar.h" instead of just "bar.h"
+ # XXX: neovim doesn't currently use this style
+ # if _RE_PATTERN_INCLUDE_NEW_STYLE.search(line):
+ # error(filename, linenum, 'build/include', 4,
+ # 'Include the directory when naming .h files')
+
+ # We shouldn't include a file more than once. Actually, there are a
+ # handful of instances where doing so is okay, but in general it's
+ # not.
+ match = _RE_PATTERN_INCLUDE.search(line)
+ if match:
+ include = match.group(2)
+ is_system = (match.group(1) == '<')
+ if include in include_state:
+ error(filename, linenum, 'build/include', 4,
+ '"%s" already included at %s:%s' %
+ (include, filename, include_state[include]))
+ else:
+ include_state[include] = linenum
+
+ # We want to ensure that headers appear in the right order:
+ # 1) for foo.cc, foo.h (preferred location)
+ # 2) c system files
+ # 3) cpp system files
+ # 4) for foo.cc, foo.h (deprecated location)
+ # 5) other google headers
+ #
+ # We classify each include statement as one of those 5 types
+ # using a number of techniques. The include_state object keeps
+ # track of the highest type seen, and complains if we see a
+ # lower type after that.
+ error_message = include_state.CheckNextIncludeOrder(
+ _ClassifyInclude(fileinfo, include, is_system))
+ if error_message:
+ error(filename, linenum, 'build/include_order', 4,
+ '%s. Should be: c system, c++ system, other.'
+ % error_message)
+ canonical_include = include_state.CanonicalizeAlphabeticalOrder(
+ include)
+ include_state.SetLastHeader(canonical_include)
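+ # Illustrative example: a '#include <string.h>' that appears after
+ # '#include "nvim/buffer.h"' in the same file would typically be reported
+ # as build/include_order, since a c-system header follows an "other"
+ # header.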
+
+
+def _GetTextInside(text, start_pattern):
+ r"""Retrieves all the text between matching open and close parentheses.
+
+ Given a string of lines and a regular expression string, retrieve all the
+ text following the expression and between opening punctuation symbols like
+ (, [, or {, and the matching close-punctuation symbol. This properly handles
+ nested occurrences of the punctuation, so for text like
+ printf(a(), b(c()));
+ a call to _GetTextInside(text, r'printf\(') will return 'a(), b(c())'.
+ start_pattern must match a string that has an opening punctuation symbol at
+ the end.
+
+ Args:
+ text: The lines to extract text from. Its comments and strings must be
+ elided. It can be a single line or span multiple lines.
+ start_pattern: The regexp string indicating where to start extracting
+ the text.
+ Returns:
+ The extracted text.
+ None if either the opening string or ending punctuation couldn't be found.
+ """
+ # TODO(sugawarayu): Audit cpplint.py to see what places could be profitably
+ # rewritten to use _GetTextInside (they currently use inferior regexp matching).
+
+ # Map each opening punctuation symbol to its matching closing symbol.
+ matching_punctuation = {'(': ')', '{': '}', '[': ']'}
+ closing_punctuation = set(matching_punctuation.values())
+
+ # Find the position to start extracting text.
+ match = re.search(start_pattern, text, re.M)
+ if not match: # start_pattern not found in text.
+ return None
+ start_position = match.end(0)
+
+ assert start_position > 0, (
+ 'start_pattern must end with an opening punctuation.')
+ assert text[start_position - 1] in matching_punctuation, (
+ 'start_pattern must end with an opening punctuation.')
+ # Stack of closing punctuations we expect to have in text after position.
+ punctuation_stack = [matching_punctuation[text[start_position - 1]]]
+ position = start_position
+ while punctuation_stack and position < len(text):
+ if text[position] == punctuation_stack[-1]:
+ punctuation_stack.pop()
+ elif text[position] in closing_punctuation:
+ # A closing punctuation without a matching opening punctuation.
+ return None
+ elif text[position] in matching_punctuation:
+ punctuation_stack.append(matching_punctuation[text[position]])
+ position += 1
+ if punctuation_stack:
+ # Opening punctuation left without a matching closing punctuation.
+ return None
+ # All punctuation matched.
+ return text[start_position:position - 1]
+
+
+def CheckLanguage(filename, clean_lines, linenum, file_extension,
+ include_state, nesting_state, error):
+ """Checks rules from the 'C++ language rules' section of cppguide.html.
+
+ Some of these rules are hard to test (function overloading, using
+ uint32 inappropriately), but we do the best we can.
+
+ Args:
+ filename : The name of the current file.
+ clean_lines : A CleansedLines instance containing the file.
+ linenum : The number of the line to check.
+ file_extension : The extension (without the dot) of the filename.
+ include_state : An _IncludeState instance in which the headers are
+ inserted.
+ nesting_state : A _NestingState instance which maintains information
+ about the current stack of nested blocks being parsed.
+ error : The function to call with any errors found.
+ """
+ # If the line is empty or consists entirely of a comment, no need to
+ # check it.
+ line = clean_lines.elided[linenum]
+ if not line:
+ return
+
+ match = _RE_PATTERN_INCLUDE.search(line)
+ if match:
+ CheckIncludeLine(filename, clean_lines, linenum, include_state, error)
+ return
+
+ # Reset include state across preprocessor directives. This is meant
+ # to silence warnings for conditional includes.
+ if Match(r'^\s*#\s*(?:ifdef|elif|else|endif)\b', line):
+ include_state.ResetSection()
+
+ # TODO(unknown): figure out if they're using default arguments in fn proto.
+
+ # Check if people are using the verboten C basic types.
+ match = Search(r'\b(short|long long)\b', line)
+ if match:
+ error(filename, linenum, 'runtime/int', 4,
+ 'Use int16_t/int64_t/etc, rather than the C type %s'
+ % match.group(1))
+
+ # When snprintf is used, the second argument shouldn't be a literal.
+ match = Search(r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,', line)
+ if match and match.group(2) != '0':
+ # If 2nd arg is zero, snprintf is used to calculate size.
+ error(filename, linenum, 'runtime/printf', 3,
+ 'If you can, use sizeof(%s) instead of %s as the 2nd arg '
+ 'to snprintf.' % (match.group(1), match.group(2)))
+
+ # Check if some verboten C functions are being used.
+ if Search(r'\bsprintf\b', line):
+ error(filename, linenum, 'runtime/printf', 5,
+ 'Never use sprintf. Use snprintf instead.')
+ match = Search(r'\b(strcpy|strcat)\b', line)
+ if match:
+ error(filename, linenum, 'runtime/printf', 4,
+ 'Almost always, snprintf is better than %s' % match.group(1))
+
+ # Check for suspicious usage of "if" like
+ # } if (a == b) {
+ if Search(r'\}\s*if\s*\(', line):
+ error(filename, linenum, 'readability/braces', 4,
+ 'Did you mean "else if"? If not, start a new line for "if".')
+
+ # Check for potential format string bugs like printf(foo).
+ # We constrain the pattern not to pick things like DocidForPrintf(foo).
+ # Not perfect but it can catch printf(foo.c_str()) and printf(foo->c_str())
+ # TODO(sugawarayu): Catch the following case. Need to change the calling
+ # convention of the whole function to process multiple line to handle it.
+ # printf(
+ # boy_this_is_a_really_long_variable_that_cannot_fit_on_the_prev_line);
+ printf_args = _GetTextInside(line, r'(?i)\b(string)?printf\s*\(')
+ if printf_args:
+ match = Match(r'([\w.\->()]+)$', printf_args)
+ if match and match.group(1) != '__VA_ARGS__':
+ function_name = re.search(r'\b((?:string)?printf)\s*\(',
+ line, re.I).group(1)
+ error(filename, linenum, 'runtime/printf', 4,
+ 'Potential format string bug. Do %s("%%s", %s) instead.'
+ % (function_name, match.group(1)))
+
+ # Check for potential memset bugs like memset(buf, sizeof(buf), 0).
+ match = Search(r'memset\s*\(([^,]*),\s*([^,]*),\s*0\s*\)', line)
+ if match and not Match(r"^''|-?[0-9]+|0x[0-9A-Fa-f]$", match.group(2)):
+ error(filename, linenum, 'runtime/memset', 4,
+ 'Did you mean "memset(%s, 0, %s)"?'
+ % (match.group(1), match.group(2)))
+
+ # Detect variable-length arrays.
+ match = Match(r'\s*(.+::)?(\w+) [a-z]\w*\[(.+)];', line)
+ if (match and match.group(2) != 'return' and match.group(2) != 'delete' and
+ match.group(3).find(']') == -1):
+ # Split the size using space and arithmetic operators as delimiters.
+ # If any of the resulting tokens are not compile time constants then
+ # report the error.
+ tokens = re.split(r'\s|\+|\-|\*|\/|<<|>>]', match.group(3))
+ is_const = True
+ skip_next = False
+ for tok in tokens:
+ if skip_next:
+ skip_next = False
+ continue
+
+ if Search(r'sizeof\(.+\)', tok):
+ continue
+ if Search(r'arraysize\(\w+\)', tok):
+ continue
+
+ tok = tok.lstrip('(')
+ tok = tok.rstrip(')')
+ if not tok:
+ continue
+ if Match(r'\d+', tok):
+ continue
+ if Match(r'0[xX][0-9a-fA-F]+', tok):
+ continue
+ if Match(r'k[A-Z0-9]\w*', tok):
+ continue
+ if Match(r'(.+::)?k[A-Z0-9]\w*', tok):
+ continue
+ if Match(r'(.+::)?[A-Z][A-Z0-9_]*', tok):
+ continue
+ # A catch-all for tricky sizeof cases ('sizeof expression',
+ # 'sizeof(*type)', 'sizeof(const type)', 'sizeof(struct StructName)'),
+ # which require skipping the next token because we split on ' ' and '*'.
+ if tok.startswith('sizeof'):
+ skip_next = True
+ continue
+ is_const = False
+ break
+ if not is_const:
+ error(filename, linenum, 'runtime/arrays', 1,
+ "Do not use variable-length arrays. Use an appropriately"
+ " named ('k' followed by CamelCase) compile-time constant for"
+ " the size.")
+
+ # Detect TRUE and FALSE.
+ match = Search(r'\b(TRUE|FALSE)\b', line)
+ if match:
+ token = match.group(1)
+ error(filename, linenum, 'readability/bool', 4,
+ 'Use %s instead of %s.' % (token.lower(), token))
+
+ # Detect preincrement/predecrement
+ match = Match(r'^\s*(?:\+\+|--)', line)
+ if match:
+ error(filename, linenum, 'readability/increment', 5,
+ 'Do not use preincrement in statements, '
+ 'use postincrement instead')
+ # Detect preincrement/predecrement in for(;; preincrement)
+ match = Search(r';\s*(\+\+|--)', line)
+ if match:
+ end_pos, end_depth = FindEndOfExpressionInLine(line, match.start(1), 1,
+ '(', ')')
+ expr = line[match.start(1):end_pos]
+ if end_depth == 0 and ';' not in expr and ' = ' not in expr:
+ error(filename, linenum, 'readability/increment', 4,
+ 'Do not use preincrement in statements, including '
+ 'for(;; action)')
+
+
+def ProcessLine(filename, file_extension, clean_lines, line,
+ include_state, function_state, nesting_state, error,
+ extra_check_functions=[]):
+ """Processes a single line in the file.
+
+ Args:
+ filename : Filename of the file that is being processed.
+ file_extension : The extension (dot not included) of the file.
+ clean_lines : An array of strings, each representing a line of
+ the file, with comments stripped.
+ line : Number of line being processed.
+ include_state : An _IncludeState instance in which the headers are
+ inserted.
+ function_state : A _FunctionState instance which counts function
+ lines, etc.
+ nesting_state : A _NestingState instance which maintains
+ information about the current stack of nested
+ blocks being parsed.
+ error : A callable to which errors are reported, which
+ takes 4 arguments: filename, line number, error
+ level, and message
+ extra_check_functions : An array of additional check functions that will
+ be run on each source line. Each function takes 4
+ arguments : filename, clean_lines, line, error
+ """
+ raw_lines = clean_lines.raw_lines
+ init_lines = clean_lines.init_lines
+ ParseNolintSuppressions(filename, raw_lines[line], line, error)
+ nesting_state.Update(filename, clean_lines, line, error)
+ if nesting_state.stack and nesting_state.stack[-1].inline_asm != _NO_ASM:
+ return
+ CheckForFunctionLengths(filename, clean_lines, line, function_state, error)
+ CheckForMultilineCommentsAndStrings(filename, clean_lines, line, error)
+ CheckForOldStyleComments(filename, init_lines[line], line, error)
+ CheckStyle(
+ filename, clean_lines, line, file_extension, nesting_state, error)
+ CheckLanguage(filename, clean_lines, line, file_extension, include_state,
+ nesting_state, error)
+ CheckForNonStandardConstructs(filename, clean_lines, line,
+ nesting_state, error)
+ CheckPosixThreading(filename, clean_lines, line, error)
+ CheckMemoryFunctions(filename, clean_lines, line, error)
+ for check_fn in extra_check_functions:
+ check_fn(filename, clean_lines, line, error)
+
+
+def ProcessFileData(filename, file_extension, lines, error,
+ extra_check_functions=[]):
+ """Performs lint checks and reports any errors to the given error function.
+
+ Args:
+ filename: Filename of the file that is being processed.
+ file_extension: The extension (dot not included) of the file.
+ lines: An array of strings, each representing a line of the file, with the
+ last element being empty if the file is terminated with a newline.
+ error: A callable to which errors are reported, which takes 4 arguments:
+ filename, line number, error level, and message
+ extra_check_functions: An array of additional check functions that will be
+ run on each source line. Each function takes 4
+ arguments: filename, clean_lines, line, error
+ """
+ lines = (['// marker so line numbers and indices both start at 1'] + lines +
+ ['// marker so line numbers end in a known way'])
+
+ include_state = _IncludeState()
+ function_state = _FunctionState()
+ nesting_state = _NestingState()
+
+ ResetNolintSuppressions()
+ ResetKnownErrorSuppressions()
+
+ for line in range(1, len(lines)):
+ ParseKnownErrorSuppressions(filename, lines, line)
+
+ init_lines = lines[:]
+
+ if _cpplint_state.record_errors_file:
+ def RecordedError(filename, linenum, category, confidence, message):
+ if not IsErrorSuppressedByNolint(category, linenum):
+ key = init_lines[linenum - 1 if linenum else 0:linenum + 2]
+ err = [filename, key, category]
+ json.dump(err, _cpplint_state.record_errors_file)
+ _cpplint_state.record_errors_file.write('\n')
+ Error(filename, linenum, category, confidence, message)
+
+ error = RecordedError
+
+ if file_extension == 'h':
+ CheckForHeaderGuard(filename, lines, error)
+
+ RemoveMultiLineComments(filename, lines, error)
+ clean_lines = CleansedLines(lines, init_lines)
+ for line in range(clean_lines.NumLines()):
+ ProcessLine(filename, file_extension, clean_lines, line,
+ include_state, function_state, nesting_state, error,
+ extra_check_functions)
+
+ # We check here rather than inside ProcessLine so that we see raw
+ # lines rather than "cleaned" lines.
+ CheckForBadCharacters(filename, lines, error)
+
+ CheckForNewlineAtEOF(filename, lines, error)
+
+
+def ProcessFile(filename, vlevel, extra_check_functions=[]):
+ """Does neovim-lint on a single file.
+
+ Args:
+ filename: The name of the file to parse.
+
+ vlevel: The level of errors to report. Every error of confidence
+ >= verbose_level will be reported. 0 is a good default.
+
+ extra_check_functions: An array of additional check functions that will be
+ run on each source line. Each function takes 4
+ arguments: filename, clean_lines, line, error
+ """
+
+ _SetVerboseLevel(vlevel)
+
+ try:
+ # Support the Unix convention of using "-" for stdin. Note that
+ # we are not opening the file with universal newline support
+ # (which codecs doesn't support anyway), so the resulting lines do
+ # contain trailing '\r' characters if we are reading a file that
+ # has CRLF endings.
+ # If after the split a trailing '\r' is present, it is removed
+ # below. If it is not expected to be present (i.e. os.linesep !=
+ # '\r\n' as in Windows), a warning is issued below if this file
+ # is processed.
+
+ if filename == '-':
+ lines = codecs.StreamReaderWriter(sys.stdin,
+ codecs.getreader('utf8'),
+ codecs.getwriter('utf8'),
+ 'replace').read().split('\n')
+ else:
+ lines = codecs.open(
+ filename, 'r', 'utf8', 'replace').read().split('\n')
+
+ carriage_return_found = False
+ # Remove trailing '\r'.
+ for linenum in range(len(lines)):
+ if lines[linenum].endswith('\r'):
+ lines[linenum] = lines[linenum].rstrip('\r')
+ carriage_return_found = True
+
+ except IOError:
+ sys.stderr.write(
+ "Skipping input '%s': Can't open for reading\n" % filename)
+ return
+
+ # Note, if no dot is found, this will give the entire filename as the ext.
+ file_extension = filename[filename.rfind('.') + 1:]
+
+ # When reading from stdin, the extension is unknown, so no cpplint tests
+ # should rely on the extension.
+ if filename != '-' and file_extension not in _valid_extensions:
+ sys.stderr.write('Ignoring %s; not a valid file name '
+ '(%s)\n' % (filename, ', '.join(_valid_extensions)))
+ else:
+ ProcessFileData(filename, file_extension, lines, Error,
+ extra_check_functions)
+ if carriage_return_found and os.linesep != '\r\n':
+ # Use 0 for linenum since outputting only one error for potentially
+ # several lines.
+ Error(filename, 0, 'whitespace/newline', 1,
+ 'One or more unexpected \\r (^M) found; '
+ 'better to use only a \\n')
+
+
+def PrintUsage(message):
+ """Prints a brief usage string and exits, optionally with an error message.
+
+ Args:
+ message: The optional error message.
+ """
+ if message:
+ sys.stderr.write(_USAGE)
+ sys.exit('\nFATAL ERROR: ' + message)
+ else:
+ sys.stdout.write(_USAGE)
+ sys.exit(0)
+
+
+def PrintCategories():
+ """Prints a list of all the error-categories used by error messages.
+
+ These are the categories used to filter messages via --filter.
+ """
+ sys.stdout.write(''.join(' %s\n' % cat for cat in _ERROR_CATEGORIES))
+ sys.exit(0)
+
+
+def ParseArguments(args):
+ """Parses the command line arguments.
+
+ This may set the output format and verbosity level as side-effects.
+
+ Args:
+ args: The command line arguments.
+
+ Returns:
+ The list of filenames to lint.
+ """
+ try:
+ (opts, filenames) = getopt.getopt(args, '', ['help',
+ 'output=',
+ 'verbose=',
+ 'counting=',
+ 'filter=',
+ 'root=',
+ 'linelength=',
+ 'extensions=',
+ 'record-errors=',
+ 'suppress-errors='])
+ except getopt.GetoptError:
+ PrintUsage('Invalid arguments.')
+
+ verbosity = _VerboseLevel()
+ output_format = _OutputFormat()
+ filters = ''
+ counting_style = ''
+ record_errors_file = None
+ suppress_errors_file = None
+
+ for (opt, val) in opts:
+ if opt == '--help':
+ PrintUsage(None)
+ elif opt == '--output':
+ if val not in ('emacs', 'vs7', 'eclipse'):
+ PrintUsage('The only allowed output formats are emacs,'
+ ' vs7 and eclipse.')
+ output_format = val
+ elif opt == '--verbose':
+ verbosity = int(val)
+ elif opt == '--filter':
+ filters = val
+ if not filters:
+ PrintCategories()
+ elif opt == '--counting':
+ if val not in ('total', 'toplevel', 'detailed'):
+ PrintUsage(
+ 'Valid counting options are total, toplevel, and detailed')
+ counting_style = val
+ elif opt == '--linelength':
+ global _line_length
+ try:
+ _line_length = int(val)
+ except ValueError:
+ PrintUsage('Line length must be numeric.')
+ elif opt == '--extensions':
+ global _valid_extensions
+ try:
+ _valid_extensions = set(val.split(','))
+ except ValueError:
+ PrintUsage('Extensions must be a comma-separated list.')
+ elif opt == '--record-errors':
+ record_errors_file = val
+ elif opt == '--suppress-errors':
+ suppress_errors_file = val
+
+ if not filenames:
+ PrintUsage('No files were specified.')
+
+ _SetOutputFormat(output_format)
+ _SetVerboseLevel(verbosity)
+ _SetFilters(filters)
+ _SetCountingStyle(counting_style)
+ _SuppressErrorsFrom(suppress_errors_file)
+ _RecordErrorsTo(record_errors_file)
+
+ return filenames
+
+
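+# Typical invocation (illustrative paths; run from the repository root):
+#   ./src/clint.py --verbose=2 src/nvim/buffer.c src/nvim/buffer.h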
+def main():
+ filenames = ParseArguments(sys.argv[1:])
+
+ _cpplint_state.ResetErrorCounts()
+ for filename in filenames:
+ ProcessFile(filename, _cpplint_state.verbose_level)
+ _cpplint_state.PrintErrorCounts()
+
+ sys.exit(_cpplint_state.error_count > 0)
+
+
+if __name__ == '__main__':
+ main()
+
+# vim: ts=4 sts=4 sw=4
+
+# Ignore "too complex" warnings when using pymode.
+# pylama:ignore=C901
diff --git a/src/nvim/CMakeLists.txt b/src/nvim/CMakeLists.txt
index 20927ec899..feb17e070e 100644
--- a/src/nvim/CMakeLists.txt
+++ b/src/nvim/CMakeLists.txt
@@ -78,6 +78,9 @@ foreach(sfile ${NEOVIM_SOURCES})
if(${f} MATCHES "^(regexp_nfa.c)$")
list(APPEND to_remove ${sfile})
endif()
+ if(WIN32 AND ${f} MATCHES "^(pty_process_unix.c)$")
+ list(APPEND to_remove ${sfile})
+ endif()
endforeach()
list(REMOVE_ITEM NEOVIM_SOURCES ${to_remove})
@@ -268,7 +271,7 @@ if(CLANG_ASAN_UBSAN)
set(SANITIZE_RECOVER -fno-sanitize-recover) # Clang 3.5-
endif()
set_property(TARGET nvim APPEND_STRING PROPERTY COMPILE_FLAGS "-DEXITFREE ")
- set_property(TARGET nvim APPEND_STRING PROPERTY COMPILE_FLAGS "${SANITIZE_RECOVER} -fno-omit-frame-pointer -fno-optimize-sibling-calls -fsanitize=address -fsanitize=undefined -fsanitize-blacklist=${PROJECT_SOURCE_DIR}/.asan-blacklist")
+ set_property(TARGET nvim APPEND_STRING PROPERTY COMPILE_FLAGS "${SANITIZE_RECOVER} -fno-omit-frame-pointer -fno-optimize-sibling-calls -fsanitize=address -fsanitize=undefined -fsanitize-blacklist=${PROJECT_SOURCE_DIR}/src/.asan-blacklist")
set_property(TARGET nvim APPEND_STRING PROPERTY LINK_FLAGS "-fsanitize=address -fsanitize=undefined ")
elseif(CLANG_MSAN)
message(STATUS "Enabling Clang memory sanitizer for nvim.")
diff --git a/src/nvim/api/private/defs.h b/src/nvim/api/private/defs.h
index fbfa87d5ae..80a88becf4 100644
--- a/src/nvim/api/private/defs.h
+++ b/src/nvim/api/private/defs.h
@@ -12,8 +12,8 @@
#define REMOTE_TYPE(type) typedef uint64_t type
#ifdef INCLUDE_GENERATED_DECLARATIONS
- #define ArrayOf(...) Array
- #define DictionaryOf(...) Dictionary
+# define ArrayOf(...) Array
+# define DictionaryOf(...) Dictionary
#endif
// Basic types
diff --git a/src/nvim/api/private/handle.c b/src/nvim/api/private/handle.c
index abbda95073..69df7294ad 100644
--- a/src/nvim/api/private/handle.c
+++ b/src/nvim/api/private/handle.c
@@ -7,24 +7,24 @@
#define HANDLE_INIT(name) name##_handles = pmap_new(uint64_t)()
-#define HANDLE_IMPL(type, name) \
- static PMap(uint64_t) *name##_handles = NULL; \
- \
- type *handle_get_##name(uint64_t handle) \
- { \
- return pmap_get(uint64_t)(name##_handles, handle); \
- } \
- \
- void handle_register_##name(type *name) \
- { \
- assert(!name->handle); \
- name->handle = next_handle++; \
- pmap_put(uint64_t)(name##_handles, name->handle, name); \
- } \
- \
- void handle_unregister_##name(type *name) \
- { \
- pmap_del(uint64_t)(name##_handles, name->handle); \
+#define HANDLE_IMPL(type, name) \
+ static PMap(uint64_t) *name##_handles = NULL; \
+ \
+ type *handle_get_##name(uint64_t handle) \
+ { \
+ return pmap_get(uint64_t)(name##_handles, handle); \
+ } \
+ \
+ void handle_register_##name(type *name) \
+ { \
+ assert(!name->handle); \
+ name->handle = next_handle++; \
+ pmap_put(uint64_t)(name##_handles, name->handle, name); \
+ } \
+ \
+ void handle_unregister_##name(type *name) \
+ { \
+ pmap_del(uint64_t)(name##_handles, name->handle); \
}
static uint64_t next_handle = 1;
diff --git a/src/nvim/api/private/handle.h b/src/nvim/api/private/handle.h
index 1a196f6797..804e266dc3 100644
--- a/src/nvim/api/private/handle.h
+++ b/src/nvim/api/private/handle.h
@@ -4,9 +4,9 @@
#include "nvim/vim.h"
#include "nvim/buffer_defs.h"
-#define HANDLE_DECLS(type, name) \
- type *handle_get_##name(uint64_t handle); \
- void handle_register_##name(type *name); \
+#define HANDLE_DECLS(type, name) \
+ type *handle_get_##name(uint64_t handle); \
+ void handle_register_##name(type *name); \
void handle_unregister_##name(type *name);
HANDLE_DECLS(buf_T, buffer)
diff --git a/src/nvim/api/private/helpers.h b/src/nvim/api/private/helpers.h
index a0f14ac7a4..731f186ecc 100644
--- a/src/nvim/api/private/helpers.h
+++ b/src/nvim/api/private/helpers.h
@@ -8,66 +8,56 @@
#include "nvim/memory.h"
#include "nvim/lib/kvec.h"
-#define api_set_error(err, errtype, ...) \
- do { \
- snprintf((err)->msg, \
- sizeof((err)->msg), \
- __VA_ARGS__); \
- (err)->set = true; \
- (err)->type = kErrorType##errtype; \
+#define api_set_error(err, errtype, ...) \
+ do { \
+ snprintf((err)->msg, \
+ sizeof((err)->msg), \
+ __VA_ARGS__); \
+ (err)->set = true; \
+ (err)->type = kErrorType##errtype; \
} while (0)
#define OBJECT_OBJ(o) o
-#define BOOLEAN_OBJ(b) ((Object) { \
- .type = kObjectTypeBoolean, \
- .data.boolean = b \
- })
-
-#define INTEGER_OBJ(i) ((Object) { \
- .type = kObjectTypeInteger, \
- .data.integer = i \
- })
-
-#define STRING_OBJ(s) ((Object) { \
- .type = kObjectTypeString, \
- .data.string = s \
- })
-
-#define BUFFER_OBJ(s) ((Object) { \
- .type = kObjectTypeBuffer, \
- .data.buffer = s \
- })
-
-#define WINDOW_OBJ(s) ((Object) { \
- .type = kObjectTypeWindow, \
- .data.window = s \
- })
-
-#define TABPAGE_OBJ(s) ((Object) { \
- .type = kObjectTypeTabpage, \
- .data.tabpage = s \
- })
-
-#define ARRAY_OBJ(a) ((Object) { \
- .type = kObjectTypeArray, \
- .data.array = a \
- })
-
-#define DICTIONARY_OBJ(d) ((Object) { \
- .type = kObjectTypeDictionary, \
- .data.dictionary = d \
- })
+#define BOOLEAN_OBJ(b) ((Object) { \
+ .type = kObjectTypeBoolean, \
+ .data.boolean = b })
+
+#define INTEGER_OBJ(i) ((Object) { \
+ .type = kObjectTypeInteger, \
+ .data.integer = i })
+
+#define STRING_OBJ(s) ((Object) { \
+ .type = kObjectTypeString, \
+ .data.string = s })
+
+#define BUFFER_OBJ(s) ((Object) { \
+ .type = kObjectTypeBuffer, \
+ .data.buffer = s })
+
+#define WINDOW_OBJ(s) ((Object) { \
+ .type = kObjectTypeWindow, \
+ .data.window = s })
+
+#define TABPAGE_OBJ(s) ((Object) { \
+ .type = kObjectTypeTabpage, \
+ .data.tabpage = s })
+
+#define ARRAY_OBJ(a) ((Object) { \
+ .type = kObjectTypeArray, \
+ .data.array = a })
+
+#define DICTIONARY_OBJ(d) ((Object) { \
+ .type = kObjectTypeDictionary, \
+ .data.dictionary = d })
#define NIL ((Object) {.type = kObjectTypeNil})
-#define PUT(dict, k, v) \
- kv_push(KeyValuePair, \
- dict, \
- ((KeyValuePair) {.key = cstr_to_string(k), .value = v}))
+#define PUT(dict, k, v) \
+ kv_push(dict, ((KeyValuePair) { .key = cstr_to_string(k), .value = v }))
-#define ADD(array, item) \
- kv_push(Object, array, item)
+#define ADD(array, item) \
+ kv_push(array, item)
#define STATIC_CSTR_AS_STRING(s) ((String) {.data = s, .size = sizeof(s) - 1})
diff --git a/src/nvim/api/vim.c b/src/nvim/api/vim.c
index 46d72b847d..ac7cc65ee4 100644
--- a/src/nvim/api/vim.c
+++ b/src/nvim/api/vim.c
@@ -57,6 +57,7 @@ void vim_feedkeys(String keys, String mode, Boolean escape_csi)
bool remap = true;
bool insert = false;
bool typed = false;
+ bool execute = false;
if (keys.size == 0) {
return;
@@ -68,6 +69,7 @@ void vim_feedkeys(String keys, String mode, Boolean escape_csi)
case 'm': remap = true; break;
case 't': typed = true; break;
case 'i': insert = true; break;
+ case 'x': execute = true; break;
}
}
@@ -86,8 +88,12 @@ void vim_feedkeys(String keys, String mode, Boolean escape_csi)
xfree(keys_esc);
}
- if (vgetc_busy)
+ if (vgetc_busy) {
typebuf_was_filled = true;
+ }
+ if (execute) {
+ exec_normal(true);
+ }
}
/// Passes input keys to Neovim. Unlike `vim_feedkeys`, this will use a
@@ -641,14 +647,14 @@ static void write_msg(String message, bool to_err)
static size_t out_pos = 0, err_pos = 0;
static char out_line_buf[LINE_BUFFER_SIZE], err_line_buf[LINE_BUFFER_SIZE];
-#define PUSH_CHAR(i, pos, line_buf, msg) \
- if (message.data[i] == NL || pos == LINE_BUFFER_SIZE - 1) { \
- line_buf[pos] = NUL; \
- msg((uint8_t *)line_buf); \
- pos = 0; \
- continue; \
- } \
- \
+#define PUSH_CHAR(i, pos, line_buf, msg) \
+ if (message.data[i] == NL || pos == LINE_BUFFER_SIZE - 1) { \
+ line_buf[pos] = NUL; \
+ msg((uint8_t *)line_buf); \
+ pos = 0; \
+ continue; \
+ } \
+ \
line_buf[pos++] = message.data[i];
++no_wait_return;
diff --git a/src/nvim/buffer.c b/src/nvim/buffer.c
index 72716daf0e..71ec20c788 100644
--- a/src/nvim/buffer.c
+++ b/src/nvim/buffer.c
@@ -1534,6 +1534,7 @@ void free_buf_options(buf_T *buf, int free_p_ff)
clear_string_option(&buf->b_p_cms);
clear_string_option(&buf->b_p_nf);
clear_string_option(&buf->b_p_syn);
+ clear_string_option(&buf->b_s.b_syn_isk);
clear_string_option(&buf->b_s.b_p_spc);
clear_string_option(&buf->b_s.b_p_spf);
vim_regfree(buf->b_s.b_cap_prog);
@@ -4950,7 +4951,7 @@ int bufhl_add_hl(buf_T *buf,
bufhl_vec_T* lineinfo = map_ref(linenr_T, bufhl_vec_T)(buf->b_bufhl_info,
lnum, true);
- bufhl_hl_item_T *hlentry = kv_pushp(bufhl_hl_item_T, *lineinfo);
+ bufhl_hl_item_T *hlentry = kv_pushp(*lineinfo);
hlentry->src_id = src_id;
hlentry->hl_id = hl_id;
hlentry->start = col_start;
diff --git a/src/nvim/buffer.h b/src/nvim/buffer.h
index d51a2f7dae..36cbec7e60 100644
--- a/src/nvim/buffer.h
+++ b/src/nvim/buffer.h
@@ -76,14 +76,14 @@ static inline void restore_win_for_buf(win_T *save_curwin,
}
}
-#define WITH_BUFFER(b, code) \
- do { \
- buf_T *save_curbuf = NULL; \
- win_T *save_curwin = NULL; \
- tabpage_T *save_curtab = NULL; \
- switch_to_win_for_buf(b, &save_curwin, &save_curtab, &save_curbuf); \
- code; \
- restore_win_for_buf(save_curwin, save_curtab, save_curbuf); \
+#define WITH_BUFFER(b, code) \
+ do { \
+ buf_T *save_curbuf = NULL; \
+ win_T *save_curwin = NULL; \
+ tabpage_T *save_curtab = NULL; \
+ switch_to_win_for_buf(b, &save_curwin, &save_curtab, &save_curbuf); \
+ code; \
+ restore_win_for_buf(save_curwin, save_curtab, save_curbuf); \
} while (0)
diff --git a/src/nvim/buffer_defs.h b/src/nvim/buffer_defs.h
index 0324f6b88a..b515c4e1e4 100644
--- a/src/nvim/buffer_defs.h
+++ b/src/nvim/buffer_defs.h
@@ -438,15 +438,17 @@ typedef struct {
linenr_T b_sst_check_lnum;
uint16_t b_sst_lasttick; /* last display tick */
- /* for spell checking */
- garray_T b_langp; /* list of pointers to slang_T, see spell.c */
- bool b_spell_ismw[256]; /* flags: is midword char */
- char_u *b_spell_ismw_mb; /* multi-byte midword chars */
- char_u *b_p_spc; /* 'spellcapcheck' */
- regprog_T *b_cap_prog; /* program for 'spellcapcheck' */
- char_u *b_p_spf; /* 'spellfile' */
- char_u *b_p_spl; /* 'spelllang' */
- int b_cjk; /* all CJK letters as OK */
+ // for spell checking
+ garray_T b_langp; // list of pointers to slang_T, see spell.c
+ bool b_spell_ismw[256]; // flags: is midword char
+ char_u *b_spell_ismw_mb; // multi-byte midword chars
+ char_u *b_p_spc; // 'spellcapcheck'
+ regprog_T *b_cap_prog; // program for 'spellcapcheck'
+ char_u *b_p_spf; // 'spellfile'
+ char_u *b_p_spl; // 'spelllang'
+ int b_cjk; // all CJK letters as OK
+ char_u b_syn_chartab[32]; // syntax iskeyword option
+ char_u *b_syn_isk; // iskeyword option
} synblock_T;
diff --git a/src/nvim/charset.c b/src/nvim/charset.c
index d0dc7b66fc..5ae4416052 100644
--- a/src/nvim/charset.c
+++ b/src/nvim/charset.c
@@ -1582,7 +1582,7 @@ static char_u latin1lower[257] =
///
/// @param c character to check
bool vim_islower(int c)
- FUNC_ATTR_PURE FUNC_ATTR_WARN_UNUSED_RESULT FUNC_ATTR_NONNULL_ALL
+ FUNC_ATTR_PURE FUNC_ATTR_WARN_UNUSED_RESULT
{
if (c <= '@') {
return false;
@@ -1613,7 +1613,7 @@ bool vim_islower(int c)
///
/// @param c character to check
bool vim_isupper(int c)
- FUNC_ATTR_PURE FUNC_ATTR_WARN_UNUSED_RESULT FUNC_ATTR_NONNULL_ALL
+ FUNC_ATTR_PURE FUNC_ATTR_WARN_UNUSED_RESULT
{
if (c <= '@') {
return false;
diff --git a/src/nvim/edit.c b/src/nvim/edit.c
index 74efadc9e4..03ef41f849 100644
--- a/src/nvim/edit.c
+++ b/src/nvim/edit.c
@@ -961,7 +961,7 @@ static int insert_handle_key(InsertState *s)
break;
case K_EVENT: // some event
- queue_process_events(loop.events);
+ queue_process_events(main_loop.events);
break;
case K_FOCUSGAINED: // Neovim has been given focus
diff --git a/src/nvim/eval.c b/src/nvim/eval.c
index dce3556de6..111105fa37 100644
--- a/src/nvim/eval.c
+++ b/src/nvim/eval.c
@@ -67,6 +67,7 @@
#include "nvim/syntax.h"
#include "nvim/tag.h"
#include "nvim/ui.h"
+#include "nvim/main.h"
#include "nvim/mouse.h"
#include "nvim/terminal.h"
#include "nvim/undo.h"
@@ -76,7 +77,7 @@
#include "nvim/eval/decode.h"
#include "nvim/os/os.h"
#include "nvim/event/libuv_process.h"
-#include "nvim/event/pty_process.h"
+#include "nvim/os/pty_process.h"
#include "nvim/event/rstream.h"
#include "nvim/event/wstream.h"
#include "nvim/event/time.h"
@@ -507,6 +508,7 @@ void eval_init(void)
/* add to compat scope dict */
hash_add(&compat_hashtab, p->vv_di.di_key);
}
+ vimvars[VV_VERSION].vv_nr = VIM_VERSION_100;
dict_T *const msgpack_types_dict = dict_alloc();
for (size_t i = 0; i < ARRAY_SIZE(msgpack_type_names); i++) {
@@ -2977,11 +2979,16 @@ int do_unlet(char_u *name, int forceit)
} else if (current_funccal != NULL
&& ht == &current_funccal->l_vars.dv_hashtab) {
d = &current_funccal->l_vars;
+ } else if (ht == &compat_hashtab) {
+ d = &vimvardict;
} else {
di = find_var_in_ht(ht, *name, (char_u *)"", false);
d = di->di_tv.vval.v_dict;
}
-
+ if (d == NULL) {
+ EMSG2(_(e_intern2), "do_unlet()");
+ return FAIL;
+ }
hi = hash_find(ht, varname);
if (!HASHITEM_EMPTY(hi)) {
di = HI2DI(hi);
@@ -2990,6 +2997,11 @@ int do_unlet(char_u *name, int forceit)
|| tv_check_lock(d->dv_lock, name, false)) {
return FAIL;
}
+
+ if (d == NULL || tv_check_lock(d->dv_lock, name, false)) {
+ return FAIL;
+ }
+
typval_T oldtv;
bool watched = is_watched(dict);
@@ -9887,7 +9899,7 @@ static void f_getcmdwintype(typval_T *argvars, typval_T *rettv)
static void f_getcwd(typval_T *argvars, typval_T *rettv)
{
// Possible scope of working directory to return.
- CdScope scope = MIN_CD_SCOPE;
+ CdScope scope = kCdScopeInvalid;
// Numbers of the scope objects (window, tab) we want the working directory
// of. A `-1` means to skip this scope, a `0` means the current object.
@@ -9916,26 +9928,27 @@ static void f_getcwd(typval_T *argvars, typval_T *rettv)
return;
}
scope_number[i] = argvars[i].vval.v_number;
- // The scope is the current iteration step.
- scope = i;
// It is an error for the scope number to be less than `-1`.
if (scope_number[i] < -1) {
EMSG(_(e_invarg));
return;
}
+ // Use the narrowest scope the user requested
+ if (scope_number[i] >= 0 && scope == kCdScopeInvalid) {
+ // The scope is the current iteration step.
+ scope = i;
+ } else if (scope_number[i] < 0) {
+ scope = i + 1;
+ }
}
- // Normalize scope, the number of the new scope will be 0.
- if (scope_number[scope] < 0) {
- // Arguments to `getcwd` always end at second-highest scope, so scope will
- // always be <= `MAX_CD_SCOPE`.
- scope++;
+ // If the user didn't specify anything, default to window scope
+ if (scope == kCdScopeInvalid) {
+ scope = MIN_CD_SCOPE;
}
// Find the tabpage by number
- if (scope_number[kCdScopeTab] == -1) {
- tp = NULL;
- } else if (scope_number[kCdScopeTab] > 0) {
+ if (scope_number[kCdScopeTab] > 0) {
tp = find_tabpage(scope_number[kCdScopeTab]);
if (!tp) {
EMSG(_("E5000: Cannot find tab number."));
@@ -9944,16 +9957,14 @@ static void f_getcwd(typval_T *argvars, typval_T *rettv)
}
// Find the window in `tp` by number, `NULL` if none.
- if (scope_number[kCdScopeWindow] == -1) {
- win = NULL;
- } else if (scope_number[kCdScopeWindow] >= 0) {
- if (!tp) {
+ if (scope_number[kCdScopeWindow] >= 0) {
+ if (scope_number[kCdScopeTab] < 0) {
EMSG(_("E5001: Higher scope cannot be -1 if lower scope is >= 0."));
return;
}
if (scope_number[kCdScopeWindow] > 0) {
- win = find_win_by_nr(&argvars[0], curtab);
+ win = find_win_by_nr(&argvars[0], tp);
if (!win) {
EMSG(_("E5002: Cannot find window number."));
return;
@@ -9988,6 +9999,9 @@ static void f_getcwd(typval_T *argvars, typval_T *rettv)
}
}
break;
+ case kCdScopeInvalid:
+ // We should never get here
+ assert(false);
}
if (from) {
@@ -10835,7 +10849,7 @@ static void f_has_key(typval_T *argvars, typval_T *rettv)
static void f_haslocaldir(typval_T *argvars, typval_T *rettv)
{
// Possible scope of working directory to return.
- CdScope scope = MIN_CD_SCOPE;
+ CdScope scope = kCdScopeInvalid;
// Numbers of the scope objects (window, tab) we want the working directory
// of. A `-1` means to skip this scope, a `0` means the current object.
@@ -10860,25 +10874,26 @@ static void f_haslocaldir(typval_T *argvars, typval_T *rettv)
return;
}
scope_number[i] = argvars[i].vval.v_number;
- // The scope is the current iteration step.
- scope = i;
if (scope_number[i] < -1) {
EMSG(_(e_invarg));
return;
}
+ // Use the narrowest scope the user requested
+ if (scope_number[i] >= 0 && scope == kCdScopeInvalid) {
+ // The scope is the current iteration step.
+ scope = i;
+ } else if (scope_number[i] < 0) {
+ scope = i + 1;
+ }
}
- // Normalize scope, the number of the new scope will be 0.
- if (scope_number[scope] < 0) {
- // Arguments to `haslocaldir` always end at second-highest scope, so scope
- // will always be <= `MAX_CD_SCOPE`.
- scope++;
+ // If the user didn't specify anything, default to window scope
+ if (scope == kCdScopeInvalid) {
+ scope = MIN_CD_SCOPE;
}
// Find the tabpage by number
- if (scope_number[kCdScopeTab] == -1) {
- tp = NULL;
- } else if (scope_number[kCdScopeTab] > 0) {
+ if (scope_number[kCdScopeTab] > 0) {
tp = find_tabpage(scope_number[kCdScopeTab]);
if (!tp) {
EMSG(_("5000: Cannot find tab number."));
@@ -10887,16 +10902,14 @@ static void f_haslocaldir(typval_T *argvars, typval_T *rettv)
}
// Find the window in `tp` by number, `NULL` if none.
- if (scope_number[kCdScopeWindow] == -1) {
- win = NULL;
- } else if (scope_number[kCdScopeWindow] >= 0) {
- if (!tp) {
+ if (scope_number[kCdScopeWindow] >= 0) {
+ if (scope_number[kCdScopeTab] < 0) {
EMSG(_("E5001: Higher scope cannot be -1 if lower scope is >= 0."));
return;
}
if (scope_number[kCdScopeWindow] > 0) {
- win = find_win_by_nr(&argvars[0], curtab);
+ win = find_win_by_nr(&argvars[0], tp);
if (!win) {
EMSG(_("E5002: Cannot find window number."));
return;
@@ -10917,6 +10930,9 @@ static void f_haslocaldir(typval_T *argvars, typval_T *rettv)
// The global scope never has a local directory
rettv->vval.v_number = 0;
break;
+ case kCdScopeInvalid:
+ // We should never get here
+ assert(false);
}
}
@@ -11726,8 +11742,21 @@ static void f_jobstart(typval_T *argvars, typval_T *rettv)
dict_T *job_opts = NULL;
ufunc_T *on_stdout = NULL, *on_stderr = NULL, *on_exit = NULL;
+ char *cwd = NULL;
if (argvars[1].v_type == VAR_DICT) {
job_opts = argvars[1].vval.v_dict;
+
+ char *new_cwd = (char *)get_dict_string(job_opts, (char_u *)"cwd", false);
+ if (new_cwd && strlen(new_cwd) > 0) {
+ cwd = new_cwd;
+ // The new cwd must be a directory.
+ if (!os_isdir((char_u *)cwd)) {
+ EMSG2(_(e_invarg2), "expected valid directory");
+ shell_free_argv(argv);
+ return;
+ }
+ }
+
if (!common_job_callbacks(job_opts, &on_stdout, &on_stderr, &on_exit)) {
shell_free_argv(argv);
return;
@@ -11737,7 +11766,7 @@ static void f_jobstart(typval_T *argvars, typval_T *rettv)
bool pty = job_opts && get_dict_number(job_opts, (uint8_t *)"pty") != 0;
bool detach = job_opts && get_dict_number(job_opts, (uint8_t *)"detach") != 0;
TerminalJobData *data = common_job_init(argv, on_stdout, on_stderr, on_exit,
- job_opts, pty, detach);
+ job_opts, pty, detach, cwd);
Process *proc = (Process *)&data->proc;
if (pty) {
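
Both f_jobstart() here and f_termopen() further down now accept an optional "cwd" key in the options dictionary and refuse to start the job unless it names an existing directory. A standalone sketch of that gate (valid_job_cwd() is a hypothetical helper; POSIX stat() stands in for Nvim's os_isdir()):

#include <stdbool.h>
#include <stdio.h>
#include <sys/stat.h>

// Accept a "cwd" job option only when it names an existing directory; an
// empty or absent option means "inherit the editor's working directory".
static bool valid_job_cwd(const char *cwd)
{
  if (cwd == NULL || *cwd == '\0') {
    return true;
  }
  struct stat st;
  return stat(cwd, &st) == 0 && S_ISDIR(st.st_mode);
}

int main(void)
{
  printf("/tmp         -> %s\n", valid_job_cwd("/tmp") ? "ok" : "rejected");
  printf("/no/such/dir -> %s\n", valid_job_cwd("/no/such/dir") ? "ok" : "rejected");
  printf("(unset)      -> %s\n", valid_job_cwd(NULL) ? "ok" : "rejected");
  return 0;
}
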
@@ -11812,7 +11841,7 @@ static void f_jobwait(typval_T *argvars, typval_T *rettv)
list_T *rv = list_alloc();
ui_busy_start();
- Queue *waiting_jobs = queue_new_parent(loop_on_put, &loop);
+ Queue *waiting_jobs = queue_new_parent(loop_on_put, &main_loop);
// For each item in the input list append an integer to the output list. -3
// is used to represent an invalid job id, -2 is for an interrupted job and
// -1 for jobs that were skipped or timed out.
@@ -11890,7 +11919,7 @@ static void f_jobwait(typval_T *argvars, typval_T *rettv)
}
// restore the parent queue for the job
queue_process_events(data->events);
- queue_replace_parent(data->events, loop.events);
+ queue_replace_parent(data->events, main_loop.events);
}
queue_free(waiting_jobs);
@@ -15080,13 +15109,18 @@ typedef struct {
int idx;
} sortItem_T;
-static int item_compare_ic;
-static bool item_compare_numeric;
-static bool item_compare_numbers;
-static bool item_compare_float;
-static char_u *item_compare_func;
-static dict_T *item_compare_selfdict;
-static int item_compare_func_err;
+/// Struct storing information about the current sort
+typedef struct {
+ int item_compare_ic;
+ bool item_compare_numeric;
+ bool item_compare_numbers;
+ bool item_compare_float;
+ char_u *item_compare_func;
+ dict_T *item_compare_selfdict;
+ int item_compare_func_err;
+} sortinfo_T;
+static sortinfo_T *sortinfo = NULL;
+
#define ITEM_COMPARE_FAIL 999
/*
@@ -15106,14 +15140,14 @@ static int item_compare(const void *s1, const void *s2, bool keep_zero)
typval_T *tv1 = &si1->item->li_tv;
typval_T *tv2 = &si2->item->li_tv;
- if (item_compare_numbers) {
+ if (sortinfo->item_compare_numbers) {
long v1 = get_tv_number(tv1);
long v2 = get_tv_number(tv2);
return v1 == v2 ? 0 : v1 > v2 ? 1 : -1;
}
- if (item_compare_float) {
+ if (sortinfo->item_compare_float) {
float_T v1 = get_tv_float(tv1);
float_T v2 = get_tv_float(tv2);
@@ -15124,7 +15158,7 @@ static int item_compare(const void *s1, const void *s2, bool keep_zero)
// do that for string variables. Use a single quote when comparing with
// a non-string to do what the docs promise.
if (tv1->v_type == VAR_STRING) {
- if (tv2->v_type != VAR_STRING || item_compare_numeric) {
+ if (tv2->v_type != VAR_STRING || sortinfo->item_compare_numeric) {
p1 = (char_u *)"'";
} else {
p1 = tv1->vval.v_string;
@@ -15133,7 +15167,7 @@ static int item_compare(const void *s1, const void *s2, bool keep_zero)
tofree1 = p1 = (char_u *) encode_tv2string(tv1, NULL);
}
if (tv2->v_type == VAR_STRING) {
- if (tv1->v_type != VAR_STRING || item_compare_numeric) {
+ if (tv1->v_type != VAR_STRING || sortinfo->item_compare_numeric) {
p2 = (char_u *)"'";
} else {
p2 = tv2->vval.v_string;
@@ -15141,12 +15175,14 @@ static int item_compare(const void *s1, const void *s2, bool keep_zero)
} else {
tofree2 = p2 = (char_u *) encode_tv2string(tv2, NULL);
}
- if (p1 == NULL)
+ if (p1 == NULL) {
p1 = (char_u *)"";
- if (p2 == NULL)
+ }
+ if (p2 == NULL) {
p2 = (char_u *)"";
- if (!item_compare_numeric) {
- if (item_compare_ic) {
+ }
+ if (!sortinfo->item_compare_numeric) {
+ if (sortinfo->item_compare_ic) {
res = STRICMP(p1, p2);
} else {
res = STRCMP(p1, p2);
@@ -15187,9 +15223,10 @@ static int item_compare2(const void *s1, const void *s2, bool keep_zero)
typval_T argv[3];
int dummy;
- /* shortcut after failure in previous call; compare all items equal */
- if (item_compare_func_err)
+ // shortcut after failure in previous call; compare all items equal
+ if (sortinfo->item_compare_func_err) {
return 0;
+ }
si1 = (sortItem_T *)s1;
si2 = (sortItem_T *)s2;
@@ -15199,19 +15236,22 @@ static int item_compare2(const void *s1, const void *s2, bool keep_zero)
copy_tv(&si1->item->li_tv, &argv[0]);
copy_tv(&si2->item->li_tv, &argv[1]);
- rettv.v_type = VAR_UNKNOWN; /* clear_tv() uses this */
- res = call_func(item_compare_func, (int)STRLEN(item_compare_func),
- &rettv, 2, argv, 0L, 0L, &dummy, TRUE,
- item_compare_selfdict);
+ rettv.v_type = VAR_UNKNOWN; // clear_tv() uses this
+ res = call_func(sortinfo->item_compare_func,
+ (int)STRLEN(sortinfo->item_compare_func),
+ &rettv, 2, argv, 0L, 0L, &dummy, true,
+ sortinfo->item_compare_selfdict);
clear_tv(&argv[0]);
clear_tv(&argv[1]);
- if (res == FAIL)
+ if (res == FAIL) {
res = ITEM_COMPARE_FAIL;
- else
- res = get_tv_number_chk(&rettv, &item_compare_func_err);
- if (item_compare_func_err)
- res = ITEM_COMPARE_FAIL; /* return value has wrong type */
+ } else {
+ res = get_tv_number_chk(&rettv, &sortinfo->item_compare_func_err);
+ }
+ if (sortinfo->item_compare_func_err) {
+ res = ITEM_COMPARE_FAIL; // return value has wrong type
+ }
clear_tv(&rettv);
// When the result would be zero, compare the pointers themselves. Makes
@@ -15244,6 +15284,12 @@ static void do_sort_uniq(typval_T *argvars, typval_T *rettv, bool sort)
long len;
long i;
+ // Pointer to the current info struct used by the compare functions. Save and
+ // restore the outer value to support nested sort()/uniq() calls.
+ sortinfo_T info;
+ sortinfo_T *old_sortinfo = sortinfo;
+ sortinfo = &info;
+
if (argvars[0].v_type != VAR_LIST) {
EMSG2(_(e_listarg), sort ? "sort()" : "uniq()");
} else {
@@ -15254,61 +15300,70 @@ static void do_sort_uniq(typval_T *argvars, typval_T *rettv, bool sort)
? N_("sort() argument")
: N_("uniq() argument")),
true)) {
- return;
+ goto theend;
}
rettv->vval.v_list = l;
rettv->v_type = VAR_LIST;
++l->lv_refcount;
len = list_len(l);
- if (len <= 1)
- return; /* short list sorts pretty quickly */
+ if (len <= 1) {
+ goto theend; // short list sorts pretty quickly
+ }
- item_compare_ic = FALSE;
- item_compare_numeric = false;
- item_compare_numbers = false;
- item_compare_float = false;
- item_compare_func = NULL;
- item_compare_selfdict = NULL;
+ info.item_compare_ic = false;
+ info.item_compare_numeric = false;
+ info.item_compare_numbers = false;
+ info.item_compare_float = false;
+ info.item_compare_func = NULL;
+ info.item_compare_selfdict = NULL;
if (argvars[1].v_type != VAR_UNKNOWN) {
/* optional second argument: {func} */
if (argvars[1].v_type == VAR_FUNC) {
- item_compare_func = argvars[1].vval.v_string;
+ info.item_compare_func = argvars[1].vval.v_string;
} else {
int error = FALSE;
i = get_tv_number_chk(&argvars[1], &error);
- if (error)
- return; /* type error; errmsg already given */
- if (i == 1)
- item_compare_ic = TRUE;
- else
- item_compare_func = get_tv_string(&argvars[1]);
- if (item_compare_func != NULL) {
- if (STRCMP(item_compare_func, "n") == 0) {
- item_compare_func = NULL;
- item_compare_numeric = true;
- } else if (STRCMP(item_compare_func, "N") == 0) {
- item_compare_func = NULL;
- item_compare_numbers = true;
- } else if (STRCMP(item_compare_func, "f") == 0) {
- item_compare_func = NULL;
- item_compare_float = true;
- } else if (STRCMP(item_compare_func, "i") == 0) {
- item_compare_func = NULL;
- item_compare_ic = TRUE;
+ if (error) {
+ goto theend; // type error; errmsg already given
+ }
+ if (i == 1) {
+ info.item_compare_ic = true;
+ } else if (argvars[1].v_type != VAR_NUMBER) {
+ info.item_compare_func = get_tv_string(&argvars[1]);
+ } else if (i != 0) {
+ EMSG(_(e_invarg));
+ goto theend;
+ }
+ if (info.item_compare_func != NULL) {
+ if (*info.item_compare_func == NUL) {
+ // empty string means default sort
+ info.item_compare_func = NULL;
+ } else if (STRCMP(info.item_compare_func, "n") == 0) {
+ info.item_compare_func = NULL;
+ info.item_compare_numeric = true;
+ } else if (STRCMP(info.item_compare_func, "N") == 0) {
+ info.item_compare_func = NULL;
+ info.item_compare_numbers = true;
+ } else if (STRCMP(info.item_compare_func, "f") == 0) {
+ info.item_compare_func = NULL;
+ info.item_compare_float = true;
+ } else if (STRCMP(info.item_compare_func, "i") == 0) {
+ info.item_compare_func = NULL;
+ info.item_compare_ic = true;
}
}
}
if (argvars[2].v_type != VAR_UNKNOWN) {
- /* optional third argument: {dict} */
+ // optional third argument: {dict}
if (argvars[2].v_type != VAR_DICT) {
EMSG(_(e_dictreq));
- return;
+ goto theend;
}
- item_compare_selfdict = argvars[2].vval.v_dict;
+ info.item_compare_selfdict = argvars[2].vval.v_dict;
}
}
@@ -15324,19 +15379,20 @@ static void do_sort_uniq(typval_T *argvars, typval_T *rettv, bool sort)
i++;
}
- item_compare_func_err = FALSE;
+ info.item_compare_func_err = false;
// Test the compare function.
- if (item_compare_func != NULL
+ if (info.item_compare_func != NULL
&& item_compare2_not_keeping_zero(&ptrs[0], &ptrs[1])
== ITEM_COMPARE_FAIL) {
EMSG(_("E702: Sort compare function failed"));
} else {
// Sort the array with item pointers.
qsort(ptrs, (size_t)len, sizeof (sortItem_T),
- item_compare_func == NULL ? item_compare_not_keeping_zero :
- item_compare2_not_keeping_zero);
+ (info.item_compare_func == NULL ?
+ item_compare_not_keeping_zero :
+ item_compare2_not_keeping_zero));
- if (!item_compare_func_err) {
+ if (!info.item_compare_func_err) {
// Clear the list and append the items in the sorted order.
l->lv_first = NULL;
l->lv_last = NULL;
@@ -15352,21 +15408,24 @@ static void do_sort_uniq(typval_T *argvars, typval_T *rettv, bool sort)
int (*item_compare_func_ptr)(const void *, const void *);
// f_uniq(): ptrs will be a stack of items to remove.
- item_compare_func_err = FALSE;
- item_compare_func_ptr = item_compare_func ? item_compare2_keeping_zero :
- item_compare_keeping_zero;
+ info.item_compare_func_err = false;
+ if (info.item_compare_func != NULL) {
+ item_compare_func_ptr = item_compare2_keeping_zero;
+ } else {
+ item_compare_func_ptr = item_compare_keeping_zero;
+ }
for (li = l->lv_first; li != NULL && li->li_next != NULL; li = li->li_next) {
if (item_compare_func_ptr(&li, &li->li_next) == 0) {
ptrs[i++].item = li;
}
- if (item_compare_func_err) {
+ if (info.item_compare_func_err) {
EMSG(_("E882: Uniq compare function failed"));
break;
}
}
- if (!item_compare_func_err) {
+ if (!info.item_compare_func_err) {
while (--i >= 0) {
assert(ptrs[i].item->li_next);
li = ptrs[i].item->li_next;
@@ -15385,6 +15444,9 @@ static void do_sort_uniq(typval_T *argvars, typval_T *rettv, bool sort)
xfree(ptrs);
}
+
+theend:
+ sortinfo = old_sortinfo;
}
/// "sort"({list})" function
@@ -16421,8 +16483,21 @@ static void f_termopen(typval_T *argvars, typval_T *rettv)
ufunc_T *on_stdout = NULL, *on_stderr = NULL, *on_exit = NULL;
dict_T *job_opts = NULL;
+ char *cwd = ".";
if (argvars[1].v_type == VAR_DICT) {
job_opts = argvars[1].vval.v_dict;
+
+ char *new_cwd = (char *)get_dict_string(job_opts, (char_u *)"cwd", false);
+ if (new_cwd && strlen(new_cwd) > 0) {
+ cwd = new_cwd;
+ // The new cwd must be a directory.
+ if (!os_isdir((char_u *)cwd)) {
+ EMSG2(_(e_invarg2), "expected valid directory");
+ shell_free_argv(argv);
+ return;
+ }
+ }
+
if (!common_job_callbacks(job_opts, &on_stdout, &on_stderr, &on_exit)) {
shell_free_argv(argv);
return;
@@ -16430,7 +16505,7 @@ static void f_termopen(typval_T *argvars, typval_T *rettv)
}
TerminalJobData *data = common_job_init(argv, on_stdout, on_stderr, on_exit,
- job_opts, true, false);
+ job_opts, true, false, cwd);
data->proc.pty.width = curwin->w_width;
data->proc.pty.height = curwin->w_height;
data->proc.pty.term_name = xstrdup("xterm-256color");
@@ -16445,11 +16520,6 @@ static void f_termopen(typval_T *argvars, typval_T *rettv)
topts.resize_cb = term_resize;
topts.close_cb = term_close;
- char *cwd = ".";
- if (argvars[1].v_type == VAR_STRING
- && os_isdir(argvars[1].vval.v_string)) {
- cwd = (char *)argvars[1].vval.v_string;
- }
int pid = data->proc.pty.process.pid;
char buf[1024];
@@ -16536,8 +16606,8 @@ static void f_timer_start(typval_T *argvars, typval_T *rettv)
timer->timer_id = last_timer_id++;
timer->callback = func;
- time_watcher_init(&loop, &timer->tw, timer);
- timer->tw.events = queue_new_child(loop.events);
+ time_watcher_init(&main_loop, &timer->tw, timer);
+ timer->tw.events = queue_new_child(main_loop.events);
// if main loop is blocked, don't queue up multiple events
timer->tw.blockable = true;
time_watcher_start(&timer->tw, timer_due_cb, timeout,
@@ -16570,7 +16640,7 @@ static void timer_due_cb(TimeWatcher *tw, void *data)
{
timer_T *timer = (timer_T *)data;
if (timer->stopped) {
- return;
+ return;
}
// if repeat was negative repeat forever
if (timer->repeat_count >= 0 && --timer->repeat_count == 0) {
@@ -16610,6 +16680,14 @@ static void timer_free_cb(TimeWatcher *tw, void *data)
xfree(timer);
}
+void timer_teardown(void)
+{
+ timer_T *timer;
+ map_foreach_value(timers, timer, {
+ timer_stop(timer);
+ })
+}
+
/*
* "tolower(string)" function
*/
@@ -17709,7 +17787,8 @@ void set_vim_var_special(const VimVarIndex idx, const SpecialVarValue val)
void set_vim_var_string(const VimVarIndex idx, const char *const val,
const ptrdiff_t len)
{
- xfree(vimvars[idx].vv_str);
+ clear_tv(&vimvars[idx].vv_di.di_tv);
+ vimvars[idx].vv_type = VAR_STRING;
if (val == NULL) {
vimvars[idx].vv_str = NULL;
} else if (len == -1) {
@@ -17725,7 +17804,8 @@ void set_vim_var_string(const VimVarIndex idx, const char *const val,
/// @param[in,out] val Value to set to. Reference count will be incremented.
void set_vim_var_list(const VimVarIndex idx, list_T *const val)
{
- list_unref(vimvars[idx].vv_list);
+ clear_tv(&vimvars[idx].vv_di.di_tv);
+ vimvars[idx].vv_type = VAR_LIST;
vimvars[idx].vv_list = val;
if (val != NULL) {
val->lv_refcount++;
@@ -17739,7 +17819,8 @@ void set_vim_var_list(const VimVarIndex idx, list_T *const val)
/// Also keys of the dictionary will be made read-only.
void set_vim_var_dict(const VimVarIndex idx, dict_T *const val)
{
- dict_unref(vimvars[idx].vv_dict);
+ clear_tv(&vimvars[idx].vv_di.di_tv);
+ vimvars[idx].vv_type = VAR_DICT;
vimvars[idx].vv_dict = val;
if (val != NULL) {
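
The set_vim_var_string()/set_vim_var_list()/set_vim_var_dict() changes above all follow the same pattern: clear the whole vimvars slot with clear_tv() before retagging it, because the slot may currently hold a value of a different type and freeing only one union member would leak the rest. A standalone sketch of that pattern with a toy tagged union (not Nvim's typval_T, just the same shape):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

// Toy tagged union used only for illustration.
typedef enum { VAL_UNKNOWN, VAL_STRING, VAL_LIST } ValType;

typedef struct {
  ValType type;
  union {
    char *string;
    int *list;   // toy "list": a heap array
  } val;
} Value;

// Analogue of clear_tv(): free whatever the slot currently holds, based on
// its tag, and reset it to an empty state.
static void clear_value(Value *v)
{
  switch (v->type) {
    case VAL_STRING: free(v->val.string); break;
    case VAL_LIST:   free(v->val.list); break;
    case VAL_UNKNOWN: break;
  }
  memset(v, 0, sizeof(*v));
}

// Analogue of the patched set_vim_var_string(): clear the old value first,
// then retag the slot, so a slot that previously held a list is not leaked.
static void set_string(Value *v, const char *s)
{
  clear_value(v);
  v->type = VAL_STRING;
  v->val.string = s ? strdup(s) : NULL;
}

int main(void)
{
  Value slot = { .type = VAL_LIST, .val.list = malloc(3 * sizeof(int)) };
  set_string(&slot, "hello");   // the old list is freed, not leaked
  printf("%s\n", slot.val.string);
  clear_value(&slot);
  return 0;
}
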
@@ -21706,7 +21787,8 @@ static inline TerminalJobData *common_job_init(char **argv,
ufunc_T *on_exit,
dict_T *self,
bool pty,
- bool detach)
+ bool detach,
+ char *cwd)
{
TerminalJobData *data = xcalloc(1, sizeof(TerminalJobData));
data->stopped = false;
@@ -21714,11 +21796,11 @@ static inline TerminalJobData *common_job_init(char **argv,
data->on_stderr = on_stderr;
data->on_exit = on_exit;
data->self = self;
- data->events = queue_new_child(loop.events);
+ data->events = queue_new_child(main_loop.events);
if (pty) {
- data->proc.pty = pty_process_init(&loop, data);
+ data->proc.pty = pty_process_init(&main_loop, data);
} else {
- data->proc.uv = libuv_process_init(&loop, data);
+ data->proc.uv = libuv_process_init(&main_loop, data);
}
Process *proc = (Process *)&data->proc;
proc->argv = argv;
@@ -21730,6 +21812,7 @@ static inline TerminalJobData *common_job_init(char **argv,
proc->cb = on_process_exit;
proc->events = data->events;
proc->detach = detach;
+ proc->cwd = cwd;
return data;
}
@@ -21816,7 +21899,7 @@ static inline void free_term_job_data(TerminalJobData *data)
{
// data->queue may still be used after this function returns (process_wait), so
// only free in the next event loop iteration
- queue_put(loop.fast_events, free_term_job_data_event, 1, data);
+ queue_put(main_loop.fast_events, free_term_job_data_event, 1, data);
}
// vimscript job callbacks must be executed on Nvim main loop
@@ -22078,14 +22161,13 @@ typval_T eval_call_provider(char *provider, char *method, list_T *arguments)
bool eval_has_provider(char *name)
{
-
-#define check_provider(name) \
- if (has_##name == -1) { \
- has_##name = !!find_func((uint8_t *)"provider#" #name "#Call"); \
- if (!has_##name) { \
- script_autoload((uint8_t *)"provider#" #name "#Call", false); \
- has_##name = !!find_func((uint8_t *)"provider#" #name "#Call"); \
- } \
+#define check_provider(name) \
+ if (has_##name == -1) { \
+ has_##name = !!find_func((uint8_t *)"provider#" #name "#Call"); \
+ if (!has_##name) { \
+ script_autoload((uint8_t *)"provider#" #name "#Call", false); \
+ has_##name = !!find_func((uint8_t *)"provider#" #name "#Call"); \
+ } \
}
static int has_clipboard = -1, has_python = -1, has_python3 = -1;
@@ -22104,9 +22186,8 @@ bool eval_has_provider(char *name)
return false;
}
-// Compute the `DictWatcher` address from a QUEUE node. This only exists because
-// ASAN doesn't handle `QUEUE_DATA` pointer arithmetic, and we blacklist this
-// function on .asan-blacklist.
+// Compute the `DictWatcher` address from a QUEUE node. This only exists for
+// .asan-blacklist (ASAN doesn't handle QUEUE_DATA pointer arithmetic).
static DictWatcher *dictwatcher_node_data(QUEUE *q)
FUNC_ATTR_NONNULL_ALL FUNC_ATTR_NONNULL_RET
{
diff --git a/src/nvim/eval/decode.c b/src/nvim/eval/decode.c
index 0774ef515f..43e9f76c0f 100644
--- a/src/nvim/eval/decode.c
+++ b/src/nvim/eval/decode.c
@@ -101,7 +101,7 @@ static inline int json_decoder_pop(ValuesStackItem obj,
FUNC_ATTR_NONNULL_ALL
{
if (kv_size(*container_stack) == 0) {
- kv_push(ValuesStackItem, *stack, obj);
+ kv_push(*stack, obj);
return OK;
}
ContainerStackItem last_container = kv_last(*container_stack);
@@ -190,7 +190,7 @@ static inline int json_decoder_pop(ValuesStackItem obj,
*next_map_special = true;
return OK;
}
- kv_push(ValuesStackItem, *stack, obj);
+ kv_push(*stack, obj);
}
return OK;
}
@@ -628,10 +628,8 @@ int json_decode_string(const char *const buf, const size_t buf_len,
convert_setup(&conv, (char_u *) "utf-8", p_enc);
conv.vc_fail = true;
int ret = OK;
- ValuesStack stack;
- kv_init(stack);
- ContainerStack container_stack;
- kv_init(container_stack);
+ ValuesStack stack = KV_INITIAL_VALUE;
+ ContainerStack container_stack = KV_INITIAL_VALUE;
rettv->v_type = VAR_UNKNOWN;
bool didcomma = false;
bool didcolon = false;
@@ -815,13 +813,13 @@ json_decode_string_cycle_start:
.v_lock = VAR_UNLOCKED,
.vval = { .v_list = list },
};
- kv_push(ContainerStackItem, container_stack, ((ContainerStackItem) {
+ kv_push(container_stack, ((ContainerStackItem) {
.stack_index = kv_size(stack),
.s = p,
.container = tv,
.special_val = NULL,
}));
- kv_push(ValuesStackItem, stack, OBJ(tv, false, didcomma, didcolon));
+ kv_push(stack, OBJ(tv, false, didcomma, didcolon));
break;
}
case '{': {
@@ -845,13 +843,13 @@ json_decode_string_cycle_start:
.vval = { .v_dict = dict },
};
}
- kv_push(ContainerStackItem, container_stack, ((ContainerStackItem) {
+ kv_push(container_stack, ((ContainerStackItem) {
.stack_index = kv_size(stack),
.s = p,
.container = tv,
.special_val = val_list,
}));
- kv_push(ValuesStackItem, stack, OBJ(tv, false, didcomma, didcolon));
+ kv_push(stack, OBJ(tv, false, didcomma, didcolon));
break;
}
default: {
diff --git a/src/nvim/eval/encode.c b/src/nvim/eval/encode.c
index c651a50be9..54daf7557e 100644
--- a/src/nvim/eval/encode.c
+++ b/src/nvim/eval/encode.c
@@ -6,6 +6,7 @@
#include <msgpack.h>
#include <inttypes.h>
+#include <stddef.h>
#include <assert.h>
#include <math.h>
@@ -22,6 +23,7 @@
#include "nvim/ascii.h"
#include "nvim/vim.h" // For _()
#include "nvim/lib/kvec.h"
+#include "nvim/eval/typval_encode.h"
#define ga_concat(a, b) ga_concat(a, (char_u *)b)
#define utf_ptr2char(b) utf_ptr2char((char_u *)b)
@@ -32,29 +34,6 @@
#define convert_setup(vcp, from, to) \
(convert_setup(vcp, (char_u *)from, (char_u *)to))
-/// Structure representing current VimL to messagepack conversion state
-typedef struct {
- enum {
- kMPConvDict, ///< Convert dict_T *dictionary.
- kMPConvList, ///< Convert list_T *list.
- kMPConvPairs, ///< Convert mapping represented as a list_T* of pairs.
- } type;
- union {
- struct {
- dict_T *dict; ///< Currently converted dictionary.
- hashitem_T *hi; ///< Currently converted dictionary item.
- size_t todo; ///< Amount of items left to process.
- } d; ///< State of dictionary conversion.
- struct {
- list_T *list; ///< Currently converted list.
- listitem_T *li; ///< Currently converted list item.
- } l; ///< State of list or generic mapping conversion.
- } data; ///< Data to convert.
-} MPConvStackVal;
-
-/// Stack used to convert VimL values to messagepack.
-typedef kvec_t(MPConvStackVal) MPConvStack;
-
const char *const encode_special_var_names[] = {
[kSpecialVarNull] = "null",
[kSpecialVarTrue] = "true",
@@ -275,368 +254,7 @@ int encode_read_from_list(ListReaderState *const state, char *const buf,
: OK);
}
-/// Code for checking whether container references itself
-///
-/// @param[in,out] val Container to check.
-/// @param copyID_attr Name of the container attribute that holds copyID.
-/// After checking whether value of this attribute is
-/// copyID (variable) it is set to copyID.
-#define CHECK_SELF_REFERENCE(val, copyID_attr, conv_type) \
- do { \
- if ((val)->copyID_attr == copyID) { \
- CONV_RECURSE((val), conv_type); \
- } \
- (val)->copyID_attr = copyID; \
- } while (0)
-
-#define TV_STRLEN(tv) \
- (tv->vval.v_string == NULL ? 0 : STRLEN(tv->vval.v_string))
-
-/// Define functions which convert VimL value to something else
-///
-/// Creates function `vim_to_{name}(firstargtype firstargname, typval_T *const
-/// tv)` which returns OK or FAIL and helper functions.
-///
-/// @param firstargtype Type of the first argument. It will be used to return
-/// the results.
-/// @param firstargname Name of the first argument.
-/// @param name Name of the target converter.
-#define DEFINE_VIML_CONV_FUNCTIONS(scope, name, firstargtype, firstargname) \
-static int name##_convert_one_value(firstargtype firstargname, \
- MPConvStack *const mpstack, \
- typval_T *const tv, \
- const int copyID, \
- const char *const objname) \
- FUNC_ATTR_NONNULL_ALL FUNC_ATTR_WARN_UNUSED_RESULT \
-{ \
- switch (tv->v_type) { \
- case VAR_STRING: { \
- CONV_STRING(tv->vval.v_string, TV_STRLEN(tv)); \
- break; \
- } \
- case VAR_NUMBER: { \
- CONV_NUMBER(tv->vval.v_number); \
- break; \
- } \
- case VAR_FLOAT: { \
- CONV_FLOAT(tv->vval.v_float); \
- break; \
- } \
- case VAR_FUNC: { \
- CONV_FUNC(tv->vval.v_string); \
- break; \
- } \
- case VAR_LIST: { \
- if (tv->vval.v_list == NULL || tv->vval.v_list->lv_len == 0) { \
- CONV_EMPTY_LIST(); \
- break; \
- } \
- CHECK_SELF_REFERENCE(tv->vval.v_list, lv_copyID, kMPConvList); \
- CONV_LIST_START(tv->vval.v_list); \
- kv_push(MPConvStackVal, *mpstack, ((MPConvStackVal) { \
- .type = kMPConvList, \
- .data = { \
- .l = { \
- .list = tv->vval.v_list, \
- .li = tv->vval.v_list->lv_first, \
- }, \
- }, \
- })); \
- break; \
- } \
- case VAR_SPECIAL: { \
- switch (tv->vval.v_special) { \
- case kSpecialVarNull: { \
- CONV_NIL(); \
- break; \
- } \
- case kSpecialVarTrue: \
- case kSpecialVarFalse: { \
- CONV_BOOL(tv->vval.v_special == kSpecialVarTrue); \
- break; \
- } \
- } \
- break; \
- } \
- case VAR_DICT: { \
- if (tv->vval.v_dict == NULL \
- || tv->vval.v_dict->dv_hashtab.ht_used == 0) { \
- CONV_EMPTY_DICT(); \
- break; \
- } \
- const dictitem_T *type_di; \
- const dictitem_T *val_di; \
- if (CONV_ALLOW_SPECIAL \
- && tv->vval.v_dict->dv_hashtab.ht_used == 2 \
- && (type_di = dict_find((dict_T *) tv->vval.v_dict, \
- (char_u *) "_TYPE", -1)) != NULL \
- && type_di->di_tv.v_type == VAR_LIST \
- && (val_di = dict_find((dict_T *) tv->vval.v_dict, \
- (char_u *) "_VAL", -1)) != NULL) { \
- size_t i; \
- for (i = 0; i < ARRAY_SIZE(eval_msgpack_type_lists); i++) { \
- if (type_di->di_tv.vval.v_list == eval_msgpack_type_lists[i]) { \
- break; \
- } \
- } \
- if (i == ARRAY_SIZE(eval_msgpack_type_lists)) { \
- goto name##_convert_one_value_regular_dict; \
- } \
- switch ((MessagePackType) i) { \
- case kMPNil: { \
- CONV_NIL(); \
- break; \
- } \
- case kMPBoolean: { \
- if (val_di->di_tv.v_type != VAR_NUMBER) { \
- goto name##_convert_one_value_regular_dict; \
- } \
- CONV_BOOL(val_di->di_tv.vval.v_number); \
- break; \
- } \
- case kMPInteger: { \
- const list_T *val_list; \
- varnumber_T sign; \
- varnumber_T highest_bits; \
- varnumber_T high_bits; \
- varnumber_T low_bits; \
- /* List of 4 integers; first is signed (should be 1 or -1, but */ \
- /* this is not checked), second is unsigned and have at most */ \
- /* one (sign is -1) or two (sign is 1) non-zero bits (number of */ \
- /* bits is not checked), other unsigned and have at most 31 */ \
- /* non-zero bits (number of bits is not checked).*/ \
- if (val_di->di_tv.v_type != VAR_LIST \
- || (val_list = val_di->di_tv.vval.v_list) == NULL \
- || val_list->lv_len != 4 \
- || val_list->lv_first->li_tv.v_type != VAR_NUMBER \
- || (sign = val_list->lv_first->li_tv.vval.v_number) == 0 \
- || val_list->lv_first->li_next->li_tv.v_type != VAR_NUMBER \
- || (highest_bits = \
- val_list->lv_first->li_next->li_tv.vval.v_number) < 0 \
- || val_list->lv_last->li_prev->li_tv.v_type != VAR_NUMBER \
- || (high_bits = \
- val_list->lv_last->li_prev->li_tv.vval.v_number) < 0 \
- || val_list->lv_last->li_tv.v_type != VAR_NUMBER \
- || (low_bits = val_list->lv_last->li_tv.vval.v_number) < 0) { \
- goto name##_convert_one_value_regular_dict; \
- } \
- uint64_t number = ((uint64_t) (((uint64_t) highest_bits) << 62) \
- | (uint64_t) (((uint64_t) high_bits) << 31) \
- | (uint64_t) low_bits); \
- if (sign > 0) { \
- CONV_UNSIGNED_NUMBER(number); \
- } else { \
- CONV_NUMBER(-number); \
- } \
- break; \
- } \
- case kMPFloat: { \
- if (val_di->di_tv.v_type != VAR_FLOAT) { \
- goto name##_convert_one_value_regular_dict; \
- } \
- CONV_FLOAT(val_di->di_tv.vval.v_float); \
- break; \
- } \
- case kMPString: \
- case kMPBinary: { \
- const bool is_string = ((MessagePackType) i == kMPString); \
- if (val_di->di_tv.v_type != VAR_LIST) { \
- goto name##_convert_one_value_regular_dict; \
- } \
- size_t len; \
- char *buf; \
- if (!encode_vim_list_to_buf(val_di->di_tv.vval.v_list, &len, \
- &buf)) { \
- goto name##_convert_one_value_regular_dict; \
- } \
- if (is_string) { \
- CONV_STR_STRING(buf, len); \
- } else { \
- CONV_STRING(buf, len); \
- } \
- xfree(buf); \
- break; \
- } \
- case kMPArray: { \
- if (val_di->di_tv.v_type != VAR_LIST) { \
- goto name##_convert_one_value_regular_dict; \
- } \
- CHECK_SELF_REFERENCE(val_di->di_tv.vval.v_list, lv_copyID, \
- kMPConvList); \
- CONV_LIST_START(val_di->di_tv.vval.v_list); \
- kv_push(MPConvStackVal, *mpstack, ((MPConvStackVal) { \
- .type = kMPConvList, \
- .data = { \
- .l = { \
- .list = val_di->di_tv.vval.v_list, \
- .li = val_di->di_tv.vval.v_list->lv_first, \
- }, \
- }, \
- })); \
- break; \
- } \
- case kMPMap: { \
- if (val_di->di_tv.v_type != VAR_LIST) { \
- goto name##_convert_one_value_regular_dict; \
- } \
- list_T *const val_list = val_di->di_tv.vval.v_list; \
- if (val_list == NULL || val_list->lv_len == 0) { \
- CONV_EMPTY_DICT(); \
- break; \
- } \
- for (const listitem_T *li = val_list->lv_first; li != NULL; \
- li = li->li_next) { \
- if (li->li_tv.v_type != VAR_LIST \
- || li->li_tv.vval.v_list->lv_len != 2) { \
- goto name##_convert_one_value_regular_dict; \
- } \
- } \
- CHECK_SELF_REFERENCE(val_list, lv_copyID, kMPConvPairs); \
- CONV_DICT_START(val_list->lv_len); \
- kv_push(MPConvStackVal, *mpstack, ((MPConvStackVal) { \
- .type = kMPConvPairs, \
- .data = { \
- .l = { \
- .list = val_list, \
- .li = val_list->lv_first, \
- }, \
- }, \
- })); \
- break; \
- } \
- case kMPExt: { \
- const list_T *val_list; \
- varnumber_T type; \
- if (val_di->di_tv.v_type != VAR_LIST \
- || (val_list = val_di->di_tv.vval.v_list) == NULL \
- || val_list->lv_len != 2 \
- || (val_list->lv_first->li_tv.v_type != VAR_NUMBER) \
- || (type = val_list->lv_first->li_tv.vval.v_number) > INT8_MAX \
- || type < INT8_MIN \
- || (val_list->lv_last->li_tv.v_type != VAR_LIST)) { \
- goto name##_convert_one_value_regular_dict; \
- } \
- size_t len; \
- char *buf; \
- if (!encode_vim_list_to_buf(val_list->lv_last->li_tv.vval.v_list, \
- &len, &buf)) { \
- goto name##_convert_one_value_regular_dict; \
- } \
- CONV_EXT_STRING(buf, len, type); \
- xfree(buf); \
- break; \
- } \
- } \
- break; \
- } \
-name##_convert_one_value_regular_dict: \
- CHECK_SELF_REFERENCE(tv->vval.v_dict, dv_copyID, kMPConvDict); \
- CONV_DICT_START(tv->vval.v_dict->dv_hashtab.ht_used); \
- kv_push(MPConvStackVal, *mpstack, ((MPConvStackVal) { \
- .type = kMPConvDict, \
- .data = { \
- .d = { \
- .dict = tv->vval.v_dict, \
- .hi = tv->vval.v_dict->dv_hashtab.ht_array, \
- .todo = tv->vval.v_dict->dv_hashtab.ht_used, \
- }, \
- }, \
- })); \
- break; \
- } \
- case VAR_UNKNOWN: { \
- EMSG2(_(e_intern2), #name "_convert_one_value()"); \
- return FAIL; \
- } \
- } \
- return OK; \
-} \
-\
-scope int encode_vim_to_##name(firstargtype firstargname, typval_T *const tv, \
- const char *const objname) \
- FUNC_ATTR_WARN_UNUSED_RESULT \
-{ \
- const int copyID = get_copyID(); \
- MPConvStack mpstack; \
- kv_init(mpstack); \
- if (name##_convert_one_value(firstargname, &mpstack, tv, copyID, objname) \
- == FAIL) { \
- goto encode_vim_to_##name##_error_ret; \
- } \
- while (kv_size(mpstack)) { \
- MPConvStackVal *cur_mpsv = &kv_A(mpstack, kv_size(mpstack) - 1); \
- typval_T *cur_tv = NULL; \
- switch (cur_mpsv->type) { \
- case kMPConvDict: { \
- if (!cur_mpsv->data.d.todo) { \
- (void) kv_pop(mpstack); \
- cur_mpsv->data.d.dict->dv_copyID = copyID - 1; \
- CONV_DICT_END(); \
- continue; \
- } else if (cur_mpsv->data.d.todo \
- != cur_mpsv->data.d.dict->dv_hashtab.ht_used) { \
- CONV_DICT_BETWEEN_ITEMS(); \
- } \
- while (HASHITEM_EMPTY(cur_mpsv->data.d.hi)) { \
- cur_mpsv->data.d.hi++; \
- } \
- dictitem_T *const di = HI2DI(cur_mpsv->data.d.hi); \
- cur_mpsv->data.d.todo--; \
- cur_mpsv->data.d.hi++; \
- CONV_STR_STRING(&di->di_key[0], STRLEN(&di->di_key[0])); \
- CONV_DICT_AFTER_KEY(); \
- cur_tv = &di->di_tv; \
- break; \
- } \
- case kMPConvList: { \
- if (cur_mpsv->data.l.li == NULL) { \
- (void) kv_pop(mpstack); \
- cur_mpsv->data.l.list->lv_copyID = copyID - 1; \
- CONV_LIST_END(cur_mpsv->data.l.list); \
- continue; \
- } else if (cur_mpsv->data.l.li != cur_mpsv->data.l.list->lv_first) { \
- CONV_LIST_BETWEEN_ITEMS(); \
- } \
- cur_tv = &cur_mpsv->data.l.li->li_tv; \
- cur_mpsv->data.l.li = cur_mpsv->data.l.li->li_next; \
- break; \
- } \
- case kMPConvPairs: { \
- if (cur_mpsv->data.l.li == NULL) { \
- (void) kv_pop(mpstack); \
- cur_mpsv->data.l.list->lv_copyID = copyID - 1; \
- CONV_DICT_END(); \
- continue; \
- } else if (cur_mpsv->data.l.li != cur_mpsv->data.l.list->lv_first) { \
- CONV_DICT_BETWEEN_ITEMS(); \
- } \
- const list_T *const kv_pair = cur_mpsv->data.l.li->li_tv.vval.v_list; \
- CONV_SPECIAL_DICT_KEY_CHECK(name, kv_pair); \
- if (name##_convert_one_value(firstargname, &mpstack, \
- &kv_pair->lv_first->li_tv, copyID, \
- objname) == FAIL) { \
- goto encode_vim_to_##name##_error_ret; \
- } \
- CONV_DICT_AFTER_KEY(); \
- cur_tv = &kv_pair->lv_last->li_tv; \
- cur_mpsv->data.l.li = cur_mpsv->data.l.li->li_next; \
- break; \
- } \
- } \
- assert(cur_tv != NULL); \
- if (name##_convert_one_value(firstargname, &mpstack, cur_tv, copyID, \
- objname) == FAIL) { \
- goto encode_vim_to_##name##_error_ret; \
- } \
- } \
- kv_destroy(mpstack); \
- return OK; \
-encode_vim_to_##name##_error_ret: \
- kv_destroy(mpstack); \
- return FAIL; \
-}
-
-#define CONV_STRING(buf, len) \
+#define TYPVAL_ENCODE_CONV_STRING(buf, len) \
do { \
const char *const buf_ = (const char *) buf; \
if (buf == NULL) { \
@@ -655,19 +273,19 @@ encode_vim_to_##name##_error_ret: \
} \
} while (0)
-#define CONV_STR_STRING(buf, len) \
- CONV_STRING(buf, len)
+#define TYPVAL_ENCODE_CONV_STR_STRING(buf, len) \
+ TYPVAL_ENCODE_CONV_STRING(buf, len)
-#define CONV_EXT_STRING(buf, len, type)
+#define TYPVAL_ENCODE_CONV_EXT_STRING(buf, len, type)
-#define CONV_NUMBER(num) \
+#define TYPVAL_ENCODE_CONV_NUMBER(num) \
do { \
char numbuf[NUMBUFLEN]; \
vim_snprintf(numbuf, ARRAY_SIZE(numbuf), "%" PRId64, (int64_t) (num)); \
ga_concat(gap, numbuf); \
} while (0)
-#define CONV_FLOAT(flt) \
+#define TYPVAL_ENCODE_CONV_FLOAT(flt) \
do { \
const float_T flt_ = (flt); \
switch (fpclassify(flt_)) { \
@@ -690,51 +308,51 @@ encode_vim_to_##name##_error_ret: \
} \
} while (0)
-#define CONV_FUNC(fun) \
+#define TYPVAL_ENCODE_CONV_FUNC(fun) \
do { \
ga_concat(gap, "function("); \
- CONV_STRING(fun, STRLEN(fun)); \
+ TYPVAL_ENCODE_CONV_STRING(fun, STRLEN(fun)); \
ga_append(gap, ')'); \
} while (0)
-#define CONV_EMPTY_LIST() \
+#define TYPVAL_ENCODE_CONV_EMPTY_LIST() \
ga_concat(gap, "[]")
-#define CONV_LIST_START(lst) \
+#define TYPVAL_ENCODE_CONV_LIST_START(len) \
ga_append(gap, '[')
-#define CONV_EMPTY_DICT() \
+#define TYPVAL_ENCODE_CONV_EMPTY_DICT() \
ga_concat(gap, "{}")
-#define CONV_NIL() \
+#define TYPVAL_ENCODE_CONV_NIL() \
ga_concat(gap, "v:null")
-#define CONV_BOOL(num) \
+#define TYPVAL_ENCODE_CONV_BOOL(num) \
ga_concat(gap, ((num)? "v:true": "v:false"))
-#define CONV_UNSIGNED_NUMBER(num)
+#define TYPVAL_ENCODE_CONV_UNSIGNED_NUMBER(num)
-#define CONV_DICT_START(len) \
+#define TYPVAL_ENCODE_CONV_DICT_START(len) \
ga_append(gap, '{')
-#define CONV_DICT_END() \
+#define TYPVAL_ENCODE_CONV_DICT_END() \
ga_append(gap, '}')
-#define CONV_DICT_AFTER_KEY() \
+#define TYPVAL_ENCODE_CONV_DICT_AFTER_KEY() \
ga_concat(gap, ": ")
-#define CONV_DICT_BETWEEN_ITEMS() \
+#define TYPVAL_ENCODE_CONV_DICT_BETWEEN_ITEMS() \
ga_concat(gap, ", ")
-#define CONV_SPECIAL_DICT_KEY_CHECK(name, kv_pair)
+#define TYPVAL_ENCODE_CONV_SPECIAL_DICT_KEY_CHECK(label, kv_pair)
-#define CONV_LIST_END(lst) \
+#define TYPVAL_ENCODE_CONV_LIST_END() \
ga_append(gap, ']')
-#define CONV_LIST_BETWEEN_ITEMS() \
- CONV_DICT_BETWEEN_ITEMS()
+#define TYPVAL_ENCODE_CONV_LIST_BETWEEN_ITEMS() \
+ TYPVAL_ENCODE_CONV_DICT_BETWEEN_ITEMS()
-#define CONV_RECURSE(val, conv_type) \
+#define TYPVAL_ENCODE_CONV_RECURSE(val, conv_type) \
do { \
if (!did_echo_string_emsg) { \
/* Only give this message once for a recursive call to avoid */ \
@@ -764,12 +382,12 @@ encode_vim_to_##name##_error_ret: \
return OK; \
} while (0)
-#define CONV_ALLOW_SPECIAL false
+#define TYPVAL_ENCODE_ALLOW_SPECIALS false
-DEFINE_VIML_CONV_FUNCTIONS(static, string, garray_T *const, gap)
+TYPVAL_ENCODE_DEFINE_CONV_FUNCTIONS(static, string, garray_T *const, gap)
-#undef CONV_RECURSE
-#define CONV_RECURSE(val, conv_type) \
+#undef TYPVAL_ENCODE_CONV_RECURSE
+#define TYPVAL_ENCODE_CONV_RECURSE(val, conv_type) \
do { \
char ebuf[NUMBUFLEN + 7]; \
size_t backref = 0; \
@@ -796,10 +414,10 @@ DEFINE_VIML_CONV_FUNCTIONS(static, string, garray_T *const, gap)
return OK; \
} while (0)
-DEFINE_VIML_CONV_FUNCTIONS(, echo, garray_T *const, gap)
+TYPVAL_ENCODE_DEFINE_CONV_FUNCTIONS(, echo, garray_T *const, gap)
-#undef CONV_RECURSE
-#define CONV_RECURSE(val, conv_type) \
+#undef TYPVAL_ENCODE_CONV_RECURSE
+#define TYPVAL_ENCODE_CONV_RECURSE(val, conv_type) \
do { \
if (!did_echo_string_emsg) { \
/* Only give this message once for a recursive call to avoid */ \
@@ -811,27 +429,27 @@ DEFINE_VIML_CONV_FUNCTIONS(, echo, garray_T *const, gap)
return OK; \
} while (0)
-#undef CONV_ALLOW_SPECIAL
-#define CONV_ALLOW_SPECIAL true
+#undef TYPVAL_ENCODE_ALLOW_SPECIALS
+#define TYPVAL_ENCODE_ALLOW_SPECIALS true
-#undef CONV_NIL
-#define CONV_NIL() \
+#undef TYPVAL_ENCODE_CONV_NIL
+#define TYPVAL_ENCODE_CONV_NIL() \
ga_concat(gap, "null")
-#undef CONV_BOOL
-#define CONV_BOOL(num) \
+#undef TYPVAL_ENCODE_CONV_BOOL
+#define TYPVAL_ENCODE_CONV_BOOL(num) \
ga_concat(gap, ((num)? "true": "false"))
-#undef CONV_UNSIGNED_NUMBER
-#define CONV_UNSIGNED_NUMBER(num) \
+#undef TYPVAL_ENCODE_CONV_UNSIGNED_NUMBER
+#define TYPVAL_ENCODE_CONV_UNSIGNED_NUMBER(num) \
do { \
char numbuf[NUMBUFLEN]; \
vim_snprintf(numbuf, ARRAY_SIZE(numbuf), "%" PRIu64, (num)); \
ga_concat(gap, numbuf); \
} while (0)
-#undef CONV_FLOAT
-#define CONV_FLOAT(flt) \
+#undef TYPVAL_ENCODE_CONV_FLOAT
+#define TYPVAL_ENCODE_CONV_FLOAT(flt) \
do { \
const float_T flt_ = (flt); \
switch (fpclassify(flt_)) { \
@@ -1019,24 +637,24 @@ static inline int convert_to_json_string(garray_T *const gap,
return OK;
}
-#undef CONV_STRING
-#define CONV_STRING(buf, len) \
+#undef TYPVAL_ENCODE_CONV_STRING
+#define TYPVAL_ENCODE_CONV_STRING(buf, len) \
do { \
if (convert_to_json_string(gap, (const char *) (buf), (len)) != OK) { \
return FAIL; \
} \
} while (0)
-#undef CONV_EXT_STRING
-#define CONV_EXT_STRING(buf, len, type) \
+#undef TYPVAL_ENCODE_CONV_EXT_STRING
+#define TYPVAL_ENCODE_CONV_EXT_STRING(buf, len, type) \
do { \
xfree(buf); \
EMSG(_("E474: Unable to convert EXT string to JSON")); \
return FAIL; \
} while (0)
-#undef CONV_FUNC
-#define CONV_FUNC(fun) \
+#undef TYPVAL_ENCODE_CONV_FUNC
+#define TYPVAL_ENCODE_CONV_FUNC(fun) \
return conv_error(_("E474: Error while dumping %s, %s: " \
"attempt to dump function reference"), \
mpstack, objname)
@@ -1080,38 +698,38 @@ static inline bool check_json_key(const typval_T *const tv)
return true;
}
-#undef CONV_SPECIAL_DICT_KEY_CHECK
-#define CONV_SPECIAL_DICT_KEY_CHECK(name, kv_pair) \
+#undef TYPVAL_ENCODE_CONV_SPECIAL_DICT_KEY_CHECK
+#define TYPVAL_ENCODE_CONV_SPECIAL_DICT_KEY_CHECK(label, kv_pair) \
do { \
if (!check_json_key(&kv_pair->lv_first->li_tv)) { \
EMSG(_("E474: Invalid key in special dictionary")); \
- goto encode_vim_to_##name##_error_ret; \
+ goto label; \
} \
} while (0)
-DEFINE_VIML_CONV_FUNCTIONS(static, json, garray_T *const, gap)
-
-#undef CONV_STRING
-#undef CONV_STR_STRING
-#undef CONV_EXT_STRING
-#undef CONV_NUMBER
-#undef CONV_FLOAT
-#undef CONV_FUNC
-#undef CONV_EMPTY_LIST
-#undef CONV_LIST_START
-#undef CONV_EMPTY_DICT
-#undef CONV_NIL
-#undef CONV_BOOL
-#undef CONV_UNSIGNED_NUMBER
-#undef CONV_DICT_START
-#undef CONV_DICT_END
-#undef CONV_DICT_AFTER_KEY
-#undef CONV_DICT_BETWEEN_ITEMS
-#undef CONV_SPECIAL_DICT_KEY_CHECK
-#undef CONV_LIST_END
-#undef CONV_LIST_BETWEEN_ITEMS
-#undef CONV_RECURSE
-#undef CONV_ALLOW_SPECIAL
+TYPVAL_ENCODE_DEFINE_CONV_FUNCTIONS(static, json, garray_T *const, gap)
+
+#undef TYPVAL_ENCODE_CONV_STRING
+#undef TYPVAL_ENCODE_CONV_STR_STRING
+#undef TYPVAL_ENCODE_CONV_EXT_STRING
+#undef TYPVAL_ENCODE_CONV_NUMBER
+#undef TYPVAL_ENCODE_CONV_FLOAT
+#undef TYPVAL_ENCODE_CONV_FUNC
+#undef TYPVAL_ENCODE_CONV_EMPTY_LIST
+#undef TYPVAL_ENCODE_CONV_LIST_START
+#undef TYPVAL_ENCODE_CONV_EMPTY_DICT
+#undef TYPVAL_ENCODE_CONV_NIL
+#undef TYPVAL_ENCODE_CONV_BOOL
+#undef TYPVAL_ENCODE_CONV_UNSIGNED_NUMBER
+#undef TYPVAL_ENCODE_CONV_DICT_START
+#undef TYPVAL_ENCODE_CONV_DICT_END
+#undef TYPVAL_ENCODE_CONV_DICT_AFTER_KEY
+#undef TYPVAL_ENCODE_CONV_DICT_BETWEEN_ITEMS
+#undef TYPVAL_ENCODE_CONV_SPECIAL_DICT_KEY_CHECK
+#undef TYPVAL_ENCODE_CONV_LIST_END
+#undef TYPVAL_ENCODE_CONV_LIST_BETWEEN_ITEMS
+#undef TYPVAL_ENCODE_CONV_RECURSE
+#undef TYPVAL_ENCODE_ALLOW_SPECIALS
/// Return a string with the string representation of a variable.
/// Puts quotes around strings, so that they can be parsed back by eval().
@@ -1181,7 +799,7 @@ char *encode_tv2json(typval_T *tv, size_t *len)
return (char *) ga.ga_data;
}
-#define CONV_STRING(buf, len) \
+#define TYPVAL_ENCODE_CONV_STRING(buf, len) \
do { \
if (buf == NULL) { \
msgpack_pack_bin(packer, 0); \
@@ -1192,7 +810,7 @@ char *encode_tv2json(typval_T *tv, size_t *len)
} \
} while (0)
-#define CONV_STR_STRING(buf, len) \
+#define TYPVAL_ENCODE_CONV_STR_STRING(buf, len) \
do { \
if (buf == NULL) { \
msgpack_pack_str(packer, 0); \
@@ -1203,7 +821,7 @@ char *encode_tv2json(typval_T *tv, size_t *len)
} \
} while (0)
-#define CONV_EXT_STRING(buf, len, type) \
+#define TYPVAL_ENCODE_CONV_EXT_STRING(buf, len, type) \
do { \
if (buf == NULL) { \
msgpack_pack_ext(packer, 0, (int8_t) type); \
@@ -1214,30 +832,30 @@ char *encode_tv2json(typval_T *tv, size_t *len)
} \
} while (0)
-#define CONV_NUMBER(num) \
+#define TYPVAL_ENCODE_CONV_NUMBER(num) \
msgpack_pack_int64(packer, (int64_t) (num))
-#define CONV_FLOAT(flt) \
+#define TYPVAL_ENCODE_CONV_FLOAT(flt) \
msgpack_pack_double(packer, (double) (flt))
-#define CONV_FUNC(fun) \
+#define TYPVAL_ENCODE_CONV_FUNC(fun) \
return conv_error(_("E951: Error while dumping %s, %s: " \
"attempt to dump function reference"), \
mpstack, objname)
-#define CONV_EMPTY_LIST() \
+#define TYPVAL_ENCODE_CONV_EMPTY_LIST() \
msgpack_pack_array(packer, 0)
-#define CONV_LIST_START(lst) \
- msgpack_pack_array(packer, (size_t) (lst)->lv_len)
+#define TYPVAL_ENCODE_CONV_LIST_START(len) \
+ msgpack_pack_array(packer, (size_t) (len))
-#define CONV_EMPTY_DICT() \
+#define TYPVAL_ENCODE_CONV_EMPTY_DICT() \
msgpack_pack_map(packer, 0)
-#define CONV_NIL() \
+#define TYPVAL_ENCODE_CONV_NIL() \
msgpack_pack_nil(packer)
-#define CONV_BOOL(num) \
+#define TYPVAL_ENCODE_CONV_BOOL(num) \
do { \
if ((num)) { \
msgpack_pack_true(packer); \
@@ -1246,51 +864,51 @@ char *encode_tv2json(typval_T *tv, size_t *len)
} \
} while (0)
-#define CONV_UNSIGNED_NUMBER(num) \
+#define TYPVAL_ENCODE_CONV_UNSIGNED_NUMBER(num) \
msgpack_pack_uint64(packer, (num))
-#define CONV_DICT_START(len) \
+#define TYPVAL_ENCODE_CONV_DICT_START(len) \
msgpack_pack_map(packer, (size_t) (len))
-#define CONV_DICT_END()
+#define TYPVAL_ENCODE_CONV_DICT_END()
-#define CONV_DICT_AFTER_KEY()
+#define TYPVAL_ENCODE_CONV_DICT_AFTER_KEY()
-#define CONV_DICT_BETWEEN_ITEMS()
+#define TYPVAL_ENCODE_CONV_DICT_BETWEEN_ITEMS()
-#define CONV_SPECIAL_DICT_KEY_CHECK(name, kv_pair)
+#define TYPVAL_ENCODE_CONV_SPECIAL_DICT_KEY_CHECK(label, kv_pair)
-#define CONV_LIST_END(lst)
+#define TYPVAL_ENCODE_CONV_LIST_END()
-#define CONV_LIST_BETWEEN_ITEMS()
+#define TYPVAL_ENCODE_CONV_LIST_BETWEEN_ITEMS()
-#define CONV_RECURSE(val, conv_type) \
+#define TYPVAL_ENCODE_CONV_RECURSE(val, conv_type) \
return conv_error(_("E952: Unable to dump %s: " \
"container references itself in %s"), \
mpstack, objname)
-#define CONV_ALLOW_SPECIAL true
-
-DEFINE_VIML_CONV_FUNCTIONS(, msgpack, msgpack_packer *const, packer)
-
-#undef CONV_STRING
-#undef CONV_STR_STRING
-#undef CONV_EXT_STRING
-#undef CONV_NUMBER
-#undef CONV_FLOAT
-#undef CONV_FUNC
-#undef CONV_EMPTY_LIST
-#undef CONV_LIST_START
-#undef CONV_EMPTY_DICT
-#undef CONV_NIL
-#undef CONV_BOOL
-#undef CONV_UNSIGNED_NUMBER
-#undef CONV_DICT_START
-#undef CONV_DICT_END
-#undef CONV_DICT_AFTER_KEY
-#undef CONV_DICT_BETWEEN_ITEMS
-#undef CONV_SPECIAL_DICT_KEY_CHECK
-#undef CONV_LIST_END
-#undef CONV_LIST_BETWEEN_ITEMS
-#undef CONV_RECURSE
-#undef CONV_ALLOW_SPECIAL
+#define TYPVAL_ENCODE_ALLOW_SPECIALS true
+
+TYPVAL_ENCODE_DEFINE_CONV_FUNCTIONS(, msgpack, msgpack_packer *const, packer)
+
+#undef TYPVAL_ENCODE_CONV_STRING
+#undef TYPVAL_ENCODE_CONV_STR_STRING
+#undef TYPVAL_ENCODE_CONV_EXT_STRING
+#undef TYPVAL_ENCODE_CONV_NUMBER
+#undef TYPVAL_ENCODE_CONV_FLOAT
+#undef TYPVAL_ENCODE_CONV_FUNC
+#undef TYPVAL_ENCODE_CONV_EMPTY_LIST
+#undef TYPVAL_ENCODE_CONV_LIST_START
+#undef TYPVAL_ENCODE_CONV_EMPTY_DICT
+#undef TYPVAL_ENCODE_CONV_NIL
+#undef TYPVAL_ENCODE_CONV_BOOL
+#undef TYPVAL_ENCODE_CONV_UNSIGNED_NUMBER
+#undef TYPVAL_ENCODE_CONV_DICT_START
+#undef TYPVAL_ENCODE_CONV_DICT_END
+#undef TYPVAL_ENCODE_CONV_DICT_AFTER_KEY
+#undef TYPVAL_ENCODE_CONV_DICT_BETWEEN_ITEMS
+#undef TYPVAL_ENCODE_CONV_SPECIAL_DICT_KEY_CHECK
+#undef TYPVAL_ENCODE_CONV_LIST_END
+#undef TYPVAL_ENCODE_CONV_LIST_BETWEEN_ITEMS
+#undef TYPVAL_ENCODE_CONV_RECURSE
+#undef TYPVAL_ENCODE_ALLOW_SPECIALS
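
The encode.c rewrite above swaps the file-local DEFINE_VIML_CONV_FUNCTIONS generator for the shared TYPVAL_ENCODE_DEFINE_CONV_FUNCTIONS header: define the TYPVAL_ENCODE_CONV_* hook macros, instantiate the generator, #undef the hooks, and repeat for the next target (string, echo, json, msgpack). A toy standalone sketch of that define/instantiate/undef shape (DEFINE_DUMP_FUNCTION and the CONV_* hooks below are illustrative, not the real macros):

#include <stdio.h>

// Toy generator in the shape of TYPVAL_ENCODE_DEFINE_CONV_FUNCTIONS: the
// function it emits is controlled by hook macros that the including code
// defines (and later #undefs) before each instantiation.
#define DEFINE_DUMP_FUNCTION(name) \
  static void dump_##name(const int *arr, int len) \
  { \
    CONV_START(); \
    for (int i = 0; i < len; i++) { \
      if (i > 0) { \
        CONV_BETWEEN_ITEMS(); \
      } \
      CONV_NUMBER(arr[i]); \
    } \
    CONV_END(); \
  }

// First target: a bracketed, comma-separated dump.
#define CONV_START() printf("[")
#define CONV_BETWEEN_ITEMS() printf(", ")
#define CONV_NUMBER(n) printf("%d", (n))
#define CONV_END() printf("]\n")
DEFINE_DUMP_FUNCTION(echo)
#undef CONV_START
#undef CONV_BETWEEN_ITEMS
#undef CONV_NUMBER
#undef CONV_END

// Second target: one value per line.
#define CONV_START() ((void)0)
#define CONV_BETWEEN_ITEMS() printf("\n")
#define CONV_NUMBER(n) printf("%d", (n))
#define CONV_END() printf("\n")
DEFINE_DUMP_FUNCTION(lines)
#undef CONV_START
#undef CONV_BETWEEN_ITEMS
#undef CONV_NUMBER
#undef CONV_END

int main(void)
{
  const int values[] = { 1, 2, 3 };
  dump_echo(values, 3);   // prints: [1, 2, 3]
  dump_lines(values, 3);  // prints one value per line
  return 0;
}
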
diff --git a/src/nvim/eval/typval_encode.h b/src/nvim/eval/typval_encode.h
new file mode 100644
index 0000000000..f70a6c9e94
--- /dev/null
+++ b/src/nvim/eval/typval_encode.h
@@ -0,0 +1,563 @@
+/// @file eval/typval_encode.h
+///
+/// Contains a set of macros used to convert (possibly recursive) typval_T into
+/// something else. For these macros to work the following macros must be
+/// defined:
+
+/// @def TYPVAL_ENCODE_CONV_NIL
+/// @brief Macros used to convert NIL value
+///
+/// Is called both for special dictionaries (unless #TYPVAL_ENCODE_ALLOW_SPECIALS
+/// is false) and `v:null`. Accepts no arguments, but still must be
+/// a function-like macro.
+
+/// @def TYPVAL_ENCODE_CONV_BOOL
+/// @brief Macros used to convert boolean value
+///
+/// Is called both for special dictionaries (unless #TYPVAL_ENCODE_ALLOW_SPECIALS
+/// is false) and `v:true`/`v:false`.
+///
+/// @param num Boolean value to convert. Value is an expression which
+/// evaluates to some integer.
+
+/// @def TYPVAL_ENCODE_CONV_NUMBER
+/// @brief Macros used to convert integer
+///
+/// @param num Integer to convert, must accept both varnumber_T and int64_t.
+
+/// @def TYPVAL_ENCODE_CONV_UNSIGNED_NUMBER
+/// @brief Macros used to convert unsigned integer
+///
+/// Not used if #TYPVAL_ENCODE_ALLOW_SPECIALS is false, but still must be
+/// defined.
+///
+/// @param num Integer to convert, must accept uint64_t.
+
+/// @def TYPVAL_ENCODE_CONV_FLOAT
+/// @brief Macros used to convert floating-point number
+///
+/// @param flt Number to convert, must accept float_T.
+
+/// @def TYPVAL_ENCODE_CONV_STRING
+/// @brief Macros used to convert plain string
+///
+/// Is used to convert VAR_STRING objects as well as BIN strings represented as
+/// special dictionaries.
+///
+/// @param buf String to convert. Is a char[] buffer, not NUL-terminated.
+/// @param len String length.
+
+/// @def TYPVAL_ENCODE_CONV_STR_STRING
+/// @brief Like #TYPVAL_ENCODE_CONV_STRING, but for STR strings
+///
+/// Is used to convert dictionary keys and STR strings represented as special
+/// dictionaries.
+
+/// @def TYPVAL_ENCODE_CONV_EXT_STRING
+/// @brief Macros used to convert EXT string
+///
+/// Is used to convert EXT strings represented as special dictionaries. Never
+/// actually used if #TYPVAL_ENCODE_ALLOW_SPECIALS is false, but still must be
+/// defined.
+///
+/// @param buf String to convert. Is a char[] buffer, not NUL-terminated.
+/// @param len String length.
+/// @param type EXT type.
+
+/// @def TYPVAL_ENCODE_CONV_FUNC
+/// @brief Macros used to convert a function reference
+///
+/// @param fun Function name.
+
+/// @def TYPVAL_ENCODE_CONV_EMPTY_LIST
+/// @brief Macros used to convert an empty list
+///
+/// Accepts no arguments, but still must be a function-like macro.
+
+/// @def TYPVAL_ENCODE_CONV_EMPTY_DICT
+/// @brief Macros used to convert an empty dictionary
+///
+/// Accepts no arguments, but still must be a function-like macro.
+
+/// @def TYPVAL_ENCODE_CONV_LIST_START
+/// @brief Macros used before starting to convert non-empty list
+///
+/// @param len List length. Is an expression which evaluates to an integer.
+
+/// @def TYPVAL_ENCODE_CONV_LIST_BETWEEN_ITEMS
+/// @brief Macros used after finishing converting non-last list item
+///
+/// Accepts no arguments, but still must be a function-like macro.
+
+/// @def TYPVAL_ENCODE_CONV_LIST_END
+/// @brief Macros used after converting non-empty list
+///
+/// Accepts no arguments, but still must be a function-like macro.
+
+/// @def TYPVAL_ENCODE_CONV_DICT_START
+/// @brief Macros used before starting to convert non-empty dictionary
+///
+/// @param len Dictionary length. Is an expression which evaluates to an
+/// integer.
+
+/// @def TYPVAL_ENCODE_CONV_SPECIAL_DICT_KEY_CHECK
+/// @brief Macros used to check special dictionary key
+///
+/// @param label Label for goto in case the check was not successful.
+/// @param kv_pair List with two elements: key and value.
+
+/// @def TYPVAL_ENCODE_CONV_DICT_AFTER_KEY
+/// @brief Macros used after finishing converting dictionary key
+///
+/// Accepts no arguments, but still must be a function-like macro.
+
+/// @def TYPVAL_ENCODE_CONV_DICT_BETWEEN_ITEMS
+/// @brief Macros used after finishing converting non-last dictionary value
+///
+/// Accepts no arguments, but still must be a function-like macro.
+
+/// @def TYPVAL_ENCODE_CONV_DICT_END
+/// @brief Macros used after converting non-empty dictionary
+///
+/// Accepts no arguments, but still must be a function-like macro.
+
+/// @def TYPVAL_ENCODE_CONV_RECURSE
+/// @brief Macros used when self-containing container is detected
+///
+/// @param val Container for which this situation was detected.
+/// @param conv_type Type of the stack entry, @see MPConvStackValType.
+
+/// @def TYPVAL_ENCODE_ALLOW_SPECIALS
+/// @brief Macros that specify whether special dictionaries are special
+///
+/// Must be something that evaluates to boolean, most likely `true` or `false`.
+/// If it is false then special dictionaries are not treated specially.
+#ifndef NVIM_EVAL_TYPVAL_ENCODE_H
+#define NVIM_EVAL_TYPVAL_ENCODE_H
+
+#include <stddef.h>
+#include <inttypes.h>
+#include <assert.h>
+
+#include "nvim/lib/kvec.h"
+#include "nvim/eval_defs.h"
+#include "nvim/eval/encode.h"
+#include "nvim/func_attr.h"
+
+/// Type of the stack entry
+typedef enum {
+ kMPConvDict, ///< Convert dict_T *dictionary.
+ kMPConvList, ///< Convert list_T *list.
+ kMPConvPairs, ///< Convert mapping represented as a list_T* of pairs.
+} MPConvStackValType;
+
+/// Structure representing current VimL to messagepack conversion state
+typedef struct {
+ MPConvStackValType type; ///< Type of the stack entry.
+ union {
+ struct {
+ dict_T *dict; ///< Currently converted dictionary.
+ hashitem_T *hi; ///< Currently converted dictionary item.
+ size_t todo; ///< Amount of items left to process.
+ } d; ///< State of dictionary conversion.
+ struct {
+ list_T *list; ///< Currently converted list.
+ listitem_T *li; ///< Currently converted list item.
+ } l; ///< State of list or generic mapping conversion.
+ } data; ///< Data to convert.
+} MPConvStackVal;
+
+/// Stack used to convert VimL values to messagepack.
+typedef kvec_t(MPConvStackVal) MPConvStack;
+
+// Defines for MPConvStack
+#define _mp_size kv_size
+#define _mp_init kv_init
+#define _mp_destroy kv_destroy
+#define _mp_push kv_push
+#define _mp_pop kv_pop
+#define _mp_last kv_last
+
+/// Code for checking whether container references itself
+///
+/// @param[in,out] val Container to check.
+/// @param copyID_attr Name of the container attribute that holds copyID.
+/// After checking whether value of this attribute is
+/// copyID (variable) it is set to copyID.
+#define _TYPVAL_ENCODE_CHECK_SELF_REFERENCE(val, copyID_attr, conv_type) \
+ do { \
+ if ((val)->copyID_attr == copyID) { \
+ TYPVAL_ENCODE_CONV_RECURSE((val), conv_type); \
+ } \
+ (val)->copyID_attr = copyID; \
+ } while (0)
+
+/// Length of the string stored in typval_T
+///
+/// @param[in] tv String for which to compute the length. Must be typval_T
+/// with VAR_STRING.
+///
+/// @return Length of the string stored in typval_T; 0 for a NULL string.
+static inline size_t tv_strlen(const typval_T *const tv)
+ FUNC_ATTR_ALWAYS_INLINE FUNC_ATTR_PURE FUNC_ATTR_WARN_UNUSED_RESULT
+ FUNC_ATTR_NONNULL_ALL
+{
+ assert(tv->v_type == VAR_STRING);
+ return (tv->vval.v_string == NULL
+ ? 0
+ : strlen((char *) tv->vval.v_string));
+}
+
+/// Define functions which convert VimL value to something else
+///
+/// Creates function `vim_to_{name}(firstargtype firstargname, typval_T *const
+/// tv)` which returns OK or FAIL and helper functions.
+///
+/// @param scope Scope of the main function: either nothing or `static`.
+/// @param firstargtype Type of the first argument. It will be used to return
+/// the results.
+/// @param firstargname Name of the first argument.
+/// @param name Name of the target converter.
+#define TYPVAL_ENCODE_DEFINE_CONV_FUNCTIONS(scope, name, firstargtype, \
+ firstargname) \
+static int name##_convert_one_value(firstargtype firstargname, \
+ MPConvStack *const mpstack, \
+ typval_T *const tv, \
+ const int copyID, \
+ const char *const objname) \
+ FUNC_ATTR_NONNULL_ALL FUNC_ATTR_WARN_UNUSED_RESULT \
+{ \
+ switch (tv->v_type) { \
+ case VAR_STRING: { \
+ TYPVAL_ENCODE_CONV_STRING(tv->vval.v_string, tv_strlen(tv)); \
+ break; \
+ } \
+ case VAR_NUMBER: { \
+ TYPVAL_ENCODE_CONV_NUMBER(tv->vval.v_number); \
+ break; \
+ } \
+ case VAR_FLOAT: { \
+ TYPVAL_ENCODE_CONV_FLOAT(tv->vval.v_float); \
+ break; \
+ } \
+ case VAR_FUNC: { \
+ TYPVAL_ENCODE_CONV_FUNC(tv->vval.v_string); \
+ break; \
+ } \
+ case VAR_LIST: { \
+ if (tv->vval.v_list == NULL || tv->vval.v_list->lv_len == 0) { \
+ TYPVAL_ENCODE_CONV_EMPTY_LIST(); \
+ break; \
+ } \
+ _TYPVAL_ENCODE_CHECK_SELF_REFERENCE(tv->vval.v_list, lv_copyID, \
+ kMPConvList); \
+ TYPVAL_ENCODE_CONV_LIST_START(tv->vval.v_list->lv_len); \
+ _mp_push(*mpstack, ((MPConvStackVal) { \
+ .type = kMPConvList, \
+ .data = { \
+ .l = { \
+ .list = tv->vval.v_list, \
+ .li = tv->vval.v_list->lv_first, \
+ }, \
+ }, \
+ })); \
+ break; \
+ } \
+ case VAR_SPECIAL: { \
+ switch (tv->vval.v_special) { \
+ case kSpecialVarNull: { \
+ TYPVAL_ENCODE_CONV_NIL(); \
+ break; \
+ } \
+ case kSpecialVarTrue: \
+ case kSpecialVarFalse: { \
+ TYPVAL_ENCODE_CONV_BOOL(tv->vval.v_special == kSpecialVarTrue); \
+ break; \
+ } \
+ } \
+ break; \
+ } \
+ case VAR_DICT: { \
+ if (tv->vval.v_dict == NULL \
+ || tv->vval.v_dict->dv_hashtab.ht_used == 0) { \
+ TYPVAL_ENCODE_CONV_EMPTY_DICT(); \
+ break; \
+ } \
+ const dictitem_T *type_di; \
+ const dictitem_T *val_di; \
+ if (TYPVAL_ENCODE_ALLOW_SPECIALS \
+ && tv->vval.v_dict->dv_hashtab.ht_used == 2 \
+ && (type_di = dict_find((dict_T *) tv->vval.v_dict, \
+ (char_u *) "_TYPE", -1)) != NULL \
+ && type_di->di_tv.v_type == VAR_LIST \
+ && (val_di = dict_find((dict_T *) tv->vval.v_dict, \
+ (char_u *) "_VAL", -1)) != NULL) { \
+ size_t i; \
+ for (i = 0; i < ARRAY_SIZE(eval_msgpack_type_lists); i++) { \
+ if (type_di->di_tv.vval.v_list == eval_msgpack_type_lists[i]) { \
+ break; \
+ } \
+ } \
+ if (i == ARRAY_SIZE(eval_msgpack_type_lists)) { \
+ goto name##_convert_one_value_regular_dict; \
+ } \
+ switch ((MessagePackType) i) { \
+ case kMPNil: { \
+ TYPVAL_ENCODE_CONV_NIL(); \
+ break; \
+ } \
+ case kMPBoolean: { \
+ if (val_di->di_tv.v_type != VAR_NUMBER) { \
+ goto name##_convert_one_value_regular_dict; \
+ } \
+ TYPVAL_ENCODE_CONV_BOOL(val_di->di_tv.vval.v_number); \
+ break; \
+ } \
+ case kMPInteger: { \
+ const list_T *val_list; \
+ varnumber_T sign; \
+ varnumber_T highest_bits; \
+ varnumber_T high_bits; \
+ varnumber_T low_bits; \
+ /* List of 4 integers; first is signed (should be 1 or -1, but */ \
+ /* this is not checked), second is unsigned and has at most */ \
+ /* one (sign is -1) or two (sign is 1) non-zero bits (number of */ \
+ /* bits is not checked), others are unsigned and have at most 31 */ \
+ /* non-zero bits (number of bits is not checked).*/ \
+ if (val_di->di_tv.v_type != VAR_LIST \
+ || (val_list = val_di->di_tv.vval.v_list) == NULL \
+ || val_list->lv_len != 4 \
+ || val_list->lv_first->li_tv.v_type != VAR_NUMBER \
+ || (sign = val_list->lv_first->li_tv.vval.v_number) == 0 \
+ || val_list->lv_first->li_next->li_tv.v_type != VAR_NUMBER \
+ || (highest_bits = \
+ val_list->lv_first->li_next->li_tv.vval.v_number) < 0 \
+ || val_list->lv_last->li_prev->li_tv.v_type != VAR_NUMBER \
+ || (high_bits = \
+ val_list->lv_last->li_prev->li_tv.vval.v_number) < 0 \
+ || val_list->lv_last->li_tv.v_type != VAR_NUMBER \
+ || (low_bits = val_list->lv_last->li_tv.vval.v_number) < 0) { \
+ goto name##_convert_one_value_regular_dict; \
+ } \
+ uint64_t number = ((uint64_t) (((uint64_t) highest_bits) << 62) \
+ | (uint64_t) (((uint64_t) high_bits) << 31) \
+ | (uint64_t) low_bits); \
+ if (sign > 0) { \
+ TYPVAL_ENCODE_CONV_UNSIGNED_NUMBER(number); \
+ } else { \
+ TYPVAL_ENCODE_CONV_NUMBER(-number); \
+ } \
+ break; \
+ } \
+ case kMPFloat: { \
+ if (val_di->di_tv.v_type != VAR_FLOAT) { \
+ goto name##_convert_one_value_regular_dict; \
+ } \
+ TYPVAL_ENCODE_CONV_FLOAT(val_di->di_tv.vval.v_float); \
+ break; \
+ } \
+ case kMPString: \
+ case kMPBinary: { \
+ const bool is_string = ((MessagePackType) i == kMPString); \
+ if (val_di->di_tv.v_type != VAR_LIST) { \
+ goto name##_convert_one_value_regular_dict; \
+ } \
+ size_t len; \
+ char *buf; \
+ if (!encode_vim_list_to_buf(val_di->di_tv.vval.v_list, &len, \
+ &buf)) { \
+ goto name##_convert_one_value_regular_dict; \
+ } \
+ if (is_string) { \
+ TYPVAL_ENCODE_CONV_STR_STRING(buf, len); \
+ } else { \
+ TYPVAL_ENCODE_CONV_STRING(buf, len); \
+ } \
+ xfree(buf); \
+ break; \
+ } \
+ case kMPArray: { \
+ if (val_di->di_tv.v_type != VAR_LIST) { \
+ goto name##_convert_one_value_regular_dict; \
+ } \
+ _TYPVAL_ENCODE_CHECK_SELF_REFERENCE(val_di->di_tv.vval.v_list, \
+ lv_copyID, kMPConvList); \
+ TYPVAL_ENCODE_CONV_LIST_START(val_di->di_tv.vval.v_list->lv_len); \
+ _mp_push(*mpstack, ((MPConvStackVal) { \
+ .type = kMPConvList, \
+ .data = { \
+ .l = { \
+ .list = val_di->di_tv.vval.v_list, \
+ .li = val_di->di_tv.vval.v_list->lv_first, \
+ }, \
+ }, \
+ })); \
+ break; \
+ } \
+ case kMPMap: { \
+ if (val_di->di_tv.v_type != VAR_LIST) { \
+ goto name##_convert_one_value_regular_dict; \
+ } \
+ list_T *const val_list = val_di->di_tv.vval.v_list; \
+ if (val_list == NULL || val_list->lv_len == 0) { \
+ TYPVAL_ENCODE_CONV_EMPTY_DICT(); \
+ break; \
+ } \
+ for (const listitem_T *li = val_list->lv_first; li != NULL; \
+ li = li->li_next) { \
+ if (li->li_tv.v_type != VAR_LIST \
+ || li->li_tv.vval.v_list->lv_len != 2) { \
+ goto name##_convert_one_value_regular_dict; \
+ } \
+ } \
+ _TYPVAL_ENCODE_CHECK_SELF_REFERENCE(val_list, lv_copyID, \
+ kMPConvPairs); \
+ TYPVAL_ENCODE_CONV_DICT_START(val_list->lv_len); \
+ _mp_push(*mpstack, ((MPConvStackVal) { \
+ .type = kMPConvPairs, \
+ .data = { \
+ .l = { \
+ .list = val_list, \
+ .li = val_list->lv_first, \
+ }, \
+ }, \
+ })); \
+ break; \
+ } \
+ case kMPExt: { \
+ const list_T *val_list; \
+ varnumber_T type; \
+ if (val_di->di_tv.v_type != VAR_LIST \
+ || (val_list = val_di->di_tv.vval.v_list) == NULL \
+ || val_list->lv_len != 2 \
+ || (val_list->lv_first->li_tv.v_type != VAR_NUMBER) \
+ || (type = val_list->lv_first->li_tv.vval.v_number) > INT8_MAX \
+ || type < INT8_MIN \
+ || (val_list->lv_last->li_tv.v_type != VAR_LIST)) { \
+ goto name##_convert_one_value_regular_dict; \
+ } \
+ size_t len; \
+ char *buf; \
+ if (!encode_vim_list_to_buf(val_list->lv_last->li_tv.vval.v_list, \
+ &len, &buf)) { \
+ goto name##_convert_one_value_regular_dict; \
+ } \
+ TYPVAL_ENCODE_CONV_EXT_STRING(buf, len, type); \
+ xfree(buf); \
+ break; \
+ } \
+ } \
+ break; \
+ } \
+name##_convert_one_value_regular_dict: \
+ _TYPVAL_ENCODE_CHECK_SELF_REFERENCE(tv->vval.v_dict, dv_copyID, \
+ kMPConvDict); \
+ TYPVAL_ENCODE_CONV_DICT_START(tv->vval.v_dict->dv_hashtab.ht_used); \
+ _mp_push(*mpstack, ((MPConvStackVal) { \
+ .type = kMPConvDict, \
+ .data = { \
+ .d = { \
+ .dict = tv->vval.v_dict, \
+ .hi = tv->vval.v_dict->dv_hashtab.ht_array, \
+ .todo = tv->vval.v_dict->dv_hashtab.ht_used, \
+ }, \
+ }, \
+ })); \
+ break; \
+ } \
+ case VAR_UNKNOWN: { \
+ EMSG2(_(e_intern2), #name "_convert_one_value()"); \
+ return FAIL; \
+ } \
+ } \
+ return OK; \
+} \
+\
+scope int encode_vim_to_##name(firstargtype firstargname, typval_T *const tv, \
+ const char *const objname) \
+ FUNC_ATTR_WARN_UNUSED_RESULT \
+{ \
+ const int copyID = get_copyID(); \
+ MPConvStack mpstack; \
+ _mp_init(mpstack); \
+ if (name##_convert_one_value(firstargname, &mpstack, tv, copyID, objname) \
+ == FAIL) { \
+ goto encode_vim_to_##name##_error_ret; \
+ } \
+ while (_mp_size(mpstack)) { \
+ MPConvStackVal *cur_mpsv = &_mp_last(mpstack); \
+ typval_T *cur_tv = NULL; \
+ switch (cur_mpsv->type) { \
+ case kMPConvDict: { \
+ if (!cur_mpsv->data.d.todo) { \
+ (void) _mp_pop(mpstack); \
+ cur_mpsv->data.d.dict->dv_copyID = copyID - 1; \
+ TYPVAL_ENCODE_CONV_DICT_END(); \
+ continue; \
+ } else if (cur_mpsv->data.d.todo \
+ != cur_mpsv->data.d.dict->dv_hashtab.ht_used) { \
+ TYPVAL_ENCODE_CONV_DICT_BETWEEN_ITEMS(); \
+ } \
+ while (HASHITEM_EMPTY(cur_mpsv->data.d.hi)) { \
+ cur_mpsv->data.d.hi++; \
+ } \
+ dictitem_T *const di = HI2DI(cur_mpsv->data.d.hi); \
+ cur_mpsv->data.d.todo--; \
+ cur_mpsv->data.d.hi++; \
+ TYPVAL_ENCODE_CONV_STR_STRING(&di->di_key[0], \
+ strlen((char *) &di->di_key[0])); \
+ TYPVAL_ENCODE_CONV_DICT_AFTER_KEY(); \
+ cur_tv = &di->di_tv; \
+ break; \
+ } \
+ case kMPConvList: { \
+ if (cur_mpsv->data.l.li == NULL) { \
+ (void) _mp_pop(mpstack); \
+ cur_mpsv->data.l.list->lv_copyID = copyID - 1; \
+ TYPVAL_ENCODE_CONV_LIST_END(); \
+ continue; \
+ } else if (cur_mpsv->data.l.li != cur_mpsv->data.l.list->lv_first) { \
+ TYPVAL_ENCODE_CONV_LIST_BETWEEN_ITEMS(); \
+ } \
+ cur_tv = &cur_mpsv->data.l.li->li_tv; \
+ cur_mpsv->data.l.li = cur_mpsv->data.l.li->li_next; \
+ break; \
+ } \
+ case kMPConvPairs: { \
+ if (cur_mpsv->data.l.li == NULL) { \
+ (void) _mp_pop(mpstack); \
+ cur_mpsv->data.l.list->lv_copyID = copyID - 1; \
+ TYPVAL_ENCODE_CONV_DICT_END(); \
+ continue; \
+ } else if (cur_mpsv->data.l.li != cur_mpsv->data.l.list->lv_first) { \
+ TYPVAL_ENCODE_CONV_DICT_BETWEEN_ITEMS(); \
+ } \
+ const list_T *const kv_pair = cur_mpsv->data.l.li->li_tv.vval.v_list; \
+ TYPVAL_ENCODE_CONV_SPECIAL_DICT_KEY_CHECK( \
+ encode_vim_to_##name##_error_ret, kv_pair); \
+ if (name##_convert_one_value(firstargname, &mpstack, \
+ &kv_pair->lv_first->li_tv, copyID, \
+ objname) == FAIL) { \
+ goto encode_vim_to_##name##_error_ret; \
+ } \
+ TYPVAL_ENCODE_CONV_DICT_AFTER_KEY(); \
+ cur_tv = &kv_pair->lv_last->li_tv; \
+ cur_mpsv->data.l.li = cur_mpsv->data.l.li->li_next; \
+ break; \
+ } \
+ } \
+ assert(cur_tv != NULL); \
+ if (name##_convert_one_value(firstargname, &mpstack, cur_tv, copyID, \
+ objname) == FAIL) { \
+ goto encode_vim_to_##name##_error_ret; \
+ } \
+ } \
+ _mp_destroy(mpstack); \
+ return OK; \
+encode_vim_to_##name##_error_ret: \
+ _mp_destroy(mpstack); \
+ return FAIL; \
+}
+
+#endif // NVIM_EVAL_TYPVAL_ENCODE_H
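For orientation, the kMPInteger branch above rebuilds a 64-bit value from the
four-element list [sign, highest, high, low] stored under the special-dict key.
A minimal sketch of the inverse split; the helper name is hypothetical and not
part of this patch:

    #include <stdint.h>

    // Split an unsigned 64-bit value into the 2/31/31-bit pieces that the
    // kMPInteger case recombines as (highest << 62) | (high << 31) | low.
    static void split_u64_for_special_dict(uint64_t n, int64_t out[4])
    {
      out[0] = 1;                                  // sign: 1 (use -1 and negate for negatives)
      out[1] = (int64_t)((n >> 62) & 0x3);         // highest 2 bits
      out[2] = (int64_t)((n >> 31) & 0x7FFFFFFF);  // next 31 bits
      out[3] = (int64_t)(n & 0x7FFFFFFF);          // lowest 31 bits
    }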
diff --git a/src/nvim/event/defs.h b/src/nvim/event/defs.h
index b802866a3d..e5335d9f25 100644
--- a/src/nvim/event/defs.h
+++ b/src/nvim/event/defs.h
@@ -14,19 +14,19 @@ typedef struct message {
} Event;
typedef void(*event_scheduler)(Event event, void *data);
-#define VA_EVENT_INIT(event, p, h, a) \
- do { \
- assert(a <= EVENT_HANDLER_MAX_ARGC); \
- (event)->priority = p; \
- (event)->handler = h; \
- if (a) { \
- va_list args; \
- va_start(args, a); \
- for (int i = 0; i < a; i++) { \
- (event)->argv[i] = va_arg(args, void *); \
- } \
- va_end(args); \
- } \
+#define VA_EVENT_INIT(event, p, h, a) \
+ do { \
+ assert(a <= EVENT_HANDLER_MAX_ARGC); \
+ (event)->priority = p; \
+ (event)->handler = h; \
+ if (a) { \
+ va_list args; \
+ va_start(args, a); \
+ for (int i = 0; i < a; i++) { \
+ (event)->argv[i] = va_arg(args, void *); \
+ } \
+ va_end(args); \
+ } \
} while (0)
static inline Event event_create(int priority, argv_callback cb, int argc, ...)
diff --git a/src/nvim/event/libuv_process.c b/src/nvim/event/libuv_process.c
index 9ef3468284..a68badcc8f 100644
--- a/src/nvim/event/libuv_process.c
+++ b/src/nvim/event/libuv_process.c
@@ -25,7 +25,7 @@ bool libuv_process_spawn(LibuvProcess *uvproc)
uvproc->uvopts.flags |= UV_PROCESS_DETACHED;
}
uvproc->uvopts.exit_cb = exit_cb;
- uvproc->uvopts.cwd = NULL;
+ uvproc->uvopts.cwd = proc->cwd;
uvproc->uvopts.env = NULL;
uvproc->uvopts.stdio = uvproc->uvstdio;
uvproc->uvopts.stdio_count = 3;
diff --git a/src/nvim/event/loop.h b/src/nvim/event/loop.h
index 0c1fcb5ed9..407aa4245f 100644
--- a/src/nvim/event/loop.h
+++ b/src/nvim/event/loop.h
@@ -26,43 +26,43 @@ typedef struct loop {
int recursive;
} Loop;
-#define CREATE_EVENT(queue, handler, argc, ...) \
- do { \
- if (queue) { \
- queue_put((queue), (handler), argc, __VA_ARGS__); \
- } else { \
- void *argv[argc] = {__VA_ARGS__}; \
- (handler)(argv); \
- } \
+#define CREATE_EVENT(queue, handler, argc, ...) \
+ do { \
+ if (queue) { \
+ queue_put((queue), (handler), argc, __VA_ARGS__); \
+ } else { \
+ void *argv[argc] = { __VA_ARGS__ }; \
+ (handler)(argv); \
+ } \
} while (0)
// Poll for events until a condition or timeout
-#define LOOP_PROCESS_EVENTS_UNTIL(loop, queue, timeout, condition) \
- do { \
- int remaining = timeout; \
- uint64_t before = (remaining > 0) ? os_hrtime() : 0; \
- while (!(condition)) { \
- LOOP_PROCESS_EVENTS(loop, queue, remaining); \
- if (remaining == 0) { \
- break; \
- } else if (remaining > 0) { \
- uint64_t now = os_hrtime(); \
- remaining -= (int) ((now - before) / 1000000); \
- before = now; \
- if (remaining <= 0) { \
- break; \
- } \
- } \
- } \
+#define LOOP_PROCESS_EVENTS_UNTIL(loop, queue, timeout, condition) \
+ do { \
+ int remaining = timeout; \
+ uint64_t before = (remaining > 0) ? os_hrtime() : 0; \
+ while (!(condition)) { \
+ LOOP_PROCESS_EVENTS(loop, queue, remaining); \
+ if (remaining == 0) { \
+ break; \
+ } else if (remaining > 0) { \
+ uint64_t now = os_hrtime(); \
+ remaining -= (int) ((now - before) / 1000000); \
+ before = now; \
+ if (remaining <= 0) { \
+ break; \
+ } \
+ } \
+ } \
} while (0)
-#define LOOP_PROCESS_EVENTS(loop, queue, timeout) \
- do { \
- if (queue && !queue_empty(queue)) { \
- queue_process_events(queue); \
- } else { \
- loop_poll_events(loop, timeout); \
- } \
+#define LOOP_PROCESS_EVENTS(loop, queue, timeout) \
+ do { \
+ if (queue && !queue_empty(queue)) { \
+ queue_process_events(queue); \
+ } else { \
+ loop_poll_events(loop, timeout); \
+ } \
} while (0)
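A usage sketch of LOOP_PROCESS_EVENTS_UNTIL, assuming the main_loop instance
introduced later in this patch; the request_done flag is hypothetical:

    bool request_done = false;
    // Process events on main_loop.events until a handler sets the flag or
    // 500 ms elapse; a negative timeout keeps polling until the condition holds.
    LOOP_PROCESS_EVENTS_UNTIL(&main_loop, main_loop.events, 500, request_done);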
diff --git a/src/nvim/event/process.c b/src/nvim/event/process.c
index 9bb62891c7..1fffdbf957 100644
--- a/src/nvim/event/process.c
+++ b/src/nvim/event/process.c
@@ -9,7 +9,7 @@
#include "nvim/event/wstream.h"
#include "nvim/event/process.h"
#include "nvim/event/libuv_process.h"
-#include "nvim/event/pty_process.h"
+#include "nvim/os/pty_process.h"
#include "nvim/globals.h"
#include "nvim/log.h"
@@ -22,11 +22,11 @@
#define TERM_TIMEOUT 1000000000
#define KILL_TIMEOUT (TERM_TIMEOUT * 2)
-#define CLOSE_PROC_STREAM(proc, stream) \
- do { \
- if (proc->stream && !proc->stream->closed) { \
- stream_close(proc->stream, NULL); \
- } \
+#define CLOSE_PROC_STREAM(proc, stream) \
+ do { \
+ if (proc->stream && !proc->stream->closed) { \
+ stream_close(proc->stream, NULL); \
+ } \
} while (0)
static bool process_is_tearing_down = false;
diff --git a/src/nvim/event/process.h b/src/nvim/event/process.h
index e23c8ea60f..a4c6e7eeb2 100644
--- a/src/nvim/event/process.h
+++ b/src/nvim/event/process.h
@@ -21,6 +21,7 @@ struct process {
int pid, status, refcount;
// set to the hrtime of when process_stop was called for the process.
uint64_t stopped_time;
+ char *cwd;
char **argv;
Stream *in, *out, *err;
process_exit_cb cb;
@@ -40,6 +41,7 @@ static inline Process process_init(Loop *loop, ProcessType type, void *data)
.status = 0,
.refcount = 0,
.stopped_time = 0,
+ .cwd = NULL,
.argv = NULL,
.in = NULL,
.out = NULL,
diff --git a/src/nvim/ex_docmd.c b/src/nvim/ex_docmd.c
index 5096f17df0..6864e2b914 100644
--- a/src/nvim/ex_docmd.c
+++ b/src/nvim/ex_docmd.c
@@ -6866,6 +6866,9 @@ void post_chdir(CdScope scope)
curwin->w_localdir = vim_strsave(NameBuff);
}
break;
+ case kCdScopeInvalid:
+ // We should never get here
+ assert(false);
}
shorten_fnames(TRUE);
@@ -6993,7 +6996,7 @@ void do_sleep(long msec)
ui_flush(); // flush before waiting
for (long left = msec; !got_int && left > 0; left -= 1000L) {
int next = left > 1000l ? 1000 : (int)left;
- LOOP_PROCESS_EVENTS_UNTIL(&loop, loop.events, (int)next, got_int);
+ LOOP_PROCESS_EVENTS_UNTIL(&main_loop, main_loop.events, (int)next, got_int);
os_breakcheck();
}
}
@@ -7857,19 +7860,26 @@ static void ex_stopinsert(exarg_T *eap)
*/
void exec_normal_cmd(char_u *cmd, int remap, bool silent)
{
+ // Stuff the argument into the typeahead buffer.
+ ins_typebuf(cmd, remap, 0, true, silent);
+ exec_normal(false);
+}
+
+/// Execute normal_cmd() until there is no typeahead left.
+///
+/// @param was_typed whether or not something was typed
+void exec_normal(bool was_typed)
+{
oparg_T oa;
- /*
- * Stuff the argument into the typeahead buffer.
- * Execute normal_cmd() until there is no typeahead left.
- */
clear_oparg(&oa);
- finish_op = FALSE;
- ins_typebuf(cmd, remap, 0, TRUE, silent);
- while ((!stuff_empty() || (!typebuf_typed() && typebuf.tb_len > 0))
+ finish_op = false;
+ while ((!stuff_empty()
+ || ((was_typed || !typebuf_typed())
+ && typebuf.tb_len > 0))
&& !got_int) {
update_topline_cursor();
- normal_cmd(&oa, TRUE); /* execute a Normal mode cmd */
+ normal_cmd(&oa, true); // execute a Normal mode cmd
}
}
diff --git a/src/nvim/ex_docmd.h b/src/nvim/ex_docmd.h
index dbfc64e2f1..bafad20169 100644
--- a/src/nvim/ex_docmd.h
+++ b/src/nvim/ex_docmd.h
@@ -26,6 +26,7 @@
/// `getcwd()`. When using scopes as limits (e.g. in loops) don't use the scopes
/// directly, use `MIN_CD_SCOPE` and `MAX_CD_SCOPE` instead.
typedef enum {
+ kCdScopeInvalid = -1,
kCdScopeWindow, ///< Affects one window.
kCdScopeTab, ///< Affects one tab page.
kCdScopeGlobal, ///< Affects the entire instance of Neovim.
diff --git a/src/nvim/ex_getln.c b/src/nvim/ex_getln.c
index db21fddedb..65144dace8 100644
--- a/src/nvim/ex_getln.c
+++ b/src/nvim/ex_getln.c
@@ -358,7 +358,7 @@ static int command_line_execute(VimState *state, int key)
s->c = key;
if (s->c == K_EVENT) {
- queue_process_events(loop.events);
+ queue_process_events(main_loop.events);
redrawcmdline();
return 1;
}
diff --git a/src/nvim/file_search.c b/src/nvim/file_search.c
index beefc4238e..f7555c99fa 100644
--- a/src/nvim/file_search.c
+++ b/src/nvim/file_search.c
@@ -1378,7 +1378,7 @@ find_file_in_path_option (
/* copy file name into NameBuff, expanding environment variables */
save_char = ptr[len];
ptr[len] = NUL;
- expand_env(ptr, NameBuff, MAXPATHL);
+ expand_env_esc(ptr, NameBuff, MAXPATHL, false, true, NULL);
ptr[len] = save_char;
xfree(ff_file_to_find);
diff --git a/src/nvim/func_attr.h b/src/nvim/func_attr.h
index af8558d40d..ea017ab0c8 100644
--- a/src/nvim/func_attr.h
+++ b/src/nvim/func_attr.h
@@ -41,169 +41,174 @@
// $ gcc -E -dM - </dev/null
// $ echo | clang -dM -E -
+#ifndef NVIM_FUNC_ATTR_H
+#define NVIM_FUNC_ATTR_H
+#undef NVIM_FUNC_ATTR_H
+
#ifdef FUNC_ATTR_MALLOC
- #undef FUNC_ATTR_MALLOC
+# undef FUNC_ATTR_MALLOC
#endif
#ifdef FUNC_ATTR_ALLOC_SIZE
- #undef FUNC_ATTR_ALLOC_SIZE
+# undef FUNC_ATTR_ALLOC_SIZE
#endif
#ifdef FUNC_ATTR_ALLOC_SIZE_PROD
- #undef FUNC_ATTR_ALLOC_SIZE_PROD
+# undef FUNC_ATTR_ALLOC_SIZE_PROD
#endif
#ifdef FUNC_ATTR_ALLOC_ALIGN
- #undef FUNC_ATTR_ALLOC_ALIGN
+# undef FUNC_ATTR_ALLOC_ALIGN
#endif
#ifdef FUNC_ATTR_PURE
- #undef FUNC_ATTR_PURE
+# undef FUNC_ATTR_PURE
#endif
#ifdef FUNC_ATTR_CONST
- #undef FUNC_ATTR_CONST
+# undef FUNC_ATTR_CONST
#endif
#ifdef FUNC_ATTR_WARN_UNUSED_RESULT
- #undef FUNC_ATTR_WARN_UNUSED_RESULT
+# undef FUNC_ATTR_WARN_UNUSED_RESULT
#endif
#ifdef FUNC_ATTR_ALWAYS_INLINE
- #undef FUNC_ATTR_ALWAYS_INLINE
+# undef FUNC_ATTR_ALWAYS_INLINE
#endif
#ifdef FUNC_ATTR_UNUSED
- #undef FUNC_ATTR_UNUSED
+# undef FUNC_ATTR_UNUSED
#endif
#ifdef FUNC_ATTR_NONNULL_ALL
- #undef FUNC_ATTR_NONNULL_ALL
+# undef FUNC_ATTR_NONNULL_ALL
#endif
#ifdef FUNC_ATTR_NONNULL_ARG
- #undef FUNC_ATTR_NONNULL_ARG
+# undef FUNC_ATTR_NONNULL_ARG
#endif
#ifdef FUNC_ATTR_NONNULL_RET
- #undef FUNC_ATTR_NONNULL_RET
+# undef FUNC_ATTR_NONNULL_RET
#endif
#ifndef DID_REAL_ATTR
- #define DID_REAL_ATTR
- #ifdef __GNUC__
- // place defines for all gnulikes here, for now that's gcc, clang and
- // intel.
-
- // place these after the argument list of the function declaration
- // (not definition), like so:
- // void myfunc(void) REAL_FATTR_ALWAYS_INLINE;
- #define REAL_FATTR_MALLOC __attribute__((malloc))
- #define REAL_FATTR_ALLOC_ALIGN(x) __attribute__((alloc_align(x)))
- #define REAL_FATTR_PURE __attribute__ ((pure))
- #define REAL_FATTR_CONST __attribute__((const))
- #define REAL_FATTR_WARN_UNUSED_RESULT __attribute__((warn_unused_result))
- #define REAL_FATTR_ALWAYS_INLINE __attribute__((always_inline))
- #define REAL_FATTR_UNUSED __attribute__((unused))
- #define REAL_FATTR_NONNULL_ALL __attribute__((nonnull))
- #define REAL_FATTR_NONNULL_ARG(...) __attribute__((nonnull(__VA_ARGS__)))
-
- #ifdef __clang__
- // clang only
- #elif defined(__INTEL_COMPILER)
- // intel only
- #else
- #define GCC_VERSION \
+# define DID_REAL_ATTR
+# ifdef __GNUC__
+// place defines for all gnulikes here, for now that's gcc, clang and
+// intel.
+
+// place these after the argument list of the function declaration
+// (not definition), like so:
+// void myfunc(void) REAL_FATTR_ALWAYS_INLINE;
+# define REAL_FATTR_MALLOC __attribute__((malloc))
+# define REAL_FATTR_ALLOC_ALIGN(x) __attribute__((alloc_align(x)))
+# define REAL_FATTR_PURE __attribute__ ((pure))
+# define REAL_FATTR_CONST __attribute__((const))
+# define REAL_FATTR_WARN_UNUSED_RESULT __attribute__((warn_unused_result))
+# define REAL_FATTR_ALWAYS_INLINE __attribute__((always_inline))
+# define REAL_FATTR_UNUSED __attribute__((unused))
+# define REAL_FATTR_NONNULL_ALL __attribute__((nonnull))
+# define REAL_FATTR_NONNULL_ARG(...) __attribute__((nonnull(__VA_ARGS__)))
+
+# ifdef __clang__
+// clang only
+# elif defined(__INTEL_COMPILER)
+// intel only
+# else
+# define GCC_VERSION \
(__GNUC__ * 10000 + \
- __GNUC_MINOR__ * 100 + \
- __GNUC_PATCHLEVEL__)
- // gcc only
- #define REAL_FATTR_ALLOC_SIZE(x) __attribute__((alloc_size(x)))
- #define REAL_FATTR_ALLOC_SIZE_PROD(x,y) __attribute__((alloc_size(x,y)))
- #if GCC_VERSION >= 40900
- #define REAL_FATTR_NONNULL_RET __attribute__((returns_nonnull))
- #endif
- #endif
- #endif
-
- // define function attributes that haven't been defined for this specific
- // compiler.
-
- #ifndef REAL_FATTR_MALLOC
- #define REAL_FATTR_MALLOC
- #endif
-
- #ifndef REAL_FATTR_ALLOC_SIZE
- #define REAL_FATTR_ALLOC_SIZE(x)
- #endif
-
- #ifndef REAL_FATTR_ALLOC_SIZE_PROD
- #define REAL_FATTR_ALLOC_SIZE_PROD(x,y)
- #endif
-
- #ifndef REAL_FATTR_ALLOC_ALIGN
- #define REAL_FATTR_ALLOC_ALIGN(x)
- #endif
-
- #ifndef REAL_FATTR_PURE
- #define REAL_FATTR_PURE
- #endif
-
- #ifndef REAL_FATTR_CONST
- #define REAL_FATTR_CONST
- #endif
-
- #ifndef REAL_FATTR_WARN_UNUSED_RESULT
- #define REAL_FATTR_WARN_UNUSED_RESULT
- #endif
-
- #ifndef REAL_FATTR_ALWAYS_INLINE
- #define REAL_FATTR_ALWAYS_INLINE
- #endif
-
- #ifndef REAL_FATTR_UNUSED
- #define REAL_FATTR_UNUSED
- #endif
-
- #ifndef REAL_FATTR_NONNULL_ALL
- #define REAL_FATTR_NONNULL_ALL
- #endif
-
- #ifndef REAL_FATTR_NONNULL_ARG
- #define REAL_FATTR_NONNULL_ARG(...)
- #endif
-
- #ifndef REAL_FATTR_NONNULL_RET
- #define REAL_FATTR_NONNULL_RET
- #endif
+ __GNUC_MINOR__ * 100 + \
+ __GNUC_PATCHLEVEL__)
+// gcc only
+# define REAL_FATTR_ALLOC_SIZE(x) __attribute__((alloc_size(x)))
+# define REAL_FATTR_ALLOC_SIZE_PROD(x, y) __attribute__((alloc_size(x, y)))
+# if GCC_VERSION >= 40900
+# define REAL_FATTR_NONNULL_RET __attribute__((returns_nonnull))
+# endif
+# endif
+# endif
+
+// define function attributes that haven't been defined for this specific
+// compiler.
+
+# ifndef REAL_FATTR_MALLOC
+# define REAL_FATTR_MALLOC
+# endif
+
+# ifndef REAL_FATTR_ALLOC_SIZE
+# define REAL_FATTR_ALLOC_SIZE(x)
+# endif
+
+# ifndef REAL_FATTR_ALLOC_SIZE_PROD
+# define REAL_FATTR_ALLOC_SIZE_PROD(x, y)
+# endif
+
+# ifndef REAL_FATTR_ALLOC_ALIGN
+# define REAL_FATTR_ALLOC_ALIGN(x)
+# endif
+
+# ifndef REAL_FATTR_PURE
+# define REAL_FATTR_PURE
+# endif
+
+# ifndef REAL_FATTR_CONST
+# define REAL_FATTR_CONST
+# endif
+
+# ifndef REAL_FATTR_WARN_UNUSED_RESULT
+# define REAL_FATTR_WARN_UNUSED_RESULT
+# endif
+
+# ifndef REAL_FATTR_ALWAYS_INLINE
+# define REAL_FATTR_ALWAYS_INLINE
+# endif
+
+# ifndef REAL_FATTR_UNUSED
+# define REAL_FATTR_UNUSED
+# endif
+
+# ifndef REAL_FATTR_NONNULL_ALL
+# define REAL_FATTR_NONNULL_ALL
+# endif
+
+# ifndef REAL_FATTR_NONNULL_ARG
+# define REAL_FATTR_NONNULL_ARG(...)
+# endif
+
+# ifndef REAL_FATTR_NONNULL_RET
+# define REAL_FATTR_NONNULL_RET
+# endif
#endif
#ifdef DEFINE_FUNC_ATTRIBUTES
- #define FUNC_API_ASYNC
- #define FUNC_API_NOEXPORT
- #define FUNC_ATTR_MALLOC REAL_FATTR_MALLOC
- #define FUNC_ATTR_ALLOC_SIZE(x) REAL_FATTR_ALLOC_SIZE(x)
- #define FUNC_ATTR_ALLOC_SIZE_PROD(x,y) REAL_FATTR_ALLOC_SIZE_PROD(x,y)
- #define FUNC_ATTR_ALLOC_ALIGN(x) REAL_FATTR_ALLOC_ALIGN(x)
- #define FUNC_ATTR_PURE REAL_FATTR_PURE
- #define FUNC_ATTR_CONST REAL_FATTR_CONST
- #define FUNC_ATTR_WARN_UNUSED_RESULT REAL_FATTR_WARN_UNUSED_RESULT
- #define FUNC_ATTR_ALWAYS_INLINE REAL_FATTR_ALWAYS_INLINE
- #define FUNC_ATTR_UNUSED REAL_FATTR_UNUSED
- #define FUNC_ATTR_NONNULL_ALL REAL_FATTR_NONNULL_ALL
- #define FUNC_ATTR_NONNULL_ARG(...) REAL_FATTR_NONNULL_ARG(__VA_ARGS__)
- #define FUNC_ATTR_NONNULL_RET REAL_FATTR_NONNULL_RET
+# define FUNC_API_ASYNC
+# define FUNC_API_NOEXPORT
+# define FUNC_ATTR_MALLOC REAL_FATTR_MALLOC
+# define FUNC_ATTR_ALLOC_SIZE(x) REAL_FATTR_ALLOC_SIZE(x)
+# define FUNC_ATTR_ALLOC_SIZE_PROD(x, y) REAL_FATTR_ALLOC_SIZE_PROD(x, y)
+# define FUNC_ATTR_ALLOC_ALIGN(x) REAL_FATTR_ALLOC_ALIGN(x)
+# define FUNC_ATTR_PURE REAL_FATTR_PURE
+# define FUNC_ATTR_CONST REAL_FATTR_CONST
+# define FUNC_ATTR_WARN_UNUSED_RESULT REAL_FATTR_WARN_UNUSED_RESULT
+# define FUNC_ATTR_ALWAYS_INLINE REAL_FATTR_ALWAYS_INLINE
+# define FUNC_ATTR_UNUSED REAL_FATTR_UNUSED
+# define FUNC_ATTR_NONNULL_ALL REAL_FATTR_NONNULL_ALL
+# define FUNC_ATTR_NONNULL_ARG(...) REAL_FATTR_NONNULL_ARG(__VA_ARGS__)
+# define FUNC_ATTR_NONNULL_RET REAL_FATTR_NONNULL_RET
#elif !defined(DO_NOT_DEFINE_EMPTY_ATTRIBUTES)
- #define FUNC_ATTR_MALLOC
- #define FUNC_ATTR_ALLOC_SIZE(x)
- #define FUNC_ATTR_ALLOC_SIZE_PROD(x,y)
- #define FUNC_ATTR_ALLOC_ALIGN(x)
- #define FUNC_ATTR_PURE
- #define FUNC_ATTR_CONST
- #define FUNC_ATTR_WARN_UNUSED_RESULT
- #define FUNC_ATTR_ALWAYS_INLINE
- #define FUNC_ATTR_UNUSED
- #define FUNC_ATTR_NONNULL_ALL
- #define FUNC_ATTR_NONNULL_ARG(...)
- #define FUNC_ATTR_NONNULL_RET
+# define FUNC_ATTR_MALLOC
+# define FUNC_ATTR_ALLOC_SIZE(x)
+# define FUNC_ATTR_ALLOC_SIZE_PROD(x, y)
+# define FUNC_ATTR_ALLOC_ALIGN(x)
+# define FUNC_ATTR_PURE
+# define FUNC_ATTR_CONST
+# define FUNC_ATTR_WARN_UNUSED_RESULT
+# define FUNC_ATTR_ALWAYS_INLINE
+# define FUNC_ATTR_UNUSED
+# define FUNC_ATTR_NONNULL_ALL
+# define FUNC_ATTR_NONNULL_ARG(...)
+# define FUNC_ATTR_NONNULL_RET
#endif
+#endif // NVIM_FUNC_ATTR_H
diff --git a/src/nvim/garray.h b/src/nvim/garray.h
index 642eaf54f0..5d7806bbfa 100644
--- a/src/nvim/garray.h
+++ b/src/nvim/garray.h
@@ -21,10 +21,10 @@ typedef struct growarray {
#define GA_EMPTY(ga_ptr) ((ga_ptr)->ga_len <= 0)
-#define GA_APPEND(item_type, gap, item) \
- do { \
- ga_grow(gap, 1); \
- ((item_type *)(gap)->ga_data)[(gap)->ga_len++] = (item); \
+#define GA_APPEND(item_type, gap, item) \
+ do { \
+ ga_grow(gap, 1); \
+ ((item_type *)(gap)->ga_data)[(gap)->ga_len++] = (item); \
} while (0)
#define GA_APPEND_VIA_PTR(item_type, gap) \
@@ -49,16 +49,16 @@ static inline void *ga_append_via_ptr(garray_T *gap, size_t item_size)
/// @param gap the garray to be freed
/// @param item_type type of the item in the garray
/// @param free_item_fn free function that takes (*item_type) as parameter
-#define GA_DEEP_CLEAR(gap, item_type, free_item_fn) \
- do { \
- garray_T *_gap = (gap); \
- if (_gap->ga_data != NULL) { \
- for (int i = 0; i < _gap->ga_len; i++) { \
- item_type *_item = &(((item_type *)_gap->ga_data)[i]); \
- free_item_fn(_item); \
- } \
- } \
- ga_clear(_gap); \
+#define GA_DEEP_CLEAR(gap, item_type, free_item_fn) \
+ do { \
+ garray_T *_gap = (gap); \
+ if (_gap->ga_data != NULL) { \
+ for (int i = 0; i < _gap->ga_len; i++) { \
+ item_type *_item = &(((item_type *)_gap->ga_data)[i]); \
+ free_item_fn(_item); \
+ } \
+ } \
+ ga_clear(_gap); \
} while (false)
#define FREE_PTR_PTR(ptr) xfree(*(ptr))
diff --git a/src/nvim/getchar.c b/src/nvim/getchar.c
index dbf0322d78..ae1857f318 100644
--- a/src/nvim/getchar.c
+++ b/src/nvim/getchar.c
@@ -1754,10 +1754,11 @@ static int vgetorpeek(int advance)
|| ((compl_cont_status & CONT_LOCAL)
&& (c1 == Ctrl_N || c1 == Ctrl_P)))
) {
- if (c1 == K_SPECIAL)
+ if (c1 == K_SPECIAL) {
nolmaplen = 2;
- else {
- LANGMAP_ADJUST(c1, (State & (CMDLINE | INSERT)) == 0);
+ } else {
+ LANGMAP_ADJUST(c1, (State & (CMDLINE | INSERT)) == 0
+ && get_real_state() != SELECTMODE);
nolmaplen = 0;
}
/* First try buffer-local mappings. */
diff --git a/src/nvim/globals.h b/src/nvim/globals.h
index 9cef1feb35..4ac687fa4f 100644
--- a/src/nvim/globals.h
+++ b/src/nvim/globals.h
@@ -1243,7 +1243,6 @@ EXTERN char *ignoredp;
// If a msgpack-rpc channel should be started over stdin/stdout
EXTERN bool embedded_mode INIT(= false);
-EXTERN Loop loop;
/// Used to track the status of external functions.
/// Currently only used for iconv().
diff --git a/src/nvim/lib/klist.h b/src/nvim/lib/klist.h
index 1280a927e8..7ee100ab8c 100644
--- a/src/nvim/lib/klist.h
+++ b/src/nvim/lib/klist.h
@@ -33,35 +33,37 @@
#include "nvim/func_attr.h"
-#define KMEMPOOL_INIT(name, kmptype_t, kmpfree_f) \
- typedef struct { \
- size_t cnt, n, max; \
- kmptype_t **buf; \
- } kmp_##name##_t; \
- static inline kmp_##name##_t *kmp_init_##name(void) { \
- return xcalloc(1, sizeof(kmp_##name##_t)); \
- } \
- static inline void kmp_destroy_##name(kmp_##name##_t *mp) \
- REAL_FATTR_UNUSED; \
- static inline void kmp_destroy_##name(kmp_##name##_t *mp) { \
- size_t k; \
- for (k = 0; k < mp->n; ++k) { \
- kmpfree_f(mp->buf[k]); xfree(mp->buf[k]); \
- } \
- xfree(mp->buf); xfree(mp); \
- } \
- static inline kmptype_t *kmp_alloc_##name(kmp_##name##_t *mp) { \
- ++mp->cnt; \
- if (mp->n == 0) return xcalloc(1, sizeof(kmptype_t)); \
- return mp->buf[--mp->n]; \
- } \
+#define KMEMPOOL_INIT(name, kmptype_t, kmpfree_f) \
+ typedef struct { \
+ size_t cnt, n, max; \
+ kmptype_t **buf; \
+ } kmp_##name##_t; \
+ static inline kmp_##name##_t *kmp_init_##name(void) { \
+ return xcalloc(1, sizeof(kmp_##name##_t)); \
+ } \
+ static inline void kmp_destroy_##name(kmp_##name##_t *mp) \
+ REAL_FATTR_UNUSED; \
+ static inline void kmp_destroy_##name(kmp_##name##_t *mp) { \
+ size_t k; \
+ for (k = 0; k < mp->n; k++) { \
+ kmpfree_f(mp->buf[k]); xfree(mp->buf[k]); \
+ } \
+ xfree(mp->buf); xfree(mp); \
+ } \
+ static inline kmptype_t *kmp_alloc_##name(kmp_##name##_t *mp) { \
+ mp->cnt++; \
+ if (mp->n == 0) { \
+ return xcalloc(1, sizeof(kmptype_t)); \
+ } \
+ return mp->buf[--mp->n]; \
+ } \
static inline void kmp_free_##name(kmp_##name##_t *mp, kmptype_t *p) { \
- --mp->cnt; \
- if (mp->n == mp->max) { \
- mp->max = mp->max? mp->max<<1 : 16; \
+ mp->cnt--; \
+ if (mp->n == mp->max) { \
+ mp->max = mp->max ? (mp->max << 1) : 16; \
mp->buf = xrealloc(mp->buf, sizeof(kmptype_t *) * mp->max); \
- } \
- mp->buf[mp->n++] = p; \
+ } \
+ mp->buf[mp->n++] = p; \
}
#define kmempool_t(name) kmp_##name##_t
@@ -70,54 +72,56 @@
#define kmp_alloc(name, mp) kmp_alloc_##name(mp)
#define kmp_free(name, mp, p) kmp_free_##name(mp, p)
-#define KLIST_INIT(name, kltype_t, kmpfree_t) \
- struct __kl1_##name { \
- kltype_t data; \
- struct __kl1_##name *next; \
- }; \
- typedef struct __kl1_##name kl1_##name; \
- KMEMPOOL_INIT(name, kl1_##name, kmpfree_t) \
- typedef struct { \
- kl1_##name *head, *tail; \
- kmp_##name##_t *mp; \
- size_t size; \
- } kl_##name##_t; \
- static inline kl_##name##_t *kl_init_##name(void) { \
- kl_##name##_t *kl = xcalloc(1, sizeof(kl_##name##_t)); \
- kl->mp = kmp_init(name); \
- kl->head = kl->tail = kmp_alloc(name, kl->mp); \
- kl->head->next = 0; \
- return kl; \
- } \
- static inline void kl_destroy_##name(kl_##name##_t *kl) \
- REAL_FATTR_UNUSED; \
- static inline void kl_destroy_##name(kl_##name##_t *kl) { \
- kl1_##name *p; \
- for (p = kl->head; p != kl->tail; p = p->next) \
- kmp_free(name, kl->mp, p); \
- kmp_free(name, kl->mp, p); \
- kmp_destroy(name, kl->mp); \
- xfree(kl); \
- } \
- static inline void kl_push_##name(kl_##name##_t *kl, kltype_t d) { \
- kl1_##name *q, *p = kmp_alloc(name, kl->mp); \
- q = kl->tail; p->next = 0; kl->tail->next = p; kl->tail = p; \
- ++kl->size; \
- q->data = d; \
- } \
- static inline kltype_t kl_shift_at_##name(kl_##name##_t *kl, \
- kl1_##name **n) { \
- assert((*n)->next); \
- kl1_##name *p; \
- --kl->size; \
- p = *n; \
- *n = (*n)->next; \
- if (p == kl->head) kl->head = *n; \
- kltype_t d = p->data; \
- kmp_free(name, kl->mp, p); \
- return d; \
- } \
-
+#define KLIST_INIT(name, kltype_t, kmpfree_t) \
+ struct __kl1_##name { \
+ kltype_t data; \
+ struct __kl1_##name *next; \
+ }; \
+ typedef struct __kl1_##name kl1_##name; \
+ KMEMPOOL_INIT(name, kl1_##name, kmpfree_t) \
+ typedef struct { \
+ kl1_##name *head, *tail; \
+ kmp_##name##_t *mp; \
+ size_t size; \
+ } kl_##name##_t; \
+ static inline kl_##name##_t *kl_init_##name(void) { \
+ kl_##name##_t *kl = xcalloc(1, sizeof(kl_##name##_t)); \
+ kl->mp = kmp_init(name); \
+ kl->head = kl->tail = kmp_alloc(name, kl->mp); \
+ kl->head->next = 0; \
+ return kl; \
+ } \
+ static inline void kl_destroy_##name(kl_##name##_t *kl) \
+ REAL_FATTR_UNUSED; \
+ static inline void kl_destroy_##name(kl_##name##_t *kl) { \
+ kl1_##name *p; \
+ for (p = kl->head; p != kl->tail; p = p->next) { \
+ kmp_free(name, kl->mp, p); \
+ } \
+ kmp_free(name, kl->mp, p); \
+ kmp_destroy(name, kl->mp); \
+ xfree(kl); \
+ } \
+ static inline void kl_push_##name(kl_##name##_t *kl, kltype_t d) { \
+ kl1_##name *q, *p = kmp_alloc(name, kl->mp); \
+ q = kl->tail; p->next = 0; kl->tail->next = p; kl->tail = p; \
+ kl->size++; \
+ q->data = d; \
+ } \
+ static inline kltype_t kl_shift_at_##name(kl_##name##_t *kl, \
+ kl1_##name **n) { \
+ assert((*n)->next); \
+ kl1_##name *p; \
+ kl->size--; \
+ p = *n; \
+ *n = (*n)->next; \
+ if (p == kl->head) { \
+ kl->head = *n; \
+ } \
+ kltype_t d = p->data; \
+ kmp_free(name, kl->mp, p); \
+ return d; \
+ }
#define kliter_t(name) kl1_##name
#define klist_t(name) kl_##name##_t
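A usage sketch of the reformatted KLIST_INIT above; the "intlist" name and the
no-op free callback are illustrative only:

    #define int_free(x)  // plain ints need no per-node cleanup
    KLIST_INIT(intlist, int, int_free)

    static void intlist_demo(void)
    {
      kl_intlist_t *kl = kl_init_intlist();  // starts with a single sentinel node
      kl_push_intlist(kl, 1);                // append
      kl_push_intlist(kl, 2);
      kl_destroy_intlist(kl);                // returns nodes to the pool, then frees it
    }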
diff --git a/src/nvim/lib/kvec.h b/src/nvim/lib/kvec.h
index b41ef0cc9f..36c91c86b2 100644
--- a/src/nvim/lib/kvec.h
+++ b/src/nvim/lib/kvec.h
@@ -1,59 +1,61 @@
-/* The MIT License
-
- Copyright (c) 2008, by Attractive Chaos <attractor@live.co.uk>
-
- Permission is hereby granted, free of charge, to any person obtaining
- a copy of this software and associated documentation files (the
- "Software"), to deal in the Software without restriction, including
- without limitation the rights to use, copy, modify, merge, publish,
- distribute, sublicense, and/or sell copies of the Software, and to
- permit persons to whom the Software is furnished to do so, subject to
- the following conditions:
-
- The above copyright notice and this permission notice shall be
- included in all copies or substantial portions of the Software.
-
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- SOFTWARE.
-*/
-
-/*
- An example:
-
-#include "kvec.h"
-int main() {
- kvec_t(int) array;
- kv_init(array);
- kv_push(int, array, 10); // append
- kv_a(int, array, 20) = 5; // dynamic
- kv_A(array, 20) = 4; // static
- kv_destroy(array);
- return 0;
-}
-*/
-
-/*
- 2008-09-22 (0.1.0):
+// The MIT License
+//
+// Copyright (c) 2008, by Attractive Chaos <attractor@live.co.uk>
+//
+// Permission is hereby granted, free of charge, to any person obtaining
+// a copy of this software and associated documentation files (the
+// "Software"), to deal in the Software without restriction, including
+// without limitation the rights to use, copy, modify, merge, publish,
+// distribute, sublicense, and/or sell copies of the Software, and to
+// permit persons to whom the Software is furnished to do so, subject to
+// the following conditions:
+//
+// The above copyright notice and this permission notice shall be
+// included in all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+// BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+// ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+// An example:
+//
+// #include "kvec.h"
+// int main() {
+// kvec_t(int) array = KV_INITIAL_VALUE;
+// kv_push(array, 10); // append
+// kv_a(array, 20) = 5; // dynamic
+// kv_A(array, 20) = 4; // static
+// kv_destroy(array);
+// return 0;
+// }
+
+#ifndef NVIM_LIB_KVEC_H
+#define NVIM_LIB_KVEC_H
- * The initial version.
+#include <stdlib.h>
+#include <string.h>
-*/
+#include "nvim/memory.h"
-#ifndef AC_KVEC_H
-#define AC_KVEC_H
+#define kv_roundup32(x) \
+ ((--(x)), \
+ ((x)|=(x)>>1, (x)|=(x)>>2, (x)|=(x)>>4, (x)|=(x)>>8, (x)|=(x)>>16), \
+ (++(x)))
-#include <stdlib.h>
-#include "nvim/memory.h"
+#define KV_INITIAL_VALUE { .size = 0, .capacity = 0, .items = NULL }
-#define kv_roundup32(x) (--(x), (x)|=(x)>>1, (x)|=(x)>>2, (x)|=(x)>>4, (x)|=(x)>>8, (x)|=(x)>>16, ++(x))
+#define kvec_t(type) \
+ struct { \
+ size_t size; \
+ size_t capacity; \
+ type *items; \
+ }
-#define kvec_t(type) struct { size_t size, capacity; type *items; }
#define kv_init(v) ((v).size = (v).capacity = 0, (v).items = 0)
#define kv_destroy(v) xfree((v).items)
#define kv_A(v, i) ((v).items[(i)])
@@ -62,31 +64,130 @@ int main() {
#define kv_max(v) ((v).capacity)
#define kv_last(v) kv_A(v, kv_size(v) - 1)
-#define kv_resize(type, v, s) ((v).capacity = (s), (v).items = (type*)xrealloc((v).items, sizeof(type) * (v).capacity))
-
-#define kv_copy(type, v1, v0) do { \
- if ((v1).capacity < (v0).size) kv_resize(type, v1, (v0).size); \
- (v1).size = (v0).size; \
- memcpy((v1).items, (v0).items, sizeof(type) * (v0).size); \
- } while (0) \
-
-#define kv_push(type, v, x) do { \
- if ((v).size == (v).capacity) { \
- (v).capacity = (v).capacity? (v).capacity<<1 : 8; \
- (v).items = (type*)xrealloc((v).items, sizeof(type) * (v).capacity); \
- } \
- (v).items[(v).size++] = (x); \
- } while (0)
-
-#define kv_pushp(type, v) ((((v).size == (v).capacity)? \
- ((v).capacity = ((v).capacity? (v).capacity<<1 : 8), \
- (v).items = (type*)xrealloc((v).items, sizeof(type) * (v).capacity), 0) \
- : 0), ((v).items + ((v).size++)))
-
-#define kv_a(type, v, i) (((v).capacity <= (size_t)(i)? \
- ((v).capacity = (v).size = (i) + 1, kv_roundup32((v).capacity), \
- (v).items = (type*)xrealloc((v).items, sizeof(type) * (v).capacity), 0) \
- : (v).size <= (size_t)(i)? (v).size = (i) + 1 \
- : 0), (v).items[(i)])
-
-#endif
+#define kv_resize(v, s) \
+ ((v).capacity = (s), \
+ (v).items = xrealloc((v).items, sizeof((v).items[0]) * (v).capacity))
+
+#define kv_resize_full(v) \
+ kv_resize(v, (v).capacity ? (v).capacity << 1 : 8)
+
+#define kv_copy(v1, v0) \
+ do { \
+ if ((v1).capacity < (v0).size) { \
+ kv_resize(v1, (v0).size); \
+ } \
+ (v1).size = (v0).size; \
+ memcpy((v1).items, (v0).items, sizeof((v1).items[0]) * (v0).size); \
+ } while (0) \
+
+#define kv_pushp(v) \
+ ((((v).size == (v).capacity) ? (kv_resize_full(v), 0) : 0), \
+ ((v).items + ((v).size++)))
+
+#define kv_push(v, x) \
+ (*kv_pushp(v) = (x))
+
+#define kv_a(v, i) \
+ (((v).capacity <= (size_t) (i) \
+ ? ((v).capacity = (v).size = (i) + 1, \
+ kv_roundup32((v).capacity), \
+ kv_resize((v), (v).capacity), 0) \
+ : ((v).size <= (size_t) (i) \
+ ? (v).size = (i) + 1 \
+ : 0)), \
+ (v).items[(i)])
+
+/// Type of a vector whose first few elements are allocated on the stack
+///
+/// Is compatible with #kv_A, #kv_pop, #kv_size, #kv_max, #kv_last.
+/// Is not compatible with #kv_resize, #kv_resize_full, #kv_copy, #kv_push,
+/// #kv_pushp, #kv_a, #kv_destroy.
+///
+/// @param[in] type Type of vector elements.
+/// @param[in]  INIT_SIZE  Number of elements in the initial array.
+#define kvec_withinit_t(type, INIT_SIZE) \
+ struct { \
+ size_t size; \
+ size_t capacity; \
+ type *items; \
+ type init_array[INIT_SIZE]; \
+ }
+
+/// Initialize vector with preallocated array
+///
+/// @param[out] v Vector to initialize.
+#define kvi_init(v) \
+ ((v).capacity = ARRAY_SIZE((v).init_array), \
+ (v).size = 0, \
+ (v).items = (v).init_array)
+
+/// Move data to a new destination and free source
+static inline void *_memcpy_free(void *const restrict dest,
+ void *const restrict src,
+ const size_t size)
+ FUNC_ATTR_NONNULL_ALL FUNC_ATTR_NONNULL_RET FUNC_ATTR_ALWAYS_INLINE
+{
+ memcpy(dest, src, size);
+ xfree(src);
+ return dest;
+}
+
+/// Resize vector with preallocated array
+///
+/// @param[out] v Vector to resize.
+/// @param[in] s New size.
+#define kvi_resize(v, s) \
+ ((v).capacity = ((s) > ARRAY_SIZE((v).init_array) \
+ ? (s) \
+ : ARRAY_SIZE((v).init_array)), \
+ (v).items = ((v).capacity == ARRAY_SIZE((v).init_array) \
+ ? ((v).items == (v).init_array \
+ ? (v).items \
+ : _memcpy_free((v).init_array, (v).items, \
+ (v).size * sizeof((v).items[0]))) \
+ : ((v).items == (v).init_array \
+ ? memcpy(xmalloc((v).capacity * sizeof((v).items[0])), \
+ (v).items, \
+ (v).size * sizeof((v).items[0])) \
+ : xrealloc((v).items, \
+ (v).capacity * sizeof((v).items[0])))))
+
+/// Resize vector with preallocated array when it is full
+///
+/// @param[out] v Vector to resize.
+#define kvi_resize_full(v) \
+ /* ARRAY_SIZE((v).init_array) is the minimal capacity of this vector. */ \
+  /* Thus, when the vector is full, the capacity cannot be zero and it */ \
+  /* is safe not to check whether (v).capacity is 0. But then the */ \
+  /* capacity is not guaranteed to be a power of 2. */ \
+ kvi_resize(v, ((v).capacity == ARRAY_SIZE((v).init_array) \
+ ? ((v).capacity++, kv_roundup32((v).capacity)) \
+ : (v).capacity << 1))
+
+/// Get the location for a new element in a vector with a preallocated array
+///
+/// @param[in,out] v Vector to push to.
+///
+/// @return Pointer to the place where new value should be stored.
+#define kvi_pushp(v) \
+ ((((v).size == (v).capacity) ? (kvi_resize_full(v), 0) : 0), \
+ ((v).items + ((v).size++)))
+
+/// Push value to a vector with preallocated array
+///
+/// @param[out] v Vector to push to.
+/// @param[in] x Value to push.
+#define kvi_push(v, x) \
+ (*kvi_pushp(v) = (x))
+
+/// Free array of elements of a vector with preallocated array if needed
+///
+/// @param[out] v Vector to free.
+#define kvi_destroy(v) \
+ do { \
+ if ((v).items != (v).init_array) { \
+ xfree((v).items); \
+ } \
+ } while (0)
+
+#endif // NVIM_LIB_KVEC_H
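A usage sketch of the kvi_* API added above (within Nvim's tree, where
ARRAY_SIZE and xmalloc/xrealloc are available); the demo function is
illustrative:

    static void kvi_demo(void)
    {
      kvec_withinit_t(int, 8) v;  // first 8 elements live in v.init_array
      kvi_init(v);                // no heap allocation yet
      for (int i = 0; i < 20; i++) {
        kvi_push(v, i);           // moves to the heap once the 8 slots are used
      }
      int last = kv_last(v);      // kv_A/kv_size/kv_last work as usual
      (void)last;
      kvi_destroy(v);             // frees only if items left the init array
    }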
diff --git a/src/nvim/lib/queue.h b/src/nvim/lib/queue.h
index fe02b454ea..9fcedf298f 100644
--- a/src/nvim/lib/queue.h
+++ b/src/nvim/lib/queue.h
@@ -1,92 +1,94 @@
-/* Copyright (c) 2013, Ben Noordhuis <info@bnoordhuis.nl>
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
+// Copyright (c) 2013, Ben Noordhuis <info@bnoordhuis.nl>
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-#ifndef QUEUE_H_
-#define QUEUE_H_
+#ifndef NVIM_LIB_QUEUE_H
+#define NVIM_LIB_QUEUE_H
-typedef void *QUEUE[2];
+#include <stddef.h>
-/* Private macros. */
-#define QUEUE_NEXT(q) (*(QUEUE **) &((*(q))[0]))
-#define QUEUE_PREV(q) (*(QUEUE **) &((*(q))[1]))
-#define QUEUE_PREV_NEXT(q) (QUEUE_NEXT(QUEUE_PREV(q)))
-#define QUEUE_NEXT_PREV(q) (QUEUE_PREV(QUEUE_NEXT(q)))
+#include "nvim/func_attr.h"
-/* Public macros. */
-#define QUEUE_DATA(ptr, type, field) \
- ((type *) ((char *) (ptr) - ((char *) &((type *) 0)->field)))
+typedef struct _queue {
+ struct _queue *next;
+ struct _queue *prev;
+} QUEUE;
-#define QUEUE_FOREACH(q, h) \
- for ((q) = QUEUE_NEXT(h); (q) != (h); (q) = QUEUE_NEXT(q))
+// Public macros.
+#define QUEUE_DATA(ptr, type, field) \
+ ((type *)((char *)(ptr) - offsetof(type, field)))
-#define QUEUE_EMPTY(q) \
- ((const QUEUE *) (q) == (const QUEUE *) QUEUE_NEXT(q))
+#define QUEUE_FOREACH(q, h) \
+ for ( /* NOLINT(readability/braces) */ \
+ (q) = (h)->next; (q) != (h); (q) = (q)->next)
-#define QUEUE_HEAD(q) \
- (QUEUE_NEXT(q))
+// ffi.cdef is unable to swallow `bool` in place of `int` here.
+static inline int QUEUE_EMPTY(const QUEUE *const q)
+ FUNC_ATTR_ALWAYS_INLINE FUNC_ATTR_PURE FUNC_ATTR_WARN_UNUSED_RESULT
+{
+ return q == q->next;
+}
-#define QUEUE_INIT(q) \
- do { \
- QUEUE_NEXT(q) = (q); \
- QUEUE_PREV(q) = (q); \
- } \
- while (0)
+#define QUEUE_HEAD(q) (q)->next
-#define QUEUE_ADD(h, n) \
- do { \
- QUEUE_PREV_NEXT(h) = QUEUE_NEXT(n); \
- QUEUE_NEXT_PREV(n) = QUEUE_PREV(h); \
- QUEUE_PREV(h) = QUEUE_PREV(n); \
- QUEUE_PREV_NEXT(h) = (h); \
- } \
- while (0)
+static inline void QUEUE_INIT(QUEUE *const q) FUNC_ATTR_ALWAYS_INLINE
+{
+ q->next = q;
+ q->prev = q;
+}
-#define QUEUE_SPLIT(h, q, n) \
- do { \
- QUEUE_PREV(n) = QUEUE_PREV(h); \
- QUEUE_PREV_NEXT(n) = (n); \
- QUEUE_NEXT(n) = (q); \
- QUEUE_PREV(h) = QUEUE_PREV(q); \
- QUEUE_PREV_NEXT(h) = (h); \
- QUEUE_PREV(q) = (n); \
- } \
- while (0)
+static inline void QUEUE_ADD(QUEUE *const h, QUEUE *const n)
+ FUNC_ATTR_ALWAYS_INLINE
+{
+ h->prev->next = n->next;
+ n->next->prev = h->prev;
+ h->prev = n->prev;
+ h->prev->next = h;
+}
-#define QUEUE_INSERT_HEAD(h, q) \
- do { \
- QUEUE_NEXT(q) = QUEUE_NEXT(h); \
- QUEUE_PREV(q) = (h); \
- QUEUE_NEXT_PREV(q) = (q); \
- QUEUE_NEXT(h) = (q); \
- } \
- while (0)
+static inline void QUEUE_SPLIT(QUEUE *const h, QUEUE *const q, QUEUE *const n)
+ FUNC_ATTR_ALWAYS_INLINE
+{
+ n->prev = h->prev;
+ n->prev->next = n;
+ n->next = q;
+ h->prev = q->prev;
+ h->prev->next = h;
+ q->prev = n;
+}
-#define QUEUE_INSERT_TAIL(h, q) \
- do { \
- QUEUE_NEXT(q) = (h); \
- QUEUE_PREV(q) = QUEUE_PREV(h); \
- QUEUE_PREV_NEXT(q) = (q); \
- QUEUE_PREV(h) = (q); \
- } \
- while (0)
+static inline void QUEUE_INSERT_HEAD(QUEUE *const h, QUEUE *const q)
+ FUNC_ATTR_ALWAYS_INLINE
+{
+ q->next = h->next;
+ q->prev = h;
+ q->next->prev = q;
+ h->next = q;
+}
-#define QUEUE_REMOVE(q) \
- do { \
- QUEUE_PREV_NEXT(q) = QUEUE_NEXT(q); \
- QUEUE_NEXT_PREV(q) = QUEUE_PREV(q); \
- } \
- while (0)
+static inline void QUEUE_INSERT_TAIL(QUEUE *const h, QUEUE *const q)
+ FUNC_ATTR_ALWAYS_INLINE
+{
+ q->next = h;
+ q->prev = h->prev;
+ q->prev->next = q;
+ h->prev = q;
+}
-#endif /* QUEUE_H_ */
+static inline void QUEUE_REMOVE(QUEUE *const q) FUNC_ATTR_ALWAYS_INLINE
+{
+ q->prev->next = q->next;
+ q->next->prev = q->prev;
+}
+
+#endif // NVIM_LIB_QUEUE_H
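A usage sketch of the intrusive QUEUE type above; a head is set up with
QUEUE_INIT and entries are linked with QUEUE_INSERT_TAIL(&head, &job->node).
The Job struct and its field names are illustrative:

    typedef struct job {
      int id;
      QUEUE node;  // embedded link; no separate allocation for list membership
    } Job;

    static int first_job_id(QUEUE *head)
    {
      if (QUEUE_EMPTY(head)) {
        return -1;
      }
      QUEUE *q = QUEUE_HEAD(head);
      Job *job = QUEUE_DATA(q, Job, node);  // recover the container via offsetof
      return job->id;
    }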
diff --git a/src/nvim/lib/ringbuf.h b/src/nvim/lib/ringbuf.h
index cb71500bb7..12b75ec65a 100644
--- a/src/nvim/lib/ringbuf.h
+++ b/src/nvim/lib/ringbuf.h
@@ -65,12 +65,12 @@
/// @param TypeName Ring buffer type name. Actual type name will be
/// `{TypeName}RingBuffer`.
/// @param RBType Type of the single ring buffer element.
-#define RINGBUF_TYPEDEF(TypeName, RBType) \
-typedef struct { \
- RBType *buf; \
- RBType *next; \
- RBType *first; \
- RBType *buf_end; \
+#define RINGBUF_TYPEDEF(TypeName, RBType) \
+typedef struct { \
+ RBType *buf; \
+ RBType *next; \
+ RBType *first; \
+ RBType *buf_end; \
} TypeName##RingBuffer;
/// Initialize a new ring buffer
@@ -84,198 +84,196 @@ typedef struct { \
/// a macros like `#define RBFREE(item)` (to skip freeing).
///
/// Intended function signature: `void *rbfree(RBType *)`;
-#define RINGBUF_INIT(TypeName, funcprefix, RBType, rbfree) \
- \
- \
-static inline TypeName##RingBuffer funcprefix##_rb_new(const size_t size) \
- REAL_FATTR_WARN_UNUSED_RESULT; \
-static inline TypeName##RingBuffer funcprefix##_rb_new(const size_t size) \
-{ \
- assert(size != 0); \
- RBType *buf = xmalloc(size * sizeof(RBType)); \
- return (TypeName##RingBuffer) { \
- .buf = buf, \
- .next = buf, \
- .first = NULL, \
- .buf_end = buf + size - 1, \
- }; \
-} \
- \
-static inline void funcprefix##_rb_free(TypeName##RingBuffer *const rb) \
- REAL_FATTR_UNUSED; \
-static inline void funcprefix##_rb_free(TypeName##RingBuffer *const rb) \
-{ \
- if (rb == NULL) { \
- return; \
- } \
- RINGBUF_FORALL(rb, RBType, rbitem) { \
- rbfree(rbitem); \
- } \
- xfree(rb->buf); \
-} \
- \
-static inline void funcprefix##_rb_dealloc(TypeName##RingBuffer *const rb) \
- REAL_FATTR_UNUSED; \
-static inline void funcprefix##_rb_dealloc(TypeName##RingBuffer *const rb) \
-{ \
- xfree(rb->buf); \
-} \
- \
-static inline void funcprefix##_rb_push(TypeName##RingBuffer *const rb, \
- RBType item) \
- REAL_FATTR_NONNULL_ARG(1); \
-static inline void funcprefix##_rb_push(TypeName##RingBuffer *const rb, \
- RBType item) \
-{ \
- if (rb->next == rb->first) { \
- rbfree(rb->first); \
- rb->first = _RINGBUF_NEXT(rb, rb->first); \
- } else if (rb->first == NULL) { \
- rb->first = rb->next; \
- } \
- *rb->next = item; \
- rb->next = _RINGBUF_NEXT(rb, rb->next); \
-} \
- \
-static inline ptrdiff_t funcprefix##_rb_find_idx( \
- const TypeName##RingBuffer *const rb, const RBType *const item_p) \
- REAL_FATTR_NONNULL_ALL REAL_FATTR_PURE REAL_FATTR_UNUSED; \
-static inline ptrdiff_t funcprefix##_rb_find_idx( \
- const TypeName##RingBuffer *const rb, const RBType *const item_p) \
-{ \
- assert(rb->buf <= item_p); \
- assert(rb->buf_end >= item_p); \
- if (rb->first == NULL) { \
- return -1; \
- } else if (item_p >= rb->first) { \
- return item_p - rb->first; \
- } else { \
- return item_p - rb->buf + rb->buf_end - rb->first + 1; \
- } \
-} \
- \
-static inline size_t funcprefix##_rb_size( \
- const TypeName##RingBuffer *const rb) \
- REAL_FATTR_NONNULL_ALL REAL_FATTR_PURE; \
-static inline size_t funcprefix##_rb_size( \
- const TypeName##RingBuffer *const rb) \
-{ \
- return (size_t) (rb->buf_end - rb->buf) + 1; \
-} \
- \
-static inline size_t funcprefix##_rb_length( \
- const TypeName##RingBuffer *const rb) \
- REAL_FATTR_NONNULL_ALL REAL_FATTR_PURE; \
-static inline size_t funcprefix##_rb_length( \
- const TypeName##RingBuffer *const rb) \
-{ \
- return _RINGBUF_LENGTH(rb); \
-} \
- \
-static inline RBType *funcprefix##_rb_idx_p( \
- const TypeName##RingBuffer *const rb, const size_t idx) \
- REAL_FATTR_NONNULL_ALL REAL_FATTR_PURE; \
-static inline RBType *funcprefix##_rb_idx_p( \
- const TypeName##RingBuffer *const rb, const size_t idx) \
-{ \
- assert(idx <= funcprefix##_rb_size(rb)); \
- assert(idx <= funcprefix##_rb_length(rb)); \
- if (rb->first + idx > rb->buf_end) { \
- return rb->buf + ((rb->first + idx) - (rb->buf_end + 1)); \
- } else { \
- return rb->first + idx; \
- } \
-} \
- \
+#define RINGBUF_INIT(TypeName, funcprefix, RBType, rbfree) \
+static inline TypeName##RingBuffer funcprefix##_rb_new(const size_t size) \
+ REAL_FATTR_WARN_UNUSED_RESULT; \
+static inline TypeName##RingBuffer funcprefix##_rb_new(const size_t size) \
+{ \
+ assert(size != 0); \
+ RBType *buf = xmalloc(size * sizeof(RBType)); \
+ return (TypeName##RingBuffer) { \
+ .buf = buf, \
+ .next = buf, \
+ .first = NULL, \
+ .buf_end = buf + size - 1, \
+ }; \
+} \
+\
+static inline void funcprefix##_rb_free(TypeName##RingBuffer *const rb) \
+ REAL_FATTR_UNUSED; \
+static inline void funcprefix##_rb_free(TypeName##RingBuffer *const rb) \
+{ \
+ if (rb == NULL) { \
+ return; \
+ } \
+ RINGBUF_FORALL(rb, RBType, rbitem) { \
+ rbfree(rbitem); \
+ } \
+ xfree(rb->buf); \
+} \
+\
+static inline void funcprefix##_rb_dealloc(TypeName##RingBuffer *const rb) \
+ REAL_FATTR_UNUSED; \
+static inline void funcprefix##_rb_dealloc(TypeName##RingBuffer *const rb) \
+{ \
+ xfree(rb->buf); \
+} \
+\
+static inline void funcprefix##_rb_push(TypeName##RingBuffer *const rb, \
+ RBType item) \
+ REAL_FATTR_NONNULL_ARG(1); \
+static inline void funcprefix##_rb_push(TypeName##RingBuffer *const rb, \
+ RBType item) \
+{ \
+ if (rb->next == rb->first) { \
+ rbfree(rb->first); \
+ rb->first = _RINGBUF_NEXT(rb, rb->first); \
+ } else if (rb->first == NULL) { \
+ rb->first = rb->next; \
+ } \
+ *rb->next = item; \
+ rb->next = _RINGBUF_NEXT(rb, rb->next); \
+} \
+\
+static inline ptrdiff_t funcprefix##_rb_find_idx( \
+ const TypeName##RingBuffer *const rb, const RBType *const item_p) \
+ REAL_FATTR_NONNULL_ALL REAL_FATTR_PURE REAL_FATTR_UNUSED; \
+static inline ptrdiff_t funcprefix##_rb_find_idx( \
+ const TypeName##RingBuffer *const rb, const RBType *const item_p) \
+{ \
+ assert(rb->buf <= item_p); \
+ assert(rb->buf_end >= item_p); \
+ if (rb->first == NULL) { \
+ return -1; \
+ } else if (item_p >= rb->first) { \
+ return item_p - rb->first; \
+ } else { \
+ return item_p - rb->buf + rb->buf_end - rb->first + 1; \
+ } \
+} \
+\
+static inline size_t funcprefix##_rb_size( \
+ const TypeName##RingBuffer *const rb) \
+ REAL_FATTR_NONNULL_ALL REAL_FATTR_PURE; \
+static inline size_t funcprefix##_rb_size( \
+ const TypeName##RingBuffer *const rb) \
+{ \
+ return (size_t) (rb->buf_end - rb->buf) + 1; \
+} \
+\
+static inline size_t funcprefix##_rb_length( \
+ const TypeName##RingBuffer *const rb) \
+ REAL_FATTR_NONNULL_ALL REAL_FATTR_PURE; \
+static inline size_t funcprefix##_rb_length( \
+ const TypeName##RingBuffer *const rb) \
+{ \
+ return _RINGBUF_LENGTH(rb); \
+} \
+\
+static inline RBType *funcprefix##_rb_idx_p( \
+ const TypeName##RingBuffer *const rb, const size_t idx) \
+ REAL_FATTR_NONNULL_ALL REAL_FATTR_PURE; \
+static inline RBType *funcprefix##_rb_idx_p( \
+ const TypeName##RingBuffer *const rb, const size_t idx) \
+{ \
+ assert(idx <= funcprefix##_rb_size(rb)); \
+ assert(idx <= funcprefix##_rb_length(rb)); \
+ if (rb->first + idx > rb->buf_end) { \
+ return rb->buf + ((rb->first + idx) - (rb->buf_end + 1)); \
+ } else { \
+ return rb->first + idx; \
+ } \
+} \
+\
static inline RBType funcprefix##_rb_idx(const TypeName##RingBuffer *const rb, \
- const size_t idx) \
- REAL_FATTR_NONNULL_ALL REAL_FATTR_PURE REAL_FATTR_UNUSED; \
+ const size_t idx) \
+ REAL_FATTR_NONNULL_ALL REAL_FATTR_PURE REAL_FATTR_UNUSED; \
static inline RBType funcprefix##_rb_idx(const TypeName##RingBuffer *const rb, \
- const size_t idx) \
-{ \
- return *funcprefix##_rb_idx_p(rb, idx); \
-} \
- \
-static inline void funcprefix##_rb_insert(TypeName##RingBuffer *const rb, \
- const size_t idx, \
- RBType item) \
- REAL_FATTR_NONNULL_ARG(1) REAL_FATTR_UNUSED; \
-static inline void funcprefix##_rb_insert(TypeName##RingBuffer *const rb, \
- const size_t idx, \
- RBType item) \
-{ \
- assert(idx <= funcprefix##_rb_size(rb)); \
- assert(idx <= funcprefix##_rb_length(rb)); \
- const size_t length = funcprefix##_rb_length(rb); \
- if (idx == length) { \
- funcprefix##_rb_push(rb, item); \
- return; \
- } \
- RBType *const insertpos = funcprefix##_rb_idx_p(rb, idx); \
- if (insertpos == rb->next) { \
- funcprefix##_rb_push(rb, item); \
- return; \
- } \
- if (length == funcprefix##_rb_size(rb)) { \
- rbfree(rb->first); \
- } \
- if (insertpos < rb->next) { \
- memmove(insertpos + 1, insertpos, \
- (size_t) ((uintptr_t) rb->next - (uintptr_t) insertpos)); \
- } else { \
- assert(insertpos > rb->first); \
- assert(rb->next <= rb->first); \
- memmove(rb->buf + 1, rb->buf, \
- (size_t) ((uintptr_t) rb->next - (uintptr_t) rb->buf)); \
- *rb->buf = *rb->buf_end; \
- memmove(insertpos + 1, insertpos, \
+ const size_t idx) \
+{ \
+ return *funcprefix##_rb_idx_p(rb, idx); \
+} \
+\
+static inline void funcprefix##_rb_insert(TypeName##RingBuffer *const rb, \
+ const size_t idx, \
+ RBType item) \
+ REAL_FATTR_NONNULL_ARG(1) REAL_FATTR_UNUSED; \
+static inline void funcprefix##_rb_insert(TypeName##RingBuffer *const rb, \
+ const size_t idx, \
+ RBType item) \
+{ \
+ assert(idx <= funcprefix##_rb_size(rb)); \
+ assert(idx <= funcprefix##_rb_length(rb)); \
+ const size_t length = funcprefix##_rb_length(rb); \
+ if (idx == length) { \
+ funcprefix##_rb_push(rb, item); \
+ return; \
+ } \
+ RBType *const insertpos = funcprefix##_rb_idx_p(rb, idx); \
+ if (insertpos == rb->next) { \
+ funcprefix##_rb_push(rb, item); \
+ return; \
+ } \
+ if (length == funcprefix##_rb_size(rb)) { \
+ rbfree(rb->first); \
+ } \
+ if (insertpos < rb->next) { \
+ memmove(insertpos + 1, insertpos, \
+ (size_t) ((uintptr_t) rb->next - (uintptr_t) insertpos)); \
+ } else { \
+ assert(insertpos > rb->first); \
+ assert(rb->next <= rb->first); \
+ memmove(rb->buf + 1, rb->buf, \
+ (size_t) ((uintptr_t) rb->next - (uintptr_t) rb->buf)); \
+ *rb->buf = *rb->buf_end; \
+ memmove(insertpos + 1, insertpos, \
(size_t) ((uintptr_t) (rb->buf_end + 1) - (uintptr_t) insertpos)); \
- } \
- *insertpos = item; \
- if (length == funcprefix##_rb_size(rb)) { \
- rb->first = _RINGBUF_NEXT(rb, rb->first); \
- } \
- rb->next = _RINGBUF_NEXT(rb, rb->next); \
-} \
- \
-static inline void funcprefix##_rb_remove(TypeName##RingBuffer *const rb, \
- const size_t idx) \
- REAL_FATTR_NONNULL_ARG(1) REAL_FATTR_UNUSED; \
-static inline void funcprefix##_rb_remove(TypeName##RingBuffer *const rb, \
- const size_t idx) \
-{ \
- assert(idx < funcprefix##_rb_size(rb)); \
- assert(idx < funcprefix##_rb_length(rb)); \
- RBType *const rmpos = funcprefix##_rb_idx_p(rb, idx); \
- rbfree(rmpos); \
- if (rmpos == rb->next - 1) { \
- rb->next--; \
- if (rb->first == rb->next) { \
- rb->first = NULL; \
- rb->next = rb->buf; \
- } \
- } else if (rmpos == rb->first) { \
- rb->first = _RINGBUF_NEXT(rb, rb->first); \
- if (rb->first == rb->next) { \
- rb->first = NULL; \
- rb->next = rb->buf; \
- } \
- } else if (rb->first < rb->next || rb->next == rb->buf) { \
- assert(rmpos > rb->first); \
- assert(rmpos <= _RINGBUF_PREV(rb, rb->next)); \
- memmove(rb->first + 1, rb->first, \
- (size_t) ((uintptr_t) rmpos - (uintptr_t) rb->first)); \
- rb->first = _RINGBUF_NEXT(rb, rb->first); \
- } else if (rmpos < rb->next) { \
- memmove(rmpos, rmpos + 1, \
- (size_t) ((uintptr_t) rb->next - (uintptr_t) rmpos)); \
- rb->next = _RINGBUF_PREV(rb, rb->next); \
- } else { \
- assert(rb->first < rb->buf_end); \
- memmove(rb->first + 1, rb->first, \
- (size_t) ((uintptr_t) rmpos - (uintptr_t) rb->first)); \
- rb->first = _RINGBUF_NEXT(rb, rb->first); \
- } \
+ } \
+ *insertpos = item; \
+ if (length == funcprefix##_rb_size(rb)) { \
+ rb->first = _RINGBUF_NEXT(rb, rb->first); \
+ } \
+ rb->next = _RINGBUF_NEXT(rb, rb->next); \
+} \
+\
+static inline void funcprefix##_rb_remove(TypeName##RingBuffer *const rb, \
+ const size_t idx) \
+ REAL_FATTR_NONNULL_ARG(1) REAL_FATTR_UNUSED; \
+static inline void funcprefix##_rb_remove(TypeName##RingBuffer *const rb, \
+ const size_t idx) \
+{ \
+ assert(idx < funcprefix##_rb_size(rb)); \
+ assert(idx < funcprefix##_rb_length(rb)); \
+ RBType *const rmpos = funcprefix##_rb_idx_p(rb, idx); \
+ rbfree(rmpos); \
+ if (rmpos == rb->next - 1) { \
+ rb->next--; \
+ if (rb->first == rb->next) { \
+ rb->first = NULL; \
+ rb->next = rb->buf; \
+ } \
+ } else if (rmpos == rb->first) { \
+ rb->first = _RINGBUF_NEXT(rb, rb->first); \
+ if (rb->first == rb->next) { \
+ rb->first = NULL; \
+ rb->next = rb->buf; \
+ } \
+ } else if (rb->first < rb->next || rb->next == rb->buf) { \
+ assert(rmpos > rb->first); \
+ assert(rmpos <= _RINGBUF_PREV(rb, rb->next)); \
+ memmove(rb->first + 1, rb->first, \
+ (size_t) ((uintptr_t) rmpos - (uintptr_t) rb->first)); \
+ rb->first = _RINGBUF_NEXT(rb, rb->first); \
+ } else if (rmpos < rb->next) { \
+ memmove(rmpos, rmpos + 1, \
+ (size_t) ((uintptr_t) rb->next - (uintptr_t) rmpos)); \
+ rb->next = _RINGBUF_PREV(rb, rb->next); \
+ } else { \
+ assert(rb->first < rb->buf_end); \
+ memmove(rb->first + 1, rb->first, \
+ (size_t) ((uintptr_t) rmpos - (uintptr_t) rb->first)); \
+ rb->first = _RINGBUF_NEXT(rb, rb->first); \
+ } \
}
#endif // NVIM_LIB_RINGBUF_H
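A usage sketch of the ring buffer generator above; the "Hist" type, "hist"
prefix and no-op free callback are illustrative:

    #define hist_free(x)  // plain ints need no cleanup
    RINGBUF_TYPEDEF(Hist, int)
    RINGBUF_INIT(Hist, hist, int, hist_free)

    static void hist_demo(void)
    {
      HistRingBuffer rb = hist_rb_new(4);  // keeps only the last 4 entries
      for (int i = 0; i < 10; i++) {
        hist_rb_push(&rb, i);              // overwrites the oldest when full
      }
      int oldest = hist_rb_idx(&rb, 0);    // 6: pushing 0..9 leaves 6,7,8,9
      (void)oldest;
      hist_rb_dealloc(&rb);                // frees the backing array only
    }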
diff --git a/src/nvim/log.c b/src/nvim/log.c
index 773d497881..c31af6b287 100644
--- a/src/nvim/log.c
+++ b/src/nvim/log.c
@@ -10,7 +10,14 @@
#include "nvim/os/os.h"
#include "nvim/os/time.h"
-#define USR_LOG_FILE "$HOME" _PATHSEPSTR ".nvimlog"
+/// First location of the log file used by log_path_init()
+#define USR_LOG_FILE "$NVIM_LOG_FILE"
+
+/// Fallback location of the log file used by log_path_init()
+#define USR_LOG_FILE_2 "$HOME" _PATHSEPSTR ".nvimlog"
+
+/// Cached location of the log file set by log_path_init()
+static char expanded_log_file_path[MAXPATHL + 1] = { 0 };
static uv_mutex_t mutex;
@@ -18,6 +25,35 @@ static uv_mutex_t mutex;
# include "log.c.generated.h"
#endif
+/// Initialize path to log file
+///
+/// Tries to use #USR_LOG_FILE, then falls back to #USR_LOG_FILE_2. The path
+/// to the log file is cached, so only the first call has an effect, unless
+/// that call was unsuccessful. For initialization to fail, either expand_env()
+/// must be buggy or both the `$NVIM_LOG_FILE` and `$HOME` environment
+/// variables must be undefined.
+///
+/// @return true if path was initialized, false otherwise.
+static bool log_path_init(void)
+{
+ if (expanded_log_file_path[0]) {
+ return true;
+ }
+ expand_env((char_u *)USR_LOG_FILE, (char_u *)expanded_log_file_path,
+ sizeof(expanded_log_file_path) - 1);
+  // if expanding $NVIM_LOG_FILE failed, fall back to the second location
+ if (strcmp(USR_LOG_FILE, expanded_log_file_path) == 0) {
+ memset(expanded_log_file_path, 0, sizeof(expanded_log_file_path));
+ expand_env((char_u *)USR_LOG_FILE_2, (char_u *)expanded_log_file_path,
+ sizeof(expanded_log_file_path) - 1);
+ if (strcmp(USR_LOG_FILE_2, expanded_log_file_path) == 0) {
+ memset(expanded_log_file_path, 0, sizeof(expanded_log_file_path));
+ return false;
+ }
+ }
+ return true;
+}
+
void log_init(void)
{
uv_mutex_init(&mutex);
@@ -73,30 +109,17 @@ FILE *open_log_file(void)
return stderr;
}
- // expand USR_LOG_FILE and open the file
- FILE *log_file;
+ // expand USR_LOG_FILE if needed and open the file
+ FILE *log_file = NULL;
opening_log_file = true;
- {
- static char expanded_log_file_path[MAXPATHL + 1];
-
- expand_env((char_u *)USR_LOG_FILE, (char_u *)expanded_log_file_path,
- MAXPATHL);
- // if the log file path expansion failed then fall back to stderr
- if (strcmp(USR_LOG_FILE, expanded_log_file_path) == 0) {
- goto open_log_file_error;
- }
-
+ if (log_path_init()) {
log_file = fopen(expanded_log_file_path, "a");
- if (log_file == NULL) {
- goto open_log_file_error;
- }
}
opening_log_file = false;
- return log_file;
-
-open_log_file_error:
- opening_log_file = false;
+ if (log_file != NULL) {
+ return log_file;
+ }
do_log_to_file(stderr, ERROR_LOG_LEVEL, __func__, __LINE__, true,
"Couldn't open USR_LOG_FILE, logging to stderr! This may be "
diff --git a/src/nvim/main.c b/src/nvim/main.c
index 71a972e8f6..ba545c0815 100644
--- a/src/nvim/main.c
+++ b/src/nvim/main.c
@@ -120,6 +120,8 @@ typedef struct {
# include "main.c.generated.h"
#endif
+Loop main_loop;
+
static char *argv0;
// Error messages
@@ -133,7 +135,7 @@ static const char *err_extra_cmd =
void event_init(void)
{
- loop_init(&loop, NULL);
+ loop_init(&main_loop, NULL);
// early msgpack-rpc initialization
msgpack_rpc_init_method_table();
msgpack_rpc_helpers_init();
@@ -151,19 +153,20 @@ void event_init(void)
void event_teardown(void)
{
- if (!loop.events) {
+ if (!main_loop.events) {
return;
}
- queue_process_events(loop.events);
+ queue_process_events(main_loop.events);
input_stop();
channel_teardown();
- process_teardown(&loop);
+ process_teardown(&main_loop);
+ timer_teardown();
server_teardown();
signal_teardown();
terminal_teardown();
- loop_close(&loop);
+ loop_close(&main_loop);
}
/// Performs early initialization.
diff --git a/src/nvim/main.h b/src/nvim/main.h
index 084e247b7e..86d25fe657 100644
--- a/src/nvim/main.h
+++ b/src/nvim/main.h
@@ -2,6 +2,9 @@
#define NVIM_MAIN_H
#include "nvim/normal.h"
+#include "nvim/event/loop.h"
+
+extern Loop main_loop;
#ifdef INCLUDE_GENERATED_DECLARATIONS
# include "main.h.generated.h"
diff --git a/src/nvim/map.c b/src/nvim/map.c
index d4262ae9a8..03439e7a9c 100644
--- a/src/nvim/map.c
+++ b/src/nvim/map.c
@@ -34,88 +34,88 @@
#define INITIALIZER_DECLARE(T, U, ...) const U INITIALIZER(T, U) = __VA_ARGS__
#define DEFAULT_INITIALIZER {0}
-#define MAP_IMPL(T, U, ...) \
- INITIALIZER_DECLARE(T, U, __VA_ARGS__); \
- __KHASH_IMPL(T##_##U##_map,, T, U, 1, T##_hash, T##_eq) \
- \
- Map(T, U) *map_##T##_##U##_new() \
- { \
- Map(T, U) *rv = xmalloc(sizeof(Map(T, U))); \
- rv->table = kh_init(T##_##U##_map); \
- return rv; \
- } \
- \
- void map_##T##_##U##_free(Map(T, U) *map) \
- { \
- kh_destroy(T##_##U##_map, map->table); \
- xfree(map); \
- } \
- \
- U map_##T##_##U##_get(Map(T, U) *map, T key) \
- { \
- khiter_t k; \
- \
+#define MAP_IMPL(T, U, ...) \
+ INITIALIZER_DECLARE(T, U, __VA_ARGS__); \
+ __KHASH_IMPL(T##_##U##_map,, T, U, 1, T##_hash, T##_eq) \
+ \
+ Map(T, U) *map_##T##_##U##_new() \
+ { \
+ Map(T, U) *rv = xmalloc(sizeof(Map(T, U))); \
+ rv->table = kh_init(T##_##U##_map); \
+ return rv; \
+ } \
+ \
+ void map_##T##_##U##_free(Map(T, U) *map) \
+ { \
+ kh_destroy(T##_##U##_map, map->table); \
+ xfree(map); \
+ } \
+ \
+ U map_##T##_##U##_get(Map(T, U) *map, T key) \
+ { \
+ khiter_t k; \
+ \
if ((k = kh_get(T##_##U##_map, map->table, key)) == kh_end(map->table)) { \
- return INITIALIZER(T, U); \
- } \
- \
- return kh_val(map->table, k); \
- } \
- \
- bool map_##T##_##U##_has(Map(T, U) *map, T key) \
- { \
- return kh_get(T##_##U##_map, map->table, key) != kh_end(map->table); \
- } \
- \
- U map_##T##_##U##_put(Map(T, U) *map, T key, U value) \
- { \
- int ret; \
- U rv = INITIALIZER(T, U); \
- khiter_t k = kh_put(T##_##U##_map, map->table, key, &ret); \
- \
- if (!ret) { \
- rv = kh_val(map->table, k); \
- } \
- \
- kh_val(map->table, k) = value; \
- return rv; \
- } \
- \
- U *map_##T##_##U##_ref(Map(T, U) *map, T key, bool put) \
- { \
- int ret; \
- khiter_t k; \
- if (put) { \
- k = kh_put(T##_##U##_map, map->table, key, &ret); \
- if (ret) { \
- kh_val(map->table, k) = INITIALIZER(T, U); \
- } \
- } else { \
- k = kh_get(T##_##U##_map, map->table, key); \
- if (k == kh_end(map->table)) { \
- return NULL; \
- } \
- } \
- \
- return &kh_val(map->table, k); \
- } \
- \
- U map_##T##_##U##_del(Map(T, U) *map, T key) \
- { \
- U rv = INITIALIZER(T, U); \
- khiter_t k; \
- \
+ return INITIALIZER(T, U); \
+ } \
+ \
+ return kh_val(map->table, k); \
+ } \
+ \
+ bool map_##T##_##U##_has(Map(T, U) *map, T key) \
+ { \
+ return kh_get(T##_##U##_map, map->table, key) != kh_end(map->table); \
+ } \
+ \
+ U map_##T##_##U##_put(Map(T, U) *map, T key, U value) \
+ { \
+ int ret; \
+ U rv = INITIALIZER(T, U); \
+ khiter_t k = kh_put(T##_##U##_map, map->table, key, &ret); \
+ \
+ if (!ret) { \
+ rv = kh_val(map->table, k); \
+ } \
+ \
+ kh_val(map->table, k) = value; \
+ return rv; \
+ } \
+ \
+ U *map_##T##_##U##_ref(Map(T, U) *map, T key, bool put) \
+ { \
+ int ret; \
+ khiter_t k; \
+ if (put) { \
+ k = kh_put(T##_##U##_map, map->table, key, &ret); \
+ if (ret) { \
+ kh_val(map->table, k) = INITIALIZER(T, U); \
+ } \
+ } else { \
+ k = kh_get(T##_##U##_map, map->table, key); \
+ if (k == kh_end(map->table)) { \
+ return NULL; \
+ } \
+ } \
+ \
+ return &kh_val(map->table, k); \
+ } \
+ \
+ U map_##T##_##U##_del(Map(T, U) *map, T key) \
+ { \
+ U rv = INITIALIZER(T, U); \
+ khiter_t k; \
+ \
if ((k = kh_get(T##_##U##_map, map->table, key)) != kh_end(map->table)) { \
- rv = kh_val(map->table, k); \
- kh_del(T##_##U##_map, map->table, k); \
- } \
- \
- return rv; \
- } \
- \
- void map_##T##_##U##_clear(Map(T, U) *map) \
- { \
- kh_clear(T##_##U##_map, map->table); \
+ rv = kh_val(map->table, k); \
+ kh_del(T##_##U##_map, map->table, k); \
+ } \
+ \
+ return rv; \
+ } \
+ \
+ void map_##T##_##U##_clear(Map(T, U) *map) \
+ { \
+ kh_clear(T##_##U##_map, map->table); \
}
static inline khint_t String_hash(String s)
diff --git a/src/nvim/map.h b/src/nvim/map.h
index e90cc360ce..c7d9894bd1 100644
--- a/src/nvim/map.h
+++ b/src/nvim/map.h
@@ -8,20 +8,20 @@
#include "nvim/msgpack_rpc/defs.h"
#include "nvim/bufhl_defs.h"
-#define MAP_DECLS(T, U) \
- KHASH_DECLARE(T##_##U##_map, T, U) \
- \
- typedef struct { \
- khash_t(T##_##U##_map) *table; \
- } Map(T, U); \
- \
- Map(T, U) *map_##T##_##U##_new(void); \
- void map_##T##_##U##_free(Map(T, U) *map); \
- U map_##T##_##U##_get(Map(T, U) *map, T key); \
- bool map_##T##_##U##_has(Map(T, U) *map, T key); \
- U map_##T##_##U##_put(Map(T, U) *map, T key, U value); \
- U *map_##T##_##U##_ref(Map(T, U) *map, T key, bool put); \
- U map_##T##_##U##_del(Map(T, U) *map, T key); \
+#define MAP_DECLS(T, U) \
+ KHASH_DECLARE(T##_##U##_map, T, U) \
+ \
+ typedef struct { \
+ khash_t(T##_##U##_map) *table; \
+ } Map(T, U); \
+ \
+ Map(T, U) *map_##T##_##U##_new(void); \
+ void map_##T##_##U##_free(Map(T, U) *map); \
+ U map_##T##_##U##_get(Map(T, U) *map, T key); \
+ bool map_##T##_##U##_has(Map(T, U) *map, T key); \
+ U map_##T##_##U##_put(Map(T, U) *map, T key, U value); \
+ U *map_##T##_##U##_ref(Map(T, U) *map, T key, bool put); \
+ U map_##T##_##U##_del(Map(T, U) *map, T key); \
void map_##T##_##U##_clear(Map(T, U) *map);
MAP_DECLS(int, int)
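
MAP_DECLS(T, U) only declares the khash-backed container type and the map_T_U_* prototypes; the matching MAP_IMPL(T, U, ...) in map.c generates the bodies. Given the MAP_DECLS(int, int) instantiation above, a caller uses the generated functions roughly like this (a usage sketch that assumes #include "nvim/map.h" and linking against map.c):

void example(void)
{
  Map(int, int) *ages = map_int_int_new();
  map_int_int_put(ages, 1, 30);        // returns the previously stored value, or {0}
  if (map_int_int_has(ages, 1)) {
    int v = map_int_int_get(ages, 1);  // 30
    (void)v;
  }
  map_int_int_del(ages, 1);
  map_int_int_free(ages);
}
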
diff --git a/src/nvim/mbyte.c b/src/nvim/mbyte.c
index 3495203c43..0ba9f8b076 100644
--- a/src/nvim/mbyte.c
+++ b/src/nvim/mbyte.c
@@ -1304,7 +1304,7 @@ int utfc_ptr2char(const char_u *p, int *pcc)
*/
int utfc_ptr2char_len(const char_u *p, int *pcc, int maxlen)
{
-#define IS_COMPOSING(s1, s2, s3) \
+#define IS_COMPOSING(s1, s2, s3) \
(i == 0 ? UTF_COMPOSINGLIKE((s1), (s2)) : utf_iscomposing((s3)))
assert(maxlen > 0);
diff --git a/src/nvim/msgpack_rpc/channel.c b/src/nvim/msgpack_rpc/channel.c
index 3a6d7c1434..0d7d5a247e 100644
--- a/src/nvim/msgpack_rpc/channel.c
+++ b/src/nvim/msgpack_rpc/channel.c
@@ -16,6 +16,7 @@
#include "nvim/event/socket.h"
#include "nvim/msgpack_rpc/helpers.h"
#include "nvim/vim.h"
+#include "nvim/main.h"
#include "nvim/ascii.h"
#include "nvim/memory.h"
#include "nvim/os_unix.h"
@@ -119,7 +120,7 @@ void channel_teardown(void)
uint64_t channel_from_process(char **argv)
{
Channel *channel = register_channel(kChannelTypeProc);
- channel->data.process.uvproc = libuv_process_init(&loop, channel);
+ channel->data.process.uvproc = libuv_process_init(&main_loop, channel);
Process *proc = &channel->data.process.uvproc.process;
proc->argv = argv;
proc->in = &channel->data.process.in;
@@ -127,7 +128,7 @@ uint64_t channel_from_process(char **argv)
proc->err = &channel->data.process.err;
proc->cb = process_exit;
if (!process_spawn(proc)) {
- loop_poll_events(&loop, 0);
+ loop_poll_events(&main_loop, 0);
decref(channel);
return 0;
}
@@ -179,7 +180,7 @@ bool channel_send_event(uint64_t id, char *name, Array args)
// Pending request, queue the notification for later sending.
String method = cstr_as_string(name);
WBuffer *buffer = serialize_request(id, 0, method, args, &out_buffer, 1);
- kv_push(WBuffer *, channel->delayed_notifications, buffer);
+ kv_push(channel->delayed_notifications, buffer);
} else {
send_event(channel, name, args);
}
@@ -217,10 +218,10 @@ Object channel_send_call(uint64_t id,
send_request(channel, request_id, method_name, args);
// Push the frame
- ChannelCallFrame frame = {request_id, false, false, NIL};
- kv_push(ChannelCallFrame *, channel->call_stack, &frame);
+ ChannelCallFrame frame = { request_id, false, false, NIL };
+ kv_push(channel->call_stack, &frame);
channel->pending_requests++;
- LOOP_PROCESS_EVENTS_UNTIL(&loop, channel->events, -1, frame.returned);
+ LOOP_PROCESS_EVENTS_UNTIL(&main_loop, channel->events, -1, frame.returned);
(void)kv_pop(channel->call_stack);
channel->pending_requests--;
@@ -316,11 +317,11 @@ void channel_from_stdio(void)
Channel *channel = register_channel(kChannelTypeStdio);
incref(channel); // stdio channels are only closed on exit
// read stream
- rstream_init_fd(&loop, &channel->data.std.in, 0, CHANNEL_BUFFER_SIZE,
- channel);
+ rstream_init_fd(&main_loop, &channel->data.std.in, 0, CHANNEL_BUFFER_SIZE,
+ channel);
rstream_start(&channel->data.std.in, parse_msgpack);
// write stream
- wstream_init_fd(&loop, &channel->data.std.out, 1, 0, NULL);
+ wstream_init_fd(&main_loop, &channel->data.std.out, 1, 0, NULL);
}
static void forward_stderr(Stream *stream, RBuffer *rbuf, size_t count,
@@ -574,13 +575,12 @@ static void send_event(Channel *channel,
static void broadcast_event(char *name, Array args)
{
- kvec_t(Channel *) subscribed;
- kv_init(subscribed);
+ kvec_t(Channel *) subscribed = KV_INITIAL_VALUE;
Channel *channel;
map_foreach_value(channels, channel, {
if (pmap_has(cstr_t)(channel->subscribed_events, name)) {
- kv_push(Channel *, subscribed, channel);
+ kv_push(subscribed, channel);
}
});
@@ -600,7 +600,7 @@ static void broadcast_event(char *name, Array args)
for (size_t i = 0; i < kv_size(subscribed); i++) {
Channel *channel = kv_A(subscribed, i);
if (channel->pending_requests) {
- kv_push(WBuffer *, channel->delayed_notifications, buffer);
+ kv_push(channel->delayed_notifications, buffer);
} else {
channel_write(channel, buffer);
}
@@ -647,7 +647,7 @@ static void close_channel(Channel *channel)
case kChannelTypeStdio:
stream_close(&channel->data.std.in, NULL);
stream_close(&channel->data.std.out, NULL);
- queue_put(loop.fast_events, exit_event, 1, channel);
+ queue_put(main_loop.fast_events, exit_event, 1, channel);
return;
default:
abort();
@@ -692,7 +692,7 @@ static void close_cb(Stream *stream, void *data)
static Channel *register_channel(ChannelType type)
{
Channel *rv = xmalloc(sizeof(Channel));
- rv->events = queue_new_child(loop.events);
+ rv->events = queue_new_child(main_loop.events);
rv->type = type;
rv->refcount = 1;
rv->closed = false;
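
Several call sites above move from the three-argument kv_push(Type *, vec, item) to the two-argument kv_push(vec, item) and from kv_init() to the KV_INITIAL_VALUE initializer, so the element type is taken from the vector itself. A small usage sketch of that pattern, assuming the usual kvec macros from nvim/lib/kvec.h (kv_size, kv_A, kv_destroy):

#include <stdio.h>
#include "nvim/lib/kvec.h"

int main(void)
{
  kvec_t(int) ids = KV_INITIAL_VALUE;  // zero-initialized growable vector
  kv_push(ids, 7);                     // element type is inferred from `ids`
  kv_push(ids, 42);
  for (size_t i = 0; i < kv_size(ids); i++) {
    printf("%d\n", kv_A(ids, i));
  }
  kv_destroy(ids);
  return 0;
}
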
diff --git a/src/nvim/msgpack_rpc/helpers.c b/src/nvim/msgpack_rpc/helpers.c
index 0049ae6b95..c643bae0e8 100644
--- a/src/nvim/msgpack_rpc/helpers.c
+++ b/src/nvim/msgpack_rpc/helpers.c
@@ -18,39 +18,39 @@
static msgpack_zone zone;
static msgpack_sbuffer sbuffer;
-#define HANDLE_TYPE_CONVERSION_IMPL(t, lt) \
- bool msgpack_rpc_to_##lt(msgpack_object *obj, t *arg) \
- FUNC_ATTR_NONNULL_ALL \
- { \
- if (obj->type != MSGPACK_OBJECT_EXT \
- || obj->via.ext.type != kObjectType##t) { \
- return false; \
- } \
- \
- msgpack_object data; \
- msgpack_unpack_return ret = msgpack_unpack(obj->via.ext.ptr, \
- obj->via.ext.size, \
- NULL, \
- &zone, \
- &data); \
- \
- if (ret != MSGPACK_UNPACK_SUCCESS) { \
- return false; \
- } \
- \
- *arg = data.via.u64; \
- return true; \
- } \
- \
- void msgpack_rpc_from_##lt(t o, msgpack_packer *res) \
- FUNC_ATTR_NONNULL_ARG(2) \
- { \
- msgpack_packer pac; \
- msgpack_packer_init(&pac, &sbuffer, msgpack_sbuffer_write); \
- msgpack_pack_uint64(&pac, o); \
- msgpack_pack_ext(res, sbuffer.size, kObjectType##t); \
- msgpack_pack_ext_body(res, sbuffer.data, sbuffer.size); \
- msgpack_sbuffer_clear(&sbuffer); \
+#define HANDLE_TYPE_CONVERSION_IMPL(t, lt) \
+ bool msgpack_rpc_to_##lt(msgpack_object *obj, t *arg) \
+ FUNC_ATTR_NONNULL_ALL \
+ { \
+ if (obj->type != MSGPACK_OBJECT_EXT \
+ || obj->via.ext.type != kObjectType##t) { \
+ return false; \
+ } \
+ \
+ msgpack_object data; \
+ msgpack_unpack_return ret = msgpack_unpack(obj->via.ext.ptr, \
+ obj->via.ext.size, \
+ NULL, \
+ &zone, \
+ &data); \
+ \
+ if (ret != MSGPACK_UNPACK_SUCCESS) { \
+ return false; \
+ } \
+ \
+ *arg = data.via.u64; \
+ return true; \
+ } \
+ \
+ void msgpack_rpc_from_##lt(t o, msgpack_packer *res) \
+ FUNC_ATTR_NONNULL_ARG(2) \
+ { \
+ msgpack_packer pac; \
+ msgpack_packer_init(&pac, &sbuffer, msgpack_sbuffer_write); \
+ msgpack_pack_uint64(&pac, o); \
+ msgpack_pack_ext(res, sbuffer.size, kObjectType##t); \
+ msgpack_pack_ext_body(res, sbuffer.data, sbuffer.size); \
+ msgpack_sbuffer_clear(&sbuffer); \
}
void msgpack_rpc_helpers_init(void)
diff --git a/src/nvim/msgpack_rpc/server.c b/src/nvim/msgpack_rpc/server.c
index 6cc56ba3dd..abbd3e8aff 100644
--- a/src/nvim/msgpack_rpc/server.c
+++ b/src/nvim/msgpack_rpc/server.c
@@ -12,6 +12,7 @@
#include "nvim/eval.h"
#include "nvim/garray.h"
#include "nvim/vim.h"
+#include "nvim/main.h"
#include "nvim/memory.h"
#include "nvim/log.h"
#include "nvim/fileio.h"
@@ -108,7 +109,7 @@ int server_start(const char *endpoint)
}
SocketWatcher *watcher = xmalloc(sizeof(SocketWatcher));
- socket_watcher_init(&loop, watcher, endpoint, NULL);
+ socket_watcher_init(&main_loop, watcher, endpoint, NULL);
// Check if a watcher for the endpoint already exists
for (int i = 0; i < watchers.ga_len; i++) {
diff --git a/src/nvim/normal.c b/src/nvim/normal.c
index 6baf95739d..c95e5e1a15 100644
--- a/src/nvim/normal.c
+++ b/src/nvim/normal.c
@@ -974,7 +974,7 @@ static int normal_execute(VimState *state, int key)
s->old_col = curwin->w_curswant;
s->c = key;
- LANGMAP_ADJUST(s->c, true);
+ LANGMAP_ADJUST(s->c, get_real_state() != SELECTMODE);
// If a mapping was started in Visual or Select mode, remember the length
// of the mapping. This is used below to not return to Insert mode for as
@@ -7880,7 +7880,7 @@ static void nv_event(cmdarg_T *cap)
// not safe to perform garbage collection because there could be unreferenced
// lists or dicts being used.
may_garbage_collect = false;
- queue_process_events(loop.events);
+ queue_process_events(main_loop.events);
cap->retval |= CA_COMMAND_BUSY; // don't call edit() now
}
diff --git a/src/nvim/option.c b/src/nvim/option.c
index ba4f390828..ac906d82ad 100644
--- a/src/nvim/option.c
+++ b/src/nvim/option.c
@@ -2129,6 +2129,7 @@ void check_buf_options(buf_T *buf)
check_string_option(&buf->b_p_nf);
check_string_option(&buf->b_p_qe);
check_string_option(&buf->b_p_syn);
+ check_string_option(&buf->b_s.b_syn_isk);
check_string_option(&buf->b_s.b_p_spc);
check_string_option(&buf->b_s.b_p_spf);
check_string_option(&buf->b_s.b_p_spl);
@@ -5607,6 +5608,7 @@ void buf_copy_options(buf_T *buf, int flags)
/* Don't copy 'syntax', it must be set */
buf->b_p_syn = empty_option;
buf->b_p_smc = p_smc;
+ buf->b_s.b_syn_isk = empty_option;
buf->b_s.b_p_spc = vim_strsave(p_spc);
(void)compile_cap_prog(&buf->b_s);
buf->b_s.b_p_spf = vim_strsave(p_spf);
diff --git a/src/nvim/os/fs.c b/src/nvim/os/fs.c
index 143a7160b0..08122828bd 100644
--- a/src/nvim/os/fs.c
+++ b/src/nvim/os/fs.c
@@ -398,7 +398,6 @@ int os_setperm(const char_u *name, int perm)
/// @note If the `owner` or `group` is specified as `-1`, then that ID is not
/// changed.
int os_fchown(int file_descriptor, uv_uid_t owner, uv_gid_t group)
- FUNC_ATTR_NONNULL_ALL
{
uv_fs_t request;
int result = uv_fs_fchown(&fs_loop, &request, file_descriptor,
diff --git a/src/nvim/os/input.c b/src/nvim/os/input.c
index 7687b14f02..0c46dc96ee 100644
--- a/src/nvim/os/input.c
+++ b/src/nvim/os/input.c
@@ -60,7 +60,7 @@ void input_start(int fd)
}
global_fd = fd;
- rstream_init_fd(&loop, &read_stream, fd, READ_BUFFER_SIZE, NULL);
+ rstream_init_fd(&main_loop, &read_stream, fd, READ_BUFFER_SIZE, NULL);
rstream_start(&read_stream, read_cb);
}
@@ -87,8 +87,8 @@ static void create_cursorhold_event(void)
// have been called(inbuf_poll would return kInputAvail)
// TODO(tarruda): Cursorhold should be implemented as a timer set during the
// `state_check` callback for the states where it can be triggered.
- assert(!events_enabled || queue_empty(loop.events));
- queue_put(loop.events, cursorhold_event, 0);
+ assert(!events_enabled || queue_empty(main_loop.events));
+ queue_put(main_loop.events, cursorhold_event, 0);
}
// Low level input function
@@ -147,7 +147,7 @@ bool os_char_avail(void)
void os_breakcheck(void)
{
if (!got_int) {
- loop_poll_events(&loop, 0);
+ loop_poll_events(&main_loop, 0);
}
}
@@ -322,7 +322,7 @@ static bool input_poll(int ms)
prof_inchar_enter();
}
- LOOP_PROCESS_EVENTS_UNTIL(&loop, NULL, ms, input_ready() || input_eof);
+ LOOP_PROCESS_EVENTS_UNTIL(&main_loop, NULL, ms, input_ready() || input_eof);
if (do_profiling == PROF_YES && ms) {
prof_inchar_exit();
@@ -419,5 +419,5 @@ static void read_error_exit(void)
static bool pending_events(void)
{
- return events_enabled && !queue_empty(loop.events);
+ return events_enabled && !queue_empty(main_loop.events);
}
diff --git a/src/nvim/os/pty_process.h b/src/nvim/os/pty_process.h
new file mode 100644
index 0000000000..94923499ca
--- /dev/null
+++ b/src/nvim/os/pty_process.h
@@ -0,0 +1,9 @@
+#ifndef NVIM_OS_PTY_PROCESS_H
+#define NVIM_OS_PTY_PROCESS_H
+
+#ifdef WIN32
+# include "nvim/os/pty_process_win.h"
+#else
+# include "nvim/os/pty_process_unix.h"
+#endif
+#endif // NVIM_OS_PTY_PROCESS_H
diff --git a/src/nvim/event/pty_process.c b/src/nvim/os/pty_process_unix.c
index 8eef72f12f..436de030ba 100644
--- a/src/nvim/event/pty_process.c
+++ b/src/nvim/os/pty_process_unix.c
@@ -26,11 +26,12 @@
#include "nvim/event/rstream.h"
#include "nvim/event/wstream.h"
#include "nvim/event/process.h"
-#include "nvim/event/pty_process.h"
+#include "nvim/os/pty_process_unix.h"
#include "nvim/log.h"
+#include "nvim/os/os.h"
#ifdef INCLUDE_GENERATED_DECLARATIONS
-# include "event/pty_process.c.generated.h"
+# include "os/pty_process_unix.c.generated.h"
#endif
bool pty_process_spawn(PtyProcess *ptyproc)
@@ -44,7 +45,7 @@ bool pty_process_spawn(PtyProcess *ptyproc)
Process *proc = (Process *)ptyproc;
assert(!proc->err);
uv_signal_start(&proc->loop->children_watcher, chld_handler, SIGCHLD);
- ptyproc->winsize = (struct winsize){ptyproc->height, ptyproc->width, 0, 0};
+ ptyproc->winsize = (struct winsize){ ptyproc->height, ptyproc->width, 0, 0 };
uv_disable_stdio_inheritance();
int master;
int pid = forkpty(&master, NULL, &termios, &ptyproc->winsize);
@@ -86,11 +87,10 @@ error:
return false;
}
-void pty_process_resize(PtyProcess *ptyproc, uint16_t width,
- uint16_t height)
+void pty_process_resize(PtyProcess *ptyproc, uint16_t width, uint16_t height)
FUNC_ATTR_NONNULL_ALL
{
- ptyproc->winsize = (struct winsize){height, width, 0, 0};
+ ptyproc->winsize = (struct winsize){ height, width, 0, 0 };
ioctl(ptyproc->tty_fd, TIOCSWINSZ, &ptyproc->winsize);
}
@@ -132,6 +132,12 @@ static void init_child(PtyProcess *ptyproc) FUNC_ATTR_NONNULL_ALL
signal(SIGTERM, SIG_DFL);
signal(SIGALRM, SIG_DFL);
+ Process *proc = (Process *)ptyproc;
+ if (proc->cwd && os_chdir(proc->cwd) != 0) {
+ fprintf(stderr, "chdir failed: %s\n", strerror(errno));
+ return;
+ }
+
setenv("TERM", ptyproc->term_name ? ptyproc->term_name : "ansi", 1);
execvp(ptyproc->process.argv[0], ptyproc->process.argv);
fprintf(stderr, "execvp failed: %s\n", strerror(errno));
diff --git a/src/nvim/event/pty_process.h b/src/nvim/os/pty_process_unix.h
index 446d7fd3c8..f7c57b3839 100644
--- a/src/nvim/event/pty_process.h
+++ b/src/nvim/os/pty_process_unix.h
@@ -1,5 +1,5 @@
-#ifndef NVIM_EVENT_PTY_PROCESS_H
-#define NVIM_EVENT_PTY_PROCESS_H
+#ifndef NVIM_OS_PTY_PROCESS_UNIX_H
+#define NVIM_OS_PTY_PROCESS_UNIX_H
#include <sys/ioctl.h>
@@ -25,6 +25,7 @@ static inline PtyProcess pty_process_init(Loop *loop, void *data)
}
#ifdef INCLUDE_GENERATED_DECLARATIONS
-# include "event/pty_process.h.generated.h"
+# include "os/pty_process_unix.h.generated.h"
#endif
-#endif // NVIM_EVENT_PTY_PROCESS_H
+
+#endif // NVIM_OS_PTY_PROCESS_UNIX_H
diff --git a/src/nvim/os/pty_process_win.h b/src/nvim/os/pty_process_win.h
new file mode 100644
index 0000000000..20cc589925
--- /dev/null
+++ b/src/nvim/os/pty_process_win.h
@@ -0,0 +1,28 @@
+#ifndef NVIM_OS_PTY_PROCESS_WIN_H
+#define NVIM_OS_PTY_PROCESS_WIN_H
+
+#include "nvim/event/libuv_process.h"
+
+typedef struct pty_process {
+ Process process;
+ char *term_name;
+ uint16_t width, height;
+} PtyProcess;
+
+#define pty_process_spawn(job) libuv_process_spawn((LibuvProcess *)job)
+#define pty_process_close(job) libuv_process_close((LibuvProcess *)job)
+#define pty_process_close_master(job) libuv_process_close((LibuvProcess *)job)
+#define pty_process_resize(job, width, height)
+#define pty_process_teardown(loop)
+
+static inline PtyProcess pty_process_init(Loop *loop, void *data)
+{
+ PtyProcess rv;
+ rv.process = process_init(loop, kProcessTypePty, data);
+ rv.term_name = NULL;
+ rv.width = 80;
+ rv.height = 24;
+ return rv;
+}
+
+#endif // NVIM_OS_PTY_PROCESS_WIN_H
diff --git a/src/nvim/os/shell.c b/src/nvim/os/shell.c
index f5a1637c94..988620b145 100644
--- a/src/nvim/os/shell.c
+++ b/src/nvim/os/shell.c
@@ -14,6 +14,7 @@
#include "nvim/os/shell.h"
#include "nvim/os/signal.h"
#include "nvim/types.h"
+#include "nvim/main.h"
#include "nvim/vim.h"
#include "nvim/message.h"
#include "nvim/memory.h"
@@ -205,16 +206,16 @@ static int do_os_system(char **argv,
xstrlcpy(prog, argv[0], MAXPATHL);
Stream in, out, err;
- LibuvProcess uvproc = libuv_process_init(&loop, &buf);
+ LibuvProcess uvproc = libuv_process_init(&main_loop, &buf);
Process *proc = &uvproc.process;
- Queue *events = queue_new_child(loop.events);
+ Queue *events = queue_new_child(main_loop.events);
proc->events = events;
proc->argv = argv;
proc->in = input != NULL ? &in : NULL;
proc->out = &out;
proc->err = &err;
if (!process_spawn(proc)) {
- loop_poll_events(&loop, 0);
+ loop_poll_events(&main_loop, 0);
// Failed, probably due to `sh` not being executable
if (!silent) {
MSG_PUTS(_("\nCannot execute "));
diff --git a/src/nvim/os/signal.c b/src/nvim/os/signal.c
index 0ff6016e32..4abc9cfc36 100644
--- a/src/nvim/os/signal.c
+++ b/src/nvim/os/signal.c
@@ -8,6 +8,7 @@
#include "nvim/globals.h"
#include "nvim/memline.h"
#include "nvim/eval.h"
+#include "nvim/main.h"
#include "nvim/memory.h"
#include "nvim/misc1.h"
#include "nvim/misc2.h"
@@ -28,10 +29,10 @@ static bool rejecting_deadly;
void signal_init(void)
{
- signal_watcher_init(&loop, &spipe, NULL);
- signal_watcher_init(&loop, &shup, NULL);
- signal_watcher_init(&loop, &squit, NULL);
- signal_watcher_init(&loop, &sterm, NULL);
+ signal_watcher_init(&main_loop, &spipe, NULL);
+ signal_watcher_init(&main_loop, &shup, NULL);
+ signal_watcher_init(&main_loop, &squit, NULL);
+ signal_watcher_init(&main_loop, &sterm, NULL);
#ifdef SIGPIPE
signal_watcher_start(&spipe, on_signal, SIGPIPE);
#endif
@@ -41,7 +42,7 @@ void signal_init(void)
#endif
signal_watcher_start(&sterm, on_signal, SIGTERM);
#ifdef SIGPWR
- signal_watcher_init(&loop, &spwr, NULL);
+ signal_watcher_init(&main_loop, &spwr, NULL);
signal_watcher_start(&spwr, on_signal, SIGPWR);
#endif
}
diff --git a/src/nvim/os/time.c b/src/nvim/os/time.c
index 188f0802c9..2205ad0958 100644
--- a/src/nvim/os/time.c
+++ b/src/nvim/os/time.c
@@ -9,6 +9,7 @@
#include "nvim/os/time.h"
#include "nvim/event/loop.h"
#include "nvim/vim.h"
+#include "nvim/main.h"
static uv_mutex_t delay_mutex;
static uv_cond_t delay_cond;
@@ -43,7 +44,7 @@ void os_delay(uint64_t milliseconds, bool ignoreinput)
if (milliseconds > INT_MAX) {
milliseconds = INT_MAX;
}
- LOOP_PROCESS_EVENTS_UNTIL(&loop, NULL, (int)milliseconds, got_int);
+ LOOP_PROCESS_EVENTS_UNTIL(&main_loop, NULL, (int)milliseconds, got_int);
} else {
os_microdelay(milliseconds * 1000);
}
diff --git a/src/nvim/po/eo.po b/src/nvim/po/eo.po
index 5b0cb2260b..6bc76506ae 100644
--- a/src/nvim/po/eo.po
+++ b/src/nvim/po/eo.po
@@ -23,8 +23,8 @@ msgid ""
msgstr ""
"Project-Id-Version: Vim(Esperanto)\n"
"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2015-07-30 17:54+0200\n"
-"PO-Revision-Date: 2015-07-30 18:00+0200\n"
+"POT-Creation-Date: 2016-02-13 23:42+0100\n"
+"PO-Revision-Date: 2016-02-13 23:45+0100\n"
"Last-Translator: Dominique PELLÉ <dominique.pelle@gmail.com>\n"
"Language-Team: \n"
"Language: eo\n"
@@ -504,10 +504,6 @@ msgstr "E686: Argumento de %s devas esti Listo"
msgid "E712: Argument of %s must be a List or Dictionary"
msgstr "E712: Argumento de %s devas esti Listo aÅ­ Vortaro"
-#: ../eval.c:144
-msgid "E713: Cannot use empty key for Dictionary"
-msgstr "E713: Ne eblas uzi malplenan Ålosilon de Vortaro"
-
#: ../eval.c:145
msgid "E714: List required"
msgstr "E714: Listo bezonata"
@@ -657,6 +653,9 @@ msgstr "E110: Mankas ')'"
msgid "E695: Cannot index a Funcref"
msgstr "E695: Ne eblas indeksi Funcref"
+msgid "E909: Cannot index a special variable"
+msgstr "E909: Ne eblas indeksi specialan variablon"
+
#: ../eval.c:4839
#, c-format
msgid "E112: Option name missing: %s"
@@ -689,7 +688,7 @@ msgstr "E697: Mankas fino de Listo ']': %s"
#: ../eval.c:5750
msgid "Not enough memory to set references, garbage collection aborted!"
-msgstr "Ne sufiĉa memory por valorigi referencojn, senrubigado ĉesigita!"
+msgstr "Ne sufiĉa memoro por valorigi referencojn, senrubigado ĉesigita!"
#: ../eval.c:6475
#, c-format
@@ -874,6 +873,18 @@ msgstr "E745: Uzo de Listo kiel Nombro"
msgid "E728: Using a Dictionary as a Number"
msgstr "E728: Uzo de Vortaro kiel Nombro"
+msgid "E891: Using a Funcref as a Float"
+msgstr "E891: Uzo de Funcref kiel Glitpunktnombro"
+
+msgid "E892: Using a String as a Float"
+msgstr "E892: Uzo de Ĉeno kiel Glitpunktnombro"
+
+msgid "E893: Using a List as a Float"
+msgstr "E893: Uzo de Listo kiel Glitpunktnombro"
+
+msgid "E894: Using a Dictionary as a Float"
+msgstr "E894: Uzo de Vortaro kiel Glitpunktnombro"
+
#: ../eval.c:16259
msgid "E729: using Funcref as a String"
msgstr "E729: uzo de Funcref kiel Ĉeno"
@@ -886,6 +897,9 @@ msgstr "E730: uzo de Listo kiel Ĉeno"
msgid "E731: using Dictionary as a String"
msgstr "E731: uzo de Vortaro kiel Ĉeno"
+msgid "E908: using an invalid value as a String"
+msgstr "E908: uzo de nevalida valoro kiel Ĉeno"
+
#: ../eval.c:16619
#, c-format
msgid "E706: Variable type mismatch for: %s"
@@ -1391,6 +1405,13 @@ msgstr "linio %<PRId64>: %s"
msgid "cmd: %s"
msgstr "kmd: %s"
+msgid "frame is zero"
+msgstr "kadro estas nul"
+
+#, c-format
+msgid "frame at highest level: %d"
+msgstr "kadro je la plej alta nivelo: %d"
+
#: ../ex_cmds2.c:322
#, c-format
msgid "Breakpoint in \"%s%s\" line %<PRId64>"
@@ -2861,6 +2882,9 @@ msgstr "E46: Ne eblas ÅanÄi nurlegeblan variablon \"%s\""
msgid "E794: Cannot set variable in the sandbox: \"%s\""
msgstr "E794: Ne eblas agordi variablon en la sabloludejo: \"%s\""
+msgid "E713: Cannot use empty key for Dictionary"
+msgstr "E713: Ne eblas uzi malplenan Ålosilon de Vortaro"
+
#: ../globals.h:1076
msgid "E47: Error while reading errorfile"
msgstr "E47: Eraro dum legado de erardosiero"
@@ -4090,12 +4114,12 @@ msgid ""
"\n"
"(1) Another program may be editing the same file. If this is the case,\n"
" be careful not to end up with two different instances of the same\n"
-" file when making changes."
+" file when making changes. Quit, or continue with caution.\n"
msgstr ""
"\n"
-"(1) Alia programo eble redaktas la saman dosieron.\n"
-" Se jes, estu singarda por ne havi du malsamajn\n"
-" aperojn de la sama dosiero, kiam vi faros ÅanÄojn."
+"(1) Alia programo eble redaktas la saman dosieron. Se jes, estu singarda\n"
+" por ne havi du malsamajn aperojn de la sama dosiero, kiam vi faros\n"
+" ÅanÄojn. Eliru aÅ­ daÅ­rigu singarde.\n"
#: ../memline.c:3245
msgid " Quit, or continue with caution.\n"
@@ -4792,6 +4816,15 @@ msgstr ""
"\n"
"Ne povis Åalti kuntekston de sekureco por "
+#, c-format
+msgid "Could not set security context %s for %s"
+msgstr "Ne povis Åalti kuntekston de sekureco %s por %s"
+
+#, c-format
+msgid "Could not get security context %s for %s. Removing it!"
+msgstr ""
+"Ne povis akiri kuntekston de sekureco %s por %s. Gi nun estas forigata!"
+
#: ../os_unix.c:1558 ../os_unix.c:1647
#, c-format
msgid "dlerror = \"%s\""
@@ -5717,6 +5750,9 @@ msgstr "Neniu sintaksa elemento difinita por tiu bufro"
msgid "E390: Illegal argument: %s"
msgstr "E390: Nevalida argumento: %s"
+msgid "syntax iskeyword "
+msgstr "sintakso iskeyword "
+
#: ../syntax.c:3299
#, c-format
msgid "E391: No such syntax cluster: %s"
@@ -5813,6 +5849,10 @@ msgstr "E847: Tro da sintaksaj inkluzivoj"
msgid "E789: Missing ']': %s"
msgstr "E789: Mankas ']': %s"
+#, c-format
+msgid "E890: trailing char after ']': %s]%s"
+msgstr "E890: vosta signo post ']': %s]%s"
+
#: ../syntax.c:4531
#, c-format
msgid "E398: Missing '=': %s"
diff --git a/src/nvim/po/it.po b/src/nvim/po/it.po
index 171e155689..084102da60 100644
--- a/src/nvim/po/it.po
+++ b/src/nvim/po/it.po
@@ -13,13 +13,13 @@ msgid ""
msgstr ""
"Project-Id-Version: vim 7.4\n"
"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2015-08-11 20:58+0200\n"
-"PO-Revision-Date: 2015-08-11 22:02+0200\n"
-"Last-Translator: Vlad Sandrini <vlad.gently@gmail.com>\n"
-"Language-Team: Italian Antonio Colombo <azc100@gmail."
-"com> Vlad Sandrini <vlad.gently@gmail."
-"com> Luciano Montanaro <mikelima@cirulla.net>\n"
-"Language: \n"
+"POT-Creation-Date: 2016-02-11 12:10+0100\n"
+"PO-Revision-Date: 2016-02-11 14:42+0200\n"
+"Last-Translator: Antonio Colombo <azc100@gmail.com>\n"
+"Language-Team: Antonio Colombo <azc100@gmail.com>"
+" Vlad Sandrini <vlad.gently@gmail.com"
+" Luciano Montanaro <mikelima@cirulla.net>\n"
+"Language: Italian\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=ISO_8859-1\n"
"Content-Transfer-Encoding: 8-bit\n"
@@ -491,10 +491,6 @@ msgstr "E686: L'argomento di %s deve essere una Lista"
msgid "E712: Argument of %s must be a List or Dictionary"
msgstr "E712: L'argomento di %s deve essere una Lista o un Dizionario"
-#: ../eval.c:144
-msgid "E713: Cannot use empty key for Dictionary"
-msgstr "E713: Non posso usare una chiave nulla per il Dizionario"
-
#: ../eval.c:145
msgid "E714: List required"
msgstr "E714: È necessaria una Lista"
@@ -548,7 +544,7 @@ msgstr "E461: Nome di variabile non ammesso: %s"
# nuovo
#: ../eval.c:157
msgid "E806: using Float as a String"
-msgstr "E806: uso di un numero con virgola come stringa"
+msgstr "E806: uso di un Numero-a-virgola-mobile come Stringa"
#: ../eval.c:1830
msgid "E687: Less targets than List items"
@@ -635,7 +631,7 @@ msgstr "E694: Operazione non valida per Funcref"
#: ../eval.c:4277
msgid "E804: Cannot use '%' with Float"
-msgstr "E804: Non si può usare '%' con un numero con virgola"
+msgstr "E804: Non si può usare '%' con un Numero-a-virgola-mobile"
#: ../eval.c:4478
msgid "E110: Missing ')'"
@@ -645,6 +641,9 @@ msgstr "E110: Manca ')'"
msgid "E695: Cannot index a Funcref"
msgstr "E695: Non posso indicizzare un Funcref"
+msgid "E909: Cannot index a special variable"
+msgstr "E909: Non posso indicizzare una variabile speciale"
+
#: ../eval.c:4839
#, c-format
msgid "E112: Option name missing: %s"
@@ -736,7 +735,7 @@ msgstr "E725: Chiamata di funzione dict in assenza di Dizionario: %s"
#: ../eval.c:7453
msgid "E808: Number or Float required"
-msgstr "E808: Ci vuole un numero intero o con virgola"
+msgstr "E808: Ci vuole un Numero o un Numero-a-virgola-mobile"
#: ../eval.c:7503
msgid "add() argument"
@@ -847,7 +846,7 @@ msgstr "E677: Errore in scrittura su file temporaneo"
#: ../eval.c:16159
msgid "E805: Using a Float as a Number"
-msgstr "E805: Uso di un numero con virgola come intero"
+msgstr "E805: Uso di un Numero-a-virgola-mobile come Numero"
#: ../eval.c:16162
msgid "E703: Using a Funcref as a Number"
@@ -861,6 +860,18 @@ msgstr "E745: Uso di Lista come Numero"
msgid "E728: Using a Dictionary as a Number"
msgstr "E728: Uso di Dizionario come Numero"
+msgid "E891: Using a Funcref as a Float"
+msgstr "E891: Uso di Funcref come Numero-a-virgola-mobile"
+
+msgid "E892: Using a String as a Float"
+msgstr "E892: Uso di Stringa come Numero-a-virgola-mobile"
+
+msgid "E893: Using a List as a Float"
+msgstr "E893: Uso di Lista come Numero-a-virgola-mobile"
+
+msgid "E894: Using a Dictionary as a Float"
+msgstr "E894: Uso di Dizionario come Numero-a-virgola-mobile"
+
#: ../eval.c:16259
msgid "E729: using Funcref as a String"
msgstr "E729: uso di Funcref come Stringa"
@@ -873,6 +884,10 @@ msgstr "E730: uso di Lista come Stringa"
msgid "E731: using Dictionary as a String"
msgstr "E731: uso di Dizionario come Stringa"
+# nuovo
+msgid "E908: using an invalid value as a String"
+msgstr "E908: uso di un valore non valido come Stringa"
+
#: ../eval.c:16619
#, c-format
msgid "E706: Variable type mismatch for: %s"
@@ -960,12 +975,14 @@ msgid "E129: Function name required"
msgstr "E129: Nome funzione necessario"
#: ../eval.c:17824
+#, c-format
msgid "E128: Function name must start with a capital or \"s:\": %s"
-msgstr "E128: Il nome funzione deve iniziare con una maiuscola o \"s:\": %s"
+msgstr "E128: Il nome funzione deve iniziare con maiuscola o \"s:\": %s"
#: ../eval.c:17833
+#, c-format
msgid "E884: Function name cannot contain a colon: %s"
-msgstr "E884: Il nome funzione non può contenere una virgola: %s"
+msgstr "E884: Il nome della funzione non può contenere un due punti: %s"
#: ../eval.c:18336
#, c-format
@@ -1382,6 +1399,13 @@ msgstr "riga %<PRId64>: %s"
msgid "cmd: %s"
msgstr "com: %s"
+msgid "frame is zero"
+msgstr "al livello zero"
+
+#, c-format
+msgid "frame at highest level: %d"
+msgstr "al livello più alto: %d"
+
#: ../ex_cmds2.c:322
#, c-format
msgid "Breakpoint in \"%s%s\" line %<PRId64>"
@@ -1422,8 +1446,7 @@ msgstr "E162: Buffer \"%s\" non salvato dopo modifica"
#: ../ex_cmds2.c:1480
msgid "Warning: Entered other buffer unexpectedly (check autocommands)"
msgstr ""
-"Avviso: Entrato in altro buffer inaspettatamente (controllare "
-"autocomandi)"
+"Avviso: Entrato in altro buffer inaspettatamente (controllare autocomandi)"
#: ../ex_cmds2.c:1826
msgid "E163: There is only one file to edit"
@@ -2301,19 +2324,19 @@ msgstr "[in formato DOS]"
#: ../fileio.c:3801
msgid "[mac]"
-msgstr "[MAC]"
+msgstr "[Mac]"
#: ../fileio.c:3801
msgid "[mac format]"
-msgstr "[in formato MAC]"
+msgstr "[in formato Mac]"
#: ../fileio.c:3807
msgid "[unix]"
-msgstr "[UNIX]"
+msgstr "[Unix]"
#: ../fileio.c:3807
msgid "[unix format]"
-msgstr "[in formato UNIX]"
+msgstr "[in formato Unix]"
#: ../fileio.c:3831
msgid "1 line, "
@@ -2864,6 +2887,9 @@ msgstr "E46: Non posso cambiare la variabile read-only \"%s\""
msgid "E794: Cannot set variable in the sandbox: \"%s\""
msgstr "E794: Non posso impostare la variabile read-only in ambiente protetto: \"%s\""
+msgid "E713: Cannot use empty key for Dictionary"
+msgstr "E713: Non posso usare una chiave nulla per il Dizionario"
+
#: ../globals.h:1076
msgid "E47: Error while reading errorfile"
msgstr "E47: Errore leggendo il file errori"
@@ -4087,12 +4113,12 @@ msgid ""
"\n"
"(1) Another program may be editing the same file. If this is the case,\n"
" be careful not to end up with two different instances of the same\n"
-" file when making changes."
+" file when making changes. Quit, or continue with caution.\n"
msgstr ""
"\n"
-"(1) Un altro programma può essere in edit sullo stesso file.\n"
-" Se è così, attenzione a non trovarti con due versioni\n"
-" differenti dello stesso file a cui vengono apportate modifiche."
+"(1) Un altro programma può essere in edit sullo stesso file. Se è così,\n"
+" attenzione a non finire con due sessioni differenti che modificano lo\n"
+" stesso file. Uscire da Vim, o continuare con cautela.\n"
#: ../memline.c:3245
msgid " Quit, or continue with caution.\n"
@@ -4335,7 +4361,7 @@ msgstr "E766: Argomenti non sufficienti per printf()"
#: ../message.c:3119
msgid "E807: Expected Float argument for printf()"
-msgstr "E807: Numero con virgola atteso come argomento per printf()"
+msgstr "E807: Numero-a-virgola-mobile atteso come argomento per printf()"
#: ../message.c:3873
msgid "E767: Too many arguments to printf()"
@@ -4526,7 +4552,8 @@ msgstr "E574: Tipo di registro sconosciuto: %d"
msgid ""
"E883: search pattern and expression register may not contain two or more "
"lines"
-msgstr "E883: espressione di ricerca e registro dell'espressione non possono "
+msgstr ""
+"E883: espressione di ricerca e registro dell'espressione non possono "
"contenere due o più righe"
#: ../ops.c:5089
@@ -4794,6 +4821,14 @@ msgstr ""
"\n"
"Non posso impostare il contesto di sicurezza per "
+#, c-format
+msgid "Could not set security context %s for %s"
+msgstr "Non posso impostare il contesto di sicurezza %s per %s"
+
+#, c-format
+msgid "Could not get security context %s for %s. Removing it!"
+msgstr "Non posso ottenere il contesto di sicurezza %s per %s. Lo rimuovo!"
+
#: ../os_unix.c:1558 ../os_unix.c:1647
#, c-format
msgid "dlerror = \"%s\""
@@ -4890,6 +4925,7 @@ msgid "E777: String or List expected"
msgstr "E777: aspettavo Stringa o Lista"
#: ../regexp.c:359
+#, c-format
msgid "E369: invalid item in %s%%[]"
msgstr "E369: elemento non valido in %s%%[]"
@@ -5005,6 +5041,7 @@ msgid "External submatches:\n"
msgstr "Sotto-corrispondenze esterne:\n"
#: ../regexp.c:2470
+#, c-format
msgid "E888: (NFA regexp) cannot repeat %s"
msgstr "E888: (NFA regexp) non riesco a ripetere %s"
@@ -5399,8 +5436,7 @@ msgstr "Valore errato per CHECKCOMPOUNDPATTERN in %s riga %d: %s"
#: ../spell.c:4847
#, c-format
msgid "Different combining flag in continued affix block in %s line %d: %s"
-msgstr ""
-"Flag combinazione diverso in blocco affissi continuo in %s riga %d: %s"
+msgstr "Flag combinazione diverso in blocco affissi continuo in %s riga %d: %s"
#: ../spell.c:4850
#, c-format
@@ -5639,10 +5675,12 @@ msgid "E765: 'spellfile' does not have %<PRId64> entries"
msgstr "E765: 'spellfile' non ha %<PRId64> elementi"
#: ../spell.c:8074
+#, c-format
msgid "Word '%.*s' removed from %s"
msgstr "Parola '%.*s' rimossa da %s"
#: ../spell.c:8117
+#, c-format
msgid "Word '%.*s' added to %s"
msgstr "Parola '%.*s' aggiunta a %s"
@@ -5720,6 +5758,9 @@ msgstr "Nessun elemento sintattico definito per questo buffer"
msgid "E390: Illegal argument: %s"
msgstr "E390: Argomento non ammesso: %s"
+msgid "syntax iskeyword "
+msgstr "syntax iskeyword "
+
#: ../syntax.c:3299
#, c-format
msgid "E391: No such syntax cluster: %s"
@@ -5816,6 +5857,9 @@ msgstr "E847: Troppe inclusioni di sintassi"
msgid "E789: Missing ']': %s"
msgstr "E789: Manca ']': %s"
+msgid "E890: trailing char after ']': %s]%s"
+msgstr "E890: Caratteri in più dopo ']': %s]%s"
+
#: ../syntax.c:4531
#, c-format
msgid "E398: Missing '=': %s"
diff --git a/src/nvim/po/ja.euc-jp.po b/src/nvim/po/ja.euc-jp.po
index d3061d3c5a..85042e3506 100644
--- a/src/nvim/po/ja.euc-jp.po
+++ b/src/nvim/po/ja.euc-jp.po
@@ -1,11 +1,11 @@
-# Japanese translation for Vim vim:set foldmethod=marker:
+# Japanese translation for Vim
#
# Do ":help uganda" in Vim to read copying and usage conditions.
# Do ":help credits" in Vim to see a list of people who contributed.
#
-# Last Change: 2013 Jul 06
+# Copyright (C) 2001-2016 MURAOKA Taro <koron.kaoriya@gmail.com>,
+# vim-jp (http://vim-jp.org/)
#
-# Copyright (C) 2001-13 MURAOKA Taro <koron.kaoriya@gmail.com>
# THIS FILE IS DISTRIBUTED UNDER THE VIM LICENSE.
#
# Generated from ja.po, DO NOT EDIT.
@@ -14,10 +14,10 @@ msgid ""
msgstr ""
"Project-Id-Version: Vim 7.4\n"
"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2014-05-26 14:21+0200\n"
-"PO-Revision-Date: 2013-07-06 15:00+0900\n"
+"POT-Creation-Date: 2016-02-01 09:02+0900\n"
+"PO-Revision-Date: 2016-02-01 09:08+0900\n"
"Last-Translator: MURAOKA Taro <koron.kaoriya@gmail.com>\n"
-"Language-Team: MURAOKA Taro <koron.kaoriya@gmail.com>\n"
+"Language-Team: vim-jp (https://github.com/vim-jp/lang-ja)\n"
"Language: Japanese\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=euc-jp\n"
@@ -34,7 +34,7 @@ msgstr "ÆâÉô¥¨¥é¡¼: ̤ÃΤΥª¥×¥·¥ç¥ó·¿¤Ç¤¹"
#: ../buffer.c:92
msgid "[Location List]"
-msgstr "[¾ì½ê¥ê¥¹¥È]"
+msgstr "[¥í¥±¡¼¥·¥ç¥ó¥ê¥¹¥È]"
#: ../buffer.c:93
msgid "[Quickfix List]"
@@ -277,7 +277,7 @@ msgstr "E810: °ì»þ¥Õ¥¡¥¤¥ë¤ÎÆÉ¹þ¤â¤·¤¯¤Ï½ñ¹þ¤¬¤Ç¤­¤Þ¤»¤ó"
#: ../diff.c:755
msgid "E97: Cannot create diffs"
-msgstr "E97: º¹Ê¬¤òºîÀ®¤Ç¤­¤Þ¤»¤ó "
+msgstr "E97: º¹Ê¬¤òºîÀ®¤Ç¤­¤Þ¤»¤ó"
#: ../diff.c:966
msgid "E816: Cannot read patch output"
@@ -293,7 +293,7 @@ msgstr "E99: ¸½ºß¤Î¥Ð¥Ã¥Õ¥¡¤Ïº¹Ê¬¥â¡¼¥É¤Ç¤Ï¤¢¤ê¤Þ¤»¤ó"
#: ../diff.c:2100
msgid "E793: No other buffer in diff mode is modifiable"
-msgstr "E793: º¹Ê¬¥â¡¼¥É¤Ç¤¢¤ë¾¤Î¥Ð¥Ã¥Õ¥¡¤ÏÊѹ¹²Äǽ¤Ç¤¹"
+msgstr "E793: º¹Ê¬¥â¡¼¥É¤Ç¤¢¤ë¾¤Î¥Ð¥Ã¥Õ¥¡¤ÏÊѹ¹¤Ç¤­¤Þ¤»¤ó"
#: ../diff.c:2102
msgid "E100: No other buffer in diff mode"
@@ -349,7 +349,7 @@ msgstr " ¹Ô(Á´ÂÎ)Êä´° (^L^N^P)"
#: ../edit.c:86
msgid " File name completion (^F^N^P)"
-msgstr "¥Õ¥¡¥¤¥ë̾Êä´° (^F^N^P)"
+msgstr " ¥Õ¥¡¥¤¥ë̾Êä´° (^F^N^P)"
#: ../edit.c:87
msgid " Tag completion (^]^N^P)"
@@ -377,7 +377,7 @@ msgstr " ¥³¥Þ¥ó¥É¥é¥¤¥óÊä´° (^V^N^P)"
#: ../edit.c:94
msgid " User defined completion (^U^N^P)"
-msgstr " ¥æ¡¼¥¶ÄêµÁÊä´° (^U^N^P)"
+msgstr " ¥æ¡¼¥¶¡¼ÄêµÁÊä´° (^U^N^P)"
#: ../edit.c:95
msgid " Omni completion (^O^N^P)"
@@ -679,6 +679,11 @@ msgstr "E696: ¥ê¥¹¥È·¿¤Ë¥«¥ó¥Þ¤¬¤¢¤ê¤Þ¤»¤ó: %s"
msgid "E697: Missing end of List ']': %s"
msgstr "E697: ¥ê¥¹¥È·¿¤ÎºÇ¸å¤Ë ']' ¤¬¤¢¤ê¤Þ¤»¤ó: %s"
+#: ../eval.c:5807
+msgid "Not enough memory to set references, garbage collection aborted!"
+msgstr ""
+"¥¬¡¼¥Ù¥Ã¥¸¥³¥ì¥¯¥·¥ç¥ó¤òÃæ»ß¤·¤Þ¤·¤¿! »²¾È¤òºîÀ®¤¹¤ë¤Î¤Ë¥á¥â¥ê¤¬ÉÔ­¤·¤Þ¤·¤¿"
+
#: ../eval.c:6475
#, c-format
msgid "E720: Missing colon in Dictionary: %s"
@@ -721,7 +726,7 @@ msgstr "E117: ̤ÃΤδؿô¤Ç¤¹: %s"
#: ../eval.c:7383
#, c-format
msgid "E119: Not enough arguments for function: %s"
-msgstr "E119: ´Ø¿ô¤Î°ú¿ô¤¬¾¯¤Ê²á¤®¤Þ¤¹: %s"
+msgstr "E119: ´Ø¿ô¤Î°ú¿ô¤¬Â­¤ê¤Þ¤»¤ó: %s"
#: ../eval.c:7387
#, c-format
@@ -826,18 +831,16 @@ msgid "sort() argument"
msgstr "sort() ¤Î°ú¿ô"
#: ../eval.c:13721
-#, fuzzy
msgid "uniq() argument"
-msgstr "add() ¤Î°ú¿ô"
+msgstr "uniq() ¤Î°ú¿ô"
#: ../eval.c:13776
msgid "E702: Sort compare function failed"
msgstr "E702: ¥½¡¼¥È¤ÎÈæ³Ó´Ø¿ô¤¬¼ºÇÔ¤·¤Þ¤·¤¿"
#: ../eval.c:13806
-#, fuzzy
msgid "E882: Uniq compare function failed"
-msgstr "E702: ¥½¡¼¥È¤ÎÈæ³Ó´Ø¿ô¤¬¼ºÇÔ¤·¤Þ¤·¤¿"
+msgstr "E882: Uniq ¤ÎÈæ³Ó´Ø¿ô¤¬¼ºÇÔ¤·¤Þ¤·¤¿"
#: ../eval.c:14085
msgid "(Invalid)"
@@ -863,6 +866,18 @@ msgstr "E745: ¥ê¥¹¥È·¿¤ò¿ôÃͤȤ·¤Æ°·¤Ã¤Æ¤¤¤Þ¤¹"
msgid "E728: Using a Dictionary as a Number"
msgstr "E728: ¼­½ñ·¿¤ò¿ôÃͤȤ·¤Æ°·¤Ã¤Æ¤¤¤Þ¤¹"
+msgid "E891: Using a Funcref as a Float"
+msgstr "E891: ´Ø¿ô»²¾È·¿¤òÉâÆ°¾®¿ôÅÀ¿ô¤È¤·¤Æ°·¤Ã¤Æ¤¤¤Þ¤¹¡£"
+
+msgid "E892: Using a String as a Float"
+msgstr "E892: ʸ»úÎó¤òÉâÆ°¾®¿ôÅÀ¿ô¤È¤·¤Æ°·¤Ã¤Æ¤¤¤Þ¤¹"
+
+msgid "E893: Using a List as a Float"
+msgstr "E893: ¥ê¥¹¥È·¿¤òÉâÆ°¾®¿ôÅÀ¿ô¤È¤·¤Æ°·¤Ã¤Æ¤¤¤Þ¤¹"
+
+msgid "E894: Using a Dictionary as a Float"
+msgstr "E894: ¼­½ñ·¿¤òÉâÆ°¾®¿ôÅÀ¿ô¤È¤·¤Æ°·¤Ã¤Æ¤¤¤Þ¤¹"
+
#: ../eval.c:16259
msgid "E729: using Funcref as a String"
msgstr "E729: ´Ø¿ô»²¾È·¿¤òʸ»úÎó¤È¤·¤Æ°·¤Ã¤Æ¤¤¤Þ¤¹"
@@ -961,14 +976,14 @@ msgid "E129: Function name required"
msgstr "E129: ´Ø¿ô̾¤¬Í׵ᤵ¤ì¤Þ¤¹"
#: ../eval.c:17824
-#, fuzzy, c-format
+#, c-format
msgid "E128: Function name must start with a capital or \"s:\": %s"
-msgstr "E128: ´Ø¿ô̾¤ÏÂçʸ»ú¤Ç»Ï¤Þ¤ë¤«¥³¥í¥ó¤ò´Þ¤Þ¤Ê¤±¤ì¤Ð¤Ê¤ê¤Þ¤»¤ó: %s"
+msgstr "E128: ´Ø¿ô̾¤ÏÂçʸ»ú¤« \"s:\" ¤Ç»Ï¤Þ¤é¤Ê¤±¤ì¤Ð¤Ê¤ê¤Þ¤»¤ó: %s"
#: ../eval.c:17833
-#, fuzzy, c-format
+#, c-format
msgid "E884: Function name cannot contain a colon: %s"
-msgstr "E128: ´Ø¿ô̾¤ÏÂçʸ»ú¤Ç»Ï¤Þ¤ë¤«¥³¥í¥ó¤ò´Þ¤Þ¤Ê¤±¤ì¤Ð¤Ê¤ê¤Þ¤»¤ó: %s"
+msgstr "E884: ´Ø¿ô̾¤Ë¤Ï¥³¥í¥ó¤Ï´Þ¤á¤é¤ì¤Þ¤»¤ó: %s"
#: ../eval.c:18336
#, c-format
@@ -1081,7 +1096,7 @@ msgstr "E136: viminfo: ¥¨¥é¡¼¤¬Â¿²á¤®¤ë¤Î¤Ç, °Ê¹ß¤Ï¥¹¥­¥Ã¥×¤·¤Þ¤¹"
#: ../ex_cmds.c:1458
#, c-format
msgid "Reading viminfo file \"%s\"%s%s%s"
-msgstr "viminfo¥Õ¥¡¥¤¥ë \"%s\"%s%s%s ¤òÆÉ¹þ¤ßÃæ "
+msgstr "viminfo¥Õ¥¡¥¤¥ë \"%s\"%s%s%s ¤òÆÉ¹þ¤ßÃæ"
#: ../ex_cmds.c:1460
msgid " info"
@@ -1357,6 +1372,10 @@ msgstr "E158: ̵¸ú¤Ê¥Ð¥Ã¥Õ¥¡Ì¾¤Ç¤¹: %s"
msgid "E157: Invalid sign ID: %<PRId64>"
msgstr "E157: ̵¸ú¤Êsign¼±Ê̻ҤǤ¹: %<PRId64>"
+#, c-format
+msgid "E885: Not possible to change sign %s"
+msgstr "E885: Êѹ¹¤Ç¤­¤Ê¤¤ sign ¤Ç¤¹: %s"
+
#: ../ex_cmds.c:6066
msgid " (not supported)"
msgstr " (È󥵥ݡ¼¥È)"
@@ -1379,6 +1398,13 @@ msgstr "¹Ô %<PRId64>: %s"
msgid "cmd: %s"
msgstr "¥³¥Þ¥ó¥É: %s"
+msgid "frame is zero"
+msgstr "¥Õ¥ì¡¼¥à¤¬ 0 ¤Ç¤¹"
+
+#, c-format
+msgid "frame at highest level: %d"
+msgstr "ºÇ¹â¥ì¥Ù¥ë¤Î¥Õ¥ì¡¼¥à: %d"
+
#: ../ex_cmds2.c:322
#, c-format
msgid "Breakpoint in \"%s%s\" line %<PRId64>"
@@ -1528,7 +1554,8 @@ msgstr "E197: ¸À¸ì¤ò \"%s\" ¤ËÀßÄê¤Ç¤­¤Þ¤»¤ó"
#. don't wait for return
#: ../ex_docmd.c:387
msgid "Entering Ex mode. Type \"visual\" to go to Normal mode."
-msgstr "Ex¥â¡¼¥É¤ËÆþ¤ê¤Þ¤¹. ¥Î¡¼¥Þ¥ë¤ËÌá¤ë¤Ë¤Ï\"visual\"¤ÈÆþÎϤ·¤Æ¤¯¤À¤µ¤¤."
+msgstr ""
+"Ex¥â¡¼¥É¤ËÆþ¤ê¤Þ¤¹. ¥Î¡¼¥Þ¥ë¥â¡¼¥É¤ËÌá¤ë¤Ë¤Ï\"visual\"¤ÈÆþÎϤ·¤Æ¤¯¤À¤µ¤¤."
#: ../ex_docmd.c:428
msgid "E501: At end-of-file"
@@ -1553,7 +1580,7 @@ msgstr "´Ø¿ô¤ÎºÇ¸å¤Ç¤¹"
#: ../ex_docmd.c:1628
msgid "E464: Ambiguous use of user-defined command"
-msgstr "E464: ¥æ¡¼¥¶ÄêµÁ¥³¥Þ¥ó¥É¤Î¤¢¤¤¤Þ¤¤¤Ê»ÈÍѤǤ¹"
+msgstr "E464: ¥æ¡¼¥¶¡¼ÄêµÁ¥³¥Þ¥ó¥É¤Î¤¢¤¤¤Þ¤¤¤Ê»ÈÍѤǤ¹"
#: ../ex_docmd.c:1638
msgid "E492: Not an editor command"
@@ -1606,14 +1633,14 @@ msgstr "E174: ¥³¥Þ¥ó¥É¤¬´û¤Ë¤¢¤ê¤Þ¤¹: ºÆÄêµÁ¤¹¤ë¤Ë¤Ï ! ¤òÄɲ䷤Ƥ¯¤À¤µ¤¤"
#: ../ex_docmd.c:4432
msgid ""
"\n"
-" Name Args Range Complete Definition"
+" Name Args Address Complete Definition"
msgstr ""
"\n"
-" ̾Á° °ú¿ô ÈÏ°Ï Êä´° ÄêµÁ"
+" ̾Á° °ú¿ô ¥¢¥É¥ì¥¹ Êä´° ÄêµÁ"
#: ../ex_docmd.c:4516
msgid "No user-defined commands found"
-msgstr "¥æ¡¼¥¶ÄêµÁ¥³¥Þ¥ó¥É¤¬¸«¤Ä¤«¤ê¤Þ¤»¤ó¤Ç¤·¤¿"
+msgstr "¥æ¡¼¥¶¡¼ÄêµÁ¥³¥Þ¥ó¥É¤¬¸«¤Ä¤«¤ê¤Þ¤»¤ó¤Ç¤·¤¿"
#: ../ex_docmd.c:4538
msgid "E175: No attribute specified"
@@ -1633,7 +1660,10 @@ msgstr "E178: ¥«¥¦¥ó¥È¤Î¾ÊάÃͤ¬Ìµ¸ú¤Ç¤¹"
#: ../ex_docmd.c:4625
msgid "E179: argument required for -complete"
-msgstr "E179: -Êä´°¤Î¤¿¤á¤Î°ú¿ô¤¬É¬ÍפǤ¹"
+msgstr "E179: -complete ¤Ë¤Ï°ú¿ô¤¬É¬ÍפǤ¹"
+
+msgid "E179: argument required for -addr"
+msgstr "E179: -addr ¤Ë¤Ï°ú¿ô¤¬É¬ÍפǤ¹"
#: ../ex_docmd.c:4635
#, c-format
@@ -1650,12 +1680,16 @@ msgstr "E183: ¥æ¡¼¥¶ÄêµÁ¥³¥Þ¥ó¥É¤Ï±ÑÂçʸ»ú¤Ç»Ï¤Þ¤é¤Ê¤±¤ì¤Ð¤Ê¤ê¤Þ¤»¤ó"
#: ../ex_docmd.c:4696
msgid "E841: Reserved name, cannot be used for user defined command"
-msgstr "E841: ͽÌó̾¤Ê¤Î¤Ç, ¥æ¡¼¥¶ÄêµÁ¥³¥Þ¥ó¥É¤ËÍøÍѤǤ­¤Þ¤»¤ó"
+msgstr "E841: ͽÌó̾¤Ê¤Î¤Ç, ¥æ¡¼¥¶¡¼ÄêµÁ¥³¥Þ¥ó¥É¤ËÍøÍѤǤ­¤Þ¤»¤ó"
#: ../ex_docmd.c:4751
#, c-format
msgid "E184: No such user-defined command: %s"
-msgstr "E184: ¤½¤Î¥æ¡¼¥¶ÄêµÁ¥³¥Þ¥ó¥É¤Ï¤¢¤ê¤Þ¤»¤ó: %s"
+msgstr "E184: ¤½¤Î¥æ¡¼¥¶¡¼ÄêµÁ¥³¥Þ¥ó¥É¤Ï¤¢¤ê¤Þ¤»¤ó: %s"
+
+#, c-format
+msgid "E180: Invalid address type value: %s"
+msgstr "E180: ̵¸ú¤Ê¥¢¥É¥ì¥¹¥¿¥¤¥×ÃͤǤ¹: %s"
#: ../ex_docmd.c:5219
#, c-format
@@ -2019,11 +2053,11 @@ msgstr "ÉÔÀµ¤Ê¥Õ¥¡¥¤¥ë̾"
#: ../fileio.c:395 ../fileio.c:476 ../fileio.c:2543 ../fileio.c:2578
msgid "is a directory"
-msgstr " ¤Ï¥Ç¥£¥ì¥¯¥È¥ê¤Ç¤¹"
+msgstr "¤Ï¥Ç¥£¥ì¥¯¥È¥ê¤Ç¤¹"
#: ../fileio.c:397
msgid "is not a file"
-msgstr " ¤Ï¥Õ¥¡¥¤¥ë¤Ç¤Ï¤¢¤ê¤Þ¤»¤ó"
+msgstr "¤Ï¥Õ¥¡¥¤¥ë¤Ç¤Ï¤¢¤ê¤Þ¤»¤ó"
#: ../fileio.c:508 ../fileio.c:3522
msgid "[New File]"
@@ -2039,7 +2073,7 @@ msgstr "[¥Õ¥¡¥¤¥ë²áÂç]"
#: ../fileio.c:534
msgid "[Permission Denied]"
-msgstr "[ǧ²Ä¤¬¤¢¤ê¤Þ¤»¤ó]"
+msgstr "[¸¢¸Â¤¬¤¢¤ê¤Þ¤»¤ó]"
#: ../fileio.c:653
msgid "E200: *ReadPre autocommands made the file unreadable"
@@ -2210,7 +2244,7 @@ msgstr " ÊÑ´¹¥¨¥é¡¼"
#: ../fileio.c:3509
#, c-format
msgid " in line %<PRId64>;"
-msgstr "¹Ô %<PRId64>;"
+msgstr " ¹Ô %<PRId64>;"
#: ../fileio.c:3519
msgid "[Device]"
@@ -2766,9 +2800,8 @@ msgid "E37: No write since last change (add ! to override)"
msgstr "E37: ºÇ¸å¤ÎÊѹ¹¤¬Êݸ¤µ¤ì¤Æ¤¤¤Þ¤»¤ó (! ¤òÄɲäÇÊѹ¹¤òÇË´þ)"
#: ../globals.h:1055
-#, fuzzy
msgid "E37: No write since last change"
-msgstr "[ºÇ¸å¤ÎÊѹ¹¤¬Êݸ¤µ¤ì¤Æ¤¤¤Þ¤»¤ó]\n"
+msgstr "E37: ºÇ¸å¤ÎÊѹ¹¤¬Êݸ¤µ¤ì¤Æ¤¤¤Þ¤»¤ó"
#: ../globals.h:1056
msgid "E38: Null argument"
@@ -2810,7 +2843,7 @@ msgstr "E42: ¥¨¥é¡¼¤Ï¤¢¤ê¤Þ¤»¤ó"
#: ../globals.h:1067
msgid "E776: No location list"
-msgstr "E776: ¾ì½ê¥ê¥¹¥È¤Ï¤¢¤ê¤Þ¤»¤ó"
+msgstr "E776: ¥í¥±¡¼¥·¥ç¥ó¥ê¥¹¥È¤Ï¤¢¤ê¤Þ¤»¤ó"
#: ../globals.h:1068
msgid "E43: Damaged match string"
@@ -3831,7 +3864,7 @@ msgid ""
"\n"
msgstr ""
"\n"
-"¤½¤ì¤«¤é.swp¥Õ¥¡¥¤¥ë¤òºï½ü¤·¤Æ¤¯¤À¤µ¤¤\n"
+"¸µ¤Î.swp¥Õ¥¡¥¤¥ë¤Ïºï½ü¤·¤Æ¤â¹½¤¤¤Þ¤»¤ó\n"
"\n"
#. use msg() to start the scrolling properly
@@ -3845,7 +3878,7 @@ msgstr " ¸½ºß¤Î¥Ç¥£¥ì¥¯¥È¥ê:\n"
#: ../memline.c:1448
msgid " Using specified name:\n"
-msgstr " ¤¢¤ë̾Á°¤ò»ÈÍÑÃæ:\n"
+msgstr " °Ê²¼¤Î̾Á°¤ò»ÈÍÑÃæ:\n"
#: ../memline.c:1450
msgid " In directory "
@@ -3901,7 +3934,7 @@ msgid ""
" user name: "
msgstr ""
"\n"
-" ¥æ¡¼¥¶Ì¾: "
+" ¥æ¡¼¥¶¡¼Ì¾: "
#: ../memline.c:1568
msgid " host name: "
@@ -4050,12 +4083,12 @@ msgid ""
msgstr ""
"\n"
"(1) ÊÌ¤Î¥×¥í¥°¥é¥à¤¬Æ±¤¸¥Õ¥¡¥¤¥ë¤òÊÔ½¸¤·¤Æ¤¤¤ë¤«¤â¤·¤ì¤Þ¤»¤ó.\n"
-" ¤³¤Î¾ì¹ç¤Ë¤Ï, Êѹ¹¤ò¤·¤¿ºÝ¤ËºÇ½ªÅª¤Ë, Ʊ¤¸¥Õ¥¡¥¤¥ë¤Î°Û¤Ê¤ë\n"
-" 2¤Ä¤Î¥¤¥ó¥¹¥¿¥ó¥¹¤¬¤Ç¤­¤Æ¤·¤Þ¤¦¤³¤È¤ËÃí°Õ¤·¤Æ¤¯¤À¤µ¤¤."
+" ¤³¤Î¾ì¹ç¤Ë¤Ï, Êѹ¹¤ò¤·¤Æ¤·¤Þ¤¦¤È1¤Ä¤Î¥Õ¥¡¥¤¥ë¤ËÂФ·¤Æ°Û¤Ê¤ë2¤Ä¤Î\n"
+" ¥¤¥ó¥¹¥¿¥ó¥¹¤¬¤Ç¤­¤Æ¤·¤Þ¤¦¤Î¤Ç, ¤½¤¦¤·¤Ê¤¤¤è¤¦¤Ëµ¤¤ò¤Ä¤±¤Æ¤¯¤À¤µ¤¤."
#: ../memline.c:3245
msgid " Quit, or continue with caution.\n"
-msgstr " ½ªÎ»¤¹¤ë¤«, Ãí°Õ¤·¤Ê¤¬¤é³¤±¤Æ¤¯¤À¤µ¤¤.\n"
+msgstr " ½ªÎ»¤¹¤ë¤«, Ãí°Õ¤·¤Ê¤¬¤é³¤±¤Æ¤¯¤À¤µ¤¤.\n"
#: ../memline.c:3246
msgid "(2) An edit session for this file crashed.\n"
@@ -4479,6 +4512,11 @@ msgstr ""
msgid "E574: Unknown register type %d"
msgstr "E574: ̤ÃΤΥ쥸¥¹¥¿·¿ %d ¤Ç¤¹"
+msgid ""
+"E883: search pattern and expression register may not contain two or more "
+"lines"
+msgstr "E883: ¸¡º÷¥Ñ¥¿¡¼¥ó¤È¼°¥ì¥¸¥¹¥¿¤Ë¤Ï2¹Ô°Ê¾å¤ò´Þ¤á¤é¤ì¤Þ¤»¤ó"
+
#: ../ops.c:5089
#, c-format
msgid "%<PRId64> Cols; "
@@ -4563,6 +4601,10 @@ msgstr "E522: termcap Æâ¤Ë¸«¤Ä¤«¤ê¤Þ¤»¤ó"
msgid "E539: Illegal character <%s>"
msgstr "E539: ÉÔÀµ¤Êʸ»ú¤Ç¤¹ <%s>"
+#, c-format
+msgid "For option %s"
+msgstr "¥ª¥×¥·¥ç¥ó: %s"
+
#: ../option.c:3862
msgid "E529: Cannot set 'term' to empty string"
msgstr "E529: 'term' ¤Ë¤Ï¶õʸ»úÎó¤òÀßÄê¤Ç¤­¤Þ¤»¤ó"
@@ -4740,6 +4782,14 @@ msgstr ""
"\n"
"¥»¥­¥å¥ê¥Æ¥£¥³¥ó¥Æ¥­¥¹¥È¤òÀßÄê¤Ç¤­¤Þ¤»¤ó "
+#, c-format
+msgid "Could not set security context %s for %s"
+msgstr "¥»¥­¥å¥ê¥Æ¥£¥³¥ó¥Æ¥­¥¹¥È %s ¤ò %s ¤ËÀßÄê¤Ç¤­¤Þ¤»¤ó"
+
+#, c-format
+msgid "Could not get security context %s for %s. Removing it!"
+msgstr "¥»¥­¥å¥ê¥Æ¥£¥³¥ó¥Æ¥­¥¹¥È %s ¤ò %s ¤«¤é¼èÆÀ¤Ç¤­¤Þ¤»¤ó. ºï½ü¤·¤Þ¤¹!"
+
#: ../os_unix.c:1558 ../os_unix.c:1647
#, c-format
msgid "dlerror = \"%s\""
@@ -4960,6 +5010,10 @@ msgstr "E554: %s{...} Æâ¤Ëʸˡ¥¨¥é¡¼¤¬¤¢¤ê¤Þ¤¹"
msgid "External submatches:\n"
msgstr "³°Éô¤ÎÉôʬ³ºÅö:\n"
+#, c-format
+msgid "E888: (NFA regexp) cannot repeat %s"
+msgstr "E888: (NFA Àµµ¬É½¸½) ·«¤êÊÖ¤»¤Þ¤»¤ó %s"
+
#: ../regexp.c:7022
msgid ""
"E864: \\%#= can only be followed by 0, 1, or 2. The automatic engine will be "
@@ -4968,6 +5022,9 @@ msgstr ""
"E864: \\%#= ¤Ë¤Ï 0, 1 ¤â¤·¤¯¤Ï 2 ¤Î¤ß¤¬Â³¤±¤é¤ì¤Þ¤¹¡£Àµµ¬É½¸½¥¨¥ó¥¸¥ó¤Ï¼«Æ°Áª"
"Âò¤µ¤ì¤Þ¤¹¡£"
+msgid "Switching to backtracking RE engine for pattern: "
+msgstr "¼¡¤Î¥Ñ¥¿¡¼¥ó¤Ë¥Ð¥Ã¥¯¥È¥é¥Ã¥­¥ó¥° RE ¥¨¥ó¥¸¥ó¤òŬÍѤ·¤Þ¤¹: "
+
#: ../regexp_nfa.c:239
msgid "E865: (NFA) Regexp end encountered prematurely"
msgstr "E865: (NFA) ´üÂÔ¤è¤êÁ᤯Àµµ¬É½¸½¤Î½ªÃ¼¤ËÅþ㤷¤Þ¤·¤¿"
@@ -4980,7 +5037,7 @@ msgstr "E866: (NFA Àµµ¬É½¸½) °ÌÃÖ¤¬¸í¤Ã¤Æ¤¤¤Þ¤¹: %c"
#: ../regexp_nfa.c:242
#, c-format
msgid "E877: (NFA regexp) Invalid character class: %<PRId64>"
-msgstr ""
+msgstr "E877: (NFA Àµµ¬É½¸½) ̵¸ú¤Êʸ»ú¥¯¥é¥¹: %<PRId64>"
#: ../regexp_nfa.c:1261
#, c-format
@@ -5590,14 +5647,14 @@ msgid "E765: 'spellfile' does not have %<PRId64> entries"
msgstr "E765: 'spellfile' ¤Ë¤Ï %<PRId64> ¸Ä¤Î¥¨¥ó¥È¥ê¤Ï¤¢¤ê¤Þ¤»¤ó"
#: ../spell.c:8074
-#, fuzzy, c-format
+#, c-format
msgid "Word '%.*s' removed from %s"
-msgstr "%s ¤«¤éñ¸ì¤¬ºï½ü¤µ¤ì¤Þ¤·¤¿"
+msgstr "ñ¸ì '%.*s' ¤¬ %s ¤«¤éºï½ü¤µ¤ì¤Þ¤·¤¿"
#: ../spell.c:8117
-#, fuzzy, c-format
+#, c-format
msgid "Word '%.*s' added to %s"
-msgstr "%s ¤Ëñ¸ì¤¬Äɲ䵤ì¤Þ¤·¤¿"
+msgstr "ñ¸ì '%.*s' ¤¬ %s ¤ØÄɲ䵤ì¤Þ¤·¤¿"
#: ../spell.c:8381
msgid "E763: Word characters differ between spell files"
@@ -5673,6 +5730,9 @@ msgstr "¤³¤Î¥Ð¥Ã¥Õ¥¡¤ËÄêµÁ¤µ¤ì¤¿¹½Ê¸Í×ÁǤϤ¢¤ê¤Þ¤»¤ó"
msgid "E390: Illegal argument: %s"
msgstr "E390: ÉÔÀµ¤Ê°ú¿ô¤Ç¤¹: %s"
+msgid "syntax iskeyword "
+msgstr "¥·¥ó¥¿¥Ã¥¯¥¹ÍÑ iskeyword "
+
#: ../syntax.c:3299
#, c-format
msgid "E391: No such syntax cluster: %s"
@@ -5769,6 +5829,10 @@ msgstr "E847: ¹½Ê¸¤Î¼è¤ê¹þ¤ß(include)¤¬Â¿²á¤®¤Þ¤¹"
msgid "E789: Missing ']': %s"
msgstr "E789: ']' ¤¬¤¢¤ê¤Þ¤»¤ó: %s"
+#, c-format
+msgid "E890: trailing char after ']': %s]%s"
+msgstr "E890: ']' ¤Î¸å¤í¤Ë;ʬ¤Êʸ»ú¤¬¤¢¤ê¤Þ¤¹: %s]%s"
+
#: ../syntax.c:4531
#, c-format
msgid "E398: Missing '=': %s"
@@ -5874,7 +5938,7 @@ msgstr "E415: ͽ´ü¤»¤ÌÅù¹æ¤Ç¤¹: %s"
#: ../syntax.c:6395
#, c-format
msgid "E416: missing equal sign: %s"
-msgstr "E416: Åù¹æ¤¬¤¬¤¢¤ê¤Þ¤»¤ó: %s"
+msgstr "E416: Åù¹æ¤¬¤¢¤ê¤Þ¤»¤ó: %s"
#: ../syntax.c:6418
#, c-format
@@ -6078,9 +6142,8 @@ msgstr "Vim: ÆþÎϤòÆÉ¹þ¤ßÃæ¤Î¥¨¥é¡¼¤Ë¤è¤ê½ªÎ»¤·¤Þ¤¹...\n"
#. This happens when the FileChangedRO autocommand changes the
#. * file in a way it becomes shorter.
#: ../undo.c:379
-#, fuzzy
msgid "E881: Line count changed unexpectedly"
-msgstr "E834: ͽ´ü¤»¤º¹Ô¥«¥¦¥ó¥È¤¬ÊѤï¤ê¤Þ¤·¤¿"
+msgstr "E881: ͽ´ü¤»¤º¹Ô¥«¥¦¥ó¥È¤¬ÊѤï¤ê¤Þ¤·¤¿"
#: ../undo.c:627
#, c-format
@@ -6287,23 +6350,23 @@ msgstr " ¥·¥¹¥Æ¥à vimrc: \""
#: ../version.c:672
msgid " user vimrc file: \""
-msgstr " ¥æ¡¼¥¶ vimrc: \""
+msgstr " ¥æ¡¼¥¶¡¼ vimrc: \""
#: ../version.c:677
msgid " 2nd user vimrc file: \""
-msgstr " Âè2¥æ¡¼¥¶ vimrc: \""
+msgstr " Âè2¥æ¡¼¥¶¡¼ vimrc: \""
#: ../version.c:682
msgid " 3rd user vimrc file: \""
-msgstr " Âè3¥æ¡¼¥¶ vimrc: \""
+msgstr " Âè3¥æ¡¼¥¶¡¼ vimrc: \""
#: ../version.c:687
msgid " user exrc file: \""
-msgstr " ¥æ¡¼¥¶ exrc: \""
+msgstr " ¥æ¡¼¥¶¡¼ exrc: \""
#: ../version.c:692
msgid " 2nd user exrc file: \""
-msgstr " Âè2¥æ¡¼¥¶ exrc: \""
+msgstr " Âè2¥æ¡¼¥¶¡¼ exrc: \""
#: ../version.c:699
msgid " fall-back for $VIM: \""
@@ -6379,7 +6442,7 @@ msgstr "Vim¤Î³«È¯¤ò±þ±ç¤·¤Æ¤¯¤À¤µ¤¤!"
#: ../version.c:828
msgid "Become a registered Vim user!"
-msgstr "Vim¤ÎÅÐÏ¿¥æ¡¼¥¶¤Ë¤Ê¤Ã¤Æ¤¯¤À¤µ¤¤!"
+msgstr "Vim¤ÎÅÐÏ¿¥æ¡¼¥¶¡¼¤Ë¤Ê¤Ã¤Æ¤¯¤À¤µ¤¤!"
#: ../version.c:831
msgid "type :help sponsor<Enter> for information "
@@ -6391,7 +6454,7 @@ msgstr "¾ÜºÙ¤Ê¾ðÊó¤Ï :help register<Enter> "
#: ../version.c:834
msgid "menu Help->Sponsor/Register for information "
-msgstr "¾ÜºÙ¤Ï¥á¥Ë¥å¡¼¤Î ¥Ø¥ë¥×¢ª¥¹¥Ý¥ó¥µ¡¼/ÅÐÏ¿ ¤ò»²¾È¤·¤Æ²¼¤µ¤¤ "
+msgstr "¾ÜºÙ¤Ï¥á¥Ë¥å¡¼¤Î ¥Ø¥ë¥×->¥¹¥Ý¥ó¥µ¡¼/ÅÐÏ¿ ¤ò»²¾È¤·¤Æ²¼¤µ¤¤"
#: ../window.c:119
msgid "Already only one window"
@@ -6429,6 +6492,9 @@ msgstr "E445: ¾¤Î¥¦¥£¥ó¥É¥¦¤Ë¤ÏÊѹ¹¤¬¤¢¤ê¤Þ¤¹"
msgid "E446: No file name under cursor"
msgstr "E446: ¥«¡¼¥½¥ë¤Î²¼¤Ë¥Õ¥¡¥¤¥ë̾¤¬¤¢¤ê¤Þ¤»¤ó"
+msgid "List or number required"
+msgstr "¥ê¥¹¥È¤«¿ôÃͤ¬É¬ÍפǤ¹"
+
#~ msgid "E831: bf_key_init() called with empty password"
#~ msgstr "E831: bf_key_init() ¤¬¶õ¥Ñ¥¹¥ï¡¼¥É¤Ç¸Æ¤Ó½Ð¤µ¤ì¤Þ¤·¤¿"
diff --git a/src/nvim/po/ja.po b/src/nvim/po/ja.po
index 6bdfcb426f..8a3fcb8f78 100644
--- a/src/nvim/po/ja.po
+++ b/src/nvim/po/ja.po
@@ -1,11 +1,11 @@
-# Japanese translation for Vim vim:set foldmethod=marker:
+# Japanese translation for Vim
#
# Do ":help uganda" in Vim to read copying and usage conditions.
# Do ":help credits" in Vim to see a list of people who contributed.
#
-# Last Change: 2013 Jul 06
+# Copyright (C) 2001-2016 MURAOKA Taro <koron.kaoriya@gmail.com>,
+# vim-jp (http://vim-jp.org/)
#
-# Copyright (C) 2001-13 MURAOKA Taro <koron.kaoriya@gmail.com>
# THIS FILE IS DISTRIBUTED UNDER THE VIM LICENSE.
#
# Original translations.
@@ -14,10 +14,10 @@ msgid ""
msgstr ""
"Project-Id-Version: Vim 7.4\n"
"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2014-05-26 14:21+0200\n"
-"PO-Revision-Date: 2013-07-06 15:00+0900\n"
+"POT-Creation-Date: 2016-02-01 09:02+0900\n"
+"PO-Revision-Date: 2013-06-02-01 09:08+09n"
"Last-Translator: MURAOKA Taro <koron.kaoriya@gmail.com>\n"
-"Language-Team: MURAOKA Taro <koron.kaoriya@gmail.com>\n"
+"Language-Team: vim-jp (https://github.com/vim-jp/lang-ja)\n"
"Language: Japanese\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=utf-8\n"
@@ -34,7 +34,7 @@ msgstr "内部エラー: 未知ã®ã‚ªãƒ—ション型ã§ã™"
#: ../buffer.c:92
msgid "[Location List]"
-msgstr "[場所リスト]"
+msgstr "[ロケーションリスト]"
#: ../buffer.c:93
msgid "[Quickfix List]"
@@ -277,7 +277,7 @@ msgstr "E810: 一時ファイルã®èª­è¾¼ã‚‚ã—ãã¯æ›¸è¾¼ãŒã§ãã¾ã›ã‚“"
#: ../diff.c:755
msgid "E97: Cannot create diffs"
-msgstr "E97: 差分を作æˆã§ãã¾ã›ã‚“ "
+msgstr "E97: 差分を作æˆã§ãã¾ã›ã‚“"
#: ../diff.c:966
msgid "E816: Cannot read patch output"
@@ -293,7 +293,7 @@ msgstr "E99: ç¾åœ¨ã®ãƒãƒƒãƒ•ã‚¡ã¯å·®åˆ†ãƒ¢ãƒ¼ãƒ‰ã§ã¯ã‚りã¾ã›ã‚“"
#: ../diff.c:2100
msgid "E793: No other buffer in diff mode is modifiable"
-msgstr "E793: 差分モードã§ã‚ã‚‹ä»–ã®ãƒãƒƒãƒ•ã‚¡ã¯å¤‰æ›´å¯èƒ½ã§ã™"
+msgstr "E793: 差分モードã§ã‚ã‚‹ä»–ã®ãƒãƒƒãƒ•ã‚¡ã¯å¤‰æ›´ã§ãã¾ã›ã‚“"
#: ../diff.c:2102
msgid "E100: No other buffer in diff mode"
@@ -349,7 +349,7 @@ msgstr " 行(全体)補完 (^L^N^P)"
#: ../edit.c:86
msgid " File name completion (^F^N^P)"
-msgstr "ファイルå補完 (^F^N^P)"
+msgstr " ファイルå補完 (^F^N^P)"
#: ../edit.c:87
msgid " Tag completion (^]^N^P)"
@@ -377,7 +377,7 @@ msgstr " コマンドライン補完 (^V^N^P)"
#: ../edit.c:94
msgid " User defined completion (^U^N^P)"
-msgstr " ユーザ定義補完 (^U^N^P)"
+msgstr " ユーザー定義補完 (^U^N^P)"
#: ../edit.c:95
msgid " Omni completion (^O^N^P)"
@@ -679,6 +679,10 @@ msgstr "E696: リスト型ã«ã‚«ãƒ³ãƒžãŒã‚りã¾ã›ã‚“: %s"
msgid "E697: Missing end of List ']': %s"
msgstr "E697: ãƒªã‚¹ãƒˆåž‹ã®æœ€å¾Œã« ']' ãŒã‚りã¾ã›ã‚“: %s"
+msgid "Not enough memory to set references, garbage collection aborted!"
+msgstr ""
+"ガーベッジコレクションを中止ã—ã¾ã—ãŸ! å‚照を作æˆã™ã‚‹ã®ã«ãƒ¡ãƒ¢ãƒªãŒä¸è¶³ã—ã¾ã—ãŸ"
+
#: ../eval.c:6475
#, c-format
msgid "E720: Missing colon in Dictionary: %s"
@@ -721,7 +725,7 @@ msgstr "E117: 未知ã®é–¢æ•°ã§ã™: %s"
#: ../eval.c:7383
#, c-format
msgid "E119: Not enough arguments for function: %s"
-msgstr "E119: 関数ã®å¼•æ•°ãŒå°‘ãªéŽãŽã¾ã™: %s"
+msgstr "E119: 関数ã®å¼•æ•°ãŒè¶³ã‚Šã¾ã›ã‚“: %s"
#: ../eval.c:7387
#, c-format
@@ -826,18 +830,16 @@ msgid "sort() argument"
msgstr "sort() ã®å¼•æ•°"
#: ../eval.c:13721
-#, fuzzy
msgid "uniq() argument"
-msgstr "add() ã®å¼•æ•°"
+msgstr "uniq() ã®å¼•æ•°"
#: ../eval.c:13776
msgid "E702: Sort compare function failed"
msgstr "E702: ã‚½ãƒ¼ãƒˆã®æ¯”較関数ãŒå¤±æ•—ã—ã¾ã—ãŸ"
#: ../eval.c:13806
-#, fuzzy
msgid "E882: Uniq compare function failed"
-msgstr "E702: ã‚½ãƒ¼ãƒˆã®æ¯”較関数ãŒå¤±æ•—ã—ã¾ã—ãŸ"
+msgstr "E882: Uniq ã®æ¯”較関数ãŒå¤±æ•—ã—ã¾ã—ãŸ"
#: ../eval.c:14085
msgid "(Invalid)"
@@ -863,6 +865,18 @@ msgstr "E745: リスト型を数値ã¨ã—ã¦æ‰±ã£ã¦ã„ã¾ã™"
msgid "E728: Using a Dictionary as a Number"
msgstr "E728: 辞書型を数値ã¨ã—ã¦æ‰±ã£ã¦ã„ã¾ã™"
+msgid "E891: Using a Funcref as a Float"
+msgstr "E891: 関数å‚ç…§åž‹ã‚’æµ®å‹•å°æ•°ç‚¹æ•°ã¨ã—ã¦æ‰±ã£ã¦ã„ã¾ã™ã€‚"
+
+msgid "E892: Using a String as a Float"
+msgstr "E892: æ–‡å­—åˆ—ã‚’æµ®å‹•å°æ•°ç‚¹æ•°ã¨ã—ã¦æ‰±ã£ã¦ã„ã¾ã™"
+
+msgid "E893: Using a List as a Float"
+msgstr "E893: ãƒªã‚¹ãƒˆåž‹ã‚’æµ®å‹•å°æ•°ç‚¹æ•°ã¨ã—ã¦æ‰±ã£ã¦ã„ã¾ã™"
+
+msgid "E894: Using a Dictionary as a Float"
+msgstr "E894: è¾žæ›¸åž‹ã‚’æµ®å‹•å°æ•°ç‚¹æ•°ã¨ã—ã¦æ‰±ã£ã¦ã„ã¾ã™"
+
#: ../eval.c:16259
msgid "E729: using Funcref as a String"
msgstr "E729: 関数å‚照型を文字列ã¨ã—ã¦æ‰±ã£ã¦ã„ã¾ã™"
@@ -961,14 +975,14 @@ msgid "E129: Function name required"
msgstr "E129: 関数åãŒè¦æ±‚ã•れã¾ã™"
#: ../eval.c:17824
-#, fuzzy, c-format
+#, c-format
msgid "E128: Function name must start with a capital or \"s:\": %s"
-msgstr "E128: 関数åã¯å¤§æ–‡å­—ã§å§‹ã¾ã‚‹ã‹ã‚³ãƒ­ãƒ³ã‚’å«ã¾ãªã‘れã°ãªã‚Šã¾ã›ã‚“: %s"
+msgstr "E128: 関数åã¯å¤§æ–‡å­—ã‹ \"s:\" ã§å§‹ã¾ã‚‰ãªã‘れã°ãªã‚Šã¾ã›ã‚“: %s"
#: ../eval.c:17833
-#, fuzzy, c-format
+#, c-format
msgid "E884: Function name cannot contain a colon: %s"
-msgstr "E128: 関数åã¯å¤§æ–‡å­—ã§å§‹ã¾ã‚‹ã‹ã‚³ãƒ­ãƒ³ã‚’å«ã¾ãªã‘れã°ãªã‚Šã¾ã›ã‚“: %s"
+msgstr "E884: 関数åã«ã¯ã‚³ãƒ­ãƒ³ã¯å«ã‚られã¾ã›ã‚“: %s"
#: ../eval.c:18336
#, c-format
@@ -1081,7 +1095,7 @@ msgstr "E136: viminfo: エラーãŒå¤šéŽãŽã‚‹ã®ã§, 以é™ã¯ã‚¹ã‚­ãƒƒãƒ—ã—ã
#: ../ex_cmds.c:1458
#, c-format
msgid "Reading viminfo file \"%s\"%s%s%s"
-msgstr "viminfoファイル \"%s\"%s%s%s を読込ã¿ä¸­ "
+msgstr "viminfoファイル \"%s\"%s%s%s を読込ã¿ä¸­"
#: ../ex_cmds.c:1460
msgid " info"
@@ -1357,6 +1371,10 @@ msgstr "E158: 無効ãªãƒãƒƒãƒ•ã‚¡åã§ã™: %s"
msgid "E157: Invalid sign ID: %<PRId64>"
msgstr "E157: 無効ãªsign識別å­ã§ã™: %<PRId64>"
+#, c-format
+msgid "E885: Not possible to change sign %s"
+msgstr "E885: 変更ã§ããªã„ sign ã§ã™: %s"
+
#: ../ex_cmds.c:6066
msgid " (not supported)"
msgstr " (éžã‚µãƒãƒ¼ãƒˆ)"
@@ -1379,6 +1397,13 @@ msgstr "行 %<PRId64>: %s"
msgid "cmd: %s"
msgstr "コマンド: %s"
+msgid "frame is zero"
+msgstr "フレーム㌠0 ã§ã™"
+
+#, c-format
+msgid "frame at highest level: %d"
+msgstr "最高レベルã®ãƒ•レーム: %d"
+
#: ../ex_cmds2.c:322
#, c-format
msgid "Breakpoint in \"%s%s\" line %<PRId64>"
@@ -1528,7 +1553,8 @@ msgstr "E197: 言語を \"%s\" ã«è¨­å®šã§ãã¾ã›ã‚“"
#. don't wait for return
#: ../ex_docmd.c:387
msgid "Entering Ex mode. Type \"visual\" to go to Normal mode."
-msgstr "Exモードã«å…¥ã‚Šã¾ã™. ãƒŽãƒ¼ãƒžãƒ«ã«æˆ»ã‚‹ã«ã¯\"visual\"ã¨å…¥åŠ›ã—ã¦ãã ã•ã„."
+msgstr ""
+"Exモードã«å…¥ã‚Šã¾ã™. ãƒŽãƒ¼ãƒžãƒ«ãƒ¢ãƒ¼ãƒ‰ã«æˆ»ã‚‹ã«ã¯\"visual\"ã¨å…¥åŠ›ã—ã¦ãã ã•ã„."
#: ../ex_docmd.c:428
msgid "E501: At end-of-file"
@@ -1553,7 +1579,7 @@ msgstr "é–¢æ•°ã®æœ€å¾Œã§ã™"
#: ../ex_docmd.c:1628
msgid "E464: Ambiguous use of user-defined command"
-msgstr "E464: ユーザ定義コマンドã®ã‚ã„ã¾ã„ãªä½¿ç”¨ã§ã™"
+msgstr "E464: ユーザー定義コマンドã®ã‚ã„ã¾ã„ãªä½¿ç”¨ã§ã™"
#: ../ex_docmd.c:1638
msgid "E492: Not an editor command"
@@ -1606,14 +1632,14 @@ msgstr "E174: ã‚³ãƒžãƒ³ãƒ‰ãŒæ—¢ã«ã‚りã¾ã™: å†å®šç¾©ã™ã‚‹ã«ã¯ ! を追å
#: ../ex_docmd.c:4432
msgid ""
"\n"
-" Name Args Range Complete Definition"
+" Name Args Address Complete Definition"
msgstr ""
"\n"
-" åå‰ å¼•æ•° 範囲 補完 定義"
+" åå‰ å¼•æ•° アドレス 補完 定義"
#: ../ex_docmd.c:4516
msgid "No user-defined commands found"
-msgstr "ユーザ定義コマンドãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“ã§ã—ãŸ"
+msgstr "ユーザー定義コマンドãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“ã§ã—ãŸ"
#: ../ex_docmd.c:4538
msgid "E175: No attribute specified"
@@ -1633,7 +1659,10 @@ msgstr "E178: カウントã®çœç•¥å€¤ãŒç„¡åйã§ã™"
#: ../ex_docmd.c:4625
msgid "E179: argument required for -complete"
-msgstr "E179: -補完ã®ãŸã‚ã®å¼•æ•°ãŒå¿…è¦ã§ã™"
+msgstr "E179: -complete ã«ã¯å¼•æ•°ãŒå¿…è¦ã§ã™"
+
+msgid "E179: argument required for -addr"
+msgstr "E179: -addr ã«ã¯å¼•æ•°ãŒå¿…è¦ã§ã™"
#: ../ex_docmd.c:4635
#, c-format
@@ -1646,16 +1675,20 @@ msgstr "E182: 無効ãªã‚³ãƒžãƒ³ãƒ‰åã§ã™"
#: ../ex_docmd.c:4691
msgid "E183: User defined commands must start with an uppercase letter"
-msgstr "E183: ユーザ定義コマンドã¯è‹±å¤§æ–‡å­—ã§å§‹ã¾ã‚‰ãªã‘れã°ãªã‚Šã¾ã›ã‚“"
+msgstr "E183: ユーザー定義コマンドã¯è‹±å¤§æ–‡å­—ã§å§‹ã¾ã‚‰ãªã‘れã°ãªã‚Šã¾ã›ã‚“"
#: ../ex_docmd.c:4696
msgid "E841: Reserved name, cannot be used for user defined command"
-msgstr "E841: 予約åãªã®ã§, ユーザ定義コマンドã«åˆ©ç”¨ã§ãã¾ã›ã‚“"
+msgstr "E841: 予約åãªã®ã§, ユーザー定義コマンドã«åˆ©ç”¨ã§ãã¾ã›ã‚“"
#: ../ex_docmd.c:4751
#, c-format
msgid "E184: No such user-defined command: %s"
-msgstr "E184: ãã®ãƒ¦ãƒ¼ã‚¶å®šç¾©ã‚³ãƒžãƒ³ãƒ‰ã¯ã‚りã¾ã›ã‚“: %s"
+msgstr "E184: ãã®ãƒ¦ãƒ¼ã‚¶ãƒ¼å®šç¾©ã‚³ãƒžãƒ³ãƒ‰ã¯ã‚りã¾ã›ã‚“: %s"
+
+#, c-format
+msgid "E180: Invalid address type value: %s"
+msgstr "E180: 無効ãªã‚¢ãƒ‰ãƒ¬ã‚¹ã‚¿ã‚¤ãƒ—値ã§ã™: %s"
#: ../ex_docmd.c:5219
#, c-format
@@ -2019,11 +2052,11 @@ msgstr "䏿­£ãªãƒ•ァイルå"
#: ../fileio.c:395 ../fileio.c:476 ../fileio.c:2543 ../fileio.c:2578
msgid "is a directory"
-msgstr " ã¯ãƒ‡ã‚£ãƒ¬ã‚¯ãƒˆãƒªã§ã™"
+msgstr "ã¯ãƒ‡ã‚£ãƒ¬ã‚¯ãƒˆãƒªã§ã™"
#: ../fileio.c:397
msgid "is not a file"
-msgstr " ã¯ãƒ•ァイルã§ã¯ã‚りã¾ã›ã‚“"
+msgstr "ã¯ãƒ•ァイルã§ã¯ã‚りã¾ã›ã‚“"
#: ../fileio.c:508 ../fileio.c:3522
msgid "[New File]"
@@ -2039,7 +2072,7 @@ msgstr "[ファイルéŽå¤§]"
#: ../fileio.c:534
msgid "[Permission Denied]"
-msgstr "[èªå¯ãŒã‚りã¾ã›ã‚“]"
+msgstr "[権é™ãŒã‚りã¾ã›ã‚“]"
#: ../fileio.c:653
msgid "E200: *ReadPre autocommands made the file unreadable"
@@ -2210,7 +2243,7 @@ msgstr " 変æ›ã‚¨ãƒ©ãƒ¼"
#: ../fileio.c:3509
#, c-format
msgid " in line %<PRId64>;"
-msgstr "行 %<PRId64>;"
+msgstr " 行 %<PRId64>;"
#: ../fileio.c:3519
msgid "[Device]"
@@ -2766,9 +2799,8 @@ msgid "E37: No write since last change (add ! to override)"
msgstr "E37: 最後ã®å¤‰æ›´ãŒä¿å­˜ã•れã¦ã„ã¾ã›ã‚“ (! を追加ã§å¤‰æ›´ã‚’破棄)"
#: ../globals.h:1055
-#, fuzzy
msgid "E37: No write since last change"
-msgstr "[最後ã®å¤‰æ›´ãŒä¿å­˜ã•れã¦ã„ã¾ã›ã‚“]\n"
+msgstr "E37: 最後ã®å¤‰æ›´ãŒä¿å­˜ã•れã¦ã„ã¾ã›ã‚“"
#: ../globals.h:1056
msgid "E38: Null argument"
@@ -2810,7 +2842,7 @@ msgstr "E42: エラーã¯ã‚りã¾ã›ã‚“"
#: ../globals.h:1067
msgid "E776: No location list"
-msgstr "E776: 場所リストã¯ã‚りã¾ã›ã‚“"
+msgstr "E776: ロケーションリストã¯ã‚りã¾ã›ã‚“"
#: ../globals.h:1068
msgid "E43: Damaged match string"
@@ -3831,7 +3863,7 @@ msgid ""
"\n"
msgstr ""
"\n"
-"ãれã‹ã‚‰.swpファイルを削除ã—ã¦ãã ã•ã„\n"
+"å…ƒã®.swpファイルã¯å‰Šé™¤ã—ã¦ã‚‚æ§‹ã„ã¾ã›ã‚“\n"
"\n"
#. use msg() to start the scrolling properly
@@ -3845,7 +3877,7 @@ msgstr " ç¾åœ¨ã®ãƒ‡ã‚£ãƒ¬ã‚¯ãƒˆãƒª:\n"
#: ../memline.c:1448
msgid " Using specified name:\n"
-msgstr " ã‚ã‚‹åå‰ã‚’使用中:\n"
+msgstr " 以下ã®åå‰ã‚’使用中:\n"
#: ../memline.c:1450
msgid " In directory "
@@ -3901,7 +3933,7 @@ msgid ""
" user name: "
msgstr ""
"\n"
-" ユーザå: "
+" ユーザーå: "
#: ../memline.c:1568
msgid " host name: "
@@ -4050,12 +4082,12 @@ msgid ""
msgstr ""
"\n"
"(1) 別ã®ãƒ—ログラムãŒåŒã˜ãƒ•ァイルを編集ã—ã¦ã„ã‚‹ã‹ã‚‚ã—れã¾ã›ã‚“.\n"
-" ã“ã®å ´åˆã«ã¯, 変更をã—ãŸéš›ã«æœ€çµ‚çš„ã«, åŒã˜ãƒ•ァイルã®ç•°ãªã‚‹\n"
-" 2ã¤ã®ã‚¤ãƒ³ã‚¹ã‚¿ãƒ³ã‚¹ãŒã§ãã¦ã—ã¾ã†ã“ã¨ã«æ³¨æ„ã—ã¦ãã ã•ã„."
+" ã“ã®å ´åˆã«ã¯, 変更をã—ã¦ã—ã¾ã†ã¨1ã¤ã®ãƒ•ァイルã«å¯¾ã—ã¦ç•°ãªã‚‹2ã¤ã®\n"
+" インスタンスãŒã§ãã¦ã—ã¾ã†ã®ã§, ãã†ã—ãªã„よã†ã«æ°—ã‚’ã¤ã‘ã¦ãã ã•ã„."
#: ../memline.c:3245
msgid " Quit, or continue with caution.\n"
-msgstr " 終了ã™ã‚‹ã‹, 注æ„ã—ãªãŒã‚‰ç¶šã‘ã¦ãã ã•ã„.\n"
+msgstr " 終了ã™ã‚‹ã‹, 注æ„ã—ãªãŒã‚‰ç¶šã‘ã¦ãã ã•ã„.\n"
#: ../memline.c:3246
msgid "(2) An edit session for this file crashed.\n"
@@ -4479,6 +4511,11 @@ msgstr ""
msgid "E574: Unknown register type %d"
msgstr "E574: 未知ã®ãƒ¬ã‚¸ã‚¹ã‚¿åž‹ %d ã§ã™"
+msgid ""
+"E883: search pattern and expression register may not contain two or more "
+"lines"
+msgstr "E883: 検索パターンã¨å¼ãƒ¬ã‚¸ã‚¹ã‚¿ã«ã¯2行以上をå«ã‚られã¾ã›ã‚“"
+
#: ../ops.c:5089
#, c-format
msgid "%<PRId64> Cols; "
@@ -4563,6 +4600,10 @@ msgstr "E522: termcap 内ã«è¦‹ã¤ã‹ã‚Šã¾ã›ã‚“"
msgid "E539: Illegal character <%s>"
msgstr "E539: 䏿­£ãªæ–‡å­—ã§ã™ <%s>"
+#, c-format
+msgid "For option %s"
+msgstr "オプション: %s"
+
#: ../option.c:3862
msgid "E529: Cannot set 'term' to empty string"
msgstr "E529: 'term' ã«ã¯ç©ºæ–‡å­—列を設定ã§ãã¾ã›ã‚“"
@@ -4740,6 +4781,14 @@ msgstr ""
"\n"
"セキュリティコンテキストを設定ã§ãã¾ã›ã‚“ "
+#, c-format
+msgid "Could not set security context %s for %s"
+msgstr "セキュリティコンテキスト %s ã‚’ %s ã«è¨­å®šã§ãã¾ã›ã‚“"
+
+#, c-format
+msgid "Could not get security context %s for %s. Removing it!"
+msgstr "セキュリティコンテキスト %s ã‚’ %s ã‹ã‚‰å–å¾—ã§ãã¾ã›ã‚“. 削除ã—ã¾ã™!"
+
#: ../os_unix.c:1558 ../os_unix.c:1647
#, c-format
msgid "dlerror = \"%s\""
@@ -4960,6 +5009,10 @@ msgstr "E554: %s{...} å†…ã«æ–‡æ³•エラーãŒã‚りã¾ã™"
msgid "External submatches:\n"
msgstr "外部ã®éƒ¨åˆ†è©²å½“:\n"
+#, c-format
+msgid "E888: (NFA regexp) cannot repeat %s"
+msgstr "E888: (NFA æ­£è¦è¡¨ç¾) 繰り返ã›ã¾ã›ã‚“ %s"
+
#: ../regexp.c:7022
msgid ""
"E864: \\%#= can only be followed by 0, 1, or 2. The automatic engine will be "
@@ -4968,6 +5021,9 @@ msgstr ""
"E864: \\%#= ã«ã¯ 0, 1 ã‚‚ã—ã㯠2 ã®ã¿ãŒç¶šã‘られã¾ã™ã€‚æ­£è¦è¡¨ç¾ã‚¨ãƒ³ã‚¸ãƒ³ã¯è‡ªå‹•é¸"
"択ã•れã¾ã™ã€‚"
+msgid "Switching to backtracking RE engine for pattern: "
+msgstr "次ã®ãƒ‘ターンã«ãƒãƒƒã‚¯ãƒˆãƒ©ãƒƒã‚­ãƒ³ã‚° RE エンジンをé©ç”¨ã—ã¾ã™: "
+
#: ../regexp_nfa.c:239
msgid "E865: (NFA) Regexp end encountered prematurely"
msgstr "E865: (NFA) æœŸå¾…ã‚ˆã‚Šæ—©ãæ­£è¦è¡¨ç¾ã®çµ‚端ã«åˆ°é”ã—ã¾ã—ãŸ"
@@ -4980,7 +5036,7 @@ msgstr "E866: (NFA æ­£è¦è¡¨ç¾) ä½ç½®ãŒèª¤ã£ã¦ã„ã¾ã™: %c"
#: ../regexp_nfa.c:242
#, c-format
msgid "E877: (NFA regexp) Invalid character class: %<PRId64>"
-msgstr ""
+msgstr "E877: (NFA æ­£è¦è¡¨ç¾) ç„¡åŠ¹ãªæ–‡å­—クラス: %<PRId64>"
#: ../regexp_nfa.c:1261
#, c-format
@@ -5590,12 +5646,12 @@ msgid "E765: 'spellfile' does not have %<PRId64> entries"
msgstr "E765: 'spellfile' ã«ã¯ %<PRId64> 個ã®ã‚¨ãƒ³ãƒˆãƒªã¯ã‚りã¾ã›ã‚“"
#: ../spell.c:8074
-#, fuzzy, c-format
+#, c-format
msgid "Word '%.*s' removed from %s"
-msgstr "%s ã‹ã‚‰å˜èªžãŒå‰Šé™¤ã•れã¾ã—ãŸ"
+msgstr "å˜èªž '%.*s' ㌠%s ã‹ã‚‰å‰Šé™¤ã•れã¾ã—ãŸ"
#: ../spell.c:8117
-#, fuzzy, c-format
+#, c-format
msgid "Word '%.*s' added to %s"
msgstr "%s ã«å˜èªžãŒè¿½åŠ ã•れã¾ã—ãŸ"
@@ -5673,6 +5729,9 @@ msgstr "ã“ã®ãƒãƒƒãƒ•ã‚¡ã«å®šç¾©ã•ã‚ŒãŸæ§‹æ–‡è¦ç´ ã¯ã‚りã¾ã›ã‚“"
msgid "E390: Illegal argument: %s"
msgstr "E390: 䏿­£ãªå¼•æ•°ã§ã™: %s"
+msgid "syntax iskeyword "
+msgstr "シンタックス用 iskeyword "
+
#: ../syntax.c:3299
#, c-format
msgid "E391: No such syntax cluster: %s"
@@ -5769,6 +5828,10 @@ msgstr "E847: æ§‹æ–‡ã®å–り込ã¿(include)ãŒå¤šéŽãŽã¾ã™"
msgid "E789: Missing ']': %s"
msgstr "E789: ']' ãŒã‚りã¾ã›ã‚“: %s"
+#, c-format
+msgid "E890: trailing char after ']': %s]%s"
+msgstr "E890: ']' ã®å¾Œã‚ã«ä½™åˆ†ãªæ–‡å­—ãŒã‚りã¾ã™: %s]%s"
+
#: ../syntax.c:4531
#, c-format
msgid "E398: Missing '=': %s"
@@ -5874,7 +5937,7 @@ msgstr "E415: 予期ã›ã¬ç­‰å·ã§ã™: %s"
#: ../syntax.c:6395
#, c-format
msgid "E416: missing equal sign: %s"
-msgstr "E416: ç­‰å·ãŒãŒã‚りã¾ã›ã‚“: %s"
+msgstr "E416: ç­‰å·ãŒã‚りã¾ã›ã‚“: %s"
#: ../syntax.c:6418
#, c-format
@@ -6078,9 +6141,8 @@ msgstr "Vim: 入力を読込ã¿ä¸­ã®ã‚¨ãƒ©ãƒ¼ã«ã‚ˆã‚Šçµ‚了ã—ã¾ã™...\n"
#. This happens when the FileChangedRO autocommand changes the
#. * file in a way it becomes shorter.
#: ../undo.c:379
-#, fuzzy
msgid "E881: Line count changed unexpectedly"
-msgstr "E834: 予期ã›ãšè¡Œã‚«ã‚¦ãƒ³ãƒˆãŒå¤‰ã‚りã¾ã—ãŸ"
+msgstr "E881: 予期ã›ãšè¡Œã‚«ã‚¦ãƒ³ãƒˆãŒå¤‰ã‚りã¾ã—ãŸ"
#: ../undo.c:627
#, c-format
@@ -6287,23 +6349,23 @@ msgstr " システム vimrc: \""
#: ../version.c:672
msgid " user vimrc file: \""
-msgstr " ユーザ vimrc: \""
+msgstr " ユーザー vimrc: \""
#: ../version.c:677
msgid " 2nd user vimrc file: \""
-msgstr " 第2ユーザ vimrc: \""
+msgstr " 第2ユーザー vimrc: \""
#: ../version.c:682
msgid " 3rd user vimrc file: \""
-msgstr " 第3ユーザ vimrc: \""
+msgstr " 第3ユーザー vimrc: \""
#: ../version.c:687
msgid " user exrc file: \""
-msgstr " ユーザ exrc: \""
+msgstr " ユーザー exrc: \""
#: ../version.c:692
msgid " 2nd user exrc file: \""
-msgstr " 第2ユーザ exrc: \""
+msgstr " 第2ユーザー exrc: \""
#: ../version.c:699
msgid " fall-back for $VIM: \""
@@ -6379,7 +6441,7 @@ msgstr "Vimã®é–‹ç™ºã‚’応æ´ã—ã¦ãã ã•ã„!"
#: ../version.c:828
msgid "Become a registered Vim user!"
-msgstr "Vimã®ç™»éŒ²ãƒ¦ãƒ¼ã‚¶ã«ãªã£ã¦ãã ã•ã„!"
+msgstr "Vimã®ç™»éŒ²ãƒ¦ãƒ¼ã‚¶ãƒ¼ã«ãªã£ã¦ãã ã•ã„!"
#: ../version.c:831
msgid "type :help sponsor<Enter> for information "
@@ -6391,7 +6453,7 @@ msgstr "è©³ç´°ãªæƒ…報㯠:help register<Enter> "
#: ../version.c:834
msgid "menu Help->Sponsor/Register for information "
-msgstr "詳細ã¯ãƒ¡ãƒ‹ãƒ¥ãƒ¼ã® ヘルプ→スãƒãƒ³ã‚µãƒ¼/登録 ã‚’å‚ç…§ã—ã¦ä¸‹ã•ã„ "
+msgstr "詳細ã¯ãƒ¡ãƒ‹ãƒ¥ãƒ¼ã® ヘルプ->スãƒãƒ³ã‚µãƒ¼/登録 ã‚’å‚ç…§ã—ã¦ä¸‹ã•ã„"
#: ../window.c:119
msgid "Already only one window"
@@ -6429,6 +6491,9 @@ msgstr "E445: ä»–ã®ã‚¦ã‚£ãƒ³ãƒ‰ã‚¦ã«ã¯å¤‰æ›´ãŒã‚りã¾ã™"
msgid "E446: No file name under cursor"
msgstr "E446: カーソルã®ä¸‹ã«ãƒ•ァイルåãŒã‚りã¾ã›ã‚“"
+msgid "List or number required"
+msgstr "ãƒªã‚¹ãƒˆã‹æ•°å€¤ãŒå¿…è¦ã§ã™"
+
#~ msgid "E831: bf_key_init() called with empty password"
#~ msgstr "E831: bf_key_init() ãŒç©ºãƒ‘スワードã§å‘¼ã³å‡ºã•れã¾ã—ãŸ"
diff --git a/src/nvim/po/ja.sjis.po b/src/nvim/po/ja.sjis.po
index 7dac89e172..16a5d2ce36 100644
--- a/src/nvim/po/ja.sjis.po
+++ b/src/nvim/po/ja.sjis.po
@@ -1,11 +1,11 @@
-# Japanese translation for Vim vim:set foldmethod=marker:
+# Japanese translation for Vim
#
# Do ":help uganda" in Vim to read copying and usage conditions.
# Do ":help credits" in Vim to see a list of people who contributed.
#
-# Last Change: 2013 Jul 06
+# Copyright (C) 2001-2016 MURAOKA Taro <koron.kaoriya@gmail.com>,
+# vim-jp (http://vim-jp.org/)
#
-# Copyright (C) 2001-13 MURAOKA Taro <koron.kaoriya@gmail.com>
# THIS FILE IS DISTRIBUTED UNDER THE VIM LICENSE.
#
# Original translations.
@@ -14,10 +14,10 @@ msgid ""
msgstr ""
"Project-Id-Version: Vim 7.4\n"
"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2014-05-26 14:21+0200\n"
-"PO-Revision-Date: 2013-07-06 15:00+0900\n"
+"POT-Creation-Date: 2016-02-01 09:02+0900\n"
+"PO-Revision-Date: 2016-02-01 09:08+0900\n"
"Last-Translator: MURAOKA Taro <koron.kaoriya@gmail.com>\n"
-"Language-Team: MURAOKA Taro <koron.kaoriya@gmail.com>\n"
+"Language-Team: vim-jpj (https://github.com/vim-jp/lang-ja)\n"
"Language: Japanese\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=cp932\n"
@@ -34,7 +34,7 @@ msgstr "“à•”ƒGƒ‰[: –¢’m‚̃IƒvƒVƒ‡ƒ“Œ^‚Å‚·"
#: ../buffer.c:92
msgid "[Location List]"
-msgstr "[êŠƒŠƒXƒg]"
+msgstr "[ƒƒP[ƒVƒ‡ƒ“ƒŠƒXƒg]"
#: ../buffer.c:93
msgid "[Quickfix List]"
@@ -277,7 +277,7 @@ msgstr "E810: ˆêŽžƒtƒ@ƒCƒ‹‚̓Ǟ‚à‚µ‚­‚Í‘ž‚ª‚Å‚«‚Ü‚¹‚ñ"
#: ../diff.c:755
msgid "E97: Cannot create diffs"
-msgstr "E97: ·•ª‚ð쬂ł«‚Ü‚¹‚ñ "
+msgstr "E97: ·•ª‚ð쬂ł«‚Ü‚¹‚ñ"
#: ../diff.c:966
msgid "E816: Cannot read patch output"
@@ -293,7 +293,7 @@ msgstr "E99: Œ»Ý‚̃oƒbƒtƒ@‚Í·•ªƒ‚[ƒh‚ł͂ ‚è‚Ü‚¹‚ñ"
#: ../diff.c:2100
msgid "E793: No other buffer in diff mode is modifiable"
-msgstr "E793: ·•ªƒ‚[ƒh‚Å‚ ‚鑼‚̃oƒbƒtƒ@‚Í•ÏX‰Â”\\‚Å‚·"
+msgstr "E793: ·•ªƒ‚[ƒh‚Å‚ ‚鑼‚̃oƒbƒtƒ@‚Í•ÏX‚Å‚«‚Ü‚¹‚ñ"
#: ../diff.c:2102
msgid "E100: No other buffer in diff mode"
@@ -349,7 +349,7 @@ msgstr " s(‘S‘Ì)•⊮ (^L^N^P)"
#: ../edit.c:86
msgid " File name completion (^F^N^P)"
-msgstr "ƒtƒ@ƒCƒ‹–¼•⊮ (^F^N^P)"
+msgstr " ƒtƒ@ƒCƒ‹–¼•⊮ (^F^N^P)"
#: ../edit.c:87
msgid " Tag completion (^]^N^P)"
@@ -377,7 +377,7 @@ msgstr " ƒRƒ}ƒ“ƒhƒ‰ƒCƒ“•⊮ (^V^N^P)"
#: ../edit.c:94
msgid " User defined completion (^U^N^P)"
-msgstr " ƒ†[ƒU’è‹`•⊮ (^U^N^P)"
+msgstr " ƒ†[ƒU[’è‹`•⊮ (^U^N^P)"
#: ../edit.c:95
msgid " Omni completion (^O^N^P)"
@@ -679,6 +679,10 @@ msgstr "E696: ƒŠƒXƒgŒ^‚ɃJƒ“ƒ}‚ª‚ ‚è‚Ü‚¹‚ñ: %s"
msgid "E697: Missing end of List ']': %s"
msgstr "E697: ƒŠƒXƒgŒ^‚ÌÅŒã‚É ']' ‚ª‚ ‚è‚Ü‚¹‚ñ: %s"
+msgid "Not enough memory to set references, garbage collection aborted!"
+msgstr ""
+"ƒK[ƒxƒbƒWƒRƒŒƒNƒVƒ‡ƒ“‚𒆎~‚µ‚Ü‚µ‚½! ŽQÆ‚ð쬂·‚é‚̂Ƀƒ‚ƒŠ‚ª•s‘«‚µ‚Ü‚µ‚½"
+
#: ../eval.c:6475
#, c-format
msgid "E720: Missing colon in Dictionary: %s"
@@ -721,7 +725,7 @@ msgstr "E117: –¢’m‚ÌŠÖ”‚Å‚·: %s"
#: ../eval.c:7383
#, c-format
msgid "E119: Not enough arguments for function: %s"
-msgstr "E119: ŠÖ”‚̈ø”‚ª­‚ȉ߂¬‚Ü‚·: %s"
+msgstr "E119: ŠÖ”‚̈ø”‚ª‘«‚è‚Ü‚¹‚ñ: %s"
#: ../eval.c:7387
#, c-format
@@ -826,18 +830,16 @@ msgid "sort() argument"
msgstr "sort() ‚̈ø”"
#: ../eval.c:13721
-#, fuzzy
msgid "uniq() argument"
-msgstr "add() ‚̈ø”"
+msgstr "uniq() ‚̈ø”"
#: ../eval.c:13776
msgid "E702: Sort compare function failed"
msgstr "E702: ƒ\\[ƒg‚Ì”äŠrŠÖ”‚ªŽ¸”s‚µ‚Ü‚µ‚½"
#: ../eval.c:13806
-#, fuzzy
msgid "E882: Uniq compare function failed"
-msgstr "E702: ƒ\\[ƒg‚Ì”äŠrŠÖ”‚ªŽ¸”s‚µ‚Ü‚µ‚½"
+msgstr "E882: Uniq ‚Ì”äŠrŠÖ”‚ªŽ¸”s‚µ‚Ü‚µ‚½"
#: ../eval.c:14085
msgid "(Invalid)"
@@ -863,6 +865,18 @@ msgstr "E745: ƒŠƒXƒgŒ^‚ð”’l‚Æ‚µ‚Ĉµ‚Á‚Ä‚¢‚Ü‚·"
msgid "E728: Using a Dictionary as a Number"
msgstr "E728: Ž«‘Œ^‚ð”’l‚Æ‚µ‚Ĉµ‚Á‚Ä‚¢‚Ü‚·"
+msgid "E891: Using a Funcref as a Float"
+msgstr "E891: ŠÖ”ŽQÆŒ^‚ð•‚“®¬”“_”‚Æ‚µ‚Ĉµ‚Á‚Ä‚¢‚Ü‚·B"
+
+msgid "E892: Using a String as a Float"
+msgstr "E892: •¶Žš—ñ‚ð•‚“®¬”“_”‚Æ‚µ‚Ĉµ‚Á‚Ä‚¢‚Ü‚·"
+
+msgid "E893: Using a List as a Float"
+msgstr "E893: ƒŠƒXƒgŒ^‚ð•‚“®¬”“_”‚Æ‚µ‚Ĉµ‚Á‚Ä‚¢‚Ü‚·"
+
+msgid "E894: Using a Dictionary as a Float"
+msgstr "E894: Ž«‘Œ^‚ð•‚“®¬”“_”‚Æ‚µ‚Ĉµ‚Á‚Ä‚¢‚Ü‚·"
+
#: ../eval.c:16259
msgid "E729: using Funcref as a String"
msgstr "E729: ŠÖ”ŽQÆŒ^‚ð•¶Žš—ñ‚Æ‚µ‚Ĉµ‚Á‚Ä‚¢‚Ü‚·"
@@ -961,14 +975,14 @@ msgid "E129: Function name required"
msgstr "E129: ŠÖ”–¼‚ª—v‹‚³‚ê‚Ü‚·"
#: ../eval.c:17824
-#, fuzzy, c-format
+#, c-format
msgid "E128: Function name must start with a capital or \"s:\": %s"
-msgstr "E128: ŠÖ”–¼‚Í‘å•¶Žš‚ÅŽn‚܂邩ƒRƒƒ“‚ðŠÜ‚܂Ȃ¯‚ê‚΂Ȃè‚Ü‚¹‚ñ: %s"
+msgstr "E128: ŠÖ”–¼‚Í‘å•¶Žš‚© \"s:\" ‚ÅŽn‚Ü‚ç‚È‚¯‚ê‚΂Ȃè‚Ü‚¹‚ñ: %s"
#: ../eval.c:17833
-#, fuzzy, c-format
+#, c-format
msgid "E884: Function name cannot contain a colon: %s"
-msgstr "E128: ŠÖ”–¼‚Í‘å•¶Žš‚ÅŽn‚܂邩ƒRƒƒ“‚ðŠÜ‚܂Ȃ¯‚ê‚΂Ȃè‚Ü‚¹‚ñ: %s"
+msgstr "E884: ŠÖ”–¼‚ɂ̓Rƒƒ“‚͊܂߂ç‚ê‚Ü‚¹‚ñ: %s"
#: ../eval.c:18336
#, c-format
@@ -1081,7 +1095,7 @@ msgstr "E136: viminfo: ƒGƒ‰[‚ª‘½‰ß‚¬‚é‚Ì‚Å, ˆÈ~‚̓XƒLƒbƒv‚µ‚Ü‚·"
#: ../ex_cmds.c:1458
#, c-format
msgid "Reading viminfo file \"%s\"%s%s%s"
-msgstr "viminfoƒtƒ@ƒCƒ‹ \"%s\"%s%s%s ‚ð“Çž‚Ý’† "
+msgstr "viminfoƒtƒ@ƒCƒ‹ \"%s\"%s%s%s ‚ð“Çž‚Ý’†"
#: ../ex_cmds.c:1460
msgid " info"
@@ -1357,6 +1371,10 @@ msgstr "E158: –³Œø‚ȃoƒbƒtƒ@–¼‚Å‚·: %s"
msgid "E157: Invalid sign ID: %<PRId64>"
msgstr "E157: –³Œø‚Èsignޝ•ÊŽq‚Å‚·: %<PRId64>"
+#, c-format
+msgid "E885: Not possible to change sign %s"
+msgstr "E885: •ÏX‚Å‚«‚È‚¢ sign ‚Å‚·: %s"
+
#: ../ex_cmds.c:6066
msgid " (not supported)"
msgstr " (”ñƒTƒ|[ƒg)"
@@ -1379,6 +1397,13 @@ msgstr "s %<PRId64>: %s"
msgid "cmd: %s"
msgstr "ƒRƒ}ƒ“ƒh: %s"
+msgid "frame is zero"
+msgstr "ƒtƒŒ[ƒ€‚ª 0 ‚Å‚·"
+
+#, c-format
+msgid "frame at highest level: %d"
+msgstr "Å‚ƒŒƒxƒ‹‚̃tƒŒ[ƒ€: %d"
+
#: ../ex_cmds2.c:322
#, c-format
msgid "Breakpoint in \"%s%s\" line %<PRId64>"
@@ -1528,7 +1553,8 @@ msgstr "E197: Œ¾Œê‚ð \"%s\" ‚ÉÝ’è‚Å‚«‚Ü‚¹‚ñ"
#. don't wait for return
#: ../ex_docmd.c:387
msgid "Entering Ex mode. Type \"visual\" to go to Normal mode."
-msgstr "Exƒ‚[ƒh‚É“ü‚è‚Ü‚·. ƒm[ƒ}ƒ‹‚É–ß‚é‚É‚Í\"visual\"‚Æ“ü—Í‚µ‚Ä‚­‚¾‚³‚¢."
+msgstr ""
+"Exƒ‚[ƒh‚É“ü‚è‚Ü‚·. ƒm[ƒ}ƒ‹ƒ‚[ƒh‚É–ß‚é‚É‚Í\"visual\"‚Æ“ü—Í‚µ‚Ä‚­‚¾‚³‚¢."
#: ../ex_docmd.c:428
msgid "E501: At end-of-file"
@@ -1553,7 +1579,7 @@ msgstr "ŠÖ”‚ÌÅŒã‚Å‚·"
#: ../ex_docmd.c:1628
msgid "E464: Ambiguous use of user-defined command"
-msgstr "E464: ƒ†[ƒU’è‹`ƒRƒ}ƒ“ƒh‚Ì‚ ‚¢‚Ü‚¢‚ÈŽg—p‚Å‚·"
+msgstr "E464: ƒ†[ƒU[’è‹`ƒRƒ}ƒ“ƒh‚Ì‚ ‚¢‚Ü‚¢‚ÈŽg—p‚Å‚·"
#: ../ex_docmd.c:1638
msgid "E492: Not an editor command"
@@ -1606,14 +1632,14 @@ msgstr "E174: ƒRƒ}ƒ“ƒh‚ªŠù‚É‚ ‚è‚Ü‚·: Ä’è‹`‚·‚é‚É‚Í ! ‚ð’ljÁ‚µ‚Ä‚­‚¾‚³‚¢"
#: ../ex_docmd.c:4432
msgid ""
"\n"
-" Name Args Range Complete Definition"
+" Name Args Address Complete Definition"
msgstr ""
"\n"
-" –¼‘O ˆø” ”ÍˆÍ •⊮ ’è‹`"
+" –¼‘O ˆø” ƒAƒhƒŒƒX •⊮ ’è‹`"
#: ../ex_docmd.c:4516
msgid "No user-defined commands found"
-msgstr "ƒ†[ƒU’è‹`ƒRƒ}ƒ“ƒh‚ªŒ©‚‚©‚è‚Ü‚¹‚ñ‚Å‚µ‚½"
+msgstr "ƒ†[ƒU[’è‹`ƒRƒ}ƒ“ƒh‚ªŒ©‚‚©‚è‚Ü‚¹‚ñ‚Å‚µ‚½"
#: ../ex_docmd.c:4538
msgid "E175: No attribute specified"
@@ -1633,7 +1659,10 @@ msgstr "E178: ƒJƒEƒ“ƒg‚ÌÈ—ª’l‚ª–³Œø‚Å‚·"
#: ../ex_docmd.c:4625
msgid "E179: argument required for -complete"
-msgstr "E179: -•⊮‚Ì‚½‚߂̈ø”‚ª•K—v‚Å‚·"
+msgstr "E179: -complete ‚ɂ͈ø”‚ª•K—v‚Å‚·"
+
+msgid "E179: argument required for -addr"
+msgstr "E179: -addr ‚ɂ͈ø”‚ª•K—v‚Å‚·"
#: ../ex_docmd.c:4635
#, c-format
@@ -1646,16 +1675,20 @@ msgstr "E182: –³Œø‚ȃRƒ}ƒ“ƒh–¼‚Å‚·"
#: ../ex_docmd.c:4691
msgid "E183: User defined commands must start with an uppercase letter"
-msgstr "E183: ƒ†[ƒU’è‹`ƒRƒ}ƒ“ƒh‚͉p‘å•¶Žš‚ÅŽn‚Ü‚ç‚È‚¯‚ê‚΂Ȃè‚Ü‚¹‚ñ"
+msgstr "E183: ƒ†[ƒU[’è‹`ƒRƒ}ƒ“ƒh‚͉p‘å•¶Žš‚ÅŽn‚Ü‚ç‚È‚¯‚ê‚΂Ȃè‚Ü‚¹‚ñ"
#: ../ex_docmd.c:4696
msgid "E841: Reserved name, cannot be used for user defined command"
-msgstr "E841: —\\–ñ–¼‚Ȃ̂Å, ƒ†[ƒU’è‹`ƒRƒ}ƒ“ƒh‚É—˜—p‚Å‚«‚Ü‚¹‚ñ"
+msgstr "E841: —\\–ñ–¼‚Ȃ̂Å, ƒ†[ƒU[’è‹`ƒRƒ}ƒ“ƒh‚É—˜—p‚Å‚«‚Ü‚¹‚ñ"
#: ../ex_docmd.c:4751
#, c-format
msgid "E184: No such user-defined command: %s"
-msgstr "E184: ‚»‚̃†[ƒU’è‹`ƒRƒ}ƒ“ƒh‚Í‚ ‚è‚Ü‚¹‚ñ: %s"
+msgstr "E184: ‚»‚̃†[ƒU[’è‹`ƒRƒ}ƒ“ƒh‚Í‚ ‚è‚Ü‚¹‚ñ: %s"
+
+#, c-format
+msgid "E180: Invalid address type value: %s"
+msgstr "E180: –³Œø‚ȃAƒhƒŒƒXƒ^ƒCƒv’l‚Å‚·: %s"
#: ../ex_docmd.c:5219
#, c-format
@@ -2019,11 +2052,11 @@ msgstr "•s³‚ȃtƒ@ƒCƒ‹–¼"
#: ../fileio.c:395 ../fileio.c:476 ../fileio.c:2543 ../fileio.c:2578
msgid "is a directory"
-msgstr " ‚̓fƒBƒŒƒNƒgƒŠ‚Å‚·"
+msgstr "‚̓fƒBƒŒƒNƒgƒŠ‚Å‚·"
#: ../fileio.c:397
msgid "is not a file"
-msgstr " ‚̓tƒ@ƒCƒ‹‚ł͂ ‚è‚Ü‚¹‚ñ"
+msgstr "‚̓tƒ@ƒCƒ‹‚ł͂ ‚è‚Ü‚¹‚ñ"
#: ../fileio.c:508 ../fileio.c:3522
msgid "[New File]"
@@ -2039,7 +2072,7 @@ msgstr "[ƒtƒ@ƒCƒ‹‰ß‘å]"
#: ../fileio.c:534
msgid "[Permission Denied]"
-msgstr "[”F‰Â‚ª‚ ‚è‚Ü‚¹‚ñ]"
+msgstr "[Œ ŒÀ‚ª‚ ‚è‚Ü‚¹‚ñ]"
#: ../fileio.c:653
msgid "E200: *ReadPre autocommands made the file unreadable"
@@ -2210,7 +2243,7 @@ msgstr " •ÏŠ·ƒGƒ‰["
#: ../fileio.c:3509
#, c-format
msgid " in line %<PRId64>;"
-msgstr "s %<PRId64>;"
+msgstr " s %<PRId64>;"
#: ../fileio.c:3519
msgid "[Device]"
@@ -2766,9 +2799,8 @@ msgid "E37: No write since last change (add ! to override)"
msgstr "E37: ÅŒã‚Ì•ÏX‚ª•Û‘¶‚³‚ê‚Ä‚¢‚Ü‚¹‚ñ (! ‚ð’ljÁ‚Å•ÏX‚ð”jŠü)"
#: ../globals.h:1055
-#, fuzzy
msgid "E37: No write since last change"
-msgstr "[ÅŒã‚Ì•ÏX‚ª•Û‘¶‚³‚ê‚Ä‚¢‚Ü‚¹‚ñ]\n"
+msgstr "E37: ÅŒã‚Ì•ÏX‚ª•Û‘¶‚³‚ê‚Ä‚¢‚Ü‚¹‚ñ"
#: ../globals.h:1056
msgid "E38: Null argument"
@@ -2810,7 +2842,7 @@ msgstr "E42: ƒGƒ‰[‚Í‚ ‚è‚Ü‚¹‚ñ"
#: ../globals.h:1067
msgid "E776: No location list"
-msgstr "E776: êŠƒŠƒXƒg‚Í‚ ‚è‚Ü‚¹‚ñ"
+msgstr "E776: ƒƒP[ƒVƒ‡ƒ“ƒŠƒXƒg‚Í‚ ‚è‚Ü‚¹‚ñ"
#: ../globals.h:1068
msgid "E43: Damaged match string"
@@ -2931,6 +2963,10 @@ msgstr "E363: ƒpƒ^[ƒ“‚ª 'maxmempattern' ˆÈã‚̃ƒ‚ƒŠ‚ðŽg—p‚µ‚Ü‚·"
msgid "E749: empty buffer"
msgstr "E749: ƒoƒbƒtƒ@‚ª‹ó‚Å‚·"
+#, c-format
+msgid "E86: Buffer %ld does not exist"
+msgstr "E86: ƒoƒbƒtƒ@ %ld ‚Í‚ ‚è‚Ü‚¹‚ñ"
+
#: ../globals.h:1108
msgid "E682: Invalid search pattern or delimiter"
msgstr "E682: ŒŸõƒpƒ^[ƒ“‚©‹æØ‚è‹L†‚ª•s³‚Å‚·"
@@ -3831,7 +3867,7 @@ msgid ""
"\n"
msgstr ""
"\n"
-"‚»‚ê‚©‚ç.swpƒtƒ@ƒCƒ‹‚ð휂µ‚Ä‚­‚¾‚³‚¢\n"
+"Œ³‚Ì.swpƒtƒ@ƒCƒ‹‚Í휂µ‚Ä‚à\\‚¢‚Ü‚¹‚ñ\n"
"\n"
#. use msg() to start the scrolling properly
@@ -3845,7 +3881,7 @@ msgstr " Œ»Ý‚̃fƒBƒŒƒNƒgƒŠ:\n"
#: ../memline.c:1448
msgid " Using specified name:\n"
-msgstr " ‚ ‚é–¼‘O‚ðŽg—p’†:\n"
+msgstr " ˆÈ‰º‚Ì–¼‘O‚ðŽg—p’†:\n"
#: ../memline.c:1450
msgid " In directory "
@@ -3901,7 +3937,7 @@ msgid ""
" user name: "
msgstr ""
"\n"
-" ƒ†[ƒU–¼: "
+" ƒ†[ƒU[–¼: "
#: ../memline.c:1568
msgid " host name: "
@@ -4050,12 +4086,12 @@ msgid ""
msgstr ""
"\n"
"(1) •ʂ̃vƒƒOƒ‰ƒ€‚ª“¯‚¶ƒtƒ@ƒCƒ‹‚ð•ÒW‚µ‚Ä‚¢‚é‚©‚à‚µ‚ê‚Ü‚¹‚ñ.\n"
-" ‚±‚ÌꇂɂÍ, •ÏX‚ð‚µ‚½Û‚ÉÅI“I‚É, “¯‚¶ƒtƒ@ƒCƒ‹‚̈قȂé\n"
-" 2‚‚̃Cƒ“ƒXƒ^ƒ“ƒX‚ª‚Å‚«‚Ä‚µ‚Ü‚¤‚±‚ƂɒˆÓ‚µ‚Ä‚­‚¾‚³‚¢."
+" ‚±‚ÌꇂɂÍ, •ÏX‚ð‚µ‚Ä‚µ‚Ü‚¤‚Æ1‚‚̃tƒ@ƒCƒ‹‚ɑ΂µ‚ĈقȂé2‚‚Ì\n"
+" ƒCƒ“ƒXƒ^ƒ“ƒX‚ª‚Å‚«‚Ä‚µ‚Ü‚¤‚Ì‚Å, ‚»‚¤‚µ‚È‚¢‚悤‚É‹C‚ð‚‚¯‚Ä‚­‚¾‚³‚¢."
#: ../memline.c:3245
msgid " Quit, or continue with caution.\n"
-msgstr " I—¹‚·‚é‚©, ’ˆÓ‚µ‚È‚ª‚瑱‚¯‚Ä‚­‚¾‚³‚¢.\n"
+msgstr " I—¹‚·‚é‚©, ’ˆÓ‚µ‚È‚ª‚瑱‚¯‚Ä‚­‚¾‚³‚¢.\n"
#: ../memline.c:3246
msgid "(2) An edit session for this file crashed.\n"
@@ -4479,6 +4515,11 @@ msgstr ""
msgid "E574: Unknown register type %d"
msgstr "E574: –¢’m‚̃ŒƒWƒXƒ^Œ^ %d ‚Å‚·"
+msgid ""
+"E883: search pattern and expression register may not contain two or more "
+"lines"
+msgstr "E883: ŒŸõƒpƒ^[ƒ“‚ÆŽ®ƒŒƒWƒXƒ^‚É‚Í2sˆÈã‚ðŠÜ‚ß‚ç‚ê‚Ü‚¹‚ñ"
+
#: ../ops.c:5089
#, c-format
msgid "%<PRId64> Cols; "
@@ -4563,6 +4604,10 @@ msgstr "E522: termcap “à‚ÉŒ©‚‚©‚è‚Ü‚¹‚ñ"
msgid "E539: Illegal character <%s>"
msgstr "E539: •s³‚È•¶Žš‚Å‚· <%s>"
+#, c-format
+msgid "For option %s"
+msgstr "ƒIƒvƒVƒ‡ƒ“: %s"
+
#: ../option.c:3862
msgid "E529: Cannot set 'term' to empty string"
msgstr "E529: 'term' ‚ɂ͋󕶎š—ñ‚ðÝ’è‚Å‚«‚Ü‚¹‚ñ"
@@ -4740,6 +4785,14 @@ msgstr ""
"\n"
"ƒZƒLƒ…ƒŠƒeƒBƒRƒ“ƒeƒLƒXƒg‚ðÝ’è‚Å‚«‚Ü‚¹‚ñ "
+#, c-format
+msgid "Could not set security context %s for %s"
+msgstr "ƒZƒLƒ…ƒŠƒeƒBƒRƒ“ƒeƒLƒXƒg %s ‚ð %s ‚ÉÝ’è‚Å‚«‚Ü‚¹‚ñ"
+
+#, c-format
+msgid "Could not get security context %s for %s. Removing it!"
+msgstr "ƒZƒLƒ…ƒŠƒeƒBƒRƒ“ƒeƒLƒXƒg %s ‚ð %s ‚©‚çŽæ“¾‚Å‚«‚Ü‚¹‚ñ. 휂µ‚Ü‚·!"
+
#: ../os_unix.c:1558 ../os_unix.c:1647
#, c-format
msgid "dlerror = \"%s\""
@@ -4960,6 +5013,10 @@ msgstr "E554: %s{...} “à‚É•¶–@ƒGƒ‰[‚ª‚ ‚è‚Ü‚·"
msgid "External submatches:\n"
msgstr "ŠO•”‚Ì•”•ªŠY“–:\n"
+#, c-format
+msgid "E888: (NFA regexp) cannot repeat %s"
+msgstr "E888: (NFA ³‹K•\\Œ») ŒJ‚è•Ô‚¹‚Ü‚¹‚ñ %s"
+
#: ../regexp.c:7022
msgid ""
"E864: \\%#= can only be followed by 0, 1, or 2. The automatic engine will be "
@@ -4968,7 +5025,9 @@ msgstr ""
"E864: \\%#= ‚É‚Í 0, 1 ‚à‚µ‚­‚Í 2 ‚݂̂ª‘±‚¯‚ç‚ê‚Ü‚·B³‹K•\\Œ»ƒGƒ“ƒWƒ“‚ÍŽ©“®‘I"
"‘ð‚³‚ê‚Ü‚·B"
-#: ../regexp_nfa.c:239
+msgid "Switching to backtracking RE engine for pattern: "
+msgstr "ŽŸ‚̃pƒ^[ƒ“‚ɃoƒbƒNƒgƒ‰ƒbƒLƒ“ƒO RE ƒGƒ“ƒWƒ“‚ð“K—p‚µ‚Ü‚·: "
+
msgid "E865: (NFA) Regexp end encountered prematurely"
msgstr "E865: (NFA) Šú‘Ò‚æ‚è‘‚­³‹K•\\Œ»‚ÌI’[‚É“ž’B‚µ‚Ü‚µ‚½"
@@ -4980,7 +5039,7 @@ msgstr "E866: (NFA ³‹K•\\Œ») ˆÊ’u‚ªŒë‚Á‚Ä‚¢‚Ü‚·: %c"
#: ../regexp_nfa.c:242
#, c-format
msgid "E877: (NFA regexp) Invalid character class: %<PRId64>"
-msgstr ""
+msgstr "E877: (NFA ³‹K•\\Œ») –³Œø‚È•¶ŽšƒNƒ‰ƒX: %<PRId64>"
#: ../regexp_nfa.c:1261
#, c-format
@@ -5590,14 +5649,14 @@ msgid "E765: 'spellfile' does not have %<PRId64> entries"
msgstr "E765: 'spellfile' ‚É‚Í %<PRId64> ŒÂ‚̃Gƒ“ƒgƒŠ‚Í‚ ‚è‚Ü‚¹‚ñ"
#: ../spell.c:8074
-#, fuzzy, c-format
+#, c-format
msgid "Word '%.*s' removed from %s"
-msgstr "%s ‚©‚ç’PŒê‚ªíœ‚³‚ê‚Ü‚µ‚½"
+msgstr "’PŒê '%.*s' ‚ª %s ‚©‚ç휂³‚ê‚Ü‚µ‚½"
#: ../spell.c:8117
-#, fuzzy, c-format
+#, c-format
msgid "Word '%.*s' added to %s"
-msgstr "%s ‚É’PŒê‚ª’ljÁ‚³‚ê‚Ü‚µ‚½"
+msgstr "’PŒê '%.*s' ‚ª %s ‚֒ljÁ‚³‚ê‚Ü‚µ‚½"
#: ../spell.c:8381
msgid "E763: Word characters differ between spell files"
@@ -5673,6 +5732,9 @@ msgstr "‚±‚̃oƒbƒtƒ@‚É’è‹`‚³‚ꂽ\\•¶—v‘f‚Í‚ ‚è‚Ü‚¹‚ñ"
msgid "E390: Illegal argument: %s"
msgstr "E390: •s³‚Ȉø”‚Å‚·: %s"
+msgid "syntax iskeyword "
+msgstr "ƒVƒ“ƒ^ƒbƒNƒX—p iskeyword "
+
#: ../syntax.c:3299
#, c-format
msgid "E391: No such syntax cluster: %s"
@@ -5769,6 +5831,10 @@ msgstr "E847: \\•¶‚ÌŽæ‚èž‚Ý(include)‚ª‘½‰ß‚¬‚Ü‚·"
msgid "E789: Missing ']': %s"
msgstr "E789: ']' ‚ª‚ ‚è‚Ü‚¹‚ñ: %s"
+#, c-format
+msgid "E890: trailing char after ']': %s]%s"
+msgstr "E890: ']' ‚ÌŒã‚ë‚É—]•ª‚È•¶Žš‚ª‚ ‚è‚Ü‚·: %s]%s"
+
#: ../syntax.c:4531
#, c-format
msgid "E398: Missing '=': %s"
@@ -5874,7 +5940,7 @@ msgstr "E415: —\\Šú‚¹‚Ê“™†‚Å‚·: %s"
#: ../syntax.c:6395
#, c-format
msgid "E416: missing equal sign: %s"
-msgstr "E416: “™†‚ª‚ª‚ ‚è‚Ü‚¹‚ñ: %s"
+msgstr "E416: “™†‚ª‚ ‚è‚Ü‚¹‚ñ: %s"
#: ../syntax.c:6418
#, c-format
@@ -6078,9 +6144,8 @@ msgstr "Vim: “ü—Í‚ð“Çž‚Ý’†‚̃Gƒ‰[‚É‚æ‚èI—¹‚µ‚Ü‚·...\n"
#. This happens when the FileChangedRO autocommand changes the
#. * file in a way it becomes shorter.
#: ../undo.c:379
-#, fuzzy
msgid "E881: Line count changed unexpectedly"
-msgstr "E834: —\\Šú‚¹‚¸sƒJƒEƒ“ƒg‚ª•Ï‚í‚è‚Ü‚µ‚½"
+msgstr "E881: —\\Šú‚¹‚¸sƒJƒEƒ“ƒg‚ª•Ï‚í‚è‚Ü‚µ‚½"
#: ../undo.c:627
#, c-format
@@ -6287,23 +6352,23 @@ msgstr " ƒVƒXƒeƒ€ vimrc: \""
#: ../version.c:672
msgid " user vimrc file: \""
-msgstr " ƒ†[ƒU vimrc: \""
+msgstr " ƒ†[ƒU[ vimrc: \""
#: ../version.c:677
msgid " 2nd user vimrc file: \""
-msgstr " ‘æ2ƒ†[ƒU vimrc: \""
+msgstr " ‘æ2ƒ†[ƒU[ vimrc: \""
#: ../version.c:682
msgid " 3rd user vimrc file: \""
-msgstr " ‘æ3ƒ†[ƒU vimrc: \""
+msgstr " ‘æ3ƒ†[ƒU[ vimrc: \""
#: ../version.c:687
msgid " user exrc file: \""
-msgstr " ƒ†[ƒU exrc: \""
+msgstr " ƒ†[ƒU[ exrc: \""
#: ../version.c:692
msgid " 2nd user exrc file: \""
-msgstr " ‘æ2ƒ†[ƒU exrc: \""
+msgstr " ‘æ2ƒ†[ƒU[ exrc: \""
#: ../version.c:699
msgid " fall-back for $VIM: \""
@@ -6379,7 +6444,7 @@ msgstr "Vim‚ÌŠJ”­‚ð‰ž‰‡‚µ‚Ä‚­‚¾‚³‚¢!"
#: ../version.c:828
msgid "Become a registered Vim user!"
-msgstr "Vim‚Ì“o˜^ƒ†[ƒU‚ɂȂÁ‚Ä‚­‚¾‚³‚¢!"
+msgstr "Vim‚Ì“o˜^ƒ†[ƒU[‚ɂȂÁ‚Ä‚­‚¾‚³‚¢!"
#: ../version.c:831
msgid "type :help sponsor<Enter> for information "
@@ -6391,7 +6456,7 @@ msgstr "ÚׂÈî•ñ‚Í :help register<Enter> "
#: ../version.c:834
msgid "menu Help->Sponsor/Register for information "
-msgstr "Úׂ̓ƒjƒ…[‚Ì ƒwƒ‹ƒv¨ƒXƒ|ƒ“ƒT[/“o˜^ ‚ðŽQÆ‚µ‚ĉº‚³‚¢ "
+msgstr "Úׂ̓ƒjƒ…[‚Ì ƒwƒ‹ƒv->ƒXƒ|ƒ“ƒT[/“o˜^ ‚ðŽQÆ‚µ‚ĉº‚³‚¢"
#: ../window.c:119
msgid "Already only one window"
@@ -6429,6 +6494,9 @@ msgstr "E445: ‘¼‚̃EƒBƒ“ƒhƒE‚ɂ͕ÏX‚ª‚ ‚è‚Ü‚·"
msgid "E446: No file name under cursor"
msgstr "E446: ƒJ[ƒ\\ƒ‹‚̉º‚Ƀtƒ@ƒCƒ‹–¼‚ª‚ ‚è‚Ü‚¹‚ñ"
+msgid "List or number required"
+msgstr "ƒŠƒXƒg‚©”’l‚ª•K—v‚Å‚·"
+
#~ msgid "E831: bf_key_init() called with empty password"
#~ msgstr "E831: bf_key_init() ‚ª‹óƒpƒXƒ[ƒh‚ŌĂÑo‚³‚ê‚Ü‚µ‚½"
diff --git a/src/nvim/rbuffer.h b/src/nvim/rbuffer.h
index 35fb16508e..454972c69d 100644
--- a/src/nvim/rbuffer.h
+++ b/src/nvim/rbuffer.h
@@ -36,30 +36,36 @@
//
// Note that the rbuffer_{produced,consumed} calls are necessary or these macros
// create infinite loops
-#define RBUFFER_UNTIL_EMPTY(buf, rptr, rcnt) \
- for (size_t rcnt = 0, _r = 1; _r; _r = 0) \
- for (char *rptr = rbuffer_read_ptr(buf, &rcnt); \
- buf->size; \
- rptr = rbuffer_read_ptr(buf, &rcnt))
+#define RBUFFER_UNTIL_EMPTY(buf, rptr, rcnt) \
+ for (size_t rcnt = 0, _r = 1; _r; _r = 0) /* NOLINT(readability/braces) */ \
+ for ( /* NOLINT(readability/braces) */ \
+ char *rptr = rbuffer_read_ptr(buf, &rcnt); \
+ buf->size; \
+ rptr = rbuffer_read_ptr(buf, &rcnt))
-#define RBUFFER_UNTIL_FULL(buf, wptr, wcnt) \
- for (size_t wcnt = 0, _r = 1; _r; _r = 0) \
- for (char *wptr = rbuffer_write_ptr(buf, &wcnt); \
- rbuffer_space(buf); \
- wptr = rbuffer_write_ptr(buf, &wcnt))
+#define RBUFFER_UNTIL_FULL(buf, wptr, wcnt) \
+ for (size_t wcnt = 0, _r = 1; _r; _r = 0) /* NOLINT(readability/braces) */ \
+ for ( /* NOLINT(readability/braces) */ \
+ char *wptr = rbuffer_write_ptr(buf, &wcnt); \
+ rbuffer_space(buf); \
+ wptr = rbuffer_write_ptr(buf, &wcnt))
// Iteration
-#define RBUFFER_EACH(buf, c, i) \
- for (size_t i = 0; i < buf->size; i = buf->size) \
- for (char c = 0; \
- i < buf->size ? ((int)(c = *rbuffer_get(buf, i))) || 1 : 0; \
+#define RBUFFER_EACH(buf, c, i) \
+ for (size_t i = 0; /* NOLINT(readability/braces) */ \
+ i < buf->size; \
+ i = buf->size) \
+ for (char c = 0; /* NOLINT(readability/braces) */ \
+ i < buf->size ? ((int)(c = *rbuffer_get(buf, i))) || 1 : 0; \
i++)
-#define RBUFFER_EACH_REVERSE(buf, c, i) \
- for (size_t i = buf->size; i != SIZE_MAX; i = SIZE_MAX) \
- for (char c = 0; \
- i-- > 0 ? ((int)(c = *rbuffer_get(buf, i))) || 1 : 0; \
+#define RBUFFER_EACH_REVERSE(buf, c, i) \
+ for (size_t i = buf->size; /* NOLINT(readability/braces) */ \
+ i != SIZE_MAX; \
+ i = SIZE_MAX) \
+ for (char c = 0; /* NOLINT(readability/braces) */ \
+ i-- > 0 ? ((int)(c = *rbuffer_get(buf, i))) || 1 : 0; \
)
typedef struct rbuffer RBuffer;
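
The reindented iteration macros above keep the contract stated in the header comment: the nested for loops only make progress when the caller reports consumption back to the ring buffer. A minimal sketch of that usage, relying on the rbuffer_consumed() call the comment refers to and a hypothetical process_bytes() consumer:

    // Sketch only (not part of this patch): drain an RBuffer with
    // RBUFFER_UNTIL_EMPTY.  The rbuffer_consumed() call is what prevents the
    // infinite loop the header comment warns about.
    #include "nvim/rbuffer.h"

    static size_t process_bytes(char *data, size_t len);  // hypothetical consumer

    static void drain(RBuffer *buf)
    {
      RBUFFER_UNTIL_EMPTY(buf, ptr, cnt) {
        size_t used = process_bytes(ptr, cnt);  // handle up to cnt bytes at ptr
        rbuffer_consumed(buf, used);            // shrink buf->size so the loop ends
      }
    }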
diff --git a/src/nvim/regexp_nfa.c b/src/nvim/regexp_nfa.c
index 7e53b2ccd1..f97dce9e0d 100644
--- a/src/nvim/regexp_nfa.c
+++ b/src/nvim/regexp_nfa.c
@@ -357,13 +357,14 @@ static int nfa_ll_index = 0;
# include "regexp_nfa.c.generated.h"
#endif
-/* helper functions used when doing re2post() ... regatom() parsing */
-#define EMIT(c) do { \
- if (post_ptr >= post_end) { \
- realloc_post_list(); \
- } \
- *post_ptr++ = c; \
-} while (0)
+// Helper functions used when doing re2post() ... regatom() parsing
+#define EMIT(c) \
+ do { \
+ if (post_ptr >= post_end) { \
+ realloc_post_list(); \
+ } \
+ *post_ptr++ = c; \
+ } while (0)
/*
* Initialize internal variables before NFA compilation.
@@ -2892,12 +2893,11 @@ static nfa_state_T *post2nfa(int *postfix, int *end, int nfa_calc_size)
return NULL;
#define PUSH(s) st_push((s), &stackp, stack_end)
-#define POP() st_pop(&stackp, stack); \
- if (stackp < stack) \
- { \
- st_error(postfix, end, p); \
- xfree(stack); \
- return NULL; \
+#define POP() st_pop(&stackp, stack); \
+ if (stackp < stack) { \
+ st_error(postfix, end, p); \
+ xfree(stack); \
+ return NULL; \
}
if (nfa_calc_size == FALSE) {
@@ -4904,10 +4904,10 @@ static int nfa_regmatch(nfa_regprog_T *prog, nfa_state_T *start,
} else
addstate(thislist, start, m, NULL, 0);
-#define ADD_STATE_IF_MATCH(state) \
- if (result) { \
- add_state = state->out; \
- add_off = clen; \
+#define ADD_STATE_IF_MATCH(state) \
+ if (result) { \
+ add_state = state->out; \
+ add_off = clen; \
}
/*
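
The reformatted EMIT() above keeps its do { ... } while (0) wrapper so the multi-statement macro still behaves as a single statement inside if/else; POP() and ADD_STATE_IF_MATCH() stay bare blocks, matching their original shape. A small illustration of what the wrapper buys, using hypothetical helpers:

    // Illustration only (not from this patch).  check_space()/emit_byte() are
    // hypothetical stand-ins for the statements EMIT() expands to.
    static void check_space(void);
    static void emit_byte(int c);

    #define EMIT_BRACES(c) { check_space(); emit_byte(c); }              // fragile
    #define EMIT_DOWHILE(c) do { check_space(); emit_byte(c); } while (0)

    // if (cond)
    //   EMIT_BRACES('x');   // the ';' after '}' orphans a following 'else'
    // else { ... }
    //
    // if (cond)
    //   EMIT_DOWHILE('x');  // expands to one statement, so if/else stays intact
    // else { ... }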
diff --git a/src/nvim/spell.c b/src/nvim/spell.c
index 84906a3548..d2401b6776 100644
--- a/src/nvim/spell.c
+++ b/src/nvim/spell.c
@@ -9319,7 +9319,56 @@ static void suggest_try_special(suginfo_T *su)
}
}
+// Measure how much time is spent in each state.
+// Output is dumped in "suggestprof".
+
+#ifdef SUGGEST_PROFILE
+proftime_T current;
+proftime_T total;
+proftime_T times[STATE_FINAL + 1];
+long counts[STATE_FINAL + 1];
+
+ static void
+prof_init(void)
+{
+ for (int i = 0; i <= STATE_FINAL; i++) {
+ profile_zero(&times[i]);
+ counts[i] = 0;
+ }
+ profile_start(&current);
+ profile_start(&total);
+}
+
+// call before changing state
+ static void
+prof_store(state_T state)
+{
+ profile_end(&current);
+ profile_add(&times[state], &current);
+ counts[state]++;
+ profile_start(&current);
+}
+# define PROF_STORE(state) prof_store(state);
+
+ static void
+prof_report(char *name)
+{
+ FILE *fd = fopen("suggestprof", "a");
+
+ profile_end(&total);
+ fprintf(fd, "-----------------------\n");
+ fprintf(fd, "%s: %s\n", name, profile_msg(&total));
+ for (int i = 0; i <= STATE_FINAL; i++) {
+ fprintf(fd, "%d: %s ("%" PRId64)\n", i, profile_msg(&times[i]), counts[i]);
+ }
+ fclose(fd);
+}
+#else
+# define PROF_STORE(state)
+#endif
+
// Try finding suggestions by adding/removing/swapping letters.
+
static void suggest_try_change(suginfo_T *su)
{
char_u fword[MAXWLEN]; // copy of the bad word, case-folded
@@ -9344,7 +9393,14 @@ static void suggest_try_change(suginfo_T *su)
continue;
// Try it for this language. Will add possible suggestions.
+ //
+#ifdef SUGGEST_PROFILE
+ prof_init();
+#endif
suggest_trie_walk(su, lp, fword, false);
+#ifdef SUGGEST_PROFILE
+ prof_report("try_change");
+#endif
}
}
@@ -9478,6 +9534,7 @@ static void suggest_trie_walk(suginfo_T *su, langp_T *lp, char_u *fword, bool so
// Always past NUL bytes now.
n = (int)sp->ts_state;
+ PROF_STORE(sp->ts_state)
sp->ts_state = STATE_ENDNUL;
sp->ts_save_badflags = su->su_badflags;
@@ -9517,6 +9574,7 @@ static void suggest_trie_walk(suginfo_T *su, langp_T *lp, char_u *fword, bool so
if (sp->ts_curi > len || byts[arridx] != 0) {
// Past bytes in node and/or past NUL bytes.
+ PROF_STORE(sp->ts_state)
sp->ts_state = STATE_ENDNUL;
sp->ts_save_badflags = su->su_badflags;
break;
@@ -9870,6 +9928,7 @@ static void suggest_trie_walk(suginfo_T *su, langp_T *lp, char_u *fword, bool so
#endif
// Save things to be restored at STATE_SPLITUNDO.
sp->ts_save_badflags = su->su_badflags;
+ PROF_STORE(sp->ts_state)
sp->ts_state = STATE_SPLITUNDO;
++depth;
@@ -9936,6 +9995,7 @@ static void suggest_trie_walk(suginfo_T *su, langp_T *lp, char_u *fword, bool so
byts = pbyts;
idxs = pidxs;
sp->ts_prefixdepth = PFD_PREFIXTREE;
+ PROF_STORE(sp->ts_state)
sp->ts_state = STATE_NOPREFIX;
}
}
@@ -9948,6 +10008,7 @@ static void suggest_trie_walk(suginfo_T *su, langp_T *lp, char_u *fword, bool so
su->su_badflags = sp->ts_save_badflags;
// Continue looking for NUL bytes.
+ PROF_STORE(sp->ts_state)
sp->ts_state = STATE_START;
// In case we went into the prefix tree.
@@ -9962,9 +10023,11 @@ static void suggest_trie_walk(suginfo_T *su, langp_T *lp, char_u *fword, bool so
&& sp->ts_tcharlen == 0
) {
// The badword ends, can't use STATE_PLAIN.
+ PROF_STORE(sp->ts_state)
sp->ts_state = STATE_DEL;
break;
}
+ PROF_STORE(sp->ts_state)
sp->ts_state = STATE_PLAIN;
// FALLTHROUGH
@@ -9975,10 +10038,12 @@ static void suggest_trie_walk(suginfo_T *su, langp_T *lp, char_u *fword, bool so
if (sp->ts_curi > byts[arridx]) {
// Done all bytes at this node, do next state. When still at
// already changed bytes skip the other tricks.
- if (sp->ts_fidx >= sp->ts_fidxtry)
+ PROF_STORE(sp->ts_state)
+ if (sp->ts_fidx >= sp->ts_fidxtry) {
sp->ts_state = STATE_DEL;
- else
+ } else {
sp->ts_state = STATE_FINAL;
+ }
} else {
arridx += sp->ts_curi++;
c = byts[arridx];
@@ -10110,10 +10175,12 @@ static void suggest_trie_walk(suginfo_T *su, langp_T *lp, char_u *fword, bool so
// When past the first byte of a multi-byte char don't try
// delete/insert/swap a character.
if (has_mbyte && sp->ts_tcharlen > 0) {
+ PROF_STORE(sp->ts_state)
sp->ts_state = STATE_FINAL;
break;
}
// Try skipping one character in the bad word (delete it).
+ PROF_STORE(sp->ts_state)
sp->ts_state = STATE_INS_PREP;
sp->ts_curi = 1;
if (soundfold && sp->ts_fidx == 0 && fword[sp->ts_fidx] == '*')
@@ -10161,6 +10228,7 @@ static void suggest_trie_walk(suginfo_T *su, langp_T *lp, char_u *fword, bool so
if (sp->ts_flags & TSF_DIDDEL) {
// If we just deleted a byte then inserting won't make sense,
// a substitute is always cheaper.
+ PROF_STORE(sp->ts_state)
sp->ts_state = STATE_SWAP;
break;
}
@@ -10170,11 +10238,13 @@ static void suggest_trie_walk(suginfo_T *su, langp_T *lp, char_u *fword, bool so
for (;; ) {
if (sp->ts_curi > byts[n]) {
// Only NUL bytes at this node, go to next state.
+ PROF_STORE(sp->ts_state)
sp->ts_state = STATE_SWAP;
break;
}
if (byts[n + sp->ts_curi] != NUL) {
// Found a byte to insert.
+ PROF_STORE(sp->ts_state)
sp->ts_state = STATE_INS;
break;
}
@@ -10190,6 +10260,7 @@ static void suggest_trie_walk(suginfo_T *su, langp_T *lp, char_u *fword, bool so
n = sp->ts_arridx;
if (sp->ts_curi > byts[n]) {
// Done all bytes at this node, go to next state.
+ PROF_STORE(sp->ts_state)
sp->ts_state = STATE_SWAP;
break;
}
@@ -10250,6 +10321,7 @@ static void suggest_trie_walk(suginfo_T *su, langp_T *lp, char_u *fword, bool so
c = *p;
if (c == NUL) {
// End of word, can't swap or replace.
+ PROF_STORE(sp->ts_state)
sp->ts_state = STATE_FINAL;
break;
}
@@ -10257,6 +10329,7 @@ static void suggest_trie_walk(suginfo_T *su, langp_T *lp, char_u *fword, bool so
// Don't swap if the first character is not a word character.
// SWAP3 etc. also don't make sense then.
if (!soundfold && !spell_iswordp(p, curwin)) {
+ PROF_STORE(sp->ts_state)
sp->ts_state = STATE_REP_INI;
break;
}
@@ -10281,6 +10354,7 @@ static void suggest_trie_walk(suginfo_T *su, langp_T *lp, char_u *fword, bool so
// When the second character is NUL we can't swap.
if (c2 == NUL) {
+ PROF_STORE(sp->ts_state)
sp->ts_state = STATE_REP_INI;
break;
}
@@ -10288,6 +10362,7 @@ static void suggest_trie_walk(suginfo_T *su, langp_T *lp, char_u *fword, bool so
// When characters are identical, swap won't do anything.
// Also get here if the second char is not a word character.
if (c == c2) {
+ PROF_STORE(sp->ts_state)
sp->ts_state = STATE_SWAP3;
break;
}
@@ -10298,6 +10373,7 @@ static void suggest_trie_walk(suginfo_T *su, langp_T *lp, char_u *fword, bool so
sp->ts_twordlen, tword, fword + sp->ts_fidx,
c, c2);
#endif
+ PROF_STORE(sp->ts_state)
sp->ts_state = STATE_UNSWAP;
++depth;
if (has_mbyte) {
@@ -10312,6 +10388,7 @@ static void suggest_trie_walk(suginfo_T *su, langp_T *lp, char_u *fword, bool so
}
} else
// If this swap doesn't work then SWAP3 won't either.
+ PROF_STORE(sp->ts_state)
sp->ts_state = STATE_REP_INI;
break;
@@ -10359,6 +10436,7 @@ static void suggest_trie_walk(suginfo_T *su, langp_T *lp, char_u *fword, bool so
// Also get here when the third character is not a word character.
// Second character may any char: "a.b" -> "b.a"
if (c == c3 || c3 == NUL) {
+ PROF_STORE(sp->ts_state)
sp->ts_state = STATE_REP_INI;
break;
}
@@ -10369,6 +10447,7 @@ static void suggest_trie_walk(suginfo_T *su, langp_T *lp, char_u *fword, bool so
sp->ts_twordlen, tword, fword + sp->ts_fidx,
c, c3);
#endif
+ PROF_STORE(sp->ts_state)
sp->ts_state = STATE_UNSWAP3;
++depth;
if (has_mbyte) {
@@ -10382,8 +10461,10 @@ static void suggest_trie_walk(suginfo_T *su, langp_T *lp, char_u *fword, bool so
p[2] = c;
stack[depth].ts_fidxtry = sp->ts_fidx + 3;
}
- } else
+ } else {
+ PROF_STORE(sp->ts_state)
sp->ts_state = STATE_REP_INI;
+ }
break;
case STATE_UNSWAP3:
@@ -10409,6 +10490,7 @@ static void suggest_trie_walk(suginfo_T *su, langp_T *lp, char_u *fword, bool so
if (!soundfold && !spell_iswordp(p, curwin)) {
// Middle char is not a word char, skip the rotate. First and
// third char were already checked at swap and swap3.
+ PROF_STORE(sp->ts_state)
sp->ts_state = STATE_REP_INI;
break;
}
@@ -10423,6 +10505,7 @@ static void suggest_trie_walk(suginfo_T *su, langp_T *lp, char_u *fword, bool so
sp->ts_twordlen, tword, fword + sp->ts_fidx,
p[0], p[1], p[2]);
#endif
+ PROF_STORE(sp->ts_state)
sp->ts_state = STATE_UNROT3L;
++depth;
p = fword + sp->ts_fidx;
@@ -10441,8 +10524,10 @@ static void suggest_trie_walk(suginfo_T *su, langp_T *lp, char_u *fword, bool so
p[2] = c;
stack[depth].ts_fidxtry = sp->ts_fidx + 3;
}
- } else
+ } else {
+ PROF_STORE(sp->ts_state)
sp->ts_state = STATE_REP_INI;
+ }
break;
case STATE_UNROT3L:
@@ -10472,6 +10557,7 @@ static void suggest_trie_walk(suginfo_T *su, langp_T *lp, char_u *fword, bool so
sp->ts_twordlen, tword, fword + sp->ts_fidx,
p[0], p[1], p[2]);
#endif
+ PROF_STORE(sp->ts_state)
sp->ts_state = STATE_UNROT3R;
++depth;
p = fword + sp->ts_fidx;
@@ -10490,8 +10576,10 @@ static void suggest_trie_walk(suginfo_T *su, langp_T *lp, char_u *fword, bool so
*p = c;
stack[depth].ts_fidxtry = sp->ts_fidx + 3;
}
- } else
+ } else {
+ PROF_STORE(sp->ts_state)
sp->ts_state = STATE_REP_INI;
+ }
break;
case STATE_UNROT3R:
@@ -10521,6 +10609,7 @@ static void suggest_trie_walk(suginfo_T *su, langp_T *lp, char_u *fword, bool so
if ((lp->lp_replang == NULL && !soundfold)
|| sp->ts_score + SCORE_REP >= su->su_maxscore
|| sp->ts_fidx < sp->ts_fidxtry) {
+ PROF_STORE(sp->ts_state)
sp->ts_state = STATE_FINAL;
break;
}
@@ -10533,10 +10622,12 @@ static void suggest_trie_walk(suginfo_T *su, langp_T *lp, char_u *fword, bool so
sp->ts_curi = lp->lp_replang->sl_rep_first[fword[sp->ts_fidx]];
if (sp->ts_curi < 0) {
+ PROF_STORE(sp->ts_state)
sp->ts_state = STATE_FINAL;
break;
}
+ PROF_STORE(sp->ts_state)
sp->ts_state = STATE_REP;
// FALLTHROUGH
@@ -10566,6 +10657,7 @@ static void suggest_trie_walk(suginfo_T *su, langp_T *lp, char_u *fword, bool so
ftp->ft_from, ftp->ft_to);
#endif
// Need to undo this afterwards.
+ PROF_STORE(sp->ts_state)
sp->ts_state = STATE_REP_UNDO;
// Change the "from" to the "to" string.
@@ -10585,6 +10677,7 @@ static void suggest_trie_walk(suginfo_T *su, langp_T *lp, char_u *fword, bool so
if (sp->ts_curi >= gap->ga_len && sp->ts_state == STATE_REP)
// No (more) matches.
+ PROF_STORE(sp->ts_state)
sp->ts_state = STATE_FINAL;
break;
@@ -10604,6 +10697,7 @@ static void suggest_trie_walk(suginfo_T *su, langp_T *lp, char_u *fword, bool so
repextra -= tl - fl;
}
memmove(p, ftp->ft_from, fl);
+ PROF_STORE(sp->ts_state)
sp->ts_state = STATE_REP;
break;
@@ -11014,7 +11108,13 @@ static void suggest_try_soundalike(suginfo_T *su)
// try all kinds of inserts/deletes/swaps/etc.
// TODO: also soundfold the next words, so that we can try joining
// and splitting
+#ifdef SUGGEST_PROFILE
+ prof_init();
+#endif
suggest_trie_walk(su, lp, salword, true);
+#ifdef SUGGEST_PROFILE
+ prof_report("soundalike");
+#endif
}
}
}
@@ -13364,3 +13464,4 @@ int expand_spelling(linenr_T lnum, char_u *pat, char_u ***matchp)
return ga.ga_len;
}
+
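
The SUGGEST_PROFILE instrumentation added above follows one contract: prof_store() charges the time elapsed since the previous call to the state being left, which is why every PROF_STORE(sp->ts_state) is placed before the assignment that switches sp->ts_state. A schematic of how the hooks bracket a walk, assuming the file is built with SUGGEST_PROFILE defined (otherwise PROF_STORE expands to nothing):

    // Schematic only (not part of the patch); mirrors the calls added in
    // suggest_try_change() and suggest_try_soundalike() above.
    static void profiled_walk(suginfo_T *su, langp_T *lp, char_u *fword)
    {
    #ifdef SUGGEST_PROFILE
      prof_init();                    // zero per-state counters, start timers
    #endif
      suggest_trie_walk(su, lp, fword, false);
    #ifdef SUGGEST_PROFILE
      prof_report("profiled_walk");   // append per-state totals to "suggestprof"
    #endif
    }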
diff --git a/src/nvim/state.c b/src/nvim/state.c
index b2f3f0bebe..30133e2201 100644
--- a/src/nvim/state.c
+++ b/src/nvim/state.c
@@ -4,6 +4,7 @@
#include "nvim/state.h"
#include "nvim/vim.h"
+#include "nvim/main.h"
#include "nvim/getchar.h"
#include "nvim/ui.h"
#include "nvim/os/input.h"
@@ -32,7 +33,7 @@ getkey:
// processing. Characters can come from mappings, scripts and other
// sources, so this scenario is very common.
key = safe_vgetc();
- } else if (!queue_empty(loop.events)) {
+ } else if (!queue_empty(main_loop.events)) {
// Event was made available after the last queue_process_events call
key = K_EVENT;
} else {
@@ -45,7 +46,7 @@ getkey:
// directly.
(void)os_inchar(NULL, 0, -1, 0);
input_disable_events();
- key = !queue_empty(loop.events) ? K_EVENT : safe_vgetc();
+ key = !queue_empty(main_loop.events) ? K_EVENT : safe_vgetc();
}
if (key == K_EVENT) {
diff --git a/src/nvim/syntax.c b/src/nvim/syntax.c
index 1f9dbd8228..9e4dc0204f 100644
--- a/src/nvim/syntax.c
+++ b/src/nvim/syntax.c
@@ -812,19 +812,39 @@ static void syn_sync(win_T *wp, linenr_T start_lnum, synstate_T *last_valid)
validate_current_state();
}
+static void save_chartab(char_u *chartab)
+{
+ if (syn_block->b_syn_isk != empty_option) {
+ memmove(chartab, syn_buf->b_chartab, (size_t)32);
+ memmove(syn_buf->b_chartab, syn_win->w_s->b_syn_chartab, (size_t)32);
+ }
+}
+
+static void restore_chartab(char_u *chartab)
+{
+ if (syn_win->w_s->b_syn_isk != empty_option) {
+ memmove(syn_buf->b_chartab, chartab, (size_t)32);
+ }
+}
+
/*
* Return TRUE if the line-continuation pattern matches in line "lnum".
*/
static int syn_match_linecont(linenr_T lnum)
{
- regmmatch_T regmatch;
-
if (syn_block->b_syn_linecont_prog != NULL) {
+ regmmatch_T regmatch;
+ // chartab array for syn iskeyword
+ char_u buf_chartab[32];
+ save_chartab(buf_chartab);
+
regmatch.rmm_ic = syn_block->b_syn_linecont_ic;
regmatch.regprog = syn_block->b_syn_linecont_prog;
int r = syn_regexec(&regmatch, lnum, (colnr_T)0,
IF_SYN_TIME(&syn_block->b_syn_linecont_time));
syn_block->b_syn_linecont_prog = regmatch.regprog;
+
+ restore_chartab(buf_chartab);
return r;
}
return FALSE;
@@ -1617,8 +1637,9 @@ syn_current_attr (
lpos_T pos;
int lc_col;
reg_extmatch_T *cur_extmatch = NULL;
- char_u *line; /* current line. NOTE: becomes invalid after
- looking for a pattern match! */
+ char_u buf_chartab[32]; // chartab array for syn iskeyword
+ char_u *line; // current line. NOTE: becomes invalid after
+ // looking for a pattern match!
/* variables for zero-width matches that have a "nextgroup" argument */
int keep_next_list;
@@ -1668,6 +1689,9 @@ syn_current_attr (
* avoid matching the same item in the same position twice. */
ga_init(&zero_width_next_ga, (int)sizeof(int), 10);
+ // use syntax iskeyword option
+ save_chartab(buf_chartab);
+
/*
* Repeat matching keywords and patterns, to find contained items at the
* same column. This stops when there are no extra matches at the current
@@ -1992,6 +2016,8 @@ syn_current_attr (
} while (found_match);
+ restore_chartab(buf_chartab);
+
/*
* Use attributes from the current state, if within its highlighting.
* If not, use attributes from the current-but-one state, etc.
@@ -2522,7 +2548,8 @@ find_endpos (
regmmatch_T best_regmatch; /* startpos/endpos of best match */
lpos_T pos;
char_u *line;
- int had_match = FALSE;
+ int had_match = false;
+ char_u buf_chartab[32]; // chartab array for syn option iskeyword
/* just in case we are invoked for a keyword */
if (idx < 0)
@@ -2562,9 +2589,13 @@ find_endpos (
unref_extmatch(re_extmatch_in);
re_extmatch_in = ref_extmatch(start_ext);
- matchcol = startpos->col; /* start looking for a match at sstart */
- start_idx = idx; /* remember the first END pattern. */
- best_regmatch.startpos[0].col = 0; /* avoid compiler warning */
+ matchcol = startpos->col; // start looking for a match at sstart
+ start_idx = idx; // remember the first END pattern.
+ best_regmatch.startpos[0].col = 0; // avoid compiler warning
+
+ // use syntax iskeyword option
+ save_chartab(buf_chartab);
+
for (;; ) {
/*
* Find end pattern that matches first after "matchcol".
@@ -2707,6 +2738,8 @@ find_endpos (
if (!had_match)
m_endpos->lnum = 0;
+ restore_chartab(buf_chartab);
+
/* Remove external matches. */
unref_extmatch(re_extmatch_in);
re_extmatch_in = NULL;
@@ -3027,6 +3060,46 @@ static void syn_cmd_spell(exarg_T *eap, int syncing)
redraw_win_later(curwin, NOT_VALID);
}
+/// Handle ":syntax iskeyword" command.
+static void syn_cmd_iskeyword(exarg_T *eap, int syncing)
+{
+ char_u *arg = eap->arg;
+ char_u save_chartab[32];
+ char_u *save_isk;
+
+ if (eap->skip) {
+ return;
+ }
+
+ arg = skipwhite(arg);
+ if (*arg == NUL) {
+ MSG_PUTS("\n");
+ MSG_PUTS(_("syntax iskeyword "));
+ if (curwin->w_s->b_syn_isk != empty_option) {
+ msg_outtrans(curwin->w_s->b_syn_isk);
+ } else {
+ msg_outtrans((char_u *)"not set");
+ }
+ } else {
+ if (STRNICMP(arg, "clear", 5) == 0) {
+ memmove(curwin->w_s->b_syn_chartab, curbuf->b_chartab, (size_t)32);
+ clear_string_option(&curwin->w_s->b_syn_isk);
+ } else {
+ memmove(save_chartab, curbuf->b_chartab, (size_t)32);
+ save_isk = curbuf->b_p_isk;
+ curbuf->b_p_isk = vim_strsave(arg);
+
+ buf_init_chartab(curbuf, false);
+ memmove(curwin->w_s->b_syn_chartab, curbuf->b_chartab, (size_t)32);
+ memmove(curbuf->b_chartab, save_chartab, (size_t)32);
+ clear_string_option(&curwin->w_s->b_syn_isk);
+ curwin->w_s->b_syn_isk = curbuf->b_p_isk;
+ curbuf->b_p_isk = save_isk;
+ }
+ }
+ redraw_win_later(curwin, NOT_VALID);
+}
+
/*
* Clear all syntax info for one buffer.
*/
@@ -3065,6 +3138,7 @@ void syntax_clear(synblock_T *block)
xfree(block->b_syn_linecont_pat);
block->b_syn_linecont_pat = NULL;
block->b_syn_folditems = 0;
+ clear_string_option(&block->b_syn_isk);
/* free the stored states */
syn_stack_free_all(block);
@@ -3107,6 +3181,7 @@ static void syntax_sync_clear(void)
curwin->w_s->b_syn_linecont_prog = NULL;
xfree(curwin->w_s->b_syn_linecont_pat);
curwin->w_s->b_syn_linecont_pat = NULL;
+ clear_string_option(&curwin->w_s->b_syn_isk);
syn_stack_free_all(curwin->w_s); /* Need to recompute all syntax. */
}
@@ -3266,6 +3341,7 @@ static void syn_cmd_enable(exarg_T *eap, int syncing)
/*
* Handle ":syntax reset" command.
+ * It actually resets highlighting, not syntax.
*/
static void syn_cmd_reset(exarg_T *eap, int syncing)
{
@@ -5363,24 +5439,25 @@ struct subcommand {
static struct subcommand subcommands[] =
{
- {"case", syn_cmd_case},
- {"clear", syn_cmd_clear},
- {"cluster", syn_cmd_cluster},
- {"conceal", syn_cmd_conceal},
- {"enable", syn_cmd_enable},
- {"include", syn_cmd_include},
- {"keyword", syn_cmd_keyword},
- {"list", syn_cmd_list},
- {"manual", syn_cmd_manual},
- {"match", syn_cmd_match},
- {"on", syn_cmd_on},
- {"off", syn_cmd_off},
- {"region", syn_cmd_region},
- {"reset", syn_cmd_reset},
- {"spell", syn_cmd_spell},
- {"sync", syn_cmd_sync},
- {"", syn_cmd_list},
- {NULL, NULL}
+ { "case", syn_cmd_case },
+ { "clear", syn_cmd_clear },
+ { "cluster", syn_cmd_cluster },
+ { "conceal", syn_cmd_conceal },
+ { "enable", syn_cmd_enable },
+ { "include", syn_cmd_include },
+ { "iskeyword", syn_cmd_iskeyword },
+ { "keyword", syn_cmd_keyword },
+ { "list", syn_cmd_list },
+ { "manual", syn_cmd_manual },
+ { "match", syn_cmd_match },
+ { "on", syn_cmd_on },
+ { "off", syn_cmd_off },
+ { "region", syn_cmd_region },
+ { "reset", syn_cmd_reset },
+ { "spell", syn_cmd_spell },
+ { "sync", syn_cmd_sync },
+ { "", syn_cmd_list },
+ { NULL, NULL }
};
/*
@@ -5434,6 +5511,7 @@ void ex_ownsyntax(exarg_T *eap)
clear_string_option(&curwin->w_s->b_p_spc);
clear_string_option(&curwin->w_s->b_p_spf);
clear_string_option(&curwin->w_s->b_p_spl);
+ clear_string_option(&curwin->w_s->b_syn_isk);
}
/* save value of b:current_syntax */
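
The new save_chartab()/restore_chartab() helpers exist so that matching done on behalf of syntax items temporarily runs with the table built by ":syntax iskeyword", and the buffer's own 'iskeyword' table is put back afterwards; syn_match_linecont(), syn_current_attr() and find_endpos() above all follow the same bracketing. A sketch of that pattern in isolation:

    // Sketch only (not a call site from this patch): bracket any syntax-time
    // matching with the two helpers.  match_fn stands in for the actual work,
    // e.g. a syn_regexec() call or a keyword lookup.
    static void with_syn_iskeyword(void (*match_fn)(void))
    {
      char_u buf_chartab[32];        // holds the buffer's own chartab while swapped out
      save_chartab(buf_chartab);     // activate the ":syntax iskeyword" table, if set
      match_fn();
      restore_chartab(buf_chartab);  // restore the buffer's own table
    }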
diff --git a/src/nvim/terminal.c b/src/nvim/terminal.c
index 104cc47cda..3e41926b70 100644
--- a/src/nvim/terminal.c
+++ b/src/nvim/terminal.c
@@ -63,6 +63,7 @@
#include "nvim/map.h"
#include "nvim/misc1.h"
#include "nvim/move.h"
+#include "nvim/main.h"
#include "nvim/state.h"
#include "nvim/ex_docmd.h"
#include "nvim/ex_cmds.h"
@@ -163,9 +164,9 @@ static VTermColor default_vt_bg_rgb;
void terminal_init(void)
{
invalidated_terminals = pmap_new(ptr_t)();
- time_watcher_init(&loop, &refresh_timer, NULL);
+ time_watcher_init(&main_loop, &refresh_timer, NULL);
// refresh_timer_cb will redraw the screen which can call vimscript
- refresh_timer.events = queue_new_child(loop.events);
+ refresh_timer.events = queue_new_child(main_loop.events);
// initialize a rgb->color index map for cterm attributes(VTermScreenCell
// only has RGB information and we need color indexes for terminal UIs)
@@ -452,7 +453,7 @@ static int terminal_execute(VimState *state, int key)
case K_EVENT:
// We cannot let an event free the terminal yet. It is still needed.
s->term->refcount++;
- queue_process_events(loop.events);
+ queue_process_events(main_loop.events);
s->term->refcount--;
if (s->term->buf_handle == 0) {
s->close = true;
@@ -1158,15 +1159,15 @@ static bool is_focused(Terminal *term)
return State & TERM_FOCUS && curbuf->terminal == term;
}
-#define GET_CONFIG_VALUE(k, o) \
- do { \
- Error err; \
- /* Only called from terminal_open where curbuf->terminal is the */ \
- /* context */ \
- o = dict_get_value(curbuf->b_vars, cstr_as_string(k), &err); \
- if (o.type == kObjectTypeNil) { \
- o = dict_get_value(&globvardict, cstr_as_string(k), &err); \
- } \
+#define GET_CONFIG_VALUE(k, o) \
+ do { \
+ Error err; \
+ /* Only called from terminal_open where curbuf->terminal is the */ \
+ /* context */ \
+ o = dict_get_value(curbuf->b_vars, cstr_as_string(k), &err); \
+ if (o.type == kObjectTypeNil) { \
+ o = dict_get_value(&globvardict, cstr_as_string(k), &err); \
+ } \
} while (0)
static char *get_config_string(char *key)
diff --git a/src/nvim/testdir/Makefile b/src/nvim/testdir/Makefile
index 867cab9fbf..4b0b5e8d26 100644
--- a/src/nvim/testdir/Makefile
+++ b/src/nvim/testdir/Makefile
@@ -9,7 +9,6 @@ SCRIPTSOURCE := ../../../runtime
SCRIPTS := \
test8.out \
- test10.out \
test12.out \
test13.out \
test14.out \
@@ -17,7 +16,6 @@ SCRIPTS := \
test24.out \
test30.out \
test32.out \
- test34.out \
test37.out \
test40.out \
test42.out \
@@ -26,7 +24,6 @@ SCRIPTS := \
test49.out \
test52.out \
test53.out \
- test55.out \
test64.out \
test69.out \
test73.out \
@@ -37,9 +34,13 @@ SCRIPTS := \
# Keep test_alot*.res as the last one, sort the others.
NEW_TESTS = \
test_cursor_func.res \
+ test_hardcopy.res \
test_help_tagjump.res \
+ test_langmap.res \
test_menu.res \
+ test_syntax.res \
test_timers.res \
+ test_unlet.res \
test_viml.res \
test_alot.res
@@ -59,7 +60,7 @@ ifdef USE_VALGRIND
TOOL := valgrind -q \
-q \
$(VALGRIND_TOOL) \
- --suppressions=../../../.valgrind.supp \
+ --suppressions=../../.valgrind.supp \
--error-exitcode=123 \
--log-file=valgrind.\%p.$* \
$(VGDB) \
diff --git a/src/nvim/testdir/runtest.vim b/src/nvim/testdir/runtest.vim
index 2712fb9371..578d64efde 100644
--- a/src/nvim/testdir/runtest.vim
+++ b/src/nvim/testdir/runtest.vim
@@ -61,12 +61,13 @@ endif
" Locate Test_ functions and execute them.
set nomore
redir @q
-function /^Test_
+silent function /^Test_
redir END
let tests = split(substitute(@q, 'function \(\k*()\)', '\1', 'g'))
" Execute the tests in alphabetical order.
for test in sort(tests)
+ echo 'Executing ' . test
if exists("*SetUp")
call SetUp()
endif
diff --git a/src/nvim/testdir/test10.in b/src/nvim/testdir/test10.in
deleted file mode 100644
index 2178cf41ce..0000000000
--- a/src/nvim/testdir/test10.in
+++ /dev/null
@@ -1,110 +0,0 @@
-Test for 'errorformat'. This will fail if the quickfix feature was disabled.
-
-STARTTEST
-:7/start of errorfile/,/end of errorfile/w! Xerrorfile1
-:7/start of errorfile/,/end of errorfile/-1w! Xerrorfile2
-:/start of testfile/,/end of testfile/w! Xtestfile
-:set efm+==%f=\\,\ line\ %l%*\\D%v%*[^\ ]\ %m
-:set efm^=%AError\ in\ \"%f\"\ at\ line\ %l:,%Z%p^,%C%m
-:cf Xerrorfile2
-:clast
-:copen
-:let a=w:quickfix_title
-:wincmd p
-lgR=a 
-:cf Xerrorfile1
-grA
-:cn
-gRLINE 6, COL 19
-:cn
-gRNO COLUMN SPECIFIED
-:cn
-gRAGAIN NO COLUMN
-:cn
-gRCOL 1
-:cn
-gRCOL 2
-:cn
-gRCOL 10
-:cn
-gRVCOL 10
-:cn
-grI
-:cn
-gR. SPACE POINTER
-:cn
-gR. DOT POINTER
-:cn
-gR. DASH POINTER
-:cn
-gR. TAB-SPACE POINTER
-:clast
-:cprev
-:cprev
-:wincmd w
-:let a=w:quickfix_title
-:wincmd p
-lgR=a 
-:w! test.out " Write contents of this file
-:qa!
-ENDTEST
-
-start of errorfile
-"Xtestfile", line 4.12: 1506-045 (S) Undeclared identifier fd_set.
-"Xtestfile", line 6 col 19; this is an error
-gcc -c -DHAVE_CONFIsing-prototypes -I/usr/X11R6/include version.c
-Xtestfile:9: parse error before `asd'
-make: *** [vim] Error 1
-in file "Xtestfile" linenr 10: there is an error
-
-2 returned
-"Xtestfile", line 11 col 1; this is an error
-"Xtestfile", line 12 col 2; this is another error
-"Xtestfile", line 14:10; this is an error in column 10
-=Xtestfile=, line 15:10; this is another error, but in vcol 10 this time
-"Xtestfile", linenr 16: yet another problem
-Error in "Xtestfile" at line 17:
-x should be a dot
- xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx line 17
- ^
-Error in "Xtestfile" at line 18:
-x should be a dot
- xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx line 18
-.............^
-Error in "Xtestfile" at line 19:
-x should be a dot
- xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx line 19
---------------^
-Error in "Xtestfile" at line 20:
-x should be a dot
- xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx line 20
- ^
-
-Does anyone know what is the problem and how to correction it?
-"Xtestfile", line 21 col 9: What is the title of the quickfix window?
-"Xtestfile", line 22 col 9: What is the title of the quickfix window?
-end of errorfile
-
-start of testfile
- xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx line 2
- xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx line 3
- xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx line 4
- xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx line 5
- xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx line 6
- xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx line 7
- xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx line 8
- xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx line 9
- xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx line 10
- xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx line 11
- xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx line 12
- xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx line 13
- xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx line 14
- xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx line 15
- xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx line 16
- xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx line 17
- xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx line 18
- xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx line 19
- xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx line 20
- xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx line 21
- xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx line 22
-end of testfile
diff --git a/src/nvim/testdir/test10.ok b/src/nvim/testdir/test10.ok
deleted file mode 100644
index 76a02f40b4..0000000000
--- a/src/nvim/testdir/test10.ok
+++ /dev/null
@@ -1,23 +0,0 @@
-start of testfile
- xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx line 2
- xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx line 3
- xxxxxxxxxxAxxxxxxxxxxxxxxxxxxx line 4
- xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx line 5
- xxxxxxxxxxxxxxxxxLINE 6, COL 19 line 6
- xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx line 7
- xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx line 8
- NO COLUMN SPECIFIEDxxxxxxxxxxx line 9
- AGAIN NO COLUMNxxxxxxxxxxxxxxx line 10
-COL 1 xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx line 11
- COL 2xxxxxxxxxxxxxxxxxxxxxxxxx line 12
- xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx line 13
- xxxxxxxxCOL 10xxxxxxxxxxxxxxxx line 14
- xVCOL 10xxxxxxxxxxxxxxxxxxxxxx line 15
- Ixxxxxxxxxxxxxxxxxxxxxxxxxxxxx line 16
- xxxx. SPACE POINTERxxxxxxxxxxx line 17
- xxxxx. DOT POINTERxxxxxxxxxxxx line 18
- xxxxxx. DASH POINTERxxxxxxxxxx line 19
- xxxxxxx. TAB-SPACE POINTERxxxx line 20
- xxxxxxxx:cf Xerrorfile1xxxxxxx line 21
- xxxxxxxx:cf Xerrorfile2xxxxxxx line 22
-end of testfile
diff --git a/src/nvim/testdir/test10a.in b/src/nvim/testdir/test10a.in
deleted file mode 100644
index 99a5a03db8..0000000000
--- a/src/nvim/testdir/test10a.in
+++ /dev/null
@@ -1,72 +0,0 @@
-Test for 'errorformat'.
-
-STARTTEST
-:/start of errorfile/,/end of errorfile/w! Xerrorfile
-:/start of testfile/,/end of testfile/w! Xtestfile
-:cf Xerrorfile
-rA
-:cn
-rB
-:cn
-rC
-:cn
-rD
-:cn
-rE
-:w! test.out " Write contents of this file
-:qa!
-ENDTEST
-
-start of errorfile
-
- printf(" %d \n", (number/other)%10 );
-..................^
-%CC-E-NOSEMI, Missing ";".
-at line number 4 in file SYS$DISK:XTESTFILE
-
- other=10000000;
-.............^
-%CC-E-UNDECLARED, In this statement, "oszt" is not declared.
-at line number 7 in file SYS$DISK:XTESTFILE
-
- for (i = 0; i<7 ; i++ ){
-..................^
-%CC-E-UNDECLARED, In this statement, "i" is not declared.
-at line number 16 in file SYS$DISK:XTESTFILE
-
-some other error somewhere here.
-...........................^
-%CC-W-WARRING, Sorry, but no expalnation for such an warring.
-at line number 19 in file SYS$DISK:XTESTFILE
-
-and finally some other error exactly here.
-.....................................^
-%CC-I-INFORMATIONAL, It should be some informational message.
-at line number 20 in file SYS$DISK:XTESTFILE
-
-Does anyone know what is the problem and how to correct ?? :)
-end of errorfile
-
-start of testfile
-01234567890123456789012345678901234567
-line 3 xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-line 4 xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-line 5 xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-line 6 xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-line 7 xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-line 8 xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-line 9 xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-line 10 xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-line 11 xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-line 12 xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-line 13 xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-line 14 xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-line 15 xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-line 16 xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-line 17 xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-line 18 xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-line 19 xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-line 20 xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-line 21 xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-line 22 xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-end of testfile
diff --git a/src/nvim/testdir/test10a.ok b/src/nvim/testdir/test10a.ok
deleted file mode 100644
index 10e78c9239..0000000000
--- a/src/nvim/testdir/test10a.ok
+++ /dev/null
@@ -1,23 +0,0 @@
-start of testfile
-01234567890123456789012345678901234567
-line 3 xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-line 4 xxxxxxxxxxAxxxxxxxxxxxxxxxxxxx
-line 5 xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-line 6 xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-line 7 xxxxxBxxxxxxxxxxxxxxxxxxxxxxxx
-line 8 xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-line 9 xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-line 10 xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-line 11 xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-line 12 xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-line 13 xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-line 14 xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-line 15 xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-line 16 xxxxxxxxxxCxxxxxxxxxxxxxxxxxxx
-line 17 xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-line 18 xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-line 19 xxxxxxxxxxxxxxxxxxxDxxxxxxxxxx
-line 20 xxxxxxxxxxxxxxxxxxxxxxxxxxxxxE
-line 21 xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-line 22 xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-end of testfile
diff --git a/src/nvim/testdir/test34.in b/src/nvim/testdir/test34.in
deleted file mode 100644
index 4cb7e9494a..0000000000
--- a/src/nvim/testdir/test34.in
+++ /dev/null
@@ -1,86 +0,0 @@
-Test for user functions.
-Also test an <expr> mapping calling a function.
-Also test that a builtin function cannot be replaced.
-Also test for regression when calling arbitrary expression.
-
-STARTTEST
-:function Table(title, ...)
-: let ret = a:title
-: let idx = 1
-: while idx <= a:0
-: exe "let ret = ret . a:" . idx
-: let idx = idx + 1
-: endwhile
-: return ret
-:endfunction
-:function Compute(n1, n2, divname)
-: if a:n2 == 0
-: return "fail"
-: endif
-: exe "let g:" . a:divname . " = ". a:n1 / a:n2
-: return "ok"
-:endfunction
-:func Expr1()
-: normal! v
-: return "111"
-:endfunc
-:func Expr2()
-: call search('XX', 'b')
-: return "222"
-:endfunc
-:func ListItem()
-: let g:counter += 1
-: return g:counter . '. '
-:endfunc
-:func ListReset()
-: let g:counter = 0
-: return ''
-:endfunc
-:func FuncWithRef(a)
-: unlet g:FuncRef
-: return a:a
-:endfunc
-:let g:FuncRef=function("FuncWithRef")
-:let counter = 0
-:inoremap <expr> ( ListItem()
-:inoremap <expr> [ ListReset()
-:imap <expr> + Expr1()
-:imap <expr> * Expr2()
-:let retval = "nop"
-/^here
-C=Table("xxx", 4, "asdf")
- =Compute(45, 0, "retval")
- =retval
- =Compute(45, 5, "retval")
- =retval
- =g:FuncRef(333)
-
-XX+-XX
----*---
-(one
-(two
-[(one again:call append(line('$'), max([1, 2, 3]))
-:call extend(g:, {'max': function('min')})
-:call append(line('$'), max([1, 2, 3]))
-:try
-: " Regression: the first line below used to throw ?E110: Missing ')'?
-: " Second is here just to prove that this line is correct when not skipping
-: " rhs of &&.
-: $put =(0&&(function('tr'))(1, 2, 3))
-: $put =(1&&(function('tr'))(1, 2, 3))
-:catch
-: $put ='!!! Unexpected exception:'
-: $put =v:exception
-:endtry
-:$-9,$w! test.out
-:delfunc Table
-:delfunc Compute
-:delfunc Expr1
-:delfunc Expr2
-:delfunc ListItem
-:delfunc ListReset
-:unlet retval counter
-:q!
-ENDTEST
-
-here
diff --git a/src/nvim/testdir/test34.ok b/src/nvim/testdir/test34.ok
deleted file mode 100644
index 97995de80e..0000000000
--- a/src/nvim/testdir/test34.ok
+++ /dev/null
@@ -1,10 +0,0 @@
-xxx4asdf fail nop ok 9 333
-XX111-XX
----222---
-1. one
-2. two
-1. one again
-3
-3
-0
-1
diff --git a/src/nvim/testdir/test55.in b/src/nvim/testdir/test55.in
deleted file mode 100644
index 9a55eac6f6..0000000000
--- a/src/nvim/testdir/test55.in
+++ /dev/null
@@ -1,600 +0,0 @@
-Tests for List and Dictionary types. vim: set ft=vim :
-
-STARTTEST
-:fun Test(...)
-:lang C
-:" Creating List directly with different types
-:let l = [1, 'as''d', [1, 2, function("strlen")], {'a': 1},]
-:$put =string(l)
-:$put =string(l[-1])
-:$put =string(l[-4])
-:try
-: $put =string(l[-5])
-:catch
-: $put =v:exception[:14]
-:endtry
-:" List slices
-:$put =string(l[:])
-:$put =string(l[1:])
-:$put =string(l[:-2])
-:$put =string(l[0:8])
-:$put =string(l[8:-1])
-:"
-:" List identity
-:let ll = l
-:let lx = copy(l)
-:try
-: $put =(l == ll) . (l isnot ll) . (l is ll) . (l == lx) . (l is lx) . (l isnot lx)
-:catch
-: $put =v:exception
-:endtry
-:"
-:" Creating Dictionary directly with different types
-:let d = {001: 'asd', 'b': [1, 2, function('strlen')], -1: {'a': 1},}
-:$put =string(d) . d.1
-:$put =string(sort(keys(d)))
-:$put =string (values(d))
-:for [key, val] in items(d)
-: $put =key . ':' . string(val)
-: unlet key val
-:endfor
-:call extend (d, {3:33, 1:99})
-:call extend(d, {'b':'bbb', 'c':'ccc'}, "keep")
-:try
-: call extend(d, {3:333,4:444}, "error")
-:catch
-: $put =v:exception[:15] . v:exception[-1:-1]
-:endtry
-:$put =string(d)
-:call filter(d, 'v:key =~ ''[ac391]''')
-:$put =string(d)
-:"
-:" Dictionary identity
-:let dd = d
-:let dx = copy(d)
-:try
-: $put =(d == dd) . (d isnot dd) . (d is dd) . (d == dx) . (d is dx) . (d isnot dx)
-:catch
-: $put =v:exception
-:endtry
-:"
-:" Changing var type should fail
-:try
-: let d = []
-:catch
-: $put =v:exception[:14] . v:exception[-1:-1]
-:endtry
-:try
-: let l = {}
-:catch
-: $put =v:exception[:14] . v:exception[-1:-1]
-:endtry
-:"
-:" removing items with :unlet
-:unlet l[2]
-:$put =string(l)
-:let l = range(8)
-:try
-:unlet l[:3]
-:unlet l[1:]
-:catch
-:$put =v:exception
-:endtry
-:$put =string(l)
-:"
-:unlet d.c
-:unlet d[-1]
-:$put =string(d)
-:"
-:" removing items out of range: silently skip items that don't exist
-let l = [0, 1, 2, 3]
-:unlet l[2:1]
-:$put =string(l)
-let l = [0, 1, 2, 3]
-:unlet l[2:2]
-:$put =string(l)
-let l = [0, 1, 2, 3]
-:unlet l[2:3]
-:$put =string(l)
-let l = [0, 1, 2, 3]
-:unlet l[2:4]
-:$put =string(l)
-let l = [0, 1, 2, 3]
-:unlet l[2:5]
-:$put =string(l)
-let l = [0, 1, 2, 3]
-:unlet l[-1:2]
-:$put =string(l)
-let l = [0, 1, 2, 3]
-:unlet l[-2:2]
-:$put =string(l)
-let l = [0, 1, 2, 3]
-:unlet l[-3:2]
-:$put =string(l)
-let l = [0, 1, 2, 3]
-:unlet l[-4:2]
-:$put =string(l)
-let l = [0, 1, 2, 3]
-:unlet l[-5:2]
-:$put =string(l)
-let l = [0, 1, 2, 3]
-:unlet l[-6:2]
-:$put =string(l)
-:"
-:" assignment to a list
-:let l = [0, 1, 2, 3]
-:let [va, vb] = l[2:3]
-:$put =va
-:$put =vb
-:try
-: let [va, vb] = l
-:catch
-: $put =v:exception[:14]
-:endtry
-:try
-: let [va, vb] = l[1:1]
-:catch
-: $put =v:exception[:14]
-:endtry
-:"
-:" manipulating a big Dictionary (hashtable.c has a border of 1000 entries)
-:let d = {}
-:for i in range(1500)
-: let d[i] = 3000 - i
-:endfor
-:$put =d[0] . ' ' . d[100] . ' ' . d[999] . ' ' . d[1400] . ' ' . d[1499]
-:try
-: let n = d[1500]
-:catch
-: $put =substitute(v:exception, '\v(.{14}).*( \d{4}).*', '\1\2', '')
-:endtry
-:" lookup each items
-:for i in range(1500)
-: if d[i] != 3000 - i
-: $put =d[i]
-: endif
-:endfor
-: let i += 1
-:" delete even items
-:while i >= 2
-: let i -= 2
-: unlet d[i]
-:endwhile
-:$put =get(d, 1500 - 100, 'NONE') . ' ' . d[1]
-:" delete odd items, checking value, one intentionally wrong
-:let d[33] = 999
-:let i = 1
-:while i < 1500
-: if d[i] != 3000 - i
-: $put =i . '=' . d[i]
-: else
-: unlet d[i]
-: endif
-: let i += 2
-:endwhile
-:$put =string(d) " must be almost empty now
-:unlet d
-:"
-:" Dictionary function
-:let dict = {}
-:func dict.func(a) dict
-: $put =a:a . len(self.data)
-:endfunc
-:let dict.data = [1,2,3]
-:call dict.func("len: ")
-:let x = dict.func("again: ")
-:try
-: let Fn = dict.func
-: call Fn('xxx')
-:catch
-: $put =v:exception[:15]
-:endtry
-:"
-:" Function in script-local List or Dict
-:let g:dict = {}
-:function g:dict.func() dict
-: $put ='g:dict.func'.self.foo[1].self.foo[0]('asdf')
-:endfunc
-:let g:dict.foo = ['-', 2, 3]
-:call insert(g:dict.foo, function('strlen'))
-:call g:dict.func()
-:"
-:" Nasty: remove func from Dict that's being called (works)
-:let d = {1:1}
-:func d.func(a)
-: return "a:". a:a
-:endfunc
-:$put =d.func(string(remove(d, 'func')))
-:"
-:" Nasty: deepcopy() dict that refers to itself (fails when noref used)
-:let d = {1:1, 2:2}
-:let l = [4, d, 6]
-:let d[3] = l
-:let dc = deepcopy(d)
-:try
-: let dc = deepcopy(d, 1)
-:catch
-: $put =v:exception[:14]
-:endtry
-:let l2 = [0, l, l, 3]
-:let l[1] = l2
-:let l3 = deepcopy(l2)
-:$put ='same list: ' . (l3[1] is l3[2])
-:"
-:" Locked variables
-:for depth in range(5)
-: $put ='depth is ' . depth
-: for u in range(3)
-: unlet l
-: let l = [0, [1, [2, 3]], {4: 5, 6: {7: 8}}]
-: exe "lockvar " . depth . " l"
-: if u == 1
-: exe "unlockvar l"
-: elseif u == 2
-: exe "unlockvar " . depth . " l"
-: endif
-: let ps = islocked("l").islocked("l[1]").islocked("l[1][1]").islocked("l[1][1][0]").'-'.islocked("l[2]").islocked("l[2]['6']").islocked("l[2]['6'][7]")
-: $put =ps
-: let ps = ''
-: try
-: let l[1][1][0] = 99
-: let ps .= 'p'
-: catch
-: let ps .= 'F'
-: endtry
-: try
-: let l[1][1] = [99]
-: let ps .= 'p'
-: catch
-: let ps .= 'F'
-: endtry
-: try
-: let l[1] = [99]
-: let ps .= 'p'
-: catch
-: let ps .= 'F'
-: endtry
-: try
-: let l[2]['6'][7] = 99
-: let ps .= 'p'
-: catch
-: let ps .= 'F'
-: endtry
-: try
-: let l[2][6] = {99: 99}
-: let ps .= 'p'
-: catch
-: let ps .= 'F'
-: endtry
-: try
-: let l[2] = {99: 99}
-: let ps .= 'p'
-: catch
-: let ps .= 'F'
-: endtry
-: try
-: let l = [99]
-: let ps .= 'p'
-: catch
-: let ps .= 'F'
-: endtry
-: $put =ps
-: endfor
-:endfor
-:"
-:" Unletting locked variables
-:$put ='Unletting:'
-:for depth in range(5)
-: $put ='depth is ' . depth
-: for u in range(3)
-: unlet l
-: let l = [0, [1, [2, 3]], {4: 5, 6: {7: 8}}]
-: exe "lockvar " . depth . " l"
-: if u == 1
-: exe "unlockvar l"
-: elseif u == 2
-: exe "unlockvar " . depth . " l"
-: endif
-: let ps = islocked("l").islocked("l[1]").islocked("l[1][1]").islocked("l[1][1][0]").'-'.islocked("l[2]").islocked("l[2]['6']").islocked("l[2]['6'][7]")
-: $put =ps
-: let ps = ''
-: try
-: unlet l[2]['6'][7]
-: let ps .= 'p'
-: catch
-: let ps .= 'F'
-: endtry
-: try
-: unlet l[2][6]
-: let ps .= 'p'
-: catch
-: let ps .= 'F'
-: endtry
-: try
-: unlet l[2]
-: let ps .= 'p'
-: catch
-: let ps .= 'F'
-: endtry
-: try
-: unlet l[1][1][0]
-: let ps .= 'p'
-: catch
-: let ps .= 'F'
-: endtry
-: try
-: unlet l[1][1]
-: let ps .= 'p'
-: catch
-: let ps .= 'F'
-: endtry
-: try
-: unlet l[1]
-: let ps .= 'p'
-: catch
-: let ps .= 'F'
-: endtry
-: try
-: unlet l
-: let ps .= 'p'
-: catch
-: let ps .= 'F'
-: endtry
-: $put =ps
-: endfor
-:endfor
-:"
-:" Locked variables and :unlet or list / dict functions
-:$put ='Locks and commands or functions:'
-:"
-:$put ='No :unlet after lock on dict:'
-:unlet! d
-:let d = {'a': 99, 'b': 100}
-:lockvar 1 d
-:try
-: unlet d.a
-: $put ='did :unlet'
-:catch
-: $put =v:exception[:16]
-:endtry
-:$put =string(d)
-:"
-:$put =':unlet after lock on dict item:'
-:unlet! d
-:let d = {'a': 99, 'b': 100}
-:lockvar d.a
-:try
-: unlet d.a
-: $put ='did :unlet'
-:catch
-: $put =v:exception[:16]
-:endtry
-:$put =string(d)
-:"
-:$put ='filter() after lock on dict item:'
-:unlet! d
-:let d = {'a': 99, 'b': 100}
-:lockvar d.a
-:try
-: call filter(d, 'v:key != "a"')
-: $put ='did filter()'
-:catch
-: $put =v:exception[:16]
-:endtry
-:$put =string(d)
-:"
-:$put ='map() after lock on dict:'
-:unlet! d
-:let d = {'a': 99, 'b': 100}
-:lockvar 1 d
-:try
-: call map(d, 'v:val + 200')
-: $put ='did map()'
-:catch
-: $put =v:exception[:16]
-:endtry
-:$put =string(d)
-:"
-:$put ='No extend() after lock on dict item:'
-:unlet! d
-:let d = {'a': 99, 'b': 100}
-:lockvar d.a
-:try
-: $put =string(extend(d, {'a': 123}))
-: $put ='did extend()'
-:catch
-: $put =v:exception[:14]
-:endtry
-:$put =string(d)
-:"
-:$put ='No remove() of write-protected scope-level variable:'
-:fun! Tfunc(this_is_a_loooooooooong_parameter_name)
-: try
-: $put =string(remove(a:, 'this_is_a_loooooooooong_parameter_name'))
-: $put ='did remove()'
-: catch
-: $put =v:exception[:14]
-: endtry
-:endfun
-:call Tfunc('testval')
-:"
-:$put ='No extend() of write-protected scope-level variable:'
-:fun! Tfunc(this_is_a_loooooooooong_parameter_name)
-: try
-: $put =string(extend(a:, {'this_is_a_loooooooooong_parameter_name': 1234}))
-: $put ='did extend()'
-: catch
-: $put =v:exception[:14]
-: endtry
-:endfun
-:call Tfunc('testval')
-:"
-:$put ='No :unlet of variable in locked scope:'
-:let b:testvar = 123
-:lockvar 1 b:
-:try
-: unlet b:testvar
-: $put ='b:testvar was :unlet: '. (!exists('b:testvar'))
-:catch
-: $put =v:exception[:16]
-:endtry
-:unlockvar 1 b:
-:unlet! b:testvar
-:"
-:$put ='No :let += of locked list variable:'
-:let l = ['a', 'b', 3]
-:lockvar 1 l
-:try
-: let l += ['x']
-: $put ='did :let +='
-:catch
-: $put =v:exception[:14]
-:endtry
-:$put =string(l)
-:"
-:unlet l
-:let l = [1, 2, 3, 4]
-:lockvar! l
-:$put =string(l)
-:unlockvar l[1]
-:unlet l[0:1]
-:$put =string(l)
-:unlet l[1:2]
-:$put =string(l)
-:unlockvar l[1]
-:let l[0:1] = [0, 1]
-:$put =string(l)
-:let l[1:2] = [0, 1]
-:$put =string(l)
-:unlet l
-:" :lockvar/islocked() triggering script autoloading
-:set rtp+=./sautest
-:lockvar g:footest#x
-:unlockvar g:footest#x
-:$put ='locked g:footest#x:'.islocked('g:footest#x')
-:$put ='exists g:footest#x:'.exists('g:footest#x')
-:$put ='g:footest#x: '.g:footest#x
-:"
-:" a:000 function argument
-:" first the tests that should fail
-:try
-: let a:000 = [1, 2]
-:catch
-: $put ='caught a:000'
-:endtry
-:try
-: let a:000[0] = 9
-:catch
-: $put ='caught a:000[0]'
-:endtry
-:try
-: let a:000[2] = [9, 10]
-:catch
-: $put ='caught a:000[2]'
-:endtry
-:try
-: let a:000[3] = {9: 10}
-:catch
-: $put ='caught a:000[3]'
-:endtry
-:" now the tests that should pass
-:try
-: let a:000[2][1] = 9
-: call extend(a:000[2], [5, 6])
-: let a:000[3][5] = 8
-: let a:000[3]['a'] = 12
-: $put =string(a:000)
-:catch
-: $put ='caught ' . v:exception
-:endtry
-:"
-:" reverse(), sort(), uniq()
-:let l = ['-0', 'A11', 2, 2, 'xaaa', 4, 'foo', 'foo6', 'foo', [0, 1, 2], 'x8', [0, 1, 2], 1.5]
-:$put =string(uniq(copy(l)))
-:$put =string(reverse(l))
-:$put =string(reverse(reverse(l)))
-:$put =string(sort(l))
-:$put =string(reverse(sort(l)))
-:$put =string(sort(reverse(sort(l))))
-:$put =string(uniq(sort(l)))
-:let l=[7, 9, 'one', 18, 12, 22, 'two', 10.0e-16, -1, 'three', 0xff, 0.22, 'four']
-:$put =string(sort(copy(l), 'n'))
-:let l=[7, 9, 18, 12, 22, 10.0e-16, -1, 0xff, 0, -0, 0.22, 'bar', 'BAR', 'Bar', 'Foo', 'FOO', 'foo', 'FOOBAR', {}, []]
-:$put =string(sort(copy(l), 1))
-:$put =string(sort(copy(l), 'i'))
-:$put =string(sort(copy(l)))
-:"
-:" splitting a string to a List
-:$put =string(split(' aa bb '))
-:$put =string(split(' aa bb ', '\W\+', 0))
-:$put =string(split(' aa bb ', '\W\+', 1))
-:$put =string(split(' aa bb ', '\W', 1))
-:$put =string(split(':aa::bb:', ':', 0))
-:$put =string(split(':aa::bb:', ':', 1))
-:$put =string(split('aa,,bb, cc,', ',\s*', 1))
-:$put =string(split('abc', '\zs'))
-:$put =string(split('abc', '\zs', 1))
-:"
-:" compare recursively linked list and dict
-:let l = [1, 2, 3, 4]
-:let d = {'1': 1, '2': l, '3': 3}
-:let l[1] = d
-:$put =(l == l)
-:$put =(d == d)
-:$put =(l != deepcopy(l))
-:$put =(d != deepcopy(d))
-:"
-:" compare complex recursively linked list and dict
-:let l = []
-:call add(l, l)
-:let dict4 = {"l": l}
-:call add(dict4.l, dict4)
-:let lcopy = deepcopy(l)
-:let dict4copy = deepcopy(dict4)
-:$put =(l == lcopy)
-:$put =(dict4 == dict4copy)
-:"
-:" Pass the same List to extend()
-:let l = [1, 2, 3, 4, 5]
-:call extend(l, l)
-:$put =string(l)
-:"
-:" Pass the same Dict to extend()
-:let d = { 'a': {'b': 'B'}}
-:call extend(d, d)
-:$put =string(d)
-:"
-:" Pass the same Dict to extend() with "error"
-:try
-: call extend(d, d, "error")
-:catch
-: $put =v:exception[:15] . v:exception[-1:-1]
-:endtry
-:$put =string(d)
-:"
-:" test for range assign
-:let l = [0]
-:let l[:] = [1, 2]
-:$put =string(l)
-:endfun
-:"
-:call Test(1, 2, [3, 4], {5: 6}) " This may take a while
-:"
-:delfunc Test
-:unlet dict
-:call garbagecollect(1)
-:"
-:" test for patch 7.3.637
-:let a = 'No error caught'
-:try|foldopen|catch|let a = matchstr(v:exception,'^[^ ]*')|endtry
-o=a :"
-:lang C
-:redir => a
-:try|foobar|catch|let a = matchstr(v:exception,'^[^ ]*')|endtry
-:redir END
-o=a :"
-:"
-:/^start:/,$wq! test.out
-ENDTEST
-
-start:
diff --git a/src/nvim/testdir/test55.ok b/src/nvim/testdir/test55.ok
deleted file mode 100644
index 607a95ead9..0000000000
--- a/src/nvim/testdir/test55.ok
+++ /dev/null
@@ -1,199 +0,0 @@
-start:
-[1, 'as''d', [1, 2, function('strlen')], {'a': 1}]
-{'a': 1}
-1
-Vim(put):E684:
-[1, 'as''d', [1, 2, function('strlen')], {'a': 1}]
-['as''d', [1, 2, function('strlen')], {'a': 1}]
-[1, 'as''d', [1, 2, function('strlen')]]
-[1, 'as''d', [1, 2, function('strlen')], {'a': 1}]
-[]
-101101
-{'1': 'asd', 'b': [1, 2, function('strlen')], '-1': {'a': 1}}asd
-['-1', '1', 'b']
-['asd', [1, 2, function('strlen')], {'a': 1}]
-1:'asd'
-b:[1, 2, function('strlen')]
--1:{'a': 1}
-Vim(call):E737: 3
-{'c': 'ccc', '1': 99, 'b': [1, 2, function('strlen')], '3': 33, '-1': {'a': 1}}
-{'c': 'ccc', '1': 99, '3': 33, '-1': {'a': 1}}
-101101
-Vim(let):E706: d
-Vim(let):E706: l
-[1, 'as''d', {'a': 1}]
-[4]
-{'1': 99, '3': 33}
-[0, 1, 2, 3]
-[0, 1, 3]
-[0, 1]
-[0, 1]
-[0, 1]
-[0, 1, 2, 3]
-[0, 1, 3]
-[0, 3]
-[3]
-[3]
-[3]
-2
-3
-Vim(let):E687:
-Vim(let):E688:
-3000 2900 2001 1600 1501
-Vim(let):E716: 1500
-NONE 2999
-33=999
-{'33': 999}
-len: 3
-again: 3
-Vim(call):E725:
-g:dict.func-4
-a:function('3')
-Vim(let):E698:
-same list: 1
-depth is 0
-0000-000
-ppppppp
-0000-000
-ppppppp
-0000-000
-ppppppp
-depth is 1
-1000-000
-ppppppF
-0000-000
-ppppppp
-0000-000
-ppppppp
-depth is 2
-1100-100
-ppFppFF
-0000-000
-ppppppp
-0000-000
-ppppppp
-depth is 3
-1110-110
-pFFpFFF
-0010-010
-pFppFpp
-0000-000
-ppppppp
-depth is 4
-1111-111
-FFFFFFF
-0011-011
-FFpFFpp
-0000-000
-ppppppp
-Unletting:
-depth is 0
-0000-000
-ppppppp
-0000-000
-ppppppp
-0000-000
-ppppppp
-depth is 1
-1000-000
-ppFppFp
-0000-000
-ppppppp
-0000-000
-ppppppp
-depth is 2
-1100-100
-pFFpFFp
-0000-000
-ppppppp
-0000-000
-ppppppp
-depth is 3
-1110-110
-FFFFFFp
-0010-010
-FppFppp
-0000-000
-ppppppp
-depth is 4
-1111-111
-FFFFFFp
-0011-011
-FppFppp
-0000-000
-ppppppp
-Locks and commands or functions:
-No :unlet after lock on dict:
-Vim(unlet):E741:
-{'a': 99, 'b': 100}
-:unlet after lock on dict item:
-did :unlet
-{'b': 100}
-filter() after lock on dict item:
-did filter()
-{'b': 100}
-map() after lock on dict:
-did map()
-{'a': 299, 'b': 300}
-No extend() after lock on dict item:
-Vim(put):E741:
-{'a': 99, 'b': 100}
-No remove() of write-protected scope-level variable:
-Vim(put):E795:
-No extend() of write-protected scope-level variable:
-Vim(put):E742:
-No :unlet of variable in locked scope:
-Vim(unlet):E741:
-No :let += of locked list variable:
-Vim(let):E741:
-['a', 'b', 3]
-[1, 2, 3, 4]
-[1, 2, 3, 4]
-[1, 2, 3, 4]
-[1, 2, 3, 4]
-[1, 2, 3, 4]
-locked g:footest#x:-1
-exists g:footest#x:0
-g:footest#x: 1
-caught a:000
-caught a:000[0]
-caught a:000[2]
-caught a:000[3]
-[1, 2, [3, 9, 5, 6], {'a': 12, '5': 8}]
-['-0', 'A11', 2, 'xaaa', 4, 'foo', 'foo6', 'foo', [0, 1, 2], 'x8', [0, 1, 2], 1.5]
-[1.5, [0, 1, 2], 'x8', [0, 1, 2], 'foo', 'foo6', 'foo', 4, 'xaaa', 2, 2, 'A11', '-0']
-[1.5, [0, 1, 2], 'x8', [0, 1, 2], 'foo', 'foo6', 'foo', 4, 'xaaa', 2, 2, 'A11', '-0']
-['-0', 'A11', 'foo', 'foo', 'foo6', 'x8', 'xaaa', 1.5, 2, 2, 4, [0, 1, 2], [0, 1, 2]]
-[[0, 1, 2], [0, 1, 2], 4, 2, 2, 1.5, 'xaaa', 'x8', 'foo6', 'foo', 'foo', 'A11', '-0']
-['-0', 'A11', 'foo', 'foo', 'foo6', 'x8', 'xaaa', 1.5, 2, 2, 4, [0, 1, 2], [0, 1, 2]]
-['-0', 'A11', 'foo', 'foo6', 'x8', 'xaaa', 1.5, 2, 4, [0, 1, 2]]
-[-1, 'one', 'two', 'three', 'four', 1.0e-15, 0.22, 7, 9, 12, 18, 22, 255]
-['bar', 'BAR', 'Bar', 'Foo', 'FOO', 'foo', 'FOOBAR', -1, 0, 0, 0.22, 1.0e-15, 12, 18, 22, 255, 7, 9, [], {}]
-['bar', 'BAR', 'Bar', 'Foo', 'FOO', 'foo', 'FOOBAR', -1, 0, 0, 0.22, 1.0e-15, 12, 18, 22, 255, 7, 9, [], {}]
-['BAR', 'Bar', 'FOO', 'FOOBAR', 'Foo', 'bar', 'foo', -1, 0, 0, 0.22, 1.0e-15, 12, 18, 22, 255, 7, 9, [], {}]
-['aa', 'bb']
-['aa', 'bb']
-['', 'aa', 'bb', '']
-['', '', 'aa', '', 'bb', '', '']
-['aa', '', 'bb']
-['', 'aa', '', 'bb', '']
-['aa', '', 'bb', 'cc', '']
-['a', 'b', 'c']
-['', 'a', '', 'b', '', 'c', '']
-1
-1
-0
-0
-1
-1
-[1, 2, 3, 4, 5, 1, 2, 3, 4, 5]
-{'a': {'b': 'B'}}
-Vim(call):E737: a
-{'a': {'b': 'B'}}
-[1, 2]
-Vim(foldopen):E490:
-
-
-Error detected while processing :
-E492: Not an editor command: foobar|catch|let a = matchstr(v:exception,'^[^ ]*')|endtry
-
diff --git a/src/nvim/testdir/test_hardcopy.vim b/src/nvim/testdir/test_hardcopy.vim
new file mode 100644
index 0000000000..4629d17dd2
--- /dev/null
+++ b/src/nvim/testdir/test_hardcopy.vim
@@ -0,0 +1,58 @@
+" Test :hardcopy
+
+func Test_printoptions_parsing()
+ " Only test that this doesn't throw an error.
+ set printoptions=left:5in,right:10pt,top:8mm,bottom:2pc
+ set printoptions=left:2in,top:30pt,right:16mm,bottom:3pc
+ set printoptions=header:3,syntax:y,number:7,wrap:n
+ set printoptions=duplex:short,collate:n,jobsplit:y,portrait:n
+ set printoptions=paper:10x14
+ set printoptions=paper:A3
+ set printoptions=paper:A4
+ set printoptions=paper:A5
+ set printoptions=paper:B4
+ set printoptions=paper:B5
+ set printoptions=paper:executive
+ set printoptions=paper:folio
+ set printoptions=paper:ledger
+ set printoptions=paper:legal
+ set printoptions=paper:letter
+ set printoptions=paper:quarto
+ set printoptions=paper:statement
+ set printoptions=paper:tabloid
+ set printoptions=formfeed:y
+ set printoptions=
+ set printoptions&
+endfunc
+
+func Test_printmbfont_parsing()
+ " Only test that this doesn't throw an error.
+ set printmbfont=r:WadaMin-Regular,b:WadaMin-Bold,i:WadaMin-Italic,o:WadaMin-Bold-Italic,c:yes,a:no
+ set printmbfont=
+ set printmbfont&
+endfunc
+
+func Test_printheader_parsing()
+ " Only test that this doesn't throw an error.
+ set printheader=%<%f\ %h%m%r%=%-14.(%l,%c%V%)\ %P
+ set printheader=%<%f%h%m%r%=%b\ 0x%B\ \ %l,%c%V\ %P
+ set printheader=%<%f%=\ [%1*%M%*%n%R%H]\ %-19(%3l,%02c%03V%)%O'%02b'
+ set printheader=...%r%{VarExists('b:gzflag','\ [GZ]')}%h...
+ set printheader=
+ set printheader&
+endfunc
+
+" Test that :hardcopy produces a non-empty file.
+" We don't check much of the contents.
+func Test_with_syntax()
+ if has('postscript')
+ set printoptions=syntax:y
+ syn on
+ hardcopy > Xhardcopy
+ let lines = readfile('Xhardcopy')
+ call assert_true(len(lines) > 20)
+ call assert_true(lines[0] =~ 'PS-Adobe')
+ call delete('Xhardcopy')
+ set printoptions&
+ endif
+endfunc
diff --git a/src/nvim/testdir/test_langmap.vim b/src/nvim/testdir/test_langmap.vim
new file mode 100644
index 0000000000..066c3bf2bd
--- /dev/null
+++ b/src/nvim/testdir/test_langmap.vim
@@ -0,0 +1,24 @@
+" tests for 'langmap'
+
+func Test_langmap()
+ new
+ set langmap=}l,^x,%v
+
+ call setline(1, ['abc'])
+ call feedkeys('gg0}^', 'tx')
+ call assert_equal('ac', getline(1))
+
+ " in Replace mode
+ " need silent! to avoid a delay when entering Insert mode
+ call setline(1, ['abcde'])
+ silent! call feedkeys("gg0lR%{z\<Esc>00", 'tx')
+ call assert_equal('a%{ze', getline(1))
+
+ " in Select mode
+ " need silent! to avoid a delay when entering Insert mode
+ call setline(1, ['abcde'])
+ silent! call feedkeys("gg0}%}\<C-G>}^\<Esc>00", 'tx')
+ call assert_equal('a}^de', getline(1))
+
+ quit!
+endfunc
diff --git a/src/nvim/testdir/test_syntax.vim b/src/nvim/testdir/test_syntax.vim
new file mode 100644
index 0000000000..309c0f460b
--- /dev/null
+++ b/src/nvim/testdir/test_syntax.vim
@@ -0,0 +1,63 @@
+" Test for syntax and syntax iskeyword option
+
+func GetSyntaxItem(pat)
+ let c = ''
+ let a = ['a', getreg('a'), getregtype('a')]
+ 0
+ redraw!
+ call search(a:pat, 'W')
+ let synid = synID(line('.'), col('.'), 1)
+ while synid == synID(line('.'), col('.'), 1)
+ norm! v"ay
+ " stop at whitespace
+ if @a =~# '\s'
+ break
+ endif
+ let c .= @a
+ norm! l
+ endw
+ call call('setreg', a)
+ 0
+ return c
+endfunc
+
+func Test_syn_iskeyword()
+ new
+ call setline(1, [
+ \ 'CREATE TABLE FOOBAR(',
+ \ ' DLTD_BY VARCHAR2(100)',
+ \ ');',
+ \ ''])
+
+ syntax on
+ set ft=sql
+ syn match SYN /C\k\+\>/
+ hi link SYN ErrorMsg
+ call assert_equal('DLTD_BY', GetSyntaxItem('DLTD'))
+ /\<D\k\+\>/:norm! ygn
+ call assert_equal('DLTD_BY', @0)
+ redir @c
+ syn iskeyword
+ redir END
+ call assert_equal("\nsyntax iskeyword not set", @c)
+
+ syn iskeyword @,48-57,_,192-255
+ redir @c
+ syn iskeyword
+ redir END
+ call assert_equal("\nsyntax iskeyword @,48-57,_,192-255", @c)
+
+ setlocal isk-=_
+ call assert_equal('DLTD_BY', GetSyntaxItem('DLTD'))
+ /\<D\k\+\>/:norm! ygn
+ let b2=@0
+ call assert_equal('DLTD', @0)
+
+ syn iskeyword clear
+ redir @c
+ syn iskeyword
+ redir END
+ call assert_equal("\nsyntax iskeyword not set", @c)
+
+ quit!
+endfunc
diff --git a/src/nvim/testdir/test_unlet.vim b/src/nvim/testdir/test_unlet.vim
new file mode 100644
index 0000000000..f6705997a9
--- /dev/null
+++ b/src/nvim/testdir/test_unlet.vim
@@ -0,0 +1,26 @@
+" Tests for :unlet
+
+func Test_read_only()
+ try
+ " this caused a crash
+ unlet count
+ catch
+ call assert_true(v:exception =~ ':E795:')
+ endtry
+endfunc
+
+func Test_existing()
+ let does_exist = 1
+ call assert_true(exists('does_exist'))
+ unlet does_exist
+ call assert_false(exists('does_exist'))
+endfunc
+
+func Test_not_existing()
+ unlet! does_not_exist
+ try
+ unlet does_not_exist
+ catch
+ call assert_true(v:exception =~ ':E108:')
+ endtry
+endfunc
diff --git a/src/nvim/tui/input.c b/src/nvim/tui/input.c
index 99eb230a88..be256f3ebc 100644
--- a/src/nvim/tui/input.c
+++ b/src/nvim/tui/input.c
@@ -4,6 +4,7 @@
#include "nvim/api/vim.h"
#include "nvim/api/private/helpers.h"
#include "nvim/ascii.h"
+#include "nvim/main.h"
#include "nvim/misc2.h"
#include "nvim/os/os.h"
#include "nvim/os/input.h"
@@ -92,7 +93,7 @@ static void flush_input(TermInput *input, bool wait_until_empty)
size_t drain_boundary = wait_until_empty ? 0 : 0xff;
do {
uv_mutex_lock(&input->key_buffer_mutex);
- loop_schedule(&loop, event_create(1, wait_input_enqueue, 1, input));
+ loop_schedule(&main_loop, event_create(1, wait_input_enqueue, 1, input));
input->waiting = true;
while (input->waiting) {
uv_cond_wait(&input->key_buffer_cond, &input->key_buffer_mutex);
@@ -138,6 +139,9 @@ static void forward_modified_utf8(TermInput *input, TermKeyKey *key)
if (key->type == TERMKEY_TYPE_KEYSYM
&& key->code.sym == TERMKEY_SYM_ESCAPE) {
len = (size_t)snprintf(buf, sizeof(buf), "<Esc>");
+ } else if (key->type == TERMKEY_TYPE_KEYSYM
+ && key->code.sym == TERMKEY_SYM_SUSPEND) {
+ len = (size_t)snprintf(buf, sizeof(buf), "<C-Z>");
} else {
len = termkey_strfkey(input->tk, buf, sizeof(buf), key, TERMKEY_FORMAT_VIM);
}
@@ -153,7 +157,8 @@ static void forward_mouse_event(TermInput *input, TermKeyKey *key)
TermKeyMouseEvent ev;
termkey_interpret_mouse(input->tk, key, &ev, &button, &row, &col);
- if (ev != TERMKEY_MOUSE_PRESS && ev != TERMKEY_MOUSE_DRAG) {
+ if (ev != TERMKEY_MOUSE_PRESS && ev != TERMKEY_MOUSE_DRAG
+ && ev != TERMKEY_MOUSE_RELEASE) {
return;
}
@@ -190,6 +195,8 @@ static void forward_mouse_event(TermInput *input, TermKeyKey *key)
}
} else if (ev == TERMKEY_MOUSE_DRAG) {
len += (size_t)snprintf(buf + len, sizeof(buf) - len, "Drag");
+ } else if (ev == TERMKEY_MOUSE_RELEASE) {
+ len += (size_t)snprintf(buf + len, sizeof(buf) - len, "Release");
}
len += (size_t)snprintf(buf + len, sizeof(buf) - len, "><%d,%d>", col, row);
@@ -336,7 +343,7 @@ static void read_cb(Stream *stream, RBuffer *buf, size_t c, void *data,
stream_close(&input->read_stream, NULL);
queue_put(input->loop->fast_events, restart_reading, 1, input);
} else {
- loop_schedule(&loop, event_create(1, input_done_event, 0));
+ loop_schedule(&main_loop, event_create(1, input_done_event, 0));
}
return;
}
diff --git a/src/nvim/tui/tui.c b/src/nvim/tui/tui.c
index 62bc81ba64..50558e644a 100644
--- a/src/nvim/tui/tui.c
+++ b/src/nvim/tui/tui.c
@@ -11,6 +11,7 @@
#include "nvim/vim.h"
#include "nvim/ui.h"
#include "nvim/map.h"
+#include "nvim/main.h"
#include "nvim/memory.h"
#include "nvim/api/vim.h"
#include "nvim/api/private/helpers.h"
@@ -261,7 +262,7 @@ static void sigwinch_cb(SignalWatcher *watcher, int signum, void *data)
UI *ui = data;
update_size(ui);
// run refresh_event in nvim main loop
- loop_schedule(&loop, event_create(1, refresh_event, 0));
+ loop_schedule(&main_loop, event_create(1, refresh_event, 0));
}
static bool attrs_differ(HlAttrs a1, HlAttrs a2)
@@ -681,7 +682,7 @@ static void invalidate(UI *ui, int top, int bot, int left, int right)
intersects->right = MAX(right, intersects->right);
} else {
// Else just add a new entry;
- kv_push(Rect, data->invalid_regions, ((Rect){top, bot, left, right}));
+ kv_push(data->invalid_regions, ((Rect) { top, bot, left, right }));
}
}
diff --git a/src/nvim/ugrid.h b/src/nvim/ugrid.h
index ad6d96a168..268362bf1b 100644
--- a/src/nvim/ugrid.h
+++ b/src/nvim/ugrid.h
@@ -23,16 +23,16 @@ struct ugrid {
#define EMPTY_ATTRS ((HlAttrs){ false, false, false, false, false, -1, -1, -1 })
-#define UGRID_FOREACH_CELL(grid, top, bot, left, right, code) \
- do { \
- for (int row = top; row <= bot; ++row) { \
- UCell *row_cells = (grid)->cells[row]; \
- for (int col = left; col <= right; ++col) { \
- UCell *cell = row_cells + col; \
- (void)(cell); \
- code; \
- } \
- } \
+#define UGRID_FOREACH_CELL(grid, top, bot, left, right, code) \
+ do { \
+ for (int row = top; row <= bot; row++) { \
+ UCell *row_cells = (grid)->cells[row]; \
+ for (int col = left; col <= right; col++) { \
+ UCell *cell = row_cells + col; \
+ (void)(cell); \
+ code; \
+ } \
+ } \
} while (0)
#ifdef INCLUDE_GENERATED_DECLARATIONS
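The UGRID_FOREACH_CELL rewrite above is whitespace-only apart from switching to post-increment. As a usage sketch (clear_region() and clear_cell() are hypothetical; UGrid, UCell and the macro itself come from this header), the macro expands to a nested row/column loop with `cell` bound inside the body:

    static void clear_region(UGrid *grid, int top, int bot, int left, int right)
    {
      UGRID_FOREACH_CELL(grid, top, bot, left, right, {
        clear_cell(cell);  // `cell` is the UCell * the macro binds per column
      });
    }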
diff --git a/src/nvim/ui.c b/src/nvim/ui.c
index ae38754c1e..d968cbc390 100644
--- a/src/nvim/ui.c
+++ b/src/nvim/ui.c
@@ -60,22 +60,22 @@ static int height, width;
// See http://stackoverflow.com/a/11172679 for a better explanation of how it
// works.
#ifdef _MSC_VER
- #define UI_CALL(funname, ...) \
- do { \
- flush_cursor_update(); \
- for (size_t i = 0; i < ui_count; i++) { \
- UI *ui = uis[i]; \
- UI_CALL_MORE(funname, __VA_ARGS__); \
- } \
+# define UI_CALL(funname, ...) \
+ do { \
+ flush_cursor_update(); \
+ for (size_t i = 0; i < ui_count; i++) { \
+ UI *ui = uis[i]; \
+ UI_CALL_MORE(funname, __VA_ARGS__); \
+ } \
} while (0)
#else
- #define UI_CALL(...) \
- do { \
- flush_cursor_update(); \
- for (size_t i = 0; i < ui_count; i++) { \
- UI *ui = uis[i]; \
- UI_CALL_HELPER(CNT(__VA_ARGS__), __VA_ARGS__); \
- } \
+# define UI_CALL(...) \
+ do { \
+ flush_cursor_update(); \
+ for (size_t i = 0; i < ui_count; i++) { \
+ UI *ui = uis[i]; \
+ UI_CALL_HELPER(CNT(__VA_ARGS__), __VA_ARGS__); \
+ } \
} while (0)
#endif
#define CNT(...) SELECT_NTH(__VA_ARGS__, MORE, MORE, MORE, MORE, ZERO, ignore)
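Both UI_CALL variants above change only indentation (the `#` of `# define` moves inside the #ifdef). The shape is the usual do { ... } while (0) broadcast: flush any pending cursor update, then invoke the named callback on every attached UI. A rough, non-authoritative expansion of one concrete call (the resize name and its arguments are illustrative, not taken from this hunk):

    do {
      flush_cursor_update();
      for (size_t i = 0; i < ui_count; i++) {
        UI *ui = uis[i];
        ui->resize(ui, width, height);  // UI_CALL_MORE/UI_CALL_HELPER forward the args
      }
    } while (0);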
diff --git a/src/nvim/ui_bridge.c b/src/nvim/ui_bridge.c
index d17fa4a782..6290fb3d87 100644
--- a/src/nvim/ui_bridge.c
+++ b/src/nvim/ui_bridge.c
@@ -5,6 +5,7 @@
#include <stdio.h>
#include <limits.h>
+#include "nvim/main.h"
#include "nvim/vim.h"
#include "nvim/ui.h"
#include "nvim/memory.h"
@@ -18,9 +19,9 @@
#define UI(b) (((UIBridgeData *)b)->ui)
// Call a function in the UI thread
-#define UI_CALL(ui, name, argc, ...) \
- ((UIBridgeData *)ui)->scheduler( \
- event_create(1, ui_bridge_##name##_event, argc, __VA_ARGS__), UI(ui))
+#define UI_CALL(ui, name, argc, ...) \
+ ((UIBridgeData *)ui)->scheduler( \
+ event_create(1, ui_bridge_##name##_event, argc, __VA_ARGS__), UI(ui))
#define INT2PTR(i) ((void *)(uintptr_t)i)
#define PTR2INT(p) ((int)(uintptr_t)p)
@@ -100,7 +101,7 @@ static void ui_bridge_stop(UI *b)
if (stopped) {
break;
}
- loop_poll_events(&loop, 10);
+ loop_poll_events(&main_loop, 10);
}
uv_thread_join(&bridge->ui_thread);
uv_mutex_destroy(&bridge->mutex);
diff --git a/src/nvim/ui_bridge.h b/src/nvim/ui_bridge.h
index 31b9a69216..561ddb6b24 100644
--- a/src/nvim/ui_bridge.h
+++ b/src/nvim/ui_bridge.h
@@ -28,13 +28,13 @@ struct ui_bridge_data {
bool stopped;
};
-#define CONTINUE(b) \
- do { \
- UIBridgeData *d = (UIBridgeData *)b; \
- uv_mutex_lock(&d->mutex); \
- d->ready = true; \
- uv_cond_signal(&d->cond); \
- uv_mutex_unlock(&d->mutex); \
+#define CONTINUE(b) \
+ do { \
+ UIBridgeData *d = (UIBridgeData *)b; \
+ uv_mutex_lock(&d->mutex); \
+ d->ready = true; \
+ uv_cond_signal(&d->cond); \
+ uv_mutex_unlock(&d->mutex); \
} while (0)
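CONTINUE(b) above is again only re-aligned; it signals the bridge's condition variable under its mutex after setting d->ready. For reference, the waiting side of that handshake would look roughly like this sketch (not the actual ui_bridge.c code; only the mutex, cond and ready members are taken from the CONTINUE macro):

    static void wait_for_continue(UIBridgeData *d)
    {
      uv_mutex_lock(&d->mutex);
      while (!d->ready) {
        uv_cond_wait(&d->cond, &d->mutex);  // mutex released while waiting, re-acquired on wake
      }
      d->ready = false;  // consume the signal for the next round (assumption)
      uv_mutex_unlock(&d->mutex);
    }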
diff --git a/src/nvim/version.c b/src/nvim/version.c
index 844c21916f..04a6f63451 100644
--- a/src/nvim/version.c
+++ b/src/nvim/version.c
@@ -85,6 +85,7 @@ static int included_patches[] = {
1755,
1753,
1728,
+ 1695,
1654,
1652,
1643,
@@ -224,11 +225,11 @@ static int included_patches[] = {
// 1471 NA
// 1470 NA
// 1469 NA
- // 1468,
+ 1468,
// 1467 NA
// 1466 NA
// 1465 NA
- // 1464,
+ 1464,
// 1463 NA
// 1462 NA
// 1461 NA
@@ -295,10 +296,10 @@ static int included_patches[] = {
// 1400 NA
// 1399 NA
// 1398 NA
- // 1397,
+ 1397,
// 1396,
// 1395 NA
- // 1394,
+ 1394,
// 1393 NA
// 1392 NA
// 1391 NA
@@ -340,7 +341,7 @@ static int included_patches[] = {
// 1355 NA
// 1354 NA
// 1353 NA
- // 1352,
+ 1352,
// 1351 NA
// 1350 NA
// 1349 NA
@@ -469,7 +470,7 @@ static int included_patches[] = {
// 1226 NA
// 1225 NA
// 1224 NA
- // 1223,
+ 1223,
// 1222 NA
// 1221 NA
// 1220 NA
@@ -539,10 +540,10 @@ static int included_patches[] = {
// 1156 NA
// 1155 NA
// 1154 NA
- // 1153,
+ 1153,
// 1152 NA
- // 1151,
- // 1150,
+ 1151,
+ 1150,
1149,
// 1148 NA
// 1147,
@@ -550,7 +551,7 @@ static int included_patches[] = {
// 1145 NA
1144,
1143,
- // 1142,
+ 1142,
1141,
// 1140,
// 1139 NA
@@ -562,7 +563,7 @@ static int included_patches[] = {
// 1133 NA
1132,
// 1131 NA
- // 1130,
+ // 1130 NA
// 1129 NA
// 1128 NA
// 1127 NA
@@ -584,7 +585,7 @@ static int included_patches[] = {
// 1111,
1110,
// 1109 NA
- // 1108,
+ 1108,
1107,
// 1106 NA
1105,
@@ -598,11 +599,11 @@ static int included_patches[] = {
// 1097,
1096,
// 1095 NA
- // 1094,
+ 1094,
1093,
1092,
1091,
- // 1090,
+ 1090,
1089,
1088,
1087,
@@ -624,7 +625,7 @@ static int included_patches[] = {
1071,
// 1070 NA
// 1069 NA
- // 1068,
+ 1068,
// 1067 NA
// 1066 NA
1065,
@@ -636,12 +637,12 @@ static int included_patches[] = {
1059,
// 1058,
1057,
- // 1056,
+ 1056,
1055,
1054,
1053,
1052,
- // 1051,
+ 1051,
1050,
1049,
1048,
@@ -2019,3 +2020,4 @@ void ex_intro(exarg_T *eap)
intro_message(TRUE);
wait_return(TRUE);
}
+