about summary refs log tree commit diff
path: root/vte_generate_state_changes/src/lib.rs
diff options
context:
space:
mode:
authorChristian Duerr <contact@christianduerr.com>2025-01-09 06:27:15 +0000
committerGitHub <noreply@github.com>2025-01-09 06:27:15 +0000
commit7321a442a6fc0fc5b6d6ed7af364477d25e706fd (patch)
tree11ff2608e63a160b8b204b6f78ec3977f019d081 /vte_generate_state_changes/src/lib.rs
parent89c12df969145ffb5084d1122627d7292c2c638f (diff)
downloadr-alacritty-vte-7321a442a6fc0fc5b6d6ed7af364477d25e706fd.tar.gz
r-alacritty-vte-7321a442a6fc0fc5b6d6ed7af364477d25e706fd.tar.bz2
r-alacritty-vte-7321a442a6fc0fc5b6d6ed7af364477d25e706fd.zip
Switch parser to multi-byte processing
This patch overhauls the `Parser::advance` API to operate on byte slices instead of individual bytes, which allows for additional performance optimizations. VTE does not support C1 escapes and C0 escapes always start with an escape character. This makes it possible to simplify processing if a byte stream is determined to not contain any escapes. The `memchr` crate provides a battle-tested implementation for SIMD-accelerated byte searches, which is why this implementation makes use of it. VTE also only supports UTF-8 characters in the ground state, which means that the new non-escape parsing path is able to rely completely on std's `str::from_utf8`, since `memchr` gives us the full length of the plain-text character buffer. This allows us to completely remove `utf8parse` and all related code. We also make use of `memchr` in the synchronized escape handling in `ansi.rs`, since it relies heavily on scanning large amounts of text for the extension/termination escape sequences.
Diffstat (limited to 'vte_generate_state_changes/src/lib.rs')
-rw-r--r-- vte_generate_state_changes/src/lib.rs 12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/vte_generate_state_changes/src/lib.rs b/vte_generate_state_changes/src/lib.rs
index b016518..ff8ea49 100644
--- a/vte_generate_state_changes/src/lib.rs
+++ b/vte_generate_state_changes/src/lib.rs
@@ -25,8 +25,8 @@ pub fn generate_state_changes(item: proc_macro::TokenStream) -> proc_macro::Toke
let assignments_stream = states_stream(&mut iter);
quote!(
- const fn #fn_name() -> [[u8; 256]; 16] {
- let mut state_changes = [[0; 256]; 16];
+ const fn #fn_name() -> [[u8; 256]; 13] {
+ let mut state_changes = [[0; 256]; 13];
#assignments_stream
@@ -71,7 +71,8 @@ fn state_entry_stream(iter: &mut Peekable<token_stream::IntoIter>) -> TokenStrea
tokens
}
-/// Generate the array assignment statement for a single byte->target mapping for one state.
+/// Generate the array assignment statement for a single byte->target mapping
+/// for one state.
fn change_stream(iter: &mut Peekable<token_stream::IntoIter>, state: &TokenTree) -> TokenStream {
// Start of input byte range
let start = next_usize(iter);
@@ -101,8 +102,6 @@ fn change_stream(iter: &mut Peekable<token_stream::IntoIter>, state: &TokenTree)
// Create a new entry for every byte in the range
for byte in start..=end {
- // TODO: Force adding `State::` and `Action::`?
- // TODO: Should we really use `pack` here without import?
tokens.extend(quote!(
state_changes[State::#state as usize][#byte] =
pack(State::#target_state, Action::#target_action);
@@ -148,7 +147,8 @@ fn expect_punct(iter: &mut impl Iterator<Item = TokenTree>, c: char) {
///
/// # Panics
///
-/// Panics if the next token is not a [`usize`] in hex or decimal literal format.
+/// Panics if the next token is not a [`usize`] in hex or decimal literal
+/// format.
fn next_usize(iter: &mut impl Iterator<Item = TokenTree>) -> usize {
match iter.next() {
Some(Literal(literal)) => {