diff --git a/README.md b/README.md index 6232e9d..a97abe3 100644 --- a/README.md +++ b/README.md @@ -4,6 +4,7 @@ A collection of useful algorithms written in Rust. Currently contains: - [`geo_filters`](crates/geo_filters): probabilistic data structures that solve the [Distinct Count Problem](https://en.wikipedia.org/wiki/Count-distinct_problem) using geometric filters. - [`bpe`](crates/bpe): fast, correct, and novel algorithms for the [Byte Pair Encoding Algorithm](https://en.wikipedia.org/wiki/Large_language_model#BPE) which are particularly useful for chunking of documents. +- [`string-offsets`](crates/string-offsets): converts string positions between bytes, chars, UTF-16 code units, and line numbers. Useful when sending string indices across language boundaries. ## Background diff --git a/crates/string-offsets/Cargo.toml b/crates/string-offsets/Cargo.toml new file mode 100644 index 0000000..fd9b838 --- /dev/null +++ b/crates/string-offsets/Cargo.toml @@ -0,0 +1,14 @@ +[package] +name = "string-offsets" +authors = ["The blackbird team "] +version = "0.1.0" +edition = "2021" +description = "Converts string offsets between UTF-8 bytes, UTF-16 code units, Unicode code points, and lines." +repository = "https://github.com/github/rust-gems" +license = "MIT" +keywords = ["unicode", "positions", "utf16", "characters", "lines"] +categories = ["algorithms", "data-structures", "text-processing", "development-tools::ffi"] + +[dev-dependencies] +rand = "0.8" +rand_chacha = "0.3" diff --git a/crates/string-offsets/README.md b/crates/string-offsets/README.md new file mode 100644 index 0000000..7ad8c23 --- /dev/null +++ b/crates/string-offsets/README.md @@ -0,0 +1,45 @@ +# string-offsets + +Converts string offsets between UTF-8 bytes, UTF-16 code units, Unicode code points, and lines. + +Rust strings are UTF-8, but JavaScript has UTF-16 strings, and in Python, strings are sequences of +Unicode code points. 
It's therefore necessary to adjust string offsets when communicating across +programming language boundaries. [`StringOffsets`] does these adjustments. + +Each `StringOffsets` instance contains offset information for a single string. [Building the data +structure](StringOffsets::new) takes O(n) time and memory, but then most conversions are O(1). + +["UTF-8 Conversions with BitRank"](https://adaptivepatchwork.com/2023/07/10/utf-conversion/) is a +blog post explaining the implementation. + +## Usage + +Add this to your `Cargo.toml`: + +```toml +[dependencies] +string-offsets = "0.1" +``` + +Then: + +```rust +use string_offsets::StringOffsets; + +let s = "☀️hello\n🗺️world\n"; +let offsets = StringOffsets::new(s); + +// Find offsets where lines begin and end. +assert_eq!(offsets.line_to_utf8s(0), 0..12); // note: 0-based line numbers + +// Translate string offsets between UTF-8 and other encodings. +// This map emoji is 7 UTF-8 bytes... +assert_eq!(&s[12..19], "🗺️"); +// ...but only 3 UTF-16 code units... +assert_eq!(offsets.utf8_to_utf16(12), 8); +assert_eq!(offsets.utf8_to_utf16(19), 11); +// ...and only 2 Unicode characters. +assert_eq!(offsets.utf8s_to_chars(12..19), 8..10); +``` + +See [the documentation](https://docs.rs/string-offsets/latest/string_offsets/struct.StringOffsets.html) for more. diff --git a/crates/string-offsets/src/bitrank.rs b/crates/string-offsets/src/bitrank.rs new file mode 100644 index 0000000..6524769 --- /dev/null +++ b/crates/string-offsets/src/bitrank.rs @@ -0,0 +1,370 @@ +//! A bit-vector data structure, optimized for +//! [rank](http://bitmagic.io/rank-select.html) operations. +//! +//! See also: ["Succinct data structure"](https://en.wikipedia.org/wiki/Succinct_data_structure). + +type SubblockBits = u128; + +// Static sizing of the various components of the data structure. 
+const BITS_PER_BLOCK: usize = 16384; +const BITS_PER_SUB_BLOCK: usize = SubblockBits::BITS as usize; +const SUB_BLOCKS_PER_BLOCK: usize = BITS_PER_BLOCK / BITS_PER_SUB_BLOCK; + +/// A container for a portion of the total bit vector and the associated indices. +/// The bits within each chunk are stored from most significant bit (msb) to least significant bit (lsb). +/// i.e. index 0 of a Chunk is at the start of visual binary representation or a value of +/// 1u128 << 127. +/// +/// The actual bits are stored alongside the indices because the common case will be reading this +/// information from disk (rather than random access memory), so it is beneficial to have all of +/// the data that we need in the same page. +/// +/// ```text +/// index: [ 0, 1, 2, 3, 4, 5, 6, 7 ] +/// bits: [ 0, 1, 0, 1, 1, 0, 1, 0 ] +/// rank(exclusive): [ 0, 0, 1, 1, 2, 3, 3, 4 ] +/// block rank: [ 0 ] +/// sub-block rank: [ 0 ][ 2 ] +/// ``` +#[derive(Clone, Debug)] +struct Block { + /// Rank of the first bit in this block (that is, the number of bits set in previous blocks). + rank: u64, + /// Rank of the first bit (bit 0) of each subblock, relative to the start of the block. + /// That is, `sub_blocks[i]` is the number of bits set in the `bits` representing + /// sub-blocks `0..i`. `sub_blocks[0]` is always zero. + sub_blocks: [u16; SUB_BLOCKS_PER_BLOCK], + /// The bit-vector. + bits: [SubblockBits; SUB_BLOCKS_PER_BLOCK], +} + +impl Block { + /// Set a bit without updating `self.sub_blocks`. + /// + /// This panics if the bit was already set, because that indicates that the original positions + /// list is invalid/had duplicates. 
+ fn set(&mut self, index: usize) {
+ assert!(index < BITS_PER_BLOCK);
+ let chunk_idx = index / BITS_PER_SUB_BLOCK;
+ let bit_idx = index % BITS_PER_SUB_BLOCK;
+ let mask = 1 << ((BITS_PER_SUB_BLOCK - 1) - bit_idx);
+ assert_eq!(self.bits[chunk_idx] & mask, 0, "toggling bits off indicates that the original data was incorrect, most likely containing duplicate values.");
+ self.bits[chunk_idx] ^= mask;
+ }
+
+ /// The **total rank** of the block relative to the local index, and the index of the one
+ /// bit that establishes that rank (aka "select") **if** it occurs within that same
+ /// chunk, otherwise [`None`]. The assumption is that if you would have to look back
+ /// through previous chunks it would actually be cheaper to do a lookup in the original
+ /// data structure that the bit vector was created from.
+ fn rank_select(&self, local_idx: usize) -> (usize, Option) {
+ let mut rank = self.rank as usize;
+ let sub_block = local_idx / BITS_PER_SUB_BLOCK;
+ rank += self.sub_blocks[sub_block] as usize;
+
+ let remainder = local_idx % BITS_PER_SUB_BLOCK;
+
+ let last_chunk = local_idx / BITS_PER_SUB_BLOCK;
+ let masked = if remainder == 0 {
+ 0
+ } else {
+ self.bits[last_chunk] >> (BITS_PER_SUB_BLOCK - remainder)
+ };
+ rank += masked.count_ones() as usize;
+ let select = if masked == 0 {
+ None
+ } else {
+ Some(local_idx - masked.trailing_zeros() as usize - 1)
+ };
+ (rank, select)
+ }
+
+ fn total_rank(&self) -> usize {
+ self.sub_blocks[SUB_BLOCKS_PER_BLOCK - 1] as usize
+ + self.rank as usize
+ + self.bits[SUB_BLOCKS_PER_BLOCK - 1..]
+ .iter()
+ .map(|c| c.count_ones() as usize)
+ .sum::()
+ }
+}
+
+/// Builder for creating a [`BitRank`].
+/// +/// # Examples +/// +/// ```text +/// let mut builder = BitRankBuilder::new(); +/// builder.push(17); +/// builder.push(23); +/// builder.push(102); +/// let set = builder.finish(); +/// assert_eq!(set.rank(100), 2); +/// ``` +#[derive(Default)] +pub struct BitRankBuilder { + blocks: Vec, +} + +impl BitRankBuilder { + /// Returns a new builder. + #[cfg(test)] + pub fn new() -> Self { + Self::default() + } + + /// Returns a builder that can hold integers with values `0..cap`. + pub fn with_capacity(cap: usize) -> Self { + Self { + blocks: Vec::with_capacity(cap.div_ceil(BITS_PER_BLOCK)), + } + } + + fn finish_last_block(&mut self) -> u64 { + if let Some(block) = self.blocks.last_mut() { + let mut local_rank = 0; + for (i, chunk) in block.bits.iter().enumerate() { + block.sub_blocks[i] = local_rank; + local_rank += chunk.count_ones() as u16; + } + block.rank + local_rank as u64 + } else { + 0 + } + } + + /// Adds a bit. Bits must be added in order of increasing `position`. + pub fn push(&mut self, position: usize) { + let block_id = position / BITS_PER_BLOCK; + assert!( + self.blocks.len() <= block_id + 1, + "positions must be increasing!" + ); + if block_id >= self.blocks.len() { + let curr_rank = self.finish_last_block(); + while block_id >= self.blocks.len() { + // Without this declared as a `const`, rustc 1.82 creates the Block value on the + // stack first, then `memcpy`s it into `self.blocks`. + const ZERO_BLOCK: Block = Block { + rank: 0, + sub_blocks: [0; SUB_BLOCKS_PER_BLOCK], + bits: [0; SUB_BLOCKS_PER_BLOCK], + }; + self.blocks.push(ZERO_BLOCK); + self.blocks.last_mut().expect("just inserted").rank = curr_rank; + } + } + self.blocks + .last_mut() + .expect("just ensured there are enough blocks") + .set(position % BITS_PER_BLOCK); + } + + /// Finishes the `BitRank` by writing the last block of data. 
+ pub fn finish(mut self) -> BitRank {
+ self.finish_last_block();
+ BitRank {
+ blocks: self.blocks,
+ }
+ }
+}
+
+/// An immutable set of unsigned integers with an efficient `rank` method.
+#[derive(Clone)]
+pub struct BitRank {
+ blocks: Vec,
+}
+
+impl BitRank {
+ /// The rank at the specified index (exclusive).
+ ///
+ /// The (one) rank is defined as: `rank(i) = sum(b[j] for j in 0..i)`
+ /// i.e. the number of elements less than `i`.
+ pub fn rank(&self, idx: usize) -> usize {
+ self.rank_select(idx).0
+ }
+
+ /// Returns the number of elements in the set.
+ pub fn max_rank(&self) -> usize {
+ self.blocks
+ .last()
+ .map(|b| b.total_rank())
+ .unwrap_or_default() // fall back to 0 when the bitrank data structure is empty.
+ }
+
+ /// The rank at the specified index (exclusive) and the index of the one bit that
+ /// establishes that rank (aka "select") **if** it occurs within that same chunk,
+ /// otherwise [`None`]. The assumption is that if you would have to look back
+ /// through previous chunks it would actually be cheaper to do a lookup in the original
+ /// data structure that the bit vector was created from.
+ pub fn rank_select(&self, idx: usize) -> (usize, Option) {
+ let block_num = idx / BITS_PER_BLOCK;
+ // assert!(block_num < self.blocks.len(), "index out of bounds");
+ if block_num >= self.blocks.len() {
+ (
+ self.max_rank(), // saturate: indexes past the end rank above every stored bit.
+ None,
+ )
+ } else {
+ let (rank, b_idx) = self.blocks[block_num].rank_select(idx % BITS_PER_BLOCK);
+ (rank, b_idx.map(|i| (block_num * BITS_PER_BLOCK) + i))
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use rand::distributions::Uniform;
+ use rand::prelude::*;
+ use rand_chacha::ChaCha8Rng;
+
+ use super::*;
+
+ /// Creates a `BitRank` containing the integers in `iter` (which should be strictly
+ /// increasing).
+ pub fn bitrank>(iter: I) -> BitRank { + let mut builder = BitRankBuilder::new(); + for position in iter { + builder.push(position); + } + builder.finish() + } + + #[test] + fn test_rank_zero() { + let br = bitrank([0]); + assert_eq!(br.rank(0), 0); + assert_eq!(br.rank(1), 1); + } + + #[test] + fn test_empty() { + let br = bitrank([]); + assert!(br.blocks.is_empty()); + } + + #[test] + fn test_index_out_of_bounds() { + let br = bitrank([BITS_PER_BLOCK - 1]); + assert_eq!(br.rank(BITS_PER_BLOCK), 1); + } + + #[test] + #[should_panic] + fn test_duplicate_position() { + bitrank([64, 66, 68, 68, 90]); + } + + #[test] + fn test_rank_exclusive() { + let br = bitrank(0..132); + assert_eq!(br.blocks.len(), 1); + assert_eq!(br.rank(64), 64); + assert_eq!(br.rank(132), 132); + } + + #[test] + fn test_rank() { + let mut positions: Vec = (0..132).collect(); + positions.append(&mut vec![138usize, 140, 146]); + let br = bitrank(positions); + assert_eq!(br.rank(135), 132); + + let br2 = bitrank(0..BITS_PER_BLOCK - 5); + assert_eq!(br2.rank(169), 169); + + let br3 = bitrank(0..BITS_PER_BLOCK + 5); + assert_eq!(br3.rank(BITS_PER_BLOCK), BITS_PER_BLOCK); + } + + #[test] + fn test_rank_idx() { + let mut positions: Vec = (0..132).collect(); + positions.append(&mut vec![138usize, 140, 146]); + let br = bitrank(positions); + assert_eq!(br.rank_select(135), (132, Some(131))); + + let bits2: Vec = (0..BITS_PER_BLOCK - 5).collect(); + let br2 = bitrank(bits2); + assert_eq!(br2.rank_select(169), (169, Some(168))); + + let bits3: Vec = (0..BITS_PER_BLOCK + 5).collect(); + let br3 = bitrank(bits3); + assert_eq!(br3.rank_select(BITS_PER_BLOCK), (BITS_PER_BLOCK, None)); + + let bits4: Vec = vec![1, 1000, 9999, BITS_PER_BLOCK + 1]; + let br4 = bitrank(bits4); + assert_eq!(br4.rank_select(10000), (3, Some(9999))); + + let bits5: Vec = vec![1, 1000, 9999, BITS_PER_BLOCK + 1]; + let br5 = bitrank(bits5); + assert_eq!(br5.rank_select(BITS_PER_BLOCK), (3, None)); + } + + #[test] + fn 
test_rank_large_random() { + let mut rng = ChaCha8Rng::seed_from_u64(2); + let uniform = Uniform::::from(0..1_000_000); + let mut random_bits = Vec::with_capacity(100_000); + for _ in 0..100_000 { + random_bits.push(uniform.sample(&mut rng)); + } + random_bits.sort_unstable(); + // This isn't strictly necessary, given that the bit would just be toggled again, but it + // ensures that we are meeting the contract. + random_bits.dedup(); + let br = bitrank(random_bits.iter().copied()); + let mut rank = 0; + let mut select = None; + for i in 0..random_bits.capacity() { + if i % BITS_PER_SUB_BLOCK == 0 { + select = None; + } + assert_eq!(br.rank_select(i), (rank, select)); + if i == random_bits[rank] { + rank += 1; + select = Some(i); + } + } + } + + /// Test that we properly handle the case where the position is out of bounds for all + /// potentially tricky bit positions. + #[test] + fn test_rank_out_of_bounds() { + for i in 1..30 { + let br = bitrank([BITS_PER_BLOCK * i - 1]); + assert_eq!(br.max_rank(), 1); + assert_eq!(br.rank(BITS_PER_BLOCK * i - 1), 0); + for j in 0..10 { + assert_eq!(br.rank(BITS_PER_BLOCK * (i + j)), 1); + } + } + } + + #[test] + fn test_large_gap() { + let br = bitrank((3..4).chain(BITS_PER_BLOCK * 15..BITS_PER_BLOCK * 15 + 17)); + for i in 1..15 { + assert_eq!(br.rank(BITS_PER_BLOCK * i), 1); + } + for i in 0..18 { + assert_eq!(br.rank(BITS_PER_BLOCK * 15 + i), 1 + i); + } + } + + #[test] + fn test_with_capacity() { + let mut b = BitRankBuilder::with_capacity(BITS_PER_BLOCK * 3 - 1); + let initial_capacity = b.blocks.capacity(); + assert!(initial_capacity >= 3); + b.push(BITS_PER_BLOCK * 3 - 2); // should not have to grow + assert_eq!(b.blocks.capacity(), initial_capacity); + + let mut b = BitRankBuilder::with_capacity(BITS_PER_BLOCK * 3 + 1); + let initial_capacity = b.blocks.capacity(); + assert!(initial_capacity >= 4); + b.push(BITS_PER_BLOCK * 3); // should not have to grow + assert_eq!(b.blocks.capacity(), initial_capacity); + } +} diff 
--git a/crates/string-offsets/src/lib.rs b/crates/string-offsets/src/lib.rs new file mode 100644 index 0000000..ee05e54 --- /dev/null +++ b/crates/string-offsets/src/lib.rs @@ -0,0 +1,607 @@ +//! Converts string offsets between UTF-8 bytes, UTF-16 code units, Unicode code points, and lines. +//! +//! # Example +//! +//! ``` +//! use string_offsets::StringOffsets; +//! +//! let s = "☀️hello\n🗺️world\n"; +//! let offsets = StringOffsets::new(s); +//! +//! // Find offsets where lines begin and end. +//! assert_eq!(offsets.line_to_utf8s(0), 0..12); // note: 0-based line numbers +//! +//! // Translate string offsets between UTF-8 and other encodings. +//! // This map emoji is 7 UTF-8 bytes... +//! assert_eq!(&s[12..19], "🗺️"); +//! // ...but only 3 UTF-16 code units... +//! assert_eq!(offsets.utf8_to_utf16(12), 8); +//! assert_eq!(offsets.utf8_to_utf16(19), 11); +//! // ...and only 2 Unicode code points. +//! assert_eq!(offsets.utf8s_to_chars(12..19), 8..10); +//! ``` +//! +//! See [`StringOffsets`] for details. +#![deny(missing_docs)] + +use std::ops::Range; + +mod bitrank; + +use bitrank::{BitRank, BitRankBuilder}; + +/// Converts positions within a given string between UTF-8 byte offsets (the usual in Rust), UTF-16 +/// code units, Unicode code points, and line numbers. +/// +/// Rust strings are UTF-8, but JavaScript has UTF-16 strings, and in Python, strings are sequences +/// of Unicode code points. It's therefore necessary to adjust string offsets when communicating +/// across programming language boundaries. [`StringOffsets`] does these adjustments. +/// +/// Each `StringOffsets` instance contains offset information for a single string. [Building the +/// data structure](StringOffsets::new) takes O(n) time and memory, but then most conversions are +/// O(1). +/// +/// ["UTF-8 Conversions with BitRank"](https://adaptivepatchwork.com/2023/07/10/utf-conversion/) +/// is a blog post explaining the implementation. 
+/// +/// ## Converting offsets +/// +/// The conversion methods follow a naming scheme that uses these terms for different kinds of +/// offsets: +/// +/// - `utf8` - UTF-8 byte offsets (Rust style). +/// - `utf16` - UTF-16 code unit offsets (JavaScript style). +/// - `char` - Count of Unicode scalar values (Python style). +/// - `utf16_pos` - Zero-based line number and `utf16` offset within the line. +/// - `char_pos` - Zero-based line number and `char` offset within the line. +/// +/// For example, [`StringOffsets::utf8_to_utf16`] converts a Rust byte offset to a number that will +/// index to the same position in a JavaScript string. Offsets are expressed as `usize` or [`Pos`] +/// values. +/// +/// All methods accept arguments that are past the end of the string, interpreting them as pointing +/// to the end of the string. +/// +/// ## Converting ranges +/// +/// Some methods translate position *ranges*. These are expressed as `Range` except for +/// `line`, which is a `usize`: +/// +/// - `line` - Zero-based line numbers. The range a `line` refers to is the whole line, including +/// the trailing newline character if any. +/// - `lines` - A range of line numbers. +/// - `utf8s` - UTF-8 byte ranges. +/// - `utf16s` - UTF-16 code unit ranges. +/// - `chars` - Ranges of Unicode scalar values. +/// +/// When mapping offsets to line ranges, it is important to use a `_to_lines` function in order to +/// end up with the correct line range. We have these methods because if you tried to do it +/// yourself you would screw it up; use them! (And see the source code for +/// [`StringOffsets::utf8s_to_lines`] if you don't believe us.) +/// +/// ## Complexity +/// +/// Most operations run in O(1) time. A few require O(log n) time. The memory consumed by this +/// data structure is typically less than the memory occupied by the actual content. In the best +/// case, it requires ~45% of the content space. 
+pub struct StringOffsets { + /// Vector storing, for every line, the byte position at which the line starts. + line_begins: Vec, + + /// Encoded bitrank where the rank of a byte position corresponds to the line number to which + /// the byte belongs. + utf8_to_line: BitRank, + + /// Encoded bitrank where the rank of a byte position corresponds to the char position to which + /// the byte belongs. + utf8_to_char: BitRank, + + /// Encoded bitrank where the rank of a byte position corresponds to the UTF-16 encoded word + /// position to which the byte belongs. + utf8_to_utf16: BitRank, + + /// Marks, for every line, whether it consists only of whitespace characters. + whitespace_only: Vec, +} + +/// A position in a string, specified by line and column number. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct Pos { + /// Zero-indexed line number. + pub line: usize, + /// Zero-indexed column number. The units of this field depend on the method that produces the + /// value. See [`StringOffsets::utf8_to_char_pos`], [`StringOffsets::utf8_to_utf16_pos`]. + pub col: usize, +} + +// The actual conversion implementation between utf8, utf16, chars, and line numbers. +// New methods must follow the existing conventions: +// +// - All conversions saturate when the input is out of bounds. +// - Lines INCLUDE the terminating newline. +// - Line numbers and column numbers are 0-based. +// - `.xyz_to_lines(range)` methods behave like `.utf8_to_lines(the corresponding byte range)`. +// +// This last one is tricky, because in these methods, `range.begin` "rounds down" to the beginning +// of the line, but `range.end` "rounds up"; and because there are many corner cases. +// +// E.g.: The empty character range at the end of one line cannot be distinguished from the empty +// character range at the start of the subsequent line! This ambiguity is resolved by returning the +// line which starts with the empty character range. 
+// +// Question: Consider whether we should return an empty line range in this case which would +// probably be consistent from a mathematical point of view. But then we should also return empty +// line ranges for empty character ranges in the middle of a line... +impl StringOffsets { + /// Create a new converter to work with offsets into the given string. + pub fn new(content: &str) -> Self { + new_converter(content.as_bytes()) + } + + /// Create a new converter to work with offsets into the given byte-string. + /// + /// If `content` is UTF-8, this is just like [`StringOffsets::new`]. Otherwise, the + /// conversion methods will produce unspecified (but memory-safe) results. + pub fn from_bytes(content: &[u8]) -> Self { + new_converter(content) + } + + /// Returns the number of Unicode characters on the specified line. + pub fn line_chars(&self, line_number: usize) -> usize { + let r = self.utf8s_to_chars(self.line_to_utf8s(line_number)); + r.end - r.start + } + + /// Returns the number of lines in the string. + pub fn lines(&self) -> usize { + self.line_begins.len() - 1 + } + + /// Returns true if the specified line is empty except for whitespace. + pub fn only_whitespaces(&self, line_number: usize) -> bool { + self.whitespace_only + .get(line_number) + .copied() + .unwrap_or(true) + } + + /// Return the byte offset of the first character on the specified (zero-based) line. + /// + /// If `line_number` is greater than or equal to the number of lines in the text, this returns + /// the length of the string. + pub fn line_to_utf8_begin(&self, line_number: usize) -> usize { + self.line_begins[line_number.min(self.lines())] as usize + } + + /// UTF-16 offset of the first character of a line. + /// + /// That is, return the offset that would point to the start of that line in a UTF-16 + /// representation of the source string. 
+ pub fn line_to_utf16_begin(&self, line_number: usize) -> usize {
+ self.utf8_to_utf16(self.line_to_utf8_begin(line_number))
+ }
+
+ /// UTF-32 offset of the first character of a line.
+ ///
+ /// That is, return the offset that would point to the start of that line in a UTF-32
+ /// representation of the source string.
+ pub fn line_to_char_begin(&self, line_number: usize) -> usize {
+ self.utf8_to_char(self.line_to_utf8_begin(line_number))
+ }
+
+ /// UTF-8 offset one past the end of a line (the offset of the start of the next line).
+ pub fn line_to_utf8_end(&self, line_number: usize) -> usize {
+ self.line_to_utf8_begin(line_number + 1)
+ }
+
+ /// UTF-16 offset one past the end of a line (the offset of the start of the next line).
+ pub fn line_to_utf16_end(&self, line_number: usize) -> usize {
+ self.utf8_to_utf16(self.line_to_utf8_end(line_number))
+ }
+
+ /// UTF-32 offset one past the end of a line (the offset of the start of the next line).
+ pub fn line_to_char_end(&self, line_number: usize) -> usize {
+ self.utf8_to_char(self.line_to_utf8_end(line_number))
+ }
+
+ /// UTF-8 offsets for the beginning and end of a line, including the newline if any.
+ pub fn line_to_utf8s(&self, line_number: usize) -> Range {
+ self.line_to_utf8_begin(line_number)..self.line_to_utf8_end(line_number)
+ }
+
+ /// UTF-32 offsets for the beginning and end of a line, including the newline if any.
+ pub fn line_to_chars(&self, line_number: usize) -> Range {
+ self.utf8s_to_chars(self.line_to_utf8s(line_number))
+ }
+
+ /// UTF-8 offsets for the beginning and end of a range of lines, including the newline if any.
+ pub fn lines_to_utf8s(&self, line_numbers: Range) -> Range {
+ self.line_to_utf8_begin(line_numbers.start)..self.line_to_utf8_begin(line_numbers.end)
+ }
+
+ /// UTF-32 offsets for the beginning and end of a range of lines, including the newline if any.
+ pub fn lines_to_chars(&self, line_numbers: Range) -> Range { + self.utf8s_to_chars(self.lines_to_utf8s(line_numbers)) + } + + /// Return the zero-based line number of the line containing the specified UTF-8 offset. + /// Newline characters count as part of the preceding line. + pub fn utf8_to_line(&self, byte_number: usize) -> usize { + self.utf8_to_line.rank(byte_number) + } + + /// Converts a UTF-8 offset to a zero-based line number and UTF-32 offset within the + /// line. + pub fn utf8_to_char_pos(&self, byte_number: usize) -> Pos { + let line = self.utf8_to_line(byte_number); + let line_start_char_number = self.line_to_char_begin(line); + let char_idx = self.utf8_to_char(byte_number); + Pos { + line, + col: char_idx - line_start_char_number, + } + } + + /// Converts a UTF-8 offset to a zero-based line number and UTF-16 offset within the + /// line. + pub fn utf8_to_utf16_pos(&self, byte_number: usize) -> Pos { + let line = self.utf8_to_line(byte_number); + let line_start_char_number = self.line_to_utf16_begin(line); + let char_idx = self.utf8_to_utf16(byte_number); + Pos { + line, + col: char_idx - line_start_char_number, + } + } + + /// Returns the range of line numbers containing the substring specified by the Rust-style + /// range `bytes`. Newline characters count as part of the preceding line. + /// + /// If `bytes` is an empty range at a position within or at the beginning of a line, this + /// returns a nonempty range containing the line number of that one line. An empty range at or + /// beyond the end of the string translates to an empty range of line numbers. + pub fn utf8s_to_lines(&self, bytes: Range) -> Range { + // The fiddly parts of this formula are necessary because `bytes.start` rounds down to the + // beginning of the line, but `bytes.end` "rounds up" to the end of the line. the final + // `+1` is to produce a half-open range. 
+ self.utf8_to_line(bytes.start) + ..self + .lines() + .min(self.utf8_to_line(bytes.end.saturating_sub(1).max(bytes.start)) + 1) + } + + /// Returns the range of line numbers containing the substring specified by the UTF-32 + /// range `chars`. Newline characters count as part of the preceding line. + pub fn chars_to_lines(&self, chars: Range) -> Range { + self.utf8s_to_lines(self.chars_to_utf8s(chars)) + } + + /// Converts a UTF-8 offset to a UTF-32 offset. + pub fn utf8_to_char(&self, byte_number: usize) -> usize { + self.utf8_to_char.rank(byte_number) + } + + /// Converts a UTF-8 offset to a UTF-16 offset. + pub fn utf8_to_utf16(&self, byte_number: usize) -> usize { + self.utf8_to_utf16.rank(byte_number) + } + + /// Converts a UTF-32 offset to a UTF-8 offset. + pub fn char_to_utf8(&self, char_number: usize) -> usize { + let mut byte_number = char_number; + for _ in 0..128 { + let char_number2 = self.utf8_to_char(byte_number); + if char_number2 == char_number { + return byte_number; + } + byte_number += char_number - char_number2; + } + // If we couldn't find the char within 128 steps, then the char_number might be invalid! + // This does not usually happen. For consistency with the rest of the code, we simply return + // the max utf8 position in this case. + if char_number > self.utf8_to_char.max_rank() { + return self + .line_begins + .last() + .copied() + .expect("last entry represents the length of the file!") + as usize; + } + let limit = *self.line_begins.last().expect("no line begins") as usize; + // Otherwise, we keep searching, but are a bit more careful and add a check that we don't run into an infinite loop. + loop { + let char_number2 = self.utf8_to_char(byte_number); + if char_number2 == char_number { + return byte_number; + } + byte_number += char_number - char_number2; + assert!(byte_number < limit); + } + } + + /// Converts a UTF-8 offset range to a UTF-32 offset range. 
+ pub fn utf8s_to_chars(&self, bytes: Range) -> Range { + self.utf8_to_char(bytes.start)..self.utf8_to_char(bytes.end) + } + + /// Converts a UTF-32 offset range to a UTF-8 offset range. + pub fn chars_to_utf8s(&self, chars: Range) -> Range { + self.char_to_utf8(chars.start)..self.char_to_utf8(chars.end) + } +} + +fn new_converter(content: &[u8]) -> StringOffsets { + let n = content.len(); + let mut utf8_builder = BitRankBuilder::with_capacity(n); + let mut utf16_builder = BitRankBuilder::with_capacity(n); + let mut line_builder = BitRankBuilder::with_capacity(n); + let mut line_begins = vec![0]; + let mut i = 0; + let mut whitespace_only = vec![]; + let mut only_whitespaces = true; // true if all characters in the current line are whitespaces. + while i < content.len() { + // In case of invalid utf8, we might get a utf8_len of 0. + // In this case, we just treat the single byte character. + // In principle, a single incorrect byte can break the whole decoding... + let c = content[i]; + let utf8_len = utf8_width(c).max(1); + if i > 0 { + utf8_builder.push(i - 1); + utf16_builder.push(i - 1); + } + if utf8_to_utf16_width(&content[i..]) > 1 { + utf16_builder.push(i); + } + if c == b'\n' { + whitespace_only.push(only_whitespaces); + line_begins.push(i as u32 + 1); + line_builder.push(i); + only_whitespaces = true; // reset for next line. 
+ } else { + only_whitespaces &= matches!(c, b'\t' | b'\r' | b' '); + } + i += utf8_len; + } + if !content.is_empty() { + utf8_builder.push(content.len() - 1); + utf16_builder.push(content.len() - 1); + } + if line_begins.last() != Some(&(content.len() as u32)) { + whitespace_only.push(only_whitespaces); + line_begins.push(content.len() as u32); + line_builder.push(content.len() - 1); + } + + StringOffsets { + line_begins, + utf8_to_line: line_builder.finish(), + whitespace_only, + utf8_to_char: utf8_builder.finish(), + utf8_to_utf16: utf16_builder.finish(), + } +} + +/// Returns the number of bytes a UTF-8 char occupies, given the first byte of the UTF-8 encoding. +/// Returns 0 if the byte is not a valid first byte of a UTF-8 char. +fn utf8_width(c: u8) -> usize { + // Every nibble represents the utf8 length given the first 4 bits of a utf8 encoded byte. + const UTF8_WIDTH: usize = 0x4322_0000_1111_1111; + (UTF8_WIDTH >> ((c >> 4) * 4)) & 0xf +} + +fn utf8_to_utf16_width(content: &[u8]) -> usize { + let len = utf8_width(content[0]); + match len { + 0 => 0, + 1..=3 => 1, + 4 => 2, + _ => panic!("invalid utf8 char width: {}", len), + } +} + +#[cfg(test)] +mod tests { + use super::*; + + /// Returns true if, in a UTF-8 string, `b` indicates the first byte of a character. 
+ fn is_char_boundary(b: u8) -> bool { + b as i8 >= -0x40 // NB: b < 128 || b >= 192 + } + + #[test] + fn test_utf8_char_width() { + for c in '\0'..=char::MAX { + let mut dst = [0; 4]; + let len = c.encode_utf8(&mut dst).len(); + assert_eq!(len, utf8_width(dst[0]), "char: {:?} {len}", dst[0] >> 4); + } + + for b in 0..=255u8 { + if !is_char_boundary(b) { + assert_eq!(utf8_width(b), 0, "char: {:?}", b >> 4); + } else { + assert!(utf8_width(b) > 0, "char: {:?}", b >> 4); + } + } + } + + #[test] + fn test_utf8_to_utf16_len() { + for c in '\0'..=char::MAX { + let mut dst = [0; 4]; + let _len = c.encode_utf8(&mut dst).len(); + assert_eq!(utf8_to_utf16_width(&dst), c.len_utf16()); + } + + for b in 0..=255u8 { + if !is_char_boundary(b) { + assert_eq!(utf8_to_utf16_width(&[b]), 0); + } + } + } + + #[test] + fn test_line_map() { + let content = r#"a short line. +followed by another one. +no terminating newline!"#; + let lines = StringOffsets::new(content); + assert_eq!(lines.line_to_utf8s(0), 0..14); + assert_eq!(&content[0..14], "a short line.\n"); + assert_eq!(lines.line_to_utf8s(1), 14..39); + assert_eq!(&content[14..39], "followed by another one.\n"); + assert_eq!(lines.line_to_utf8s(2), 39..62); + assert_eq!(&content[39..62], "no terminating newline!"); + assert_eq!(lines.utf8_to_line(0), 0); + assert_eq!(lines.utf8_to_line(13), 0); + assert_eq!(lines.utf8_to_line(14), 1); + assert_eq!(lines.utf8_to_line(38), 1); + assert_eq!(lines.utf8_to_line(39), 2); + assert_eq!(lines.utf8_to_line(61), 2); + assert_eq!(lines.utf8_to_line(62), 3); // <<-- this character is beyond the content. 
        assert_eq!(lines.utf8_to_line(100), 3); // Far past the end still clamps to the sentinel line.
        assert_eq!(lines.utf8s_to_chars(4..10), 4..10); // Pure ASCII: byte offsets == char offsets.
        assert_eq!(lines.chars_to_utf8s(4..10), 4..10);

        assert_eq!(content.len(), 62);
        assert_eq!(lines.lines_to_utf8s(2..3), 39..62);
        assert_eq!(lines.lines_to_utf8s(2..4), 39..62); // Line range past the end is clamped to the content.
        assert_eq!(lines.lines_to_chars(2..4), 39..62);
        assert_eq!(lines.utf8s_to_lines(39..62), 2..3);
        assert_eq!(lines.utf8s_to_lines(39..63), 2..3); // The "invalid" utf8 position results in a valid line position.
        assert_eq!(lines.char_to_utf8(62), 62);
        assert_eq!(lines.char_to_utf8(63), 62); // char 63 doesn't exist, so we map to the closest valid utf8 position.

        // Empty ranges
        assert_eq!(lines.utf8s_to_lines(0..0), 0..1);
        assert_eq!(lines.utf8s_to_lines(13..13), 0..1);
        assert_eq!(lines.utf8s_to_lines(14..14), 1..2);
        assert_eq!(lines.utf8s_to_lines(38..38), 1..2);
        assert_eq!(lines.utf8s_to_lines(39..39), 2..3);
        assert_eq!(lines.utf8s_to_lines(61..61), 2..3);
        assert_eq!(lines.utf8s_to_lines(62..62), 3..3);
        assert_eq!(lines.utf8s_to_lines(63..63), 3..3);
    }

    /// Convenience constructor for expected `(line, col)` positions.
    fn pos(line: usize, col: usize) -> Pos {
        Pos { line, col }
    }

    /// Byte offset -> (line, char-column) position for pure-ASCII content.
    #[test]
    fn test_convert_ascii() {
        let content = r#"line0
line1"#;
        let lines = StringOffsets::new(content);
        assert_eq!(lines.utf8_to_char_pos(0), pos(0, 0));
        assert_eq!(lines.utf8_to_char_pos(1), pos(0, 1));
        assert_eq!(lines.utf8_to_char_pos(6), pos(1, 0));
        assert_eq!(lines.utf8_to_char_pos(7), pos(1, 1));
    }

    /// Byte offset -> (line, column) positions for content mixing multi-byte
    /// and multi-code-point characters across several lines.
    #[test]
    fn test_convert_unicode() {
        let content = r#"❤️ line0
line1
✅ line2"#;
        let lines = StringOffsets::new(content);
        assert_eq!(lines.utf8_to_char_pos(0), pos(0, 0)); // ❤️ takes 6 bytes to represent in utf8 (2 code points)
        assert_eq!(lines.utf8_to_char_pos(1), pos(0, 0));
        assert_eq!(lines.utf8_to_char_pos(2), pos(0, 0));
        assert_eq!(lines.utf8_to_char_pos(3), pos(0, 1));
        assert_eq!(lines.utf8_to_char_pos(4), pos(0, 1));
        assert_eq!(lines.utf8_to_char_pos(5), pos(0, 1));

        assert_eq!(lines.utf8_to_char_pos(6), pos(0, 2)); // the space after ❤️
        assert_eq!(lines.utf8_to_char_pos(7), pos(0, 3)); // 'l' of "line0"

        assert_eq!(lines.utf8_to_char_pos(13), pos(1, 0)); // 'l' of "line1"

        assert_eq!(lines.utf8_to_char_pos(19), pos(2, 0)); // ✅ takes 3 bytes to represent in utf8 (1 code point)
        assert_eq!(lines.utf8_to_char_pos(20), pos(2, 0));
        assert_eq!(lines.utf8_to_char_pos(21), pos(2, 0));

        assert_eq!(lines.utf8_to_char_pos(22), pos(2, 1)); // the space after ✅

        assert_eq!(lines.utf8_to_utf16_pos(0), pos(0, 0)); // ❤️ takes 4 bytes to represent in utf16 (2 code points)
        assert_eq!(lines.utf8_to_utf16_pos(1), pos(0, 0));
        assert_eq!(lines.utf8_to_utf16_pos(2), pos(0, 0));
        assert_eq!(lines.utf8_to_utf16_pos(3), pos(0, 1));
    }

    /// Byte -> char and byte -> UTF-16 offsets must agree with what
    /// `char_indices` and `char::len_utf16` produce at every character
    /// boundary, including the one-past-the-end offset.
    #[test]
    fn test_small() {
        let content = r#"❤️ line0 ❤️Á 👋"#;
        let lines = StringOffsets::new(content);
        let mut utf16_index = 0;
        let mut char_index = 0;
        for (byte_index, char) in content.char_indices() {
            assert_eq!(lines.utf8_to_char(byte_index), char_index);
            assert_eq!(lines.utf8_to_utf16(byte_index), utf16_index);
            char_index += 1;
            utf16_index += char.len_utf16();
        }
        assert_eq!(lines.utf8_to_char(content.len()), char_index);
        assert_eq!(lines.utf8_to_utf16(content.len()), utf16_index);
    }

    /// Content mixing every UTF-8 encoded length:
    ///   ❤️  = 2 chars, 3 bytes each (bytes 0..6),  2 UTF-16 code units
    ///   Á   = 1 char,  2 bytes      (bytes 6..8),  1 UTF-16 code unit
    ///   ' ' = 1 char,  1 byte       (byte  8),     1 UTF-16 code unit
    ///   👋  = 1 char,  4 bytes      (bytes 9..13), 2 UTF-16 code units
    #[test]
    fn test_variable_lengths() {
        let content = r#"❤️Á 👋"#;
        let lines = StringOffsets::new(content);

        // UTF-16 positions
        assert_eq!(lines.utf8_to_utf16_pos(0), pos(0, 0)); // ❤️
        assert_eq!(lines.utf8_to_utf16_pos(1), pos(0, 0));
        assert_eq!(lines.utf8_to_utf16_pos(2), pos(0, 0));
        assert_eq!(lines.utf8_to_utf16_pos(3), pos(0, 1));
        assert_eq!(lines.utf8_to_utf16_pos(5), pos(0, 1));
        assert_eq!(lines.utf8_to_utf16_pos(4), pos(0, 1));
        assert_eq!(lines.utf8_to_utf16_pos(6), pos(0, 2)); // Á
        assert_eq!(lines.utf8_to_utf16_pos(7), pos(0, 2));
        assert_eq!(lines.utf8_to_utf16_pos(8), pos(0, 3)); // ' '
        assert_eq!(lines.utf8_to_utf16_pos(9), pos(0, 4)); // 👋

        // These middle utf8 byte positions don't have valid mappings:
        // assert_eq!(lines.utf8_to_utf16_pos(10), pos(0, 4));
        // assert_eq!(lines.utf8_to_utf16_pos(11), pos(0, 5));
        //
        // 👋 in utf16: 0xd83d 0xdc4b
        // 👋 in utf8: 0xf0 0x9f 0x91 0x8b
        //                  ^    ^
        // It's not really defined where these inner bytes map to and it
        // doesn't matter because we would never report those byte offset as
        // they are in the middle of a character and therefore invalid.

        assert_eq!(lines.utf8_to_utf16_pos(12), pos(0, 5));

        // UTF-8 positions
        assert_eq!(lines.utf8_to_char_pos(0), pos(0, 0)); // ❤️
        assert_eq!(lines.utf8_to_char_pos(1), pos(0, 0));
        assert_eq!(lines.utf8_to_char_pos(2), pos(0, 0));
        assert_eq!(lines.utf8_to_char_pos(3), pos(0, 1));
        assert_eq!(lines.utf8_to_char_pos(4), pos(0, 1));
        assert_eq!(lines.utf8_to_char_pos(5), pos(0, 1));
        assert_eq!(lines.utf8_to_char_pos(6), pos(0, 2)); // Á
        assert_eq!(lines.utf8_to_char_pos(7), pos(0, 2));
        assert_eq!(lines.utf8_to_char_pos(8), pos(0, 3)); // ' '
        assert_eq!(lines.utf8_to_char_pos(9), pos(0, 4)); // 👋
        assert_eq!(lines.utf8_to_char_pos(10), pos(0, 4));
        assert_eq!(lines.utf8_to_char_pos(11), pos(0, 4));
        assert_eq!(lines.utf8_to_char_pos(12), pos(0, 4));
    }

    /// An input whose length lands exactly on an internal block boundary.
    /// NOTE(review): 16384 presumably matches a block-size multiple of the
    /// `bitrank` structure — confirm against the constants in `bitrank.rs`.
    #[test]
    fn test_critical_input_len() {
        let content = [b'a'; 16384];
        let lines = StringOffsets::from_bytes(&content);
        // Offset at the very end of newline-free content maps to the
        // sentinel line (line 1), column 0.
        assert_eq!(lines.utf8_to_utf16_pos(16384), pos(1, 0));
    }
}