arrow_buffer/util/bit_chunk_iterator.rs

// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.

//! Types for iterating over bitmasks in 64-bit chunks

use crate::util::bit_util::ceil;
use std::fmt::Debug;

/// Iterates over an arbitrarily aligned byte buffer
///
/// Yields an iterator of aligned u64, along with the leading and trailing
/// u64 necessary to align the buffer to an 8-byte boundary
///
/// This is unlike [`BitChunkIterator`] which only exposes a trailing u64,
/// and consequently has to perform more work for each read
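///
/// # Example
///
/// A minimal sketch of scanning a small bitmask (this assumes the module is
/// exported as `arrow_buffer::util::bit_chunk_iterator`):
///
/// ```
/// use arrow_buffer::util::bit_chunk_iterator::UnalignedBitChunk;
///
/// // View bits [2, 12) of two fully-set bytes: all 10 bits land in the prefix
/// let chunk = UnalignedBitChunk::new(&[0xFF, 0xFF], 2, 10);
/// assert_eq!(chunk.lead_padding(), 2);
/// assert_eq!(chunk.prefix(), Some(0b1111_1111_1100));
/// assert!(chunk.chunks().is_empty());
/// assert_eq!(chunk.suffix(), None);
/// assert_eq!(chunk.count_ones(), 10);
/// ```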
#[derive(Debug)]
pub struct UnalignedBitChunk<'a> {
    lead_padding: usize,
    trailing_padding: usize,

    prefix: Option<u64>,
    chunks: &'a [u64],
    suffix: Option<u64>,
}

impl<'a> UnalignedBitChunk<'a> {
    /// Create a new [`UnalignedBitChunk`] from a byte array, and an offset and length in bits
    pub fn new(buffer: &'a [u8], offset: usize, len: usize) -> Self {
        if len == 0 {
            return Self {
                lead_padding: 0,
                trailing_padding: 0,
                prefix: None,
                chunks: &[],
                suffix: None,
            };
        }

        let byte_offset = offset / 8;
        let offset_padding = offset % 8;

        let bytes_len = (len + offset_padding + 7) / 8;
        let buffer = &buffer[byte_offset..byte_offset + bytes_len];

        let prefix_mask = compute_prefix_mask(offset_padding);

        // If at most 8 bytes, read into prefix
        if buffer.len() <= 8 {
            let (suffix_mask, trailing_padding) = compute_suffix_mask(len, offset_padding);
            let prefix = read_u64(buffer) & suffix_mask & prefix_mask;

            return Self {
                lead_padding: offset_padding,
                trailing_padding,
                prefix: Some(prefix),
                chunks: &[],
                suffix: None,
            };
        }

        // If at most 16 bytes, read into prefix and suffix
        if buffer.len() <= 16 {
            let (suffix_mask, trailing_padding) = compute_suffix_mask(len, offset_padding);
            let prefix = read_u64(&buffer[..8]) & prefix_mask;
            let suffix = read_u64(&buffer[8..]) & suffix_mask;

            return Self {
                lead_padding: offset_padding,
                trailing_padding,
                prefix: Some(prefix),
                chunks: &[],
                suffix: Some(suffix),
            };
        }

        // Read into prefix and suffix as needed
        let (prefix, mut chunks, suffix) = unsafe { buffer.align_to::<u64>() };
        assert!(
            prefix.len() < 8 && suffix.len() < 8,
            "align_to did not return largest possible aligned slice"
        );

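        // Fold the bytes `align_to` left unaligned, together with the bit offset,
        // into a single prefix u64, tracking how many of its low bits are padding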
        let (alignment_padding, prefix) = match (offset_padding, prefix.is_empty()) {
            (0, true) => (0, None),
            (_, true) => {
                let prefix = chunks[0] & prefix_mask;
                chunks = &chunks[1..];
                (0, Some(prefix))
            }
            (_, false) => {
                let alignment_padding = (8 - prefix.len()) * 8;

                let prefix = (read_u64(prefix) & prefix_mask) << alignment_padding;
                (alignment_padding, Some(prefix))
            }
        };

        let lead_padding = offset_padding + alignment_padding;
        let (suffix_mask, trailing_padding) = compute_suffix_mask(len, lead_padding);

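        // Mask away the trailing padding, taking the suffix either from the last
        // aligned chunk or from the remainder bytes `align_to` left over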
        let suffix = match (trailing_padding, suffix.is_empty()) {
            (0, _) => None,
            (_, true) => {
                let suffix = chunks[chunks.len() - 1] & suffix_mask;
                chunks = &chunks[..chunks.len() - 1];
                Some(suffix)
            }
            (_, false) => Some(read_u64(suffix) & suffix_mask),
        };

        Self {
            lead_padding,
            trailing_padding,
            prefix,
            chunks,
            suffix,
        }
    }

    /// Returns the number of leading padding bits
    pub fn lead_padding(&self) -> usize {
        self.lead_padding
    }

    /// Returns the number of trailing padding bits
    pub fn trailing_padding(&self) -> usize {
        self.trailing_padding
    }

    /// Returns the prefix, if any
    pub fn prefix(&self) -> Option<u64> {
        self.prefix
    }

    /// Returns the suffix, if any
    pub fn suffix(&self) -> Option<u64> {
        self.suffix
    }

    /// Returns a reference to the chunks
    pub fn chunks(&self) -> &'a [u64] {
        self.chunks
    }

    /// Returns an iterator over the chunks
    pub fn iter(&self) -> UnalignedBitChunkIterator<'a> {
        self.prefix
            .into_iter()
            .chain(self.chunks.iter().cloned())
            .chain(self.suffix)
    }

    /// Counts the number of ones
    pub fn count_ones(&self) -> usize {
        self.iter().map(|x| x.count_ones() as usize).sum()
    }
}

/// Iterator over an [`UnalignedBitChunk`]
pub type UnalignedBitChunkIterator<'a> = std::iter::Chain<
    std::iter::Chain<std::option::IntoIter<u64>, std::iter::Cloned<std::slice::Iter<'a, u64>>>,
    std::option::IntoIter<u64>,
>;

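/// Reads `input`, which must be at most 8 bytes, as a little-endian u64,
/// zero-padding any missing high bytes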
#[inline]
fn read_u64(input: &[u8]) -> u64 {
    let len = input.len().min(8);
    let mut buf = [0_u8; 8];
    buf[..len].copy_from_slice(input);
    u64::from_le_bytes(buf)
}

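/// Returns a mask with the lowest `lead_padding` bits cleared, e.g.
/// `compute_prefix_mask(3) == !0b111`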
#[inline]
fn compute_prefix_mask(lead_padding: usize) -> u64 {
    !((1 << lead_padding) - 1)
}

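/// Returns the mask selecting the `(len + lead_padding) % 64` bits of data in
/// the final u64 (all bits if that is zero), along with the number of trailing
/// padding bits, e.g. `compute_suffix_mask(10, 2) == (0xFFF, 52)`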
#[inline]
fn compute_suffix_mask(len: usize, lead_padding: usize) -> (u64, usize) {
    let trailing_bits = (len + lead_padding) % 64;

    if trailing_bits == 0 {
        return (u64::MAX, 0);
    }

    let trailing_padding = 64 - trailing_bits;
    let suffix_mask = (1 << trailing_bits) - 1;
    (suffix_mask, trailing_padding)
}

/// Iterates over an arbitrarily aligned byte buffer
///
/// Yields an iterator of u64, and a remainder. The first byte in the buffer
/// will be the least significant byte in the output u64
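///
/// # Example
///
/// A minimal sketch (this assumes the module is exported as
/// `arrow_buffer::util::bit_chunk_iterator`):
///
/// ```
/// use arrow_buffer::util::bit_chunk_iterator::BitChunks;
///
/// // 10 bits starting at bit 0: no complete u64 chunk, 10 remainder bits
/// let chunks = BitChunks::new(&[0xFF, 0b11], 0, 10);
/// assert_eq!(chunks.chunk_len(), 0);
/// assert_eq!(chunks.remainder_len(), 10);
/// assert_eq!(chunks.remainder_bits(), 0b11_1111_1111);
/// ```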
#[derive(Debug)]
pub struct BitChunks<'a> {
    buffer: &'a [u8],
    /// offset inside a byte, guaranteed to be between 0 and 7 (inclusive)
    bit_offset: usize,
    /// number of complete u64 chunks
    chunk_len: usize,
    /// number of remaining bits, guaranteed to be between 0 and 63 (inclusive)
    remainder_len: usize,
}

impl<'a> BitChunks<'a> {
    /// Create a new [`BitChunks`] from a byte array, and an offset and length in bits
    pub fn new(buffer: &'a [u8], offset: usize, len: usize) -> Self {
        assert!(ceil(offset + len, 8) <= buffer.len());

        let byte_offset = offset / 8;
        let bit_offset = offset % 8;

        // number of complete u64 chunks
        let chunk_len = len / 64;
        // number of remaining bits
        let remainder_len = len % 64;

        BitChunks::<'a> {
            buffer: &buffer[byte_offset..],
            bit_offset,
            chunk_len,
            remainder_len,
        }
    }
}

/// Iterator over chunks of 64 bits represented as a u64
#[derive(Debug)]
pub struct BitChunkIterator<'a> {
    buffer: &'a [u8],
    bit_offset: usize,
    chunk_len: usize,
    index: usize,
}

impl<'a> BitChunks<'a> {
    /// Returns the number of remaining bits, guaranteed to be between 0 and 63 (inclusive)
    #[inline]
    pub const fn remainder_len(&self) -> usize {
        self.remainder_len
    }

    /// Returns the number of complete u64 chunks
    #[inline]
    pub const fn chunk_len(&self) -> usize {
        self.chunk_len
    }

    /// Returns the bitmask of remaining bits
    #[inline]
    pub fn remainder_bits(&self) -> u64 {
        let bit_len = self.remainder_len;
        if bit_len == 0 {
            0
        } else {
            let bit_offset = self.bit_offset;
            // number of bytes to read
            // might be one more than sizeof(u64) if the offset is in the middle of a byte
            let byte_len = ceil(bit_len + bit_offset, 8);
            // pointer to remainder bytes after all complete chunks
            let base = unsafe {
                self.buffer
                    .as_ptr()
                    .add(self.chunk_len * std::mem::size_of::<u64>())
            };

            let mut bits = unsafe { std::ptr::read(base) } as u64 >> bit_offset;
            for i in 1..byte_len {
                let byte = unsafe { std::ptr::read(base.add(i)) };
                bits |= (byte as u64) << (i * 8 - bit_offset);
            }

            bits & ((1 << bit_len) - 1)
        }
    }


    /// Returns an iterator over chunks of 64 bits represented as a u64
    #[inline]
    pub const fn iter(&self) -> BitChunkIterator<'a> {
        BitChunkIterator::<'a> {
            buffer: self.buffer,
            bit_offset: self.bit_offset,
            chunk_len: self.chunk_len,
            index: 0,
        }
    }

    /// Returns an iterator over chunks of 64 bits, with the remaining bits zero-padded to 64 bits
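    ///
    /// A minimal sketch (this assumes the module is exported as
    /// `arrow_buffer::util::bit_chunk_iterator`):
    ///
    /// ```
    /// use arrow_buffer::util::bit_chunk_iterator::BitChunks;
    ///
    /// // One complete chunk of 64 set bits, then 6 remainder bits padded to a u64
    /// let chunks = BitChunks::new(&[0xFF; 9], 0, 70);
    /// let collected: Vec<u64> = chunks.iter_padded().collect();
    /// assert_eq!(collected, vec![u64::MAX, 0x3F]);
    /// ```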
    #[inline]
    pub fn iter_padded(&self) -> impl Iterator<Item = u64> + 'a {
        self.iter().chain(std::iter::once(self.remainder_bits()))
    }
}

impl<'a> IntoIterator for BitChunks<'a> {
    type Item = u64;
    type IntoIter = BitChunkIterator<'a>;

    fn into_iter(self) -> Self::IntoIter {
        self.iter()
    }
}

impl Iterator for BitChunkIterator<'_> {
    type Item = u64;

    #[inline]
    fn next(&mut self) -> Option<u64> {
        let index = self.index;
        if index >= self.chunk_len {
            return None;
        }

        // cast to *const u64 should be fine since we are using read_unaligned below
        #[allow(clippy::cast_ptr_alignment)]
        let raw_data = self.buffer.as_ptr() as *const u64;

        // bit-packed buffers are stored starting with the least-significant byte first
        // so when reading as u64 on a big-endian machine, the bytes need to be swapped
        let current = unsafe { std::ptr::read_unaligned(raw_data.add(index)).to_le() };

        let bit_offset = self.bit_offset;

        let combined = if bit_offset == 0 {
            current
        } else {
            // the constructor ensures that bit_offset is in 0..8
            // that means we need to read at most one additional byte to fill in the high bits
            let next =
                unsafe { std::ptr::read_unaligned(raw_data.add(index + 1) as *const u8) as u64 };

            (current >> bit_offset) | (next << (64 - bit_offset))
        };

        self.index = index + 1;

        Some(combined)
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        (
            self.chunk_len - self.index,
            Some(self.chunk_len - self.index),
        )
    }
}

impl ExactSizeIterator for BitChunkIterator<'_> {
    #[inline]
    fn len(&self) -> usize {
        self.chunk_len - self.index
    }
}

#[cfg(test)]
mod tests {
    use rand::prelude::*;

    use crate::buffer::Buffer;
    use crate::util::bit_chunk_iterator::UnalignedBitChunk;

    #[test]
    fn test_iter_aligned() {
        let input: &[u8] = &[0, 1, 2, 3, 4, 5, 6, 7];
        let buffer: Buffer = Buffer::from(input);

        let bitchunks = buffer.bit_chunks(0, 64);
        let result = bitchunks.into_iter().collect::<Vec<_>>();

        assert_eq!(vec![0x0706050403020100], result);
    }

    #[test]
    fn test_iter_unaligned() {
        let input: &[u8] = &[
            0b00000000, 0b00000001, 0b00000010, 0b00000100, 0b00001000, 0b00010000, 0b00100000,
            0b01000000, 0b11111111,
        ];
        let buffer: Buffer = Buffer::from(input);

        let bitchunks = buffer.bit_chunks(4, 64);

        assert_eq!(0, bitchunks.remainder_len());
        assert_eq!(0, bitchunks.remainder_bits());

        let result = bitchunks.into_iter().collect::<Vec<_>>();

        assert_eq!(
            vec![0b1111010000000010000000010000000010000000010000000010000000010000],
            result
        );
    }

    #[test]
    fn test_iter_unaligned_remainder_1_byte() {
        let input: &[u8] = &[
            0b00000000, 0b00000001, 0b00000010, 0b00000100, 0b00001000, 0b00010000, 0b00100000,
            0b01000000, 0b11111111,
        ];
        let buffer: Buffer = Buffer::from(input);

        let bitchunks = buffer.bit_chunks(4, 66);

        assert_eq!(2, bitchunks.remainder_len());
        assert_eq!(0b00000011, bitchunks.remainder_bits());

        let result = bitchunks.into_iter().collect::<Vec<_>>();

        assert_eq!(
            vec![0b1111010000000010000000010000000010000000010000000010000000010000],
            result
        );
    }

    #[test]
    fn test_iter_unaligned_remainder_bits_across_bytes() {
        let input: &[u8] = &[0b00111111, 0b11111100];
        let buffer: Buffer = Buffer::from(input);

        // remainder contains bits from both bytes
        // result should be the highest 2 bits from the first byte followed by the lowest 5 bits of the second byte
        let bitchunks = buffer.bit_chunks(6, 7);

        assert_eq!(7, bitchunks.remainder_len());
        assert_eq!(0b1110000, bitchunks.remainder_bits());
    }

    #[test]
    fn test_iter_unaligned_remainder_bits_large() {
        let input: &[u8] = &[
            0b11111111, 0b00000000, 0b11111111, 0b00000000, 0b11111111, 0b00000000, 0b11111111,
            0b00000000, 0b11111111,
        ];
        let buffer: Buffer = Buffer::from(input);

        let bitchunks = buffer.bit_chunks(2, 63);

        assert_eq!(63, bitchunks.remainder_len());
        assert_eq!(
            0b100_0000_0011_1111_1100_0000_0011_1111_1100_0000_0011_1111_1100_0000_0011_1111,
            bitchunks.remainder_bits()
        );
    }

    #[test]
    fn test_iter_remainder_out_of_bounds() {
        // allocating a full page should trigger a fault when reading out of bounds
        const ALLOC_SIZE: usize = 4 * 1024;
        let input = vec![0xFF_u8; ALLOC_SIZE];

        let buffer: Buffer = Buffer::from_vec(input);

        let bitchunks = buffer.bit_chunks(57, ALLOC_SIZE * 8 - 57);

        assert_eq!(u64::MAX, bitchunks.iter().last().unwrap());
        assert_eq!(0x7F, bitchunks.remainder_bits());
    }

    #[test]
    #[allow(clippy::assertions_on_constants)]
    fn test_unaligned_bit_chunk_iterator() {
        let buffer = Buffer::from(&[0xFF; 5]);
        let unaligned = UnalignedBitChunk::new(buffer.as_slice(), 0, 40);

        assert!(unaligned.chunks().is_empty()); // Less than 128 elements
        assert_eq!(unaligned.lead_padding(), 0);
        assert_eq!(unaligned.trailing_padding(), 24);
        // 40x 1 bit then 24x 0 bits
        assert_eq!(
            unaligned.prefix(),
            Some(0b0000000000000000000000001111111111111111111111111111111111111111)
        );
        assert_eq!(unaligned.suffix(), None);

        let buffer = buffer.slice(1);
        let unaligned = UnalignedBitChunk::new(buffer.as_slice(), 0, 32);

        assert!(unaligned.chunks().is_empty()); // Less than 128 elements
        assert_eq!(unaligned.lead_padding(), 0);
        assert_eq!(unaligned.trailing_padding(), 32);
        // 32x 1 bit then 32x 0 bits
        assert_eq!(
            unaligned.prefix(),
            Some(0b0000000000000000000000000000000011111111111111111111111111111111)
        );
        assert_eq!(unaligned.suffix(), None);

        let unaligned = UnalignedBitChunk::new(buffer.as_slice(), 5, 27);

        assert!(unaligned.chunks().is_empty()); // Less than 128 elements
        assert_eq!(unaligned.lead_padding(), 5); // 5 % 8 == 5
        assert_eq!(unaligned.trailing_padding(), 32);
        // 5x 0 bit, 27x 1 bit then 32x 0 bits
        assert_eq!(
            unaligned.prefix(),
            Some(0b0000000000000000000000000000000011111111111111111111111111100000)
        );
        assert_eq!(unaligned.suffix(), None);

        let unaligned = UnalignedBitChunk::new(buffer.as_slice(), 12, 20);

        assert!(unaligned.chunks().is_empty()); // Less than 128 elements
        assert_eq!(unaligned.lead_padding(), 4); // 12 % 8 == 4
        assert_eq!(unaligned.trailing_padding(), 40);
        // 4x 0 bit, 20x 1 bit then 40x 0 bits
        assert_eq!(
            unaligned.prefix(),
            Some(0b0000000000000000000000000000000000000000111111111111111111110000)
        );
        assert_eq!(unaligned.suffix(), None);

        let buffer = Buffer::from(&[0xFF; 14]);

        // Verify buffer alignment
        let (prefix, aligned, suffix) = unsafe { buffer.as_slice().align_to::<u64>() };
        assert_eq!(prefix.len(), 0);
        assert_eq!(aligned.len(), 1);
        assert_eq!(suffix.len(), 6);

        let unaligned = UnalignedBitChunk::new(buffer.as_slice(), 0, 112);

        assert!(unaligned.chunks().is_empty()); // Less than 128 elements
        assert_eq!(unaligned.lead_padding(), 0); // No offset and buffer aligned on 64-bit boundary
        assert_eq!(unaligned.trailing_padding(), 16);
        assert_eq!(unaligned.prefix(), Some(u64::MAX));
        assert_eq!(unaligned.suffix(), Some((1 << 48) - 1));

        let buffer = Buffer::from(&[0xFF; 16]);

        // Verify buffer alignment
        let (prefix, aligned, suffix) = unsafe { buffer.as_slice().align_to::<u64>() };
        assert_eq!(prefix.len(), 0);
        assert_eq!(aligned.len(), 2);
        assert_eq!(suffix.len(), 0);

        let unaligned = UnalignedBitChunk::new(buffer.as_slice(), 0, 128);

        assert_eq!(unaligned.prefix(), Some(u64::MAX));
        assert_eq!(unaligned.suffix(), Some(u64::MAX));
        assert!(unaligned.chunks().is_empty()); // Exactly 128 elements

        let buffer = Buffer::from(&[0xFF; 64]);

        // Verify buffer alignment
        let (prefix, aligned, suffix) = unsafe { buffer.as_slice().align_to::<u64>() };
        assert_eq!(prefix.len(), 0);
        assert_eq!(aligned.len(), 8);
        assert_eq!(suffix.len(), 0);

        let unaligned = UnalignedBitChunk::new(buffer.as_slice(), 0, 512);

        // Buffer is completely aligned and larger than 128 elements -> all in chunks array
        assert_eq!(unaligned.suffix(), None);
        assert_eq!(unaligned.prefix(), None);
        assert_eq!(unaligned.chunks(), [u64::MAX; 8].as_slice());
        assert_eq!(unaligned.lead_padding(), 0);
        assert_eq!(unaligned.trailing_padding(), 0);

        let buffer = buffer.slice(1); // Offset buffer 1 byte off 64-bit alignment

        // Verify buffer alignment
        let (prefix, aligned, suffix) = unsafe { buffer.as_slice().align_to::<u64>() };
        assert_eq!(prefix.len(), 7);
        assert_eq!(aligned.len(), 7);
        assert_eq!(suffix.len(), 0);

        let unaligned = UnalignedBitChunk::new(buffer.as_slice(), 0, 504);

        // Need a prefix with 1 byte of lead padding to bring the buffer into alignment
        assert_eq!(unaligned.prefix(), Some(u64::MAX - 0xFF));
        assert_eq!(unaligned.suffix(), None);
        assert_eq!(unaligned.chunks(), [u64::MAX; 7].as_slice());
        assert_eq!(unaligned.lead_padding(), 8);
        assert_eq!(unaligned.trailing_padding(), 0);

        let unaligned = UnalignedBitChunk::new(buffer.as_slice(), 17, 300);

        // Out of 64-bit alignment by 8 bits from buffer, and 17 bits from provided offset
        //   => need 8 + 17 = 25 bits of lead padding + 39 bits in prefix
        //
        // This leaves 300 - 39 = 261 bits remaining
        //   => 4x 64-bit aligned 64-bit chunks + 5 remaining bits
        //   => trailing padding of 59 bits
        assert_eq!(unaligned.lead_padding(), 25);
        assert_eq!(unaligned.trailing_padding(), 59);
        assert_eq!(unaligned.prefix(), Some(u64::MAX - (1 << 25) + 1));
        assert_eq!(unaligned.suffix(), Some(0b11111));
        assert_eq!(unaligned.chunks(), [u64::MAX; 4].as_slice());

        let unaligned = UnalignedBitChunk::new(buffer.as_slice(), 17, 0);

        assert_eq!(unaligned.prefix(), None);
        assert_eq!(unaligned.suffix(), None);
        assert!(unaligned.chunks().is_empty());
        assert_eq!(unaligned.lead_padding(), 0);
        assert_eq!(unaligned.trailing_padding(), 0);

        let unaligned = UnalignedBitChunk::new(buffer.as_slice(), 17, 1);

        assert_eq!(unaligned.prefix(), Some(2));
        assert_eq!(unaligned.suffix(), None);
        assert!(unaligned.chunks().is_empty());
        assert_eq!(unaligned.lead_padding(), 1);
        assert_eq!(unaligned.trailing_padding(), 62);
    }

    #[test]
    #[cfg_attr(miri, ignore)]
    fn fuzz_unaligned_bit_chunk_iterator() {
        let mut rng = thread_rng();

        for _ in 0..100 {
            let mask_len = rng.gen_range(0..1024);
            let bools: Vec<_> = std::iter::from_fn(|| Some(rng.gen()))
                .take(mask_len)
                .collect();

            let buffer = Buffer::from_iter(bools.iter().cloned());

            let max_offset = 64.min(mask_len);
            let offset = rng.gen::<usize>().checked_rem(max_offset).unwrap_or(0);

            let max_truncate = 128.min(mask_len - offset);
            let truncate = rng.gen::<usize>().checked_rem(max_truncate).unwrap_or(0);

            let unaligned =
                UnalignedBitChunk::new(buffer.as_slice(), offset, mask_len - offset - truncate);

            let bool_slice = &bools[offset..mask_len - truncate];

            let count = unaligned.count_ones();
            let expected_count = bool_slice.iter().filter(|x| **x).count();

            assert_eq!(count, expected_count);

            let collected: Vec<u64> = unaligned.iter().collect();

            let get_bit = |idx: usize| -> bool {
                let padded_index = idx + unaligned.lead_padding();
                let word_idx = padded_index / 64;
                let bit_idx = padded_index % 64;
                (collected[word_idx] & (1 << bit_idx)) != 0
            };

            for (idx, b) in bool_slice.iter().enumerate() {
                assert_eq!(*b, get_bit(idx))
            }
        }
    }
}