arrow_buffer/buffer/
immutable.rs

1// Licensed to the Apache Software Foundation (ASF) under one
2// or more contributor license agreements.  See the NOTICE file
3// distributed with this work for additional information
4// regarding copyright ownership.  The ASF licenses this file
5// to you under the Apache License, Version 2.0 (the
6// "License"); you may not use this file except in compliance
7// with the License.  You may obtain a copy of the License at
8//
9//   http://www.apache.org/licenses/LICENSE-2.0
10//
11// Unless required by applicable law or agreed to in writing,
12// software distributed under the License is distributed on an
13// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14// KIND, either express or implied.  See the License for the
15// specific language governing permissions and limitations
16// under the License.
17
18use std::alloc::Layout;
19use std::fmt::Debug;
20use std::ptr::NonNull;
21use std::sync::Arc;
22
23use crate::alloc::{Allocation, Deallocation};
24use crate::util::bit_chunk_iterator::{BitChunks, UnalignedBitChunk};
25use crate::BufferBuilder;
26use crate::{bit_util, bytes::Bytes, native::ArrowNativeType};
27
28use super::ops::bitwise_unary_op_helper;
29use super::{MutableBuffer, ScalarBuffer};
30
31/// A contiguous memory region that can be shared with other buffers and across
32/// thread boundaries that stores Arrow data.
33///
34/// `Buffer`s can be sliced and cloned without copying the underlying data and can
35/// be created from memory allocated by non-Rust sources such as C/C++.
36///
37/// # Example: Create a `Buffer` from a `Vec` (without copying)
38/// ```
39/// # use arrow_buffer::Buffer;
40/// let vec: Vec<u32> = vec![1, 2, 3];
41/// let buffer = Buffer::from(vec);
42/// ```
43///
44/// # Example: Convert a `Buffer` to a `Vec` (without copying)
45///
46/// Use [`Self::into_vec`] to convert a `Buffer` back into a `Vec` if there are
47/// no other references and the types are aligned correctly.
48/// ```
49/// # use arrow_buffer::Buffer;
50/// # let vec: Vec<u32> = vec![1, 2, 3];
51/// # let buffer = Buffer::from(vec);
52/// // convert the buffer back into a Vec of u32
53/// // note this will fail if the buffer is shared or not aligned correctly
54/// let vec: Vec<u32> = buffer.into_vec().unwrap();
55/// ```
56///
57/// # Example: Create a `Buffer` from a [`bytes::Bytes`] (without copying)
58///
59/// [`bytes::Bytes`] is a common type in the Rust ecosystem for shared memory
60/// regions. You can create a buffer from a `Bytes` instance using the `From`
61/// implementation, also without copying.
62///
63/// ```
64/// # use arrow_buffer::Buffer;
65/// let bytes = bytes::Bytes::from("hello");
66/// let buffer = Buffer::from(bytes);
67///```
#[derive(Clone, Debug)]
pub struct Buffer {
    /// The internal byte buffer; shared via `Arc` with all clones and
    /// slices of this buffer.
    data: Arc<Bytes>,

    /// Pointer to the first byte of this buffer within `data`.
    ///
    /// We store a pointer instead of an offset to avoid pointer arithmetic
    /// which causes LLVM to fail to vectorise code correctly
    ptr: *const u8,

    /// Byte length of the buffer.
    ///
    /// Invariant: the `length` bytes starting at `ptr` lie within `data`
    length: usize,
}
84
85impl Default for Buffer {
86    #[inline]
87    fn default() -> Self {
88        MutableBuffer::default().into()
89    }
90}
91
92impl PartialEq for Buffer {
93    fn eq(&self, other: &Self) -> bool {
94        self.as_slice().eq(other.as_slice())
95    }
96}
97
98impl Eq for Buffer {}
99
// SAFETY: `Buffer` adds only a raw pointer that points into the memory owned
// by its `data: Arc<Bytes>` field; sending/sharing a `Buffer` across threads
// is therefore sound whenever `Bytes` itself is `Send`/`Sync`, which is
// exactly what the `where` clauses require.
unsafe impl Send for Buffer where Bytes: Send {}
unsafe impl Sync for Buffer where Bytes: Sync {}
102
103impl Buffer {
104    /// Create a new Buffer from a (internal) `Bytes`
105    ///
106    /// NOTE despite the same name, `Bytes` is an internal struct in arrow-rs
107    /// and is different than [`bytes::Bytes`].
108    ///
109    /// See examples on [`Buffer`] for ways to create a buffer from a [`bytes::Bytes`].
110    #[deprecated(since = "54.1.0", note = "Use Buffer::from instead")]
111    pub fn from_bytes(bytes: Bytes) -> Self {
112        Self::from(bytes)
113    }
114
    /// Returns the offset, in bytes, of `Self::ptr` to `Self::data`
    ///
    /// self.ptr and self.data can be different after slicing or advancing the buffer.
    pub fn ptr_offset(&self) -> usize {
        // Safety: `ptr` is always in bounds of `data` (an invariant upheld by
        // `advance` / `slice_with_length`), so both pointers are derived from
        // the same allocation and the resulting offset is non-negative.
        unsafe { self.ptr.offset_from(self.data.ptr().as_ptr()) as usize }
    }
122
    /// Returns the pointer to the start of the buffer without the offset.
    pub fn data_ptr(&self) -> NonNull<u8> {
        // Note: this is the start of the underlying allocation, which can
        // differ from `as_ptr` once the buffer has been sliced or advanced.
        self.data.ptr()
    }
127
128    /// Create a [`Buffer`] from the provided [`Vec`] without copying
129    #[inline]
130    pub fn from_vec<T: ArrowNativeType>(vec: Vec<T>) -> Self {
131        MutableBuffer::from(vec).into()
132    }
133
134    /// Initializes a [Buffer] from a slice of items.
135    pub fn from_slice_ref<U: ArrowNativeType, T: AsRef<[U]>>(items: T) -> Self {
136        let slice = items.as_ref();
137        let capacity = std::mem::size_of_val(slice);
138        let mut buffer = MutableBuffer::with_capacity(capacity);
139        buffer.extend_from_slice(slice);
140        buffer.into()
141    }
142
143    /// Creates a buffer from an existing memory region.
144    ///
145    /// Ownership of the memory is tracked via reference counting
146    /// and the memory will be freed using the `drop` method of
147    /// [crate::alloc::Allocation] when the reference count reaches zero.
148    ///
149    /// # Arguments
150    ///
151    /// * `ptr` - Pointer to raw parts
152    /// * `len` - Length of raw parts in **bytes**
153    /// * `owner` - A [crate::alloc::Allocation] which is responsible for freeing that data
154    ///
155    /// # Safety
156    ///
157    /// This function is unsafe as there is no guarantee that the given pointer is valid for `len` bytes
158    pub unsafe fn from_custom_allocation(
159        ptr: NonNull<u8>,
160        len: usize,
161        owner: Arc<dyn Allocation>,
162    ) -> Self {
163        Buffer::build_with_arguments(ptr, len, Deallocation::Custom(owner, len))
164    }
165
166    /// Auxiliary method to create a new Buffer
167    unsafe fn build_with_arguments(
168        ptr: NonNull<u8>,
169        len: usize,
170        deallocation: Deallocation,
171    ) -> Self {
172        let bytes = Bytes::new(ptr, len, deallocation);
173        let ptr = bytes.as_ptr();
174        Buffer {
175            ptr,
176            data: Arc::new(bytes),
177            length: len,
178        }
179    }
180
    /// Returns the number of bytes in the buffer
    #[inline]
    pub fn len(&self) -> usize {
        // `length` already reflects any slicing/advancing applied to the buffer
        self.length
    }
186
    /// Returns the capacity of this buffer.
    /// For externally owned buffers, this returns zero
    #[inline]
    pub fn capacity(&self) -> usize {
        // Capacity of the full underlying allocation, not just this slice
        self.data.capacity()
    }
193
    /// Tries to shrink the capacity of the buffer as much as possible, freeing unused memory.
    ///
    /// If the buffer is shared, this is a no-op.
    ///
    /// If the memory was allocated with a custom allocator, this is a no-op.
    ///
    /// If the capacity is already less than or equal to the desired capacity, this is a no-op.
    ///
    /// The memory region will be reallocated using `std::alloc::realloc`.
    pub fn shrink_to_fit(&mut self) {
        let offset = self.ptr_offset();
        let is_empty = self.is_empty();
        let desired_capacity = if is_empty {
            0
        } else {
            // For realloc to work, we cannot free the elements before the offset
            offset + self.len()
        };
        if desired_capacity < self.capacity() {
            // `Arc::get_mut` returns `None` when the buffer is shared,
            // in which case we deliberately leave the allocation untouched
            if let Some(bytes) = Arc::get_mut(&mut self.data) {
                if bytes.try_realloc(desired_capacity).is_ok() {
                    // Realloc complete - update our pointer into `bytes`:
                    // realloc may have moved the allocation, invalidating `self.ptr`
                    self.ptr = if is_empty {
                        bytes.as_ptr()
                    } else {
                        // SAFETY: we kept all elements leading up to the offset
                        unsafe { bytes.as_ptr().add(offset) }
                    }
                } else {
                    // Failure to reallocate is fine; we just failed to free up memory.
                }
            }
        }
    }
228
229    /// Returns true if the buffer is empty.
230    #[inline]
231    pub fn is_empty(&self) -> bool {
232        self.length == 0
233    }
234
    /// Returns the byte slice stored in this buffer
    pub fn as_slice(&self) -> &[u8] {
        // SAFETY: `ptr` points into `data` with at least `length` valid bytes
        // (struct invariant), and `data` is kept alive by the `Arc` for at
        // least the lifetime of the returned slice.
        unsafe { std::slice::from_raw_parts(self.ptr, self.length) }
    }
239
    /// Returns how the memory backing this buffer will be deallocated.
    pub(crate) fn deallocation(&self) -> &Deallocation {
        self.data.deallocation()
    }
243
244    /// Returns a new [Buffer] that is a slice of this buffer starting at `offset`.
245    ///
246    /// This function is `O(1)` and does not copy any data, allowing the
247    /// same memory region to be shared between buffers.
248    ///
249    /// # Panics
250    ///
251    /// Panics iff `offset` is larger than `len`.
252    pub fn slice(&self, offset: usize) -> Self {
253        let mut s = self.clone();
254        s.advance(offset);
255        s
256    }
257
    /// Increases the offset of this buffer by `offset`
    ///
    /// # Panics
    ///
    /// Panics iff `offset` is larger than `len`.
    #[inline]
    pub fn advance(&mut self, offset: usize) {
        assert!(
            offset <= self.length,
            "the offset of the new Buffer cannot exceed the existing length: offset={} length={}",
            offset,
            self.length
        );
        self.length -= offset;
        // Safety:
        // This cannot overflow as
        // `self.ptr + self.length` stays within `data` (struct invariant)
        // `offset <= self.length` (checked by the assert above)
        self.ptr = unsafe { self.ptr.add(offset) };
    }
278
    /// Returns a new [Buffer] that is a slice of this buffer starting at `offset`,
    /// with `length` bytes.
    ///
    /// This function is `O(1)` and does not copy any data, allowing the same
    /// memory region to be shared between buffers.
    ///
    /// # Panics
    /// Panics iff `(offset + length)` is larger than the existing length.
    pub fn slice_with_length(&self, offset: usize, length: usize) -> Self {
        // `saturating_add` ensures the bounds check cannot itself overflow
        // (e.g. `slice_with_length(2, usize::MAX)` must panic, not wrap)
        assert!(
            offset.saturating_add(length) <= self.length,
            "the offset of the new Buffer cannot exceed the existing length: slice offset={offset} length={length} selflen={}",
            self.length
        );
        // Safety:
        // offset + length <= self.length
        let ptr = unsafe { self.ptr.add(offset) };
        Self {
            data: self.data.clone(),
            ptr,
            length,
        }
    }
302
    /// Returns a pointer to the start of this buffer.
    ///
    /// Note that this should be used cautiously, and the returned pointer should not be
    /// stored anywhere, to avoid dangling pointers.
    #[inline]
    pub fn as_ptr(&self) -> *const u8 {
        // Unlike `data_ptr`, this reflects any slice offset applied to the buffer
        self.ptr
    }
311
    /// View buffer as a slice of a specific type.
    ///
    /// # Panics
    ///
    /// This function panics if the underlying buffer is not aligned
    /// correctly for type `T`.
    pub fn typed_data<T: ArrowNativeType>(&self) -> &[T] {
        // SAFETY
        // ArrowNativeType is trivially transmutable, is sealed to prevent potentially incorrect
        // implementation outside this crate, and this method checks alignment
        let (prefix, offsets, suffix) = unsafe { self.as_slice().align_to::<T>() };
        // A non-empty prefix means the buffer start is misaligned for `T`;
        // a non-empty suffix means a trailing partial element - both are bugs
        assert!(prefix.is_empty() && suffix.is_empty());
        offsets
    }
326
327    /// Returns a slice of this buffer starting at a certain bit offset.
328    /// If the offset is byte-aligned the returned buffer is a shallow clone,
329    /// otherwise a new buffer is allocated and filled with a copy of the bits in the range.
330    pub fn bit_slice(&self, offset: usize, len: usize) -> Self {
331        if offset % 8 == 0 {
332            return self.slice_with_length(offset / 8, bit_util::ceil(len, 8));
333        }
334
335        bitwise_unary_op_helper(self, offset, len, |a| a)
336    }
337
    /// Returns a `BitChunks` instance which can be used to iterate over this buffers bits
    /// in larger chunks and starting at arbitrary bit offsets.
    /// Note that both `offset` and `length` are measured in bits.
    pub fn bit_chunks(&self, offset: usize, len: usize) -> BitChunks {
        BitChunks::new(self.as_slice(), offset, len)
    }
344
    /// Returns the number of 1-bits in this buffer, starting from `offset` with `length` bits
    /// inspected. Note that both `offset` and `length` are measured in bits.
    pub fn count_set_bits_offset(&self, offset: usize, len: usize) -> usize {
        // `UnalignedBitChunk` handles arbitrary (non byte-aligned) bit ranges
        UnalignedBitChunk::new(self.as_slice(), offset, len).count_ones()
    }
350
    /// Returns `MutableBuffer` for mutating the buffer if this buffer is not shared.
    /// Returns `Err` if this is shared or its allocation is from an external source or
    /// it is not allocated with alignment [`ALIGNMENT`]
    ///
    /// [`ALIGNMENT`]: crate::alloc::ALIGNMENT
    pub fn into_mutable(self) -> Result<MutableBuffer, Self> {
        // Capture these before `self.data` is consumed so a failed conversion
        // can reconstruct an identical Buffer in the Err branch below
        let ptr = self.ptr;
        let length = self.length;
        // `try_unwrap` fails (returning the Arc) if the buffer is shared
        Arc::try_unwrap(self.data)
            .and_then(|bytes| {
                // The pointer of underlying buffer should not be offset.
                assert_eq!(ptr, bytes.ptr().as_ptr());
                // `from_bytes` rejects external/misaligned allocations
                MutableBuffer::from_bytes(bytes).map_err(Arc::new)
            })
            .map_err(|bytes| Buffer {
                data: bytes,
                ptr,
                length,
            })
    }
371
    /// Converts self into a `Vec`, if possible.
    ///
    /// This can be used to reuse / mutate the underlying data.
    ///
    /// # Errors
    ///
    /// Returns `Err(self)` if
    /// 1. this buffer does not have the same [`Layout`] as the destination Vec
    /// 2. contains a non-zero offset
    /// 3. The buffer is shared
    pub fn into_vec<T: ArrowNativeType>(self) -> Result<Vec<T>, Self> {
        // Only allocations made by the standard allocator can become a Vec,
        // since Vec's Drop will free them with the standard allocator
        let layout = match self.data.deallocation() {
            Deallocation::Standard(l) => l,
            _ => return Err(self), // Custom allocation
        };

        if self.ptr != self.data.as_ptr() {
            return Err(self); // Data is offset
        }

        // The allocation must be exactly what `Vec::with_capacity::<T>` would
        // have produced, otherwise `Vec::from_raw_parts` below would be UB
        let v_capacity = layout.size() / std::mem::size_of::<T>();
        match Layout::array::<T>(v_capacity) {
            Ok(expected) if layout == &expected => {}
            _ => return Err(self), // Incorrect layout
        }

        // Capture before `self.data` is consumed, so the Err branch can
        // reconstruct an identical Buffer
        let length = self.length;
        let ptr = self.ptr;
        let v_len = self.length / std::mem::size_of::<T>();

        // `try_unwrap` fails (returning the Arc) if the buffer is shared
        Arc::try_unwrap(self.data)
            .map(|bytes| unsafe {
                let ptr = bytes.ptr().as_ptr() as _;
                // Ownership of the allocation transfers to the Vec below,
                // so `bytes` must not run its destructor
                std::mem::forget(bytes);
                // Safety
                // Verified that bytes layout matches that of Vec
                Vec::from_raw_parts(ptr, v_len, v_capacity)
            })
            .map_err(|bytes| Buffer {
                data: bytes,
                ptr,
                length,
            })
    }
416
417    /// Returns true if this [`Buffer`] is equal to `other`, using pointer comparisons
418    /// to determine buffer equality. This is cheaper than `PartialEq::eq` but may
419    /// return false when the arrays are logically equal
420    #[inline]
421    pub fn ptr_eq(&self, other: &Self) -> bool {
422        self.ptr == other.ptr && self.length == other.length
423    }
424}
425
426/// Note that here we deliberately do not implement
427/// `impl<T: AsRef<[u8]>> From<T> for Buffer`
428/// As it would accept `Buffer::from(vec![...])` that would cause an unexpected copy.
429/// Instead, we ask user to be explicit when copying is occurring, e.g., `Buffer::from(vec![...].to_byte_slice())`.
430/// For zero-copy conversion, user should use `Buffer::from_vec(vec![...])`.
431///
432/// Since we removed impl for `AsRef<u8>`, we added the following three specific implementations to reduce API breakage.
433/// See <https://github.com/apache/arrow-rs/issues/6033> for more discussion on this.
434impl From<&[u8]> for Buffer {
435    fn from(p: &[u8]) -> Self {
436        Self::from_slice_ref(p)
437    }
438}
439
440impl<const N: usize> From<[u8; N]> for Buffer {
441    fn from(p: [u8; N]) -> Self {
442        Self::from_slice_ref(p)
443    }
444}
445
446impl<const N: usize> From<&[u8; N]> for Buffer {
447    fn from(p: &[u8; N]) -> Self {
448        Self::from_slice_ref(p)
449    }
450}
451
452impl<T: ArrowNativeType> From<Vec<T>> for Buffer {
453    fn from(value: Vec<T>) -> Self {
454        Self::from_vec(value)
455    }
456}
457
/// Extracts the inner [`Buffer`] from a [`ScalarBuffer`]
impl<T: ArrowNativeType> From<ScalarBuffer<T>> for Buffer {
    fn from(value: ScalarBuffer<T>) -> Self {
        value.into_inner()
    }
}
463
464/// Convert from internal `Bytes` (not [`bytes::Bytes`]) to `Buffer`
465impl From<Bytes> for Buffer {
466    #[inline]
467    fn from(bytes: Bytes) -> Self {
468        let length = bytes.len();
469        let ptr = bytes.as_ptr();
470        Self {
471            data: Arc::new(bytes),
472            ptr,
473            length,
474        }
475    }
476}
477
478/// Convert from [`bytes::Bytes`], not internal `Bytes` to `Buffer`
479impl From<bytes::Bytes> for Buffer {
480    fn from(bytes: bytes::Bytes) -> Self {
481        let bytes: Bytes = bytes.into();
482        Self::from(bytes)
483    }
484}
485
486/// Create a `Buffer` instance by storing the boolean values into the buffer
487impl FromIterator<bool> for Buffer {
488    fn from_iter<I>(iter: I) -> Self
489    where
490        I: IntoIterator<Item = bool>,
491    {
492        MutableBuffer::from_iter(iter).into()
493    }
494}
495
496impl std::ops::Deref for Buffer {
497    type Target = [u8];
498
499    fn deref(&self) -> &[u8] {
500        unsafe { std::slice::from_raw_parts(self.as_ptr(), self.len()) }
501    }
502}
503
/// Freezes a [`MutableBuffer`] into an immutable [`Buffer`]
impl From<MutableBuffer> for Buffer {
    #[inline]
    fn from(buffer: MutableBuffer) -> Self {
        buffer.into_buffer()
    }
}
510
/// Finishes the builder, converting its accumulated contents into a [`Buffer`]
impl<T: ArrowNativeType> From<BufferBuilder<T>> for Buffer {
    fn from(mut value: BufferBuilder<T>) -> Self {
        value.finish()
    }
}
516
517impl Buffer {
518    /// Creates a [`Buffer`] from an [`Iterator`] with a trusted (upper) length.
519    ///
520    /// Prefer this to `collect` whenever possible, as it is ~60% faster.
521    ///
522    /// # Example
523    /// ```
524    /// # use arrow_buffer::buffer::Buffer;
525    /// let v = vec![1u32];
526    /// let iter = v.iter().map(|x| x * 2);
527    /// let buffer = unsafe { Buffer::from_trusted_len_iter(iter) };
528    /// assert_eq!(buffer.len(), 4) // u32 has 4 bytes
529    /// ```
530    /// # Safety
531    /// This method assumes that the iterator's size is correct and is undefined behavior
532    /// to use it on an iterator that reports an incorrect length.
533    // This implementation is required for two reasons:
534    // 1. there is no trait `TrustedLen` in stable rust and therefore
535    //    we can't specialize `extend` for `TrustedLen` like `Vec` does.
536    // 2. `from_trusted_len_iter` is faster.
537    #[inline]
538    pub unsafe fn from_trusted_len_iter<T: ArrowNativeType, I: Iterator<Item = T>>(
539        iterator: I,
540    ) -> Self {
541        MutableBuffer::from_trusted_len_iter(iterator).into()
542    }
543
544    /// Creates a [`Buffer`] from an [`Iterator`] with a trusted (upper) length or errors
545    /// if any of the items of the iterator is an error.
546    /// Prefer this to `collect` whenever possible, as it is ~60% faster.
547    /// # Safety
548    /// This method assumes that the iterator's size is correct and is undefined behavior
549    /// to use it on an iterator that reports an incorrect length.
550    #[inline]
551    pub unsafe fn try_from_trusted_len_iter<
552        E,
553        T: ArrowNativeType,
554        I: Iterator<Item = Result<T, E>>,
555    >(
556        iterator: I,
557    ) -> Result<Self, E> {
558        Ok(MutableBuffer::try_from_trusted_len_iter(iterator)?.into())
559    }
560}
561
562impl<T: ArrowNativeType> FromIterator<T> for Buffer {
563    fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self {
564        let vec = Vec::from_iter(iter);
565        Buffer::from_vec(vec)
566    }
567}
568
569#[cfg(test)]
570mod tests {
571    use crate::i256;
572    use std::panic::{RefUnwindSafe, UnwindSafe};
573    use std::thread;
574
575    use super::*;
576
    /// `Buffer` equality is content-based: equal bytes mean equal buffers,
    /// regardless of capacity or how the buffer was constructed.
    #[test]
    fn test_buffer_data_equality() {
        let buf1 = Buffer::from(&[0, 1, 2, 3, 4]);
        let buf2 = Buffer::from(&[0, 1, 2, 3, 4]);
        assert_eq!(buf1, buf2);

        // slices compare by their visible contents: a slice differs from its
        // parent, but two slices over the same bytes are equal
        let buf3 = buf1.slice(2);
        assert_ne!(buf1, buf3);
        let buf4 = buf2.slice_with_length(2, 3);
        assert_eq!(buf3, buf4);

        // Different capacities should still preserve equality
        let mut buf2 = MutableBuffer::new(65);
        buf2.extend_from_slice(&[0u8, 1, 2, 3, 4]);

        let buf2 = buf2.into();
        assert_eq!(buf1, buf2);

        // unequal because of different elements
        let buf2 = Buffer::from(&[0, 0, 2, 3, 4]);
        assert_ne!(buf1, buf2);

        // unequal because of different length
        let buf2 = Buffer::from(&[0, 1, 2, 3]);
        assert_ne!(buf1, buf2);
    }
604
605    #[test]
606    fn test_from_raw_parts() {
607        let buf = Buffer::from(&[0, 1, 2, 3, 4]);
608        assert_eq!(5, buf.len());
609        assert!(!buf.as_ptr().is_null());
610        assert_eq!([0, 1, 2, 3, 4], buf.as_slice());
611    }
612
613    #[test]
614    fn test_from_vec() {
615        let buf = Buffer::from(&[0, 1, 2, 3, 4]);
616        assert_eq!(5, buf.len());
617        assert!(!buf.as_ptr().is_null());
618        assert_eq!([0, 1, 2, 3, 4], buf.as_slice());
619    }
620
621    #[test]
622    fn test_copy() {
623        let buf = Buffer::from(&[0, 1, 2, 3, 4]);
624        let buf2 = buf;
625        assert_eq!(5, buf2.len());
626        assert_eq!(64, buf2.capacity());
627        assert!(!buf2.as_ptr().is_null());
628        assert_eq!([0, 1, 2, 3, 4], buf2.as_slice());
629    }
630
    /// Slicing is zero-copy: the sliced buffer's pointer is offset into the
    /// parent's allocation rather than pointing at copied data.
    #[test]
    fn test_slice() {
        let buf = Buffer::from(&[2, 4, 6, 8, 10]);
        let buf2 = buf.slice(2);

        assert_eq!([6, 8, 10], buf2.as_slice());
        assert_eq!(3, buf2.len());
        assert_eq!(unsafe { buf.as_ptr().offset(2) }, buf2.as_ptr());

        // slicing a slice composes the offsets
        let buf3 = buf2.slice_with_length(1, 2);
        assert_eq!([8, 10], buf3.as_slice());
        assert_eq!(2, buf3.len());
        assert_eq!(unsafe { buf.as_ptr().offset(3) }, buf3.as_ptr());

        // slicing at exactly `len` yields a valid empty buffer
        let buf4 = buf.slice(5);
        let empty_slice: [u8; 0] = [];
        assert_eq!(empty_slice, buf4.as_slice());
        assert_eq!(0, buf4.len());
        assert!(buf4.is_empty());
        assert_eq!(buf2.slice_with_length(2, 1).as_slice(), &[10]);
    }
652
    /// `shrink_to_fit` reallocates an unshared buffer down to
    /// `offset + len` bytes (it must keep the bytes before the offset).
    #[test]
    fn test_shrink_to_fit() {
        let original = Buffer::from(&[0, 1, 2, 3, 4, 5, 6, 7]);
        assert_eq!(original.as_slice(), &[0, 1, 2, 3, 4, 5, 6, 7]);
        assert_eq!(original.capacity(), 64);

        let slice = original.slice_with_length(2, 3);
        drop(original); // Make sure the buffer isn't shared (or shrink_to_fit won't work)
        assert_eq!(slice.as_slice(), &[2, 3, 4]);
        assert_eq!(slice.capacity(), 64);

        let mut shrunk = slice;
        shrunk.shrink_to_fit();
        assert_eq!(shrunk.as_slice(), &[2, 3, 4]);
        assert_eq!(shrunk.capacity(), 5); // shrink_to_fit is allowed to keep the elements before the offset

        // Test that we can handle empty slices:
        let empty_slice = shrunk.slice_with_length(1, 0);
        drop(shrunk); // Make sure the buffer isn't shared (or shrink_to_fit won't work)
        assert_eq!(empty_slice.as_slice(), &[]);
        assert_eq!(empty_slice.capacity(), 5);

        let mut shrunk_empty = empty_slice;
        shrunk_empty.shrink_to_fit();
        assert_eq!(shrunk_empty.as_slice(), &[]);
        assert_eq!(shrunk_empty.capacity(), 0);
    }
680
    /// Slicing beyond `len` must panic with the message asserted below
    /// (emitted by `Buffer::advance`).
    #[test]
    #[should_panic(expected = "the offset of the new Buffer cannot exceed the existing length")]
    fn test_slice_offset_out_of_bound() {
        let buf = Buffer::from(&[2, 4, 6, 8, 10]);
        buf.slice(6);
    }

    /// A `Buffer` (being `Send`) can be moved to another thread while a
    /// clone sharing the same allocation stays on the original thread.
    #[test]
    fn test_access_concurrently() {
        let buffer = Buffer::from([1, 2, 3, 4, 5]);
        let buffer2 = buffer.clone();
        assert_eq!([1, 2, 3, 4, 5], buffer.as_slice());

        let buffer_copy = thread::spawn(move || {
            // access buffer in another thread.
            buffer
        })
        .join();

        assert!(buffer_copy.is_ok());
        assert_eq!(buffer2, buffer_copy.ok().unwrap());
    }

    // Round-trips a typed slice through `from_slice_ref` + `typed_data`
    // and asserts the bytes reinterpret back to the original values.
    macro_rules! check_as_typed_data {
        ($input: expr, $native_t: ty) => {{
            let buffer = Buffer::from_slice_ref($input);
            let slice: &[$native_t] = buffer.typed_data::<$native_t>();
            assert_eq!($input, slice);
        }};
    }
711
    /// `typed_data` round-trips for every `ArrowNativeType` primitive width.
    #[test]
    #[allow(clippy::float_cmp)]
    fn test_as_typed_data() {
        check_as_typed_data!(&[1i8, 3i8, 6i8], i8);
        check_as_typed_data!(&[1u8, 3u8, 6u8], u8);
        check_as_typed_data!(&[1i16, 3i16, 6i16], i16);
        check_as_typed_data!(&[1i32, 3i32, 6i32], i32);
        check_as_typed_data!(&[1i64, 3i64, 6i64], i64);
        check_as_typed_data!(&[1u16, 3u16, 6u16], u16);
        check_as_typed_data!(&[1u32, 3u32, 6u32], u32);
        check_as_typed_data!(&[1u64, 3u64, 6u64], u64);
        check_as_typed_data!(&[1f32, 3f32, 6f32], f32);
        check_as_typed_data!(&[1f64, 3f64, 6f64], f64);
    }
726
    /// Counting set bits over full bytes starting at bit offset 0.
    #[test]
    fn test_count_bits() {
        assert_eq!(0, Buffer::from(&[0b00000000]).count_set_bits_offset(0, 8));
        assert_eq!(8, Buffer::from(&[0b11111111]).count_set_bits_offset(0, 8));
        assert_eq!(3, Buffer::from(&[0b00001101]).count_set_bits_offset(0, 8));
        assert_eq!(
            6,
            Buffer::from(&[0b01001001, 0b01010010]).count_set_bits_offset(0, 16)
        );
        assert_eq!(
            16,
            Buffer::from(&[0b11111111, 0b11111111]).count_set_bits_offset(0, 16)
        );
    }

    /// Counting set bits on byte-sliced buffers: only the bytes visible
    /// through the slice are counted.
    #[test]
    fn test_count_bits_slice() {
        assert_eq!(
            0,
            Buffer::from(&[0b11111111, 0b00000000])
                .slice(1)
                .count_set_bits_offset(0, 8)
        );
        assert_eq!(
            8,
            Buffer::from(&[0b11111111, 0b11111111])
                .slice_with_length(1, 1)
                .count_set_bits_offset(0, 8)
        );
        assert_eq!(
            3,
            Buffer::from(&[0b11111111, 0b11111111, 0b00001101])
                .slice(2)
                .count_set_bits_offset(0, 8)
        );
        assert_eq!(
            6,
            Buffer::from(&[0b11111111, 0b01001001, 0b01010010])
                .slice_with_length(1, 2)
                .count_set_bits_offset(0, 16)
        );
        assert_eq!(
            16,
            Buffer::from(&[0b11111111, 0b11111111, 0b11111111, 0b11111111])
                .slice(2)
                .count_set_bits_offset(0, 16)
        );
    }

    /// Counting set bits with arbitrary (non byte-aligned) bit offsets
    /// and lengths, including zero-length ranges.
    #[test]
    fn test_count_bits_offset_slice() {
        assert_eq!(8, Buffer::from(&[0b11111111]).count_set_bits_offset(0, 8));
        assert_eq!(3, Buffer::from(&[0b11111111]).count_set_bits_offset(0, 3));
        assert_eq!(5, Buffer::from(&[0b11111111]).count_set_bits_offset(3, 5));
        assert_eq!(1, Buffer::from(&[0b11111111]).count_set_bits_offset(3, 1));
        assert_eq!(0, Buffer::from(&[0b11111111]).count_set_bits_offset(8, 0));
        assert_eq!(2, Buffer::from(&[0b01010101]).count_set_bits_offset(0, 3));
        assert_eq!(
            16,
            Buffer::from(&[0b11111111, 0b11111111]).count_set_bits_offset(0, 16)
        );
        assert_eq!(
            10,
            Buffer::from(&[0b11111111, 0b11111111]).count_set_bits_offset(0, 10)
        );
        assert_eq!(
            10,
            Buffer::from(&[0b11111111, 0b11111111]).count_set_bits_offset(3, 10)
        );
        assert_eq!(
            8,
            Buffer::from(&[0b11111111, 0b11111111]).count_set_bits_offset(8, 8)
        );
        assert_eq!(
            5,
            Buffer::from(&[0b11111111, 0b11111111]).count_set_bits_offset(11, 5)
        );
        assert_eq!(
            0,
            Buffer::from(&[0b11111111, 0b11111111]).count_set_bits_offset(16, 0)
        );
        assert_eq!(
            2,
            Buffer::from(&[0b01101101, 0b10101010]).count_set_bits_offset(7, 5)
        );
        assert_eq!(
            4,
            Buffer::from(&[0b01101101, 0b10101010]).count_set_bits_offset(7, 9)
        );
    }
817
    /// `Buffer` must be `UnwindSafe` + `RefUnwindSafe`; this compiles only
    /// if those auto-trait bounds hold.
    #[test]
    fn test_unwind_safe() {
        fn assert_unwind_safe<T: RefUnwindSafe + UnwindSafe>() {}
        assert_unwind_safe::<Buffer>()
    }

    /// A buffer built over externally owned memory (here, a `Vec` kept alive
    /// as the `Allocation` owner) can be read and sliced like any other.
    #[test]
    fn test_from_foreign_vec() {
        let mut vector = vec![1_i32, 2, 3, 4, 5];
        let buffer = unsafe {
            Buffer::from_custom_allocation(
                NonNull::new_unchecked(vector.as_mut_ptr() as *mut u8),
                vector.len() * std::mem::size_of::<i32>(),
                Arc::new(vector),
            )
        };

        let slice = buffer.typed_data::<i32>();
        assert_eq!(slice, &[1, 2, 3, 4, 5]);

        let buffer = buffer.slice(std::mem::size_of::<i32>());

        let slice = buffer.typed_data::<i32>();
        assert_eq!(slice, &[2, 3, 4, 5]);
    }

    /// `slice_with_length` must panic (not wrap) when `offset + length`
    /// overflows `usize` — exercised via the `saturating_add` bounds check.
    #[test]
    #[should_panic(expected = "the offset of the new Buffer cannot exceed the existing length")]
    fn slice_overflow() {
        let buffer = Buffer::from(MutableBuffer::from_len_zeroed(12));
        buffer.slice_with_length(2, usize::MAX);
    }
850
    #[test]
    fn test_vec_interop() {
        // Round-trips `Vec<T>` -> `Buffer` -> `Vec<T>` and checks when the
        // zero-copy `into_vec` conversion succeeds vs. fails. On success the
        // original length AND capacity must be recovered; on failure the
        // buffer is handed back unchanged inside the `Err`.

        // Test empty vec
        let a: Vec<i128> = Vec::new();
        let b = Buffer::from_vec(a);
        b.into_vec::<i128>().unwrap();

        // Test vec with capacity (spare capacity must survive the round trip)
        let a: Vec<i128> = Vec::with_capacity(20);
        let b = Buffer::from_vec(a);
        let back = b.into_vec::<i128>().unwrap();
        assert_eq!(back.len(), 0);
        assert_eq!(back.capacity(), 20);

        // Test vec with values
        let mut a: Vec<i128> = Vec::with_capacity(3);
        a.extend_from_slice(&[1, 2, 3]);
        let b = Buffer::from_vec(a);
        let back = b.into_vec::<i128>().unwrap();
        assert_eq!(back.len(), 3);
        assert_eq!(back.capacity(), 3);

        // Test vec with values and spare capacity
        let mut a: Vec<i128> = Vec::with_capacity(20);
        a.extend_from_slice(&[1, 4, 7, 8, 9, 3, 6]);
        let b = Buffer::from_vec(a);
        let back = b.into_vec::<i128>().unwrap();
        assert_eq!(back.len(), 7);
        assert_eq!(back.capacity(), 20);

        // Test incorrect alignment: converting to a type with a different
        // alignment than the original allocation must fail
        let a: Vec<i128> = Vec::new();
        let b = Buffer::from_vec(a);
        let b = b.into_vec::<i32>().unwrap_err();
        // the failed conversion returns the buffer, which can be retried
        b.into_vec::<i8>().unwrap_err();

        // Test convert between types with same alignment
        // This is an implementation quirk, but isn't harmful
        // as ArrowNativeType are trivially transmutable
        let a: Vec<i64> = vec![1, 2, 3, 4];
        let b = Buffer::from_vec(a);
        let back = b.into_vec::<u64>().unwrap();
        assert_eq!(back.len(), 4);
        assert_eq!(back.capacity(), 4);

        // i256 has the same layout as i128 so this is valid
        // (4 x i128 reinterpreted as 2 x i256)
        let mut b: Vec<i128> = Vec::with_capacity(4);
        b.extend_from_slice(&[1, 2, 3, 4]);
        let b = Buffer::from_vec(b);
        let back = b.into_vec::<i256>().unwrap();
        assert_eq!(back.len(), 2);
        assert_eq!(back.capacity(), 2);

        // Invalid layout: 3 x i128 is not a whole number of i256
        let b: Vec<i128> = vec![1, 2, 3];
        let b = Buffer::from_vec(b);
        b.into_vec::<i256>().unwrap_err();

        // Invalid layout: capacity of 5 x i128 is not a whole number of i256
        let mut b: Vec<i128> = Vec::with_capacity(5);
        b.extend_from_slice(&[1, 2, 3, 4]);
        let b = Buffer::from_vec(b);
        b.into_vec::<i256>().unwrap_err();

        // Truncates length (3 x i128 -> 1 x i256, the partial element is lost)
        // This is an implementation quirk, but isn't harmful
        let mut b: Vec<i128> = Vec::with_capacity(4);
        b.extend_from_slice(&[1, 2, 3]);
        let b = Buffer::from_vec(b);
        let back = b.into_vec::<i256>().unwrap();
        assert_eq!(back.len(), 1);
        assert_eq!(back.capacity(), 2);

        // Cannot use aligned allocation: buffers backed by Arrow's own
        // aligned allocator cannot be handed to a Vec
        let b = Buffer::from(MutableBuffer::new(10));
        let b = b.into_vec::<u8>().unwrap_err();
        b.into_vec::<u64>().unwrap_err();

        // Test slicing
        let mut a: Vec<i128> = Vec::with_capacity(20);
        a.extend_from_slice(&[1, 4, 7, 8, 9, 3, 6]);
        let b = Buffer::from_vec(a);
        let slice = b.slice_with_length(0, 64);

        // Shared reference fails (`b` still references the allocation)
        let slice = slice.into_vec::<i128>().unwrap_err();
        drop(b);

        // Succeeds as no outstanding shared reference
        // (64 bytes / 16 bytes per i128 = the first 4 values)
        let back = slice.into_vec::<i128>().unwrap();
        assert_eq!(&back, &[1, 4, 7, 8]);
        assert_eq!(back.capacity(), 20);

        // Slicing by non-multiple length truncates (34 bytes -> 2 x i128)
        let mut a: Vec<i128> = Vec::with_capacity(8);
        a.extend_from_slice(&[1, 4, 7, 3]);

        let b = Buffer::from_vec(a);
        let slice = b.slice_with_length(0, 34);
        drop(b);

        let back = slice.into_vec::<i128>().unwrap();
        assert_eq!(&back, &[1, 4]);
        assert_eq!(back.capacity(), 8);

        // Offset prevents conversion (a Vec cannot start mid-allocation)
        let a: Vec<u32> = vec![1, 3, 4, 6];
        let b = Buffer::from_vec(a).slice(2);
        b.into_vec::<u32>().unwrap_err();

        // A buffer that cannot become a Vec can still be reclaimed as a
        // MutableBuffer
        let b = MutableBuffer::new(16).into_buffer();
        let b = b.into_vec::<u8>().unwrap_err(); // Invalid layout
        let b = b.into_vec::<u32>().unwrap_err(); // Invalid layout
        b.into_mutable().unwrap();

        // Round trip through MutableBuffer preserves Vec convertibility
        let b = Buffer::from_vec(vec![1_u32, 3, 5]);
        let b = b.into_mutable().unwrap();
        let b = Buffer::from(b);
        let b = b.into_vec::<u32>().unwrap();
        assert_eq!(b, &[1, 3, 5]);
    }
972
973    #[test]
974    #[should_panic(expected = "capacity overflow")]
975    fn test_from_iter_overflow() {
976        let iter_len = usize::MAX / std::mem::size_of::<u64>() + 1;
977        let _ = Buffer::from_iter(std::iter::repeat(0_u64).take(iter_len));
978    }
979
980    #[test]
981    fn bit_slice_length_preserved() {
982        // Create a boring buffer
983        let buf = Buffer::from_iter(std::iter::repeat(true).take(64));
984
985        let assert_preserved = |offset: usize, len: usize| {
986            let new_buf = buf.bit_slice(offset, len);
987            assert_eq!(new_buf.len(), bit_util::ceil(len, 8));
988
989            // if the offset is not byte-aligned, we have to create a deep copy to a new buffer
990            // (since the `offset` value inside a Buffer is byte-granular, not bit-granular), so
991            // checking the offset should always return 0 if so. If the offset IS byte-aligned, we
992            // want to make sure it doesn't unnecessarily create a deep copy.
993            if offset % 8 == 0 {
994                assert_eq!(new_buf.ptr_offset(), offset / 8);
995            } else {
996                assert_eq!(new_buf.ptr_offset(), 0);
997            }
998        };
999
1000        // go through every available value for offset
1001        for o in 0..=64 {
1002            // and go through every length that could accompany that offset - we can't have a
1003            // situation where offset + len > 64, because that would go past the end of the buffer,
1004            // so we use the map to ensure it's in range.
1005            for l in (o..=64).map(|l| l - o) {
1006                // and we just want to make sure every one of these keeps its offset and length
1007                // when neeeded
1008                assert_preserved(o, l);
1009            }
1010        }
1011    }
1012}