Apache Arrow (C++)
A columnar in-memory analytics layer designed to accelerate big data.
bit-util.h
Go to the documentation of this file.
1 // Licensed to the Apache Software Foundation (ASF) under one
2 // or more contributor license agreements. See the NOTICE file
3 // distributed with this work for additional information
4 // regarding copyright ownership. The ASF licenses this file
5 // to you under the Apache License, Version 2.0 (the
6 // "License"); you may not use this file except in compliance
7 // with the License. You may obtain a copy of the License at
8 //
9 // http://www.apache.org/licenses/LICENSE-2.0
10 //
11 // Unless required by applicable law or agreed to in writing,
12 // software distributed under the License is distributed on an
13 // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 // KIND, either express or implied. See the License for the
15 // specific language governing permissions and limitations
16 // under the License.
17 
18 #ifndef ARROW_UTIL_BIT_UTIL_H
19 #define ARROW_UTIL_BIT_UTIL_H
20 
21 #ifdef _WIN32
22 #define ARROW_LITTLE_ENDIAN 1
23 #else
24 #ifdef __APPLE__
25 #include <machine/endian.h>
26 #else
27 #include <endian.h>
28 #endif
29 #
30 #ifndef __BYTE_ORDER__
31 #error "__BYTE_ORDER__ not defined"
32 #endif
33 #
34 #ifndef __ORDER_LITTLE_ENDIAN__
35 #error "__ORDER_LITTLE_ENDIAN__ not defined"
36 #endif
37 #
38 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
39 #define ARROW_LITTLE_ENDIAN 1
40 #else
41 #define ARROW_LITTLE_ENDIAN 0
42 #endif
43 #endif
44 
45 #if defined(_MSC_VER)
46 #include <intrin.h>
47 #pragma intrinsic(_BitScanReverse)
48 #define ARROW_BYTE_SWAP64 _byteswap_uint64
49 #define ARROW_BYTE_SWAP32 _byteswap_ulong
50 #else
51 #define ARROW_BYTE_SWAP64 __builtin_bswap64
52 #define ARROW_BYTE_SWAP32 __builtin_bswap32
53 #endif
54 
55 #include <cstdint>
56 #include <limits>
57 #include <memory>
58 #include <type_traits>
59 #include <vector>
60 
61 #include "arrow/util/macros.h"
62 #include "arrow/util/type_traits.h"
63 #include "arrow/util/visibility.h"
64 
65 #ifdef ARROW_USE_SSE
66 #include "arrow/util/cpu-info.h"
67 #include "arrow/util/sse-util.h"
68 #endif
69 
70 namespace arrow {
71 
72 namespace detail {
73 
// Reinterpret a signed integer as its same-width unsigned counterpart.
// The bit pattern is preserved (two's complement), only the type changes.
template <typename Integer>
typename std::make_unsigned<Integer>::type as_unsigned(Integer x) {
  using Unsigned = typename std::make_unsigned<Integer>::type;
  return static_cast<Unsigned>(x);
}
78 
79 } // namespace detail
80 
81 class Buffer;
82 class MemoryPool;
83 class MutableBuffer;
84 class Status;
85 
86 namespace BitUtil {
87 
88 //
89 // Utilities for reading and writing individual bits by their index
90 // in a memory area.
91 //
92 
// Bitmask selecting the k-th bit in a byte
static constexpr uint8_t kBitmask[] = {0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80};

// the bitwise complement version of kBitmask
static constexpr uint8_t kFlippedBitmask[] = {0xFE, 0xFD, 0xFB, 0xF7,
                                              0xEF, 0xDF, 0xBF, 0x7F};

// Bitmask selecting the (k - 1) preceding bits in a byte
static constexpr uint8_t kPrecedingBitmask[] = {0x00, 0x01, 0x03, 0x07,
                                                0x0F, 0x1F, 0x3F, 0x7F};

// the bitwise complement version of kPrecedingBitmask
static constexpr uint8_t kTrailingBitmask[] = {0xFF, 0xFE, 0xFC, 0xF8,
                                               0xF0, 0xE0, 0xC0, 0x80};

// Round a bit count up to the next multiple of 8.
static inline int64_t CeilByte(int64_t size) {
  const int64_t bumped = size + 7;
  return bumped - (bumped & 7);
}

// Number of bytes needed to hold 'size' bits.
static inline int64_t BytesForBits(int64_t size) { return CeilByte(size) / 8; }

// Round a bit count up to the next multiple of 16.
static inline int64_t Ceil2Bytes(int64_t size) {
  const int64_t bumped = size + 15;
  return bumped - (bumped & 15);
}

// Return the value of bit i in the buffer.
static inline bool GetBit(const uint8_t* bits, int64_t i) {
  const uint8_t byte = bits[i / 8];
  return (byte & kBitmask[i % 8]) != 0;
}

// True when bit i is 0.
static inline bool BitNotSet(const uint8_t* bits, int64_t i) { return !GetBit(bits, i); }

// Force bit i to 0, leaving all other bits of the byte untouched.
static inline void ClearBit(uint8_t* bits, int64_t i) {
  bits[i / 8] = static_cast<uint8_t>(bits[i / 8] & kFlippedBitmask[i % 8]);
}

// Force bit i to 1, leaving all other bits of the byte untouched.
static inline void SetBit(uint8_t* bits, int64_t i) {
  bits[i / 8] = static_cast<uint8_t>(bits[i / 8] | kBitmask[i % 8]);
}
124 
// Set bit i of 'bits' when is_set is true.
// NOTE(review): when is_set is false this is a no-op -- it does NOT clear an
// already-set bit (unlike SetBitTo below).  Callers relying on it as a plain
// store need the bitmap pre-zeroed; confirm at call sites.
static inline void SetArrayBit(uint8_t* bits, int i, bool is_set) {
  if (is_set) {
    SetBit(bits, i);
  }
}
131 
// Store 'bit_is_set' into bit i, overwriting the previous value.
// The original used the branchless XOR trick from the Stanford bit hacks
// page; its own comment noted the trick confuses Valgrind.  A plain branch
// has identical observable behavior.
static inline void SetBitTo(uint8_t* bits, int64_t i, bool bit_is_set) {
  const uint8_t mask = static_cast<uint8_t>(1u << (i % 8));
  if (bit_is_set) {
    bits[i / 8] = static_cast<uint8_t>(bits[i / 8] | mask);
  } else {
    bits[i / 8] = static_cast<uint8_t>(bits[i / 8] & ~mask);
  }
}
140 
// Returns the minimum number of bits needed to represent the value of 'x':
// one past the 0-indexed position of the most significant set bit, or 0 when
// x == 0.
static inline int NumRequiredBits(uint64_t x) {
  int width = 0;
  while (x != 0) {
    x >>= 1;
    ++width;
  }
  return width;
}
148 
// Returns the smallest power of two >= n.  Propagates the highest set bit
// of (n - 1) into every lower position, then adds one.  Note n = 0 yields 0.
static inline int64_t NextPower2(int64_t n) {
  int64_t v = n - 1;
  for (int shift = 1; shift < 64; shift *= 2) {
    v |= v >> shift;
  }
  return v + 1;
}
164 
// True when n is divisible by 64 (low six bits all zero).
static inline bool IsMultipleOf64(int64_t n) { return (n & 0x3F) == 0; }

// True when n is divisible by 8 (low three bits all zero).
static inline bool IsMultipleOf8(int64_t n) { return (n & 0x07) == 0; }
168 
// Integer ceiling of value / divisor: bump the truncated quotient by one
// whenever a remainder exists.
static inline int64_t Ceil(int64_t value, int64_t divisor) {
  const int64_t quotient = value / divisor;
  const int64_t remainder = value % divisor;
  return remainder != 0 ? quotient + 1 : quotient;
}

// Round value up to the nearest multiple of factor.
inline int64_t RoundUp(int64_t value, int64_t factor) {
  const int64_t bumped = value + factor - 1;
  return bumped - bumped % factor;
}

// Round value down to the nearest multiple of factor.
static inline int64_t RoundDown(int64_t value, int64_t factor) {
  return value - value % factor;
}
183 
// Round value up to the nearest multiple of factor; factor must be a power
// of two (the original code DCHECKed this).
static inline int RoundUpToPowerOf2(int value, int factor) {
  // DCHECK((factor > 0) && ((factor & (factor - 1)) == 0));
  const int mask = factor - 1;
  return (value + mask) & ~mask;
}

// Round value down to the nearest multiple of factor (power of two).
static inline int RoundDownToPowerOf2(int value, int factor) {
  // DCHECK((factor > 0) && ((factor & (factor - 1)) == 0));
  const int mask = factor - 1;
  return value & ~mask;
}

// Number of whole bytes needed to hold 'bits' bits.
static inline uint32_t RoundUpNumBytes(uint32_t bits) { return (bits + 7) / 8; }

// Number of bytes completely covered by 'bits' bits.
static inline uint32_t RoundDownNumBytes(uint32_t bits) { return bits / 8; }

// Number of 32-bit words needed to hold 'bits' bits.
static inline uint32_t RoundUpNumi32(uint32_t bits) { return (bits + 31) / 32; }

// Number of 32-bit words completely covered by 'bits' bits.
static inline uint32_t RoundDownNumi32(uint32_t bits) { return bits / 32; }

// Number of 64-bit words needed to hold 'bits' bits.
static inline uint32_t RoundUpNumi64(uint32_t bits) { return (bits + 63) / 64; }

// Number of 64-bit words completely covered by 'bits' bits.
static inline uint32_t RoundDownNumi64(uint32_t bits) { return bits / 64; }
215 
// Round 'num' up to the next multiple of the compile-time power of two
// ROUND_TO.  When the addition would overflow int64_t, 'num' is returned
// unchanged -- the oversized request is expected to fail at allocation time.
template <int64_t ROUND_TO>
static inline int64_t RoundToPowerOfTwo(int64_t num) {
  // TODO(wesm): is this definitely needed?
  // DCHECK_GE(num, 0);
  constexpr int64_t mask = ROUND_TO - 1;
  constexpr int64_t max_roundable_num = std::numeric_limits<int64_t>::max() - ROUND_TO;
  if (num > max_roundable_num) {
    // handle overflow case. This should result in a malloc error upstream
    return num;
  }
  return (num + mask) & ~mask;
}

// Round up to the next multiple of 64.
static inline int64_t RoundUpToMultipleOf64(int64_t num) {
  return RoundToPowerOfTwo<64>(num);
}

// Round up to the next multiple of 8.
static inline int64_t RoundUpToMultipleOf8(int64_t num) {
  return RoundToPowerOfTwo<8>(num);
}
237 
// Portable population count (Kernighan's method): each iteration clears the
// lowest set bit, so the loop runs once per 1-bit.
static inline int PopcountNoHw(uint64_t x) {
  int count = 0;
  while (x != 0) {
    x &= x - 1;
    ++count;
  }
  return count;
}
246 
248 static inline int Popcount(uint64_t x) {
249 #ifdef ARROW_USE_SSE
251  return POPCNT_popcnt_u64(x);
252  } else {
253  return PopcountNoHw(x);
254  }
255 #else
256  return PopcountNoHw(x);
257 #endif
258 }
259 
260 // Compute correct population count for various-width signed integers
261 template <typename T>
262 static inline int PopcountSigned(T v) {
263  // Converting to same-width unsigned then extending preserves the bit pattern.
264  return BitUtil::Popcount(detail::as_unsigned(v));
265 }
266 
268 static inline uint64_t TrailingBits(uint64_t v, int num_bits) {
269  if (ARROW_PREDICT_FALSE(num_bits == 0)) return 0;
270  if (ARROW_PREDICT_FALSE(num_bits >= 64)) return v;
271  int n = 64 - num_bits;
272  return (v << n) >> n;
273 }
274 
// Returns ceil(log2(x)); x must be positive (the original DCHECK enforced
// this).  Uses ceil(log2(x)) == floor(log2(x - 1)) + 1 for x > 1, where
// floor(log2(n)) is the 0-indexed position of n's most significant bit.
static inline int Log2(uint64_t x) {
  if (x == 1) return 0;
  --x;
  int msb = 0;
  while (x >>= 1) ++msb;
  return msb + 1;
}
290 
/// Returns the number of leading zero bits in the 32-bit input.
/// NOTE(review): the intrinsic paths (__builtin_clz, _BitScanReverse) are
/// undefined for value == 0 -- callers must pass a non-zero value, as the
/// original DCHECK below indicates; only the portable fallback returns 32
/// for 0.
static inline int64_t CountLeadingZeros(uint32_t value) {
// DCHECK_NE(value, 0);
#if defined(__clang__) || defined(__GNUC__)
  return static_cast<int64_t>(__builtin_clz(value));
#elif defined(_MSC_VER)
  // _BitScanReverse yields the 0-indexed position of the highest set bit;
  // leading zeros = 31 - that position.
  unsigned long index;                                         // NOLINT
  _BitScanReverse(&index, static_cast<unsigned long>(value));  // NOLINT
  return 31LL - static_cast<int64_t>(index);
#else
  // Portable fallback: count how many single-bit shifts empty the value.
  int64_t bitpos = 0;
  while (value != 0) {
    value >>= 1;
    ++bitpos;
  }
  return 32LL - bitpos;
#endif
}
309 
/// Swap the byte order of a 64-bit signed integer.
static inline int64_t ByteSwap(int64_t value) { return ARROW_BYTE_SWAP64(value); }
/// Swap the byte order of a 64-bit unsigned integer.
static inline uint64_t ByteSwap(uint64_t value) {
  return static_cast<uint64_t>(ARROW_BYTE_SWAP64(value));
}
/// Swap the byte order of a 32-bit signed integer.
static inline int32_t ByteSwap(int32_t value) { return ARROW_BYTE_SWAP32(value); }
/// Swap the byte order of a 32-bit unsigned integer.
static inline uint32_t ByteSwap(uint32_t value) {
  return static_cast<uint32_t>(ARROW_BYTE_SWAP32(value));
}
/// Swap the byte order of a 16-bit signed integer.  No compiler intrinsic
/// is used here: the two bytes are exchanged manually.
static inline int16_t ByteSwap(int16_t value) {
  constexpr auto m = static_cast<int16_t>(0xff);
  return static_cast<int16_t>(((value >> 8) & m) | ((value & m) << 8));
}
/// Swap the byte order of a 16-bit unsigned integer (delegates to the
/// signed overload; the bit pattern round-trips through the casts).
static inline uint16_t ByteSwap(uint16_t value) {
  return static_cast<uint16_t>(ByteSwap(static_cast<int16_t>(value)));
}
326 
328 static inline void ByteSwap(void* dst, const void* src, int len) {
329  switch (len) {
330  case 1:
331  *reinterpret_cast<int8_t*>(dst) = *reinterpret_cast<const int8_t*>(src);
332  return;
333  case 2:
334  *reinterpret_cast<int16_t*>(dst) = ByteSwap(*reinterpret_cast<const int16_t*>(src));
335  return;
336  case 4:
337  *reinterpret_cast<int32_t*>(dst) = ByteSwap(*reinterpret_cast<const int32_t*>(src));
338  return;
339  case 8:
340  *reinterpret_cast<int64_t*>(dst) = ByteSwap(*reinterpret_cast<const int64_t*>(src));
341  return;
342  default:
343  break;
344  }
345 
346  auto d = reinterpret_cast<uint8_t*>(dst);
347  auto s = reinterpret_cast<const uint8_t*>(src);
348  for (int i = 0; i < len; ++i) {
349  d[i] = s[len - i - 1];
350  }
351 }
352 
355 #if ARROW_LITTLE_ENDIAN
356 template <typename T, typename = EnableIfIsOneOf<T, int64_t, uint64_t, int32_t, uint32_t,
357  int16_t, uint16_t>>
358 static inline T ToBigEndian(T value) {
359  return ByteSwap(value);
360 }
361 
362 template <typename T, typename = EnableIfIsOneOf<T, int64_t, uint64_t, int32_t, uint32_t,
363  int16_t, uint16_t>>
364 static inline T ToLittleEndian(T value) {
365  return value;
366 }
367 #else
368 template <typename T, typename = EnableIfIsOneOf<T, int64_t, uint64_t, int32_t, uint32_t,
369  int16_t, uint16_t>>
370 static inline T ToBigEndian(T value) {
371  return value;
372 }
373 #endif
374 
/// Convert a big- or little-endian value to the host's native byte order.
/// On little-endian hosts FromBigEndian() swaps bytes and FromLittleEndian()
/// is the identity; on big-endian hosts the roles are reversed.  Restricted
/// to the fixed-width integer types listed in EnableIfIsOneOf.
#if ARROW_LITTLE_ENDIAN
template <typename T, typename = EnableIfIsOneOf<T, int64_t, uint64_t, int32_t, uint32_t,
                                                 int16_t, uint16_t>>
static inline T FromBigEndian(T value) {
  return ByteSwap(value);
}

template <typename T, typename = EnableIfIsOneOf<T, int64_t, uint64_t, int32_t, uint32_t,
                                                 int16_t, uint16_t>>
static inline T FromLittleEndian(T value) {
  return value;
}
#else
template <typename T, typename = EnableIfIsOneOf<T, int64_t, uint64_t, int32_t, uint32_t,
                                                 int16_t, uint16_t>>
static inline T FromBigEndian(T value) {
  return value;
}

template <typename T, typename = EnableIfIsOneOf<T, int64_t, uint64_t, int32_t, uint32_t,
                                                 int16_t, uint16_t>>
static inline T FromLittleEndian(T value) {
  return ByteSwap(value);
}
#endif
401 
// Logical (zero-filling) right shift for signed integer types.  The C++ >>
// operator performs an arithmetic shift on signed values; converting to the
// same-width unsigned type first guarantees the vacated high bits are zero.
// Negative shift amounts remain undefined behavior, as in the original.
template <typename T>
static T ShiftRightLogical(T v, int shift) {
  using UnsignedT = typename std::make_unsigned<T>::type;
  return static_cast<UnsignedT>(v) >> shift;
}
410 
411 void FillBitsFromBytes(const std::vector<uint8_t>& bytes, uint8_t* bits);
412 
414 ARROW_EXPORT
415 Status BytesToBits(const std::vector<uint8_t>&, MemoryPool*, std::shared_ptr<Buffer>*);
416 
417 } // namespace BitUtil
418 
419 namespace internal {
420 
// Reads bits from a bitmap sequentially, starting at an arbitrary bit
// offset.  Inspect the current bit with IsSet()/IsNotSet(), then call
// Next() to advance.  The reader stops refreshing its cached byte once
// `length` bits have been visited, so it never reads past the buffer;
// advancing beyond `length` is the caller's responsibility to avoid.
class BitmapReader {
 public:
  // bitmap: buffer to read from (not owned); start_offset: first bit
  // position; length: total number of bits that will be visited.
  BitmapReader(const uint8_t* bitmap, int64_t start_offset, int64_t length)
      : bitmap_(bitmap), position_(0), length_(length) {
    current_byte_ = 0;
    byte_offset_ = start_offset / 8;
    bit_offset_ = start_offset % 8;
    // Only touch the buffer when there is at least one bit to read.
    if (length > 0) {
      current_byte_ = bitmap[byte_offset_];
    }
  }

#if defined(_MSC_VER)
  // MSVC is finicky about this cast
  bool IsSet() const { return (current_byte_ & (1 << bit_offset_)) != 0; }
#else
  bool IsSet() const { return current_byte_ & (1 << bit_offset_); }
#endif

  // True when the current bit is 0.
  bool IsNotSet() const { return (current_byte_ & (1 << bit_offset_)) == 0; }

  // Advance to the next bit; loads the next byte when a byte boundary is
  // crossed, but only while still inside the declared length (avoids an
  // out-of-bounds read on the final advance).
  void Next() {
    ++bit_offset_;
    ++position_;
    if (ARROW_PREDICT_FALSE(bit_offset_ == 8)) {
      bit_offset_ = 0;
      ++byte_offset_;
      if (ARROW_PREDICT_TRUE(position_ < length_)) {
        current_byte_ = bitmap_[byte_offset_];
      }
    }
  }

 private:
  const uint8_t* bitmap_;  // underlying buffer (not owned)
  int64_t position_;       // bits consumed so far
  int64_t length_;         // total bits to be read

  uint8_t current_byte_;   // cached byte containing the current bit
  int64_t byte_offset_;    // index of current_byte_ within bitmap_
  int64_t bit_offset_;     // bit index (0-7) within current_byte_
};
463 
class BitmapWriter {
  // A sequential bitwise writer that preserves surrounding bit values.

 public:
  // bitmap: destination buffer (not owned); start_offset: first bit to
  // write; length: total number of bits that will be written.
  BitmapWriter(uint8_t* bitmap, int64_t start_offset, int64_t length)
      : bitmap_(bitmap), position_(0), length_(length) {
    byte_offset_ = start_offset / 8;
    bit_mask_ = BitUtil::kBitmask[start_offset % 8];
    // Load the first byte so bits outside the write range are preserved.
    if (length > 0) {
      current_byte_ = bitmap[byte_offset_];
    } else {
      current_byte_ = 0;
    }
  }

  // Set the current bit (buffered; flushed at byte boundaries or Finish()).
  void Set() { current_byte_ |= bit_mask_; }

  // Clear the current bit (bit_mask_ ^ 0xFF is its bitwise complement).
  void Clear() { current_byte_ &= bit_mask_ ^ 0xFF; }

  // Advance to the next bit; flushes the cached byte when a byte boundary
  // is crossed and reloads the following byte, but only while still inside
  // the declared length (avoids an out-of-bounds read).
  void Next() {
    bit_mask_ = static_cast<uint8_t>(bit_mask_ << 1);
    ++position_;
    if (bit_mask_ == 0) {
      // Finished this byte, need advancing
      bit_mask_ = 0x01;
      bitmap_[byte_offset_++] = current_byte_;
      if (ARROW_PREDICT_TRUE(position_ < length_)) {
        current_byte_ = bitmap_[byte_offset_];
      }
    }
  }

  void Finish() {
    // Store the current byte, unless we already advanced past the end of
    // the bitmap storage (mask back at 0x01 with all declared bits written).
    if (bit_mask_ != 0x01 || position_ < length_) {
      bitmap_[byte_offset_] = current_byte_;
    }
  }

  // Number of bits visited so far.
  int64_t position() const { return position_; }

 private:
  uint8_t* bitmap_;       // destination buffer (not owned)
  int64_t position_;      // bits visited so far
  int64_t length_;        // total bits to be written

  uint8_t current_byte_;  // cached byte being assembled
  uint8_t bit_mask_;      // single-bit mask for the current position
  int64_t byte_offset_;   // index of current_byte_ within bitmap_
};
514 
class FirstTimeBitmapWriter {
  // Like BitmapWriter, but any bit values *following* the bits written
  // might be clobbered. It is hence faster than BitmapWriter, and can
  // also avoid false positives with Valgrind.

 public:
  // bitmap: destination buffer (not owned); start_offset: first bit to
  // write; length: total number of bits that will be written.
  FirstTimeBitmapWriter(uint8_t* bitmap, int64_t start_offset, int64_t length)
      : bitmap_(bitmap), position_(0), length_(length) {
    current_byte_ = 0;
    byte_offset_ = start_offset / 8;
    bit_mask_ = BitUtil::kBitmask[start_offset % 8];
    if (length > 0) {
      // Keep only the bits preceding start_offset; bits at or after it
      // start out cleared in the cache, which is what makes Clear() a no-op.
      current_byte_ = bitmap[byte_offset_] & BitUtil::kPrecedingBitmask[start_offset % 8];
    } else {
      current_byte_ = 0;
    }
  }

  // Set the current bit in the cached byte.
  void Set() { current_byte_ |= bit_mask_; }

  // No-op: unwritten bits of the cached byte are already 0.
  void Clear() {}

  // Advance to the next bit; flushes the cached byte at byte boundaries and
  // starts the next byte from zero (never re-reads the buffer, unlike
  // BitmapWriter).
  void Next() {
    bit_mask_ = static_cast<uint8_t>(bit_mask_ << 1);
    ++position_;
    if (bit_mask_ == 0) {
      // Finished this byte, need advancing
      bit_mask_ = 0x01;
      bitmap_[byte_offset_++] = current_byte_;
      current_byte_ = 0;
    }
  }

  void Finish() {
    // Store the current byte, unless we already advanced past the end of
    // the bitmap storage (mask back at 0x01 with all declared bits written).
    if (bit_mask_ != 0x01 || position_ < length_) {
      bitmap_[byte_offset_] = current_byte_;
    }
  }

  // Number of bits visited so far.
  int64_t position() const { return position_; }

 private:
  uint8_t* bitmap_;       // destination buffer (not owned)
  int64_t position_;      // bits visited so far
  int64_t length_;        // total bits to be written

  uint8_t current_byte_;  // cached byte being assembled
  uint8_t bit_mask_;      // single-bit mask for the current position
  int64_t byte_offset_;   // index of current_byte_ within bitmap_
};
566 
567 // A std::generate() like function to write sequential bits into a bitmap area.
568 // Bits preceding the bitmap area are preserved, bits following the bitmap
569 // area may be clobbered.
570 
// Write 'length' bits produced by calling g() repeatedly into 'bitmap',
// starting at bit 'start_offset'.  Bits before the range are preserved;
// bits after it may be clobbered when the final byte is partially filled.
template <class Generator>
void GenerateBits(uint8_t* bitmap, int64_t start_offset, int64_t length, Generator&& g) {
  if (length == 0) {
    return;
  }
  uint8_t* out = bitmap + start_offset / 8;
  const int64_t first_bit = start_offset % 8;
  uint8_t mask = static_cast<uint8_t>(1u << first_bit);
  // (1 << k) - 1 selects the k bits preceding the write position.
  uint8_t assembled = static_cast<uint8_t>(*out & (mask - 1));

  for (int64_t i = 0; i < length; ++i) {
    if (g()) {
      assembled = static_cast<uint8_t>(assembled | mask);
    }
    mask = static_cast<uint8_t>(mask << 1);
    if (mask == 0) {
      // Byte complete: flush it and start the next one empty.
      *out++ = assembled;
      mask = 1;
      assembled = 0;
    }
  }
  if (mask != 1) {
    // Flush the trailing partial byte (its upper bits are zero).
    *out++ = assembled;
  }
}
594 
595 // Like GenerateBits(), but unrolls its main loop for higher performance.
596 
// Like GenerateBits(), but with the byte-aligned middle section unrolled to
// produce eight bits per iteration with no mask bookkeeping.  Bits before
// the output range are preserved; bits after it may be clobbered (partial
// trailing bytes are written whole, with unused high bits zeroed).
template <class Generator>
void GenerateBitsUnrolled(uint8_t* bitmap, int64_t start_offset, int64_t length,
                          Generator&& g) {
  if (length == 0) {
    return;
  }
  uint8_t current_byte;
  uint8_t* cur = bitmap + start_offset / 8;
  const uint64_t start_bit_offset = start_offset % 8;
  uint8_t bit_mask = BitUtil::kBitmask[start_bit_offset];
  int64_t remaining = length;

  // Phase 1 -- leading partial byte: keep the existing bits preceding
  // start_offset and generate up to the next byte boundary (or until the
  // requested bits run out; the whole byte is flushed either way).
  if (bit_mask != 0x01) {
    current_byte = *cur & BitUtil::kPrecedingBitmask[start_bit_offset];
    while (bit_mask != 0 && remaining > 0) {
      current_byte = g() ? (current_byte | bit_mask) : current_byte;
      bit_mask = static_cast<uint8_t>(bit_mask << 1);
      --remaining;
    }
    *cur++ = current_byte;
  }

  // Phase 2 -- full bytes: eight generator calls per byte, one per bit,
  // with fixed masks instead of a shifting bit_mask.
  int64_t remaining_bytes = remaining / 8;
  while (remaining_bytes-- > 0) {
    current_byte = 0;
    current_byte = g() ? current_byte | 0x01 : current_byte;
    current_byte = g() ? current_byte | 0x02 : current_byte;
    current_byte = g() ? current_byte | 0x04 : current_byte;
    current_byte = g() ? current_byte | 0x08 : current_byte;
    current_byte = g() ? current_byte | 0x10 : current_byte;
    current_byte = g() ? current_byte | 0x20 : current_byte;
    current_byte = g() ? current_byte | 0x40 : current_byte;
    current_byte = g() ? current_byte | 0x80 : current_byte;
    *cur++ = current_byte;
  }

  // Phase 3 -- trailing partial byte: bits following the generated range in
  // this byte are overwritten with zeros (the documented clobbering).
  int64_t remaining_bits = remaining % 8;
  if (remaining_bits) {
    current_byte = 0;
    bit_mask = 0x01;
    while (remaining_bits-- > 0) {
      current_byte = g() ? (current_byte | bit_mask) : current_byte;
      bit_mask = static_cast<uint8_t>(bit_mask << 1);
    }
    *cur++ = current_byte;
  }
}
644 
645 } // namespace internal
646 
647 // ----------------------------------------------------------------------
648 // Bitmap utilities
649 
650 ARROW_EXPORT
651 Status GetEmptyBitmap(MemoryPool* pool, int64_t length, std::shared_ptr<Buffer>* result);
652 
662 ARROW_EXPORT
663 Status CopyBitmap(MemoryPool* pool, const uint8_t* bitmap, int64_t offset, int64_t length,
664  std::shared_ptr<Buffer>* out);
665 
673 ARROW_EXPORT
674 int64_t CountSetBits(const uint8_t* data, int64_t bit_offset, int64_t length);
675 
676 ARROW_EXPORT
677 bool BitmapEquals(const uint8_t* left, int64_t left_offset, const uint8_t* right,
678  int64_t right_offset, int64_t bit_length);
679 
680 ARROW_EXPORT
681 Status BitmapAnd(MemoryPool* pool, const uint8_t* left, int64_t left_offset,
682  const uint8_t* right, int64_t right_offset, int64_t length,
683  int64_t out_offset, std::shared_ptr<Buffer>* out_buffer);
684 
685 } // namespace arrow
686 
687 #endif // ARROW_UTIL_BIT_UTIL_H
Status GetEmptyBitmap(MemoryPool *pool, int64_t length, std::shared_ptr< Buffer > *result)
static const int64_t POPCNT
Definition: cpu-info.h:40
#define ARROW_PREDICT_TRUE(x)
Definition: macros.h:49
int64_t CountSetBits(const uint8_t *data, int64_t bit_offset, int64_t length)
Compute the number of 1&#39;s in the given data array.
Status BitmapAnd(MemoryPool *pool, const uint8_t *left, int64_t left_offset, const uint8_t *right, int64_t right_offset, int64_t length, int64_t out_offset, std::shared_ptr< Buffer > *out_buffer)
#define ARROW_PREDICT_FALSE(x)
Definition: macros.h:48
#define ARROW_BYTE_SWAP32
Definition: bit-util.h:52
bool BitmapEquals(const uint8_t *left, int64_t left_offset, const uint8_t *right, int64_t right_offset, int64_t bit_length)
Top-level namespace for Apache Arrow C++ API.
Definition: adapter.h:32
#define ARROW_BYTE_SWAP64
Definition: bit-util.h:51
static bool IsSupported(int64_t flag)
Returns whether of not the cpu supports this flag.
Definition: cpu-info.h:60
typename std::enable_if< IsOneOf< T, Args... >::value, T >::type EnableIfIsOneOf
Shorthand for using IsOneOf + std::enable_if.
Definition: type_traits.h:37
Status CopyBitmap(MemoryPool *pool, const uint8_t *bitmap, int64_t offset, int64_t length, std::shared_ptr< Buffer > *out)
Copy a bit range of an existing bitmap.