Low-Level Abstraction of Memory Access
BitPackedInt.hpp
// Copyright 2023 Bernhard Manfred Gruber
// SPDX-License-Identifier: MPL-2.0

#pragma once

#include "../Core.hpp"
#include "../ProxyRefOpMixin.hpp"
#include "Common.hpp"

#include <cassert>
#include <climits>
#include <cstdint>
#include <stdexcept>
#include <type_traits>

namespace llama::mapping
{
    LLAMA_EXPORT
    enum class SignBit
    {
        Keep,
        Discard
    };

    namespace internal
    {
        template<typename Integral>
        LLAMA_FN_HOST_ACC_INLINE constexpr auto makeMask(Integral bits) -> Integral
        {
            return bits >= sizeof(Integral) * CHAR_BIT ? ~Integral{0} : (Integral{1} << bits) - 1u;
        }
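        // For example, makeMask(std::uint32_t{3}) yields 0b111. Requesting all bits (or more) returns ~0
        // directly instead of shifting by the full type width, which would be undefined behaviour.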

        template<bool KeepSignBit, typename Integral, typename StoredIntegral>
        LLAMA_FN_HOST_ACC_INLINE constexpr auto bitunpack(
            const StoredIntegral* ptr,
            StoredIntegral bitOffset,
            StoredIntegral bitCount) -> Integral
        {
            constexpr auto bitsPerIntegral = static_cast<StoredIntegral>(sizeof(Integral) * CHAR_BIT);
            constexpr auto bitsPerStoredIntegral = static_cast<StoredIntegral>(sizeof(StoredIntegral) * CHAR_BIT);
            static_assert(bitsPerIntegral <= bitsPerStoredIntegral);
            assert(bitCount > 0 && bitCount <= bitsPerStoredIntegral);
#ifdef __clang__
            // this is necessary to silence the clang static analyzer
            __builtin_assume(bitCount > 0 && bitCount <= bitsPerStoredIntegral);
#endif

            const auto* p = ptr + bitOffset / bitsPerStoredIntegral;
            const auto innerBitOffset = bitOffset % bitsPerStoredIntegral;
            // assert(p < endPtr);
            auto v = p[0] >> innerBitOffset;

            const auto innerBitEndOffset = innerBitOffset + bitCount;
            if(innerBitEndOffset <= bitsPerStoredIntegral)
            {
                const auto mask = makeMask(bitCount);
                v &= mask;
            }
            else
            {
                const auto excessBits = innerBitEndOffset - bitsPerStoredIntegral;
                const auto bitsLoaded = bitsPerStoredIntegral - innerBitOffset;
                const auto mask = makeMask(excessBits);
                // assert(p + 1 < endPtr);
                v |= (p[1] & mask) << bitsLoaded;
            }
            if constexpr(std::is_signed_v<Integral> && KeepSignBit)
            {
                // perform sign extension
                if((v & (StoredIntegral{1} << (bitCount - 1))) && bitCount < bitsPerStoredIntegral)
                    v |= ~StoredIntegral{0} << bitCount;
            }
            return static_cast<Integral>(v);
        }
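        // Illustrative example: with StoredIntegral = std::uint32_t, bitOffset = 30 and bitCount = 4, the two
        // low bits of the result come from bits 30..31 of ptr[0] and the two high bits from bits 0..1 of ptr[1].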

        template<bool KeepSignBit, typename StoredIntegral, typename Integral>
        LLAMA_FN_HOST_ACC_INLINE constexpr void bitpack(
            StoredIntegral* ptr,
            StoredIntegral bitOffset,
            StoredIntegral bitCount,
            Integral value)
        {
            constexpr auto bitsPerIntegral = static_cast<StoredIntegral>(sizeof(Integral) * CHAR_BIT);
            constexpr auto bitsPerStoredIntegral = static_cast<StoredIntegral>(sizeof(StoredIntegral) * CHAR_BIT);
            static_assert(bitsPerIntegral <= bitsPerStoredIntegral);
            assert(bitCount > 0 && bitCount <= bitsPerStoredIntegral);
#ifdef __clang__
            // this is necessary to silence the clang static analyzer
            __builtin_assume(bitCount > 0 && bitCount <= bitsPerStoredIntegral);
#endif

            // NOLINTNEXTLINE(bugprone-signed-char-misuse,cert-str34-c)
            const auto unsignedValue = static_cast<StoredIntegral>(value);
            const auto mask = makeMask(bitCount);
            StoredIntegral valueBits;
            if constexpr(std::is_signed_v<Integral> && KeepSignBit)
            {
                const auto magnitudeMask = makeMask(bitCount - 1);
                const auto isSigned = value < 0;
                valueBits = (StoredIntegral{isSigned} << (bitCount - 1)) | (unsignedValue & magnitudeMask);
            }
            else
            {
                valueBits = unsignedValue & mask;
            }

            auto* p = ptr + bitOffset / bitsPerStoredIntegral;
            const auto innerBitOffset = bitOffset % bitsPerStoredIntegral;

            {
                const auto clearMask = ~(mask << innerBitOffset);
                // assert(p < endPtr);
                auto mem = p[0] & clearMask; // clear previous bits
                mem |= valueBits << innerBitOffset; // write new bits
                p[0] = mem;
            }

            const auto innerBitEndOffset = innerBitOffset + bitCount;
            if(innerBitEndOffset > bitsPerStoredIntegral)
            {
                const auto excessBits = innerBitEndOffset - bitsPerStoredIntegral;
                const auto bitsWritten = bitsPerStoredIntegral - innerBitOffset;
                const auto clearMask = ~makeMask(excessBits);
                // assert(p + 1 < endPtr);
                auto mem = p[1] & clearMask; // clear previous bits
                mem |= valueBits >> bitsWritten; // write new bits
                p[1] = mem;
            }
        }
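        // A store may straddle two StoredIntegral words; each affected word is updated with a
        // read-modify-write (clear the destination bits, then OR in the new value bits).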

        template<typename Integral, typename StoredIntegral>
        LLAMA_FN_HOST_ACC_INLINE constexpr auto bitunpack1(const StoredIntegral* ptr, StoredIntegral bitOffset)
            -> Integral
        {
            constexpr auto bitsPerStoredIntegral = static_cast<StoredIntegral>(sizeof(StoredIntegral) * CHAR_BIT);
            const auto bit
                = (ptr[bitOffset / bitsPerStoredIntegral] >> (bitOffset % bitsPerStoredIntegral)) & StoredIntegral{1};
            return static_cast<Integral>(bit);
        }

        template<typename StoredIntegral, typename Integral>
        LLAMA_FN_HOST_ACC_INLINE constexpr void bitpack1(StoredIntegral* ptr, StoredIntegral bitOffset, Integral value)
        {
            constexpr auto bitsPerStoredIntegral = static_cast<StoredIntegral>(sizeof(StoredIntegral) * CHAR_BIT);
            const auto bitOff = bitOffset % bitsPerStoredIntegral;
            auto& dst = ptr[bitOffset / bitsPerStoredIntegral];
            dst &= ~(StoredIntegral{1} << bitOff); // clear bit
            const auto bit = (static_cast<StoredIntegral>(value) & StoredIntegral{1});
            dst |= (bit << bitOff); // set bit
        }
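        // bitunpack1/bitpack1 are the single-bit fast paths used by BitPackedIntRef below when the bit count
        // is the compile-time constant 1 and no sign handling is required.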

        template<typename Integral, typename StoredIntegralCV, typename VHBits, typename SizeType, SignBit SignBit>
        // NOLINTNEXTLINE(cppcoreguidelines-special-member-functions,hicpp-special-member-functions)
        struct BitPackedIntRef
            : private VHBits
            , ProxyRefOpMixin<BitPackedIntRef<Integral, StoredIntegralCV, VHBits, SizeType, SignBit>, Integral>
        {
        private:
            using StoredIntegral = std::remove_cv_t<StoredIntegralCV>;
            StoredIntegralCV* ptr;
            SizeType bitOffset;

        public:
            using value_type = Integral;

            LLAMA_FN_HOST_ACC_INLINE constexpr BitPackedIntRef(
                StoredIntegralCV* ptr,
                SizeType bitOffset,
                VHBits vhBits)
                : VHBits{vhBits}
                , ptr{ptr}
                , bitOffset{bitOffset}
            {
            }

            BitPackedIntRef(const BitPackedIntRef&) = default;

            // NOLINTNEXTLINE(bugprone-unhandled-self-assignment,cert-oop54-cpp)
            LLAMA_FN_HOST_ACC_INLINE constexpr auto operator=(const BitPackedIntRef& other) -> BitPackedIntRef&
            {
                *this = static_cast<value_type>(other);
                return *this;
            }

            // NOLINTNEXTLINE(google-explicit-constructor,hicpp-explicit-conversions)
            LLAMA_FN_HOST_ACC_INLINE constexpr operator Integral() const
            {
                // fast path for single bits without sign handling
                if constexpr(std::is_empty_v<VHBits>)
                {
                    if constexpr(VHBits::value() == 1 && (std::is_unsigned_v<Integral> || SignBit == SignBit::Discard))
                    {
                        return bitunpack1<Integral>(ptr, static_cast<StoredIntegral>(bitOffset));
                    }
                }

                return bitunpack<SignBit == SignBit::Keep, Integral>(
                    ptr,
                    static_cast<StoredIntegral>(bitOffset),
                    static_cast<StoredIntegral>(VHBits::value()));
            }

            LLAMA_FN_HOST_ACC_INLINE constexpr auto operator=(Integral value) -> BitPackedIntRef&
            {
                // fast path for single bits without sign handling
                if constexpr(std::is_empty_v<VHBits>)
                {
                    if constexpr(VHBits::value() == 1 && (std::is_unsigned_v<Integral> || SignBit == SignBit::Discard))
                    {
                        bitpack1(ptr, static_cast<StoredIntegral>(bitOffset), value);
                        return *this; // done, skip the general bit-packing path below
                    }
                }

                bitpack<SignBit == SignBit::Keep>(
                    ptr,
                    static_cast<StoredIntegral>(bitOffset),
                    static_cast<StoredIntegral>(VHBits::value()),
                    value);
                return *this;
            }
        };
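        // Instances of BitPackedIntRef are returned by the compute() functions of the mappings below:
        // reads go through operator Integral() and writes through operator=(Integral).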

        template<typename A, typename B>
        using HasLargerSize = mp_bool<sizeof(A) < sizeof(B)>;

        template<typename RecordDim>
        using LargestIntegral = mp_max_element<FlatRecordDim<RecordDim>, HasLargerSize>;

        template<typename RecordDim>
        using StoredUnsignedFor = std::
            conditional_t<(sizeof(LargestIntegral<RecordDim>) > sizeof(std::uint32_t)), std::uint64_t, std::uint32_t>;

        template<
            typename TArrayExtents,
            typename TRecordDim,
            typename Bits,
            SignBit SignBit,
            typename TLinearizeArrayIndexFunctor,
            typename TStoredIntegral>
        struct BitPackedIntCommon
            : MappingBase<TArrayExtents, TRecordDim>
            , protected llama::internal::BoxedValue<Bits>
        {
            using LinearizeArrayIndexFunctor = TLinearizeArrayIndexFunctor;
            using StoredIntegral = TStoredIntegral;

            static_assert(std::is_integral_v<StoredIntegral>);
            static_assert(std::is_unsigned_v<StoredIntegral>);

            // We could allow more integer types as storage type, but that needs to be thought through carefully
            static_assert(
                std::is_same_v<StoredIntegral, std::uint32_t> || std::is_same_v<StoredIntegral, std::uint64_t>);

        protected:
            using Base = MappingBase<TArrayExtents, TRecordDim>;
            using VHBits = llama::internal::BoxedValue<Bits>;
            using size_type = typename TArrayExtents::value_type;

            template<typename T>
            using IsAllowedFieldType = mp_or<std::is_integral<T>, std::is_enum<T>>;

            static_assert(
                mp_all_of<FlatRecordDim<TRecordDim>, IsAllowedFieldType>::value,
                "All record dimension field types must be integral");

            template<typename T>
            using IsFieldTypeSmallerOrEqualStorageIntegral = mp_bool<sizeof(T) <= sizeof(StoredIntegral)>;

            static_assert(
                mp_all_of<FlatRecordDim<TRecordDim>, IsFieldTypeSmallerOrEqualStorageIntegral>::value,
                "The integral type used for storage must be at least as big as the type of the values to retrieve");

        public:
            LLAMA_FN_HOST_ACC_INLINE
            constexpr auto bits() const -> size_type
            {
                return static_cast<size_type>(VHBits::value());
            }

            template<typename B = Bits, std::enable_if_t<isConstant<B>, int> = 0>
            LLAMA_FN_HOST_ACC_INLINE constexpr BitPackedIntCommon(
                TArrayExtents extents = {},
                Bits bits = {},
                TRecordDim = {})
                : Base(extents)
                , VHBits{bits}
            {
                static_assert(VHBits::value() > 0);
                mp_for_each_inline<mp_transform<mp_identity, FlatRecordDim<TRecordDim>>>(
                    [&](auto t)
                    {
                        using FieldType = typename decltype(t)::type;
                        static_assert(
                            static_cast<std::size_t>(VHBits::value()) <= sizeof(FieldType) * CHAR_BIT,
                            "Storage bits must not be greater than bits of field type");
                        static_assert(
                            VHBits::value() >= 2
                                || std::is_unsigned_v<FieldType> || SignBit == llama::mapping::SignBit::Discard,
                            "When keeping the sign bit, Bits must be at least 2 with signed integers in the record "
                            "dimension");
                    });
            }

            template<typename B = Bits, std::enable_if_t<!isConstant<B>, int> = 0>
            LLAMA_FN_HOST_ACC_INLINE constexpr BitPackedIntCommon(
                TArrayExtents extents,
                Bits bits,
                TRecordDim = {})
                : Base(extents)
                , VHBits{bits}
            {
#ifdef __CUDA_ARCH__
                assert(VHBits::value() > 0);
#else
                if(VHBits::value() <= 0)
                    throw std::invalid_argument("BitPackedInt* Bits must not be zero");
#endif
                mp_for_each_inline<mp_transform<mp_identity, FlatRecordDim<TRecordDim>>>(
                    [&](auto t)
                    {
                        using FieldType [[maybe_unused]] = typename decltype(t)::type;
#ifdef __CUDA_ARCH__
                        assert(VHBits::value() <= sizeof(FieldType) * CHAR_BIT);
#else
                        if(static_cast<std::size_t>(VHBits::value()) > sizeof(FieldType) * CHAR_BIT)
                            throw std::invalid_argument(
                                "BitPackedInt* Bits must not be larger than any field type in the record dimension");
                        if(!(VHBits::value() >= 2
                             || std::is_unsigned_v<FieldType> || SignBit == llama::mapping::SignBit::Discard))
                            throw std::invalid_argument("When keeping the sign bit, Bits must be at least 2 with "
                                                        "signed integers in the record "
                                                        "dimension");
#endif
                    });
            }

            template<std::size_t... RecordCoords>
            LLAMA_FN_HOST_ACC_INLINE static constexpr auto isComputed(RecordCoord<RecordCoords...>)
            {
                return true;
            }
        };
    } // namespace internal

    LLAMA_EXPORT
    template<
        typename TArrayExtents,
        typename TRecordDim,
        typename Bits = typename TArrayExtents::value_type,
        SignBit SignBit = SignBit::Keep,
        typename TLinearizeArrayIndexFunctor = LinearizeArrayIndexRight,
        typename TStoredIntegral = internal::StoredUnsignedFor<TRecordDim>>
    struct BitPackedIntSoA
        : internal::BitPackedIntCommon<
              TArrayExtents,
              TRecordDim,
              Bits,
              SignBit,
              TLinearizeArrayIndexFunctor,
              TStoredIntegral>
    {
    private:
        using Base = internal::
            BitPackedIntCommon<TArrayExtents, TRecordDim, Bits, SignBit, TLinearizeArrayIndexFunctor, TStoredIntegral>;

    public:
        using Base::Base;
        using typename Base::size_type;
        using VHBits = typename Base::VHBits; // use plain using declaration with nvcc >= 11.8

        static constexpr std::size_t blobCount = mp_size<FlatRecordDim<TRecordDim>>::value;

        LLAMA_FN_HOST_ACC_INLINE
        constexpr auto blobSize(size_type /*blobIndex*/) const -> size_type
        {
            constexpr auto bitsPerStoredIntegral = static_cast<size_type>(sizeof(TStoredIntegral) * CHAR_BIT);
            const auto bitsNeeded = TLinearizeArrayIndexFunctor{}.size(Base::extents()) * VHBits::value();
            return roundUpToMultiple(bitsNeeded, bitsPerStoredIntegral) / CHAR_BIT;
        }
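        // Example: 1000 array elements at 7 bits per value with std::uint32_t storage need 7000 bits per field
        // blob, rounded up to 7008 bits (a multiple of 32), i.e. 876 bytes.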

        template<std::size_t... RecordCoords, typename Blobs>
        LLAMA_FN_HOST_ACC_INLINE constexpr auto compute(
            typename Base::ArrayIndex ai,
            RecordCoord<RecordCoords...>,
            Blobs& blobs) const
        {
            constexpr auto blob = flatRecordCoord<TRecordDim, RecordCoord<RecordCoords...>>;
            const auto bitOffset = TLinearizeArrayIndexFunctor{}(ai, Base::extents()) * VHBits::value();

            using QualifiedStoredIntegral = CopyConst<Blobs, TStoredIntegral>;
            using DstType = GetType<TRecordDim, RecordCoord<RecordCoords...>>;
            LLAMA_BEGIN_SUPPRESS_HOST_DEVICE_WARNING
            return internal::BitPackedIntRef<DstType, QualifiedStoredIntegral, VHBits, size_type, SignBit>{
                reinterpret_cast<QualifiedStoredIntegral*>(&blobs[blob][0]),
                bitOffset,
                static_cast<const VHBits&>(*this)};
            LLAMA_END_SUPPRESS_HOST_DEVICE_WARNING
        }
    };

    LLAMA_EXPORT
    template<
        typename Bits = void,
        SignBit SignBit = SignBit::Keep,
        typename LinearizeArrayIndexFunctor = mapping::LinearizeArrayIndexRight,
        typename StoredIntegral = void>
    struct BindBitPackedIntSoA
    {
        template<typename ArrayExtents, typename RecordDim>
        using fn = BitPackedIntSoA<
            ArrayExtents,
            RecordDim,
            std::conditional_t<!std::is_void_v<Bits>, Bits, typename ArrayExtents::value_type>,
            SignBit,
            LinearizeArrayIndexFunctor,
            std::conditional_t<
                !std::is_void_v<StoredIntegral>,
                StoredIntegral,
                internal::StoredUnsignedFor<RecordDim>>>;
    };

    LLAMA_EXPORT
    template<typename Mapping>
    inline constexpr bool isBitPackedIntSoA = false;

    LLAMA_EXPORT
    template<
        typename ArrayExtents,
        typename RecordDim,
        typename Bits,
        SignBit SignBit,
        typename LinearizeArrayIndexFunctor,
        typename StoredIntegral>
    inline constexpr bool isBitPackedIntSoA<
        BitPackedIntSoA<ArrayExtents, RecordDim, Bits, SignBit, LinearizeArrayIndexFunctor, StoredIntegral>>
        = true;

    /// \tparam PermuteFields Defines how the record dimension's fields should be permuted. See \ref
    /// PermuteFieldsInOrder, \ref PermuteFieldsIncreasingAlignment, \ref PermuteFieldsDecreasingAlignment and
    /// \ref PermuteFieldsMinimizePadding.
    LLAMA_EXPORT
    template<
        typename TArrayExtents,
        typename TRecordDim,
        typename Bits = typename TArrayExtents::value_type,
        SignBit SignBit = SignBit::Keep,
        typename TLinearizeArrayIndexFunctor = LinearizeArrayIndexRight,
        template<typename> typename PermuteFields = PermuteFieldsInOrder,
        typename TStoredIntegral = internal::StoredUnsignedFor<TRecordDim>>
    struct BitPackedIntAoS
        : internal::BitPackedIntCommon<
              TArrayExtents,
              TRecordDim,
              Bits,
              SignBit,
              TLinearizeArrayIndexFunctor,
              TStoredIntegral>
    {
    private:
        using Base = internal::
            BitPackedIntCommon<TArrayExtents, TRecordDim, Bits, SignBit, TLinearizeArrayIndexFunctor, TStoredIntegral>;

    public:
        using Base::Base;
        using typename Base::size_type;
        using VHBits = typename Base::VHBits; // use plain using declaration with nvcc >= 11.8

        using Permuter = PermuteFields<TRecordDim>;
        static constexpr std::size_t blobCount = 1;

        LLAMA_FN_HOST_ACC_INLINE
        constexpr auto blobSize(size_type /*blobIndex*/) const -> size_type
        {
            constexpr auto bitsPerStoredIntegral = static_cast<size_type>(sizeof(TStoredIntegral) * CHAR_BIT);
            const auto bitsNeeded = TLinearizeArrayIndexFunctor{}.size(Base::extents())
                * static_cast<size_type>(VHBits::value()) * static_cast<size_type>(flatFieldCount<TRecordDim>);
            return roundUpToMultiple(bitsNeeded, bitsPerStoredIntegral) / CHAR_BIT;
        }
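        // Example: 1000 array elements with 3 integral fields at 7 bits each need 21000 bits,
        // rounded up to 21024 bits (a multiple of 32), i.e. 2628 bytes in the single blob.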

        template<std::size_t... RecordCoords, typename Blobs>
        LLAMA_FN_HOST_ACC_INLINE constexpr auto compute(
            typename Base::ArrayIndex ai,
            RecordCoord<RecordCoords...>,
            Blobs& blobs) const
        {
            constexpr auto flatFieldIndex = static_cast<size_type>(
                Permuter::template permute<flatRecordCoord<TRecordDim, RecordCoord<RecordCoords...>>>);
            const auto bitOffset = ((TLinearizeArrayIndexFunctor{}(ai, Base::extents())
                                     * static_cast<size_type>(flatFieldCount<TRecordDim>))
                                    + flatFieldIndex)
                * static_cast<size_type>(VHBits::value());

            using QualifiedStoredIntegral = CopyConst<Blobs, TStoredIntegral>;
            using DstType = GetType<TRecordDim, RecordCoord<RecordCoords...>>;
            LLAMA_BEGIN_SUPPRESS_HOST_DEVICE_WARNING
            return internal::BitPackedIntRef<DstType, QualifiedStoredIntegral, VHBits, size_type, SignBit>{
                reinterpret_cast<QualifiedStoredIntegral*>(&blobs[0][0]),
                bitOffset,
                static_cast<const VHBits&>(*this)};
            LLAMA_END_SUPPRESS_HOST_DEVICE_WARNING
        }
    };

    LLAMA_EXPORT
    template<
        typename Bits = void,
        SignBit SignBit = SignBit::Keep,
        typename LinearizeArrayIndexFunctor = mapping::LinearizeArrayIndexRight,
        template<typename> typename PermuteFields = PermuteFieldsInOrder,
        typename StoredIntegral = void>
    struct BindBitPackedIntAoS
    {
        template<typename ArrayExtents, typename RecordDim>
        using fn = BitPackedIntAoS<
            ArrayExtents,
            RecordDim,
            std::conditional_t<!std::is_void_v<Bits>, Bits, typename ArrayExtents::value_type>,
            SignBit,
            LinearizeArrayIndexFunctor,
            PermuteFields,
            std::conditional_t<
                !std::is_void_v<StoredIntegral>,
                StoredIntegral,
                internal::StoredUnsignedFor<RecordDim>>>;
    };

    LLAMA_EXPORT
    template<typename Mapping>
    inline constexpr bool isBitPackedIntAoS = false;

    template<
        typename ArrayExtents,
        typename RecordDim,
        typename Bits,
        SignBit SignBit,
        typename LinearizeArrayIndexFunctor,
        template<typename>
        typename PermuteFields,
        typename StoredIntegral>
    inline constexpr bool isBitPackedIntAoS<BitPackedIntAoS<
        ArrayExtents,
        RecordDim,
        Bits,
        SignBit,
        LinearizeArrayIndexFunctor,
        PermuteFields,
        StoredIntegral>>
        = true;
} // namespace llama::mapping
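
Usage sketch (not part of BitPackedInt.hpp): the snippet below packs every integral field of a small record
dimension into 7 bits per value via BitPackedIntSoA. The record dimension, its tags (Gas, Id) and the chosen
sizes are made up for illustration; it assumes the usual LLAMA view API (llama::Record, llama::ArrayExtentsDynamic,
llama::Constant, llama::allocView).

// example.cpp - illustrative only
#include <llama/llama.hpp>

#include <cstdint>
#include <iostream>

struct Gas{};
struct Id{};

// hypothetical record dimension with two integral fields
using Digits = llama::Record<llama::Field<Gas, std::uint16_t>, llama::Field<Id, std::int16_t>>;

auto main() -> int
{
    const auto extents = llama::ArrayExtentsDynamic<std::size_t, 1>{128};
    // 7 bits per value, fixed at compile time; SignBit::Keep is the default
    const auto mapping
        = llama::mapping::BitPackedIntSoA<decltype(extents), Digits, llama::Constant<7>>{extents};
    auto view = llama::allocView(mapping);

    view(0)(Gas{}) = 100; // fits into 7 unsigned bits
    view(0)(Id{}) = -42;  // 1 sign bit + 6 magnitude bits

    const int gas = view(0)(Gas{}); // reads back 100
    const int id = view(0)(Id{});   // reads back -42
    std::cout << gas << ' ' << id << '\n';
    return 0;
}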