/*************************************************************************
 *
 * Copyright 2016 Realm Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 **************************************************************************/

#ifndef REALM_ARRAY_HPP
#define REALM_ARRAY_HPP

#include <realm/node.hpp>
#include <realm/query_state.hpp>
#include <realm/column_fwd.hpp>
#include <realm/array_direct.hpp>

namespace realm {
// Forward declarations
class GroupWriter;
namespace _impl {
class ArrayWriterBase;
}

struct MemStats {
    size_t allocated = 0;
    size_t used = 0;
    size_t array_count = 0;
};

// Stores a value obtained from Array::get(). It is a ref if the least
// significant bit is clear, otherwise it is a tagged integer. A tagged integer
// is obtained from a logical integer value by left shifting by one bit position
// (multiplying by two), and then setting the least significant bit to
// one. Clearly, this means that the maximum value that can be stored as a
// tagged integer is 2**63 - 1.
class RefOrTagged {
public:
    bool is_ref() const noexcept;
    bool is_tagged() const noexcept;
    ref_type get_as_ref() const noexcept;
    uint_fast64_t get_as_int() const noexcept;
    static RefOrTagged make_ref(ref_type) noexcept;
    static RefOrTagged make_tagged(uint_fast64_t) noexcept;

private:
    int_fast64_t m_value;
    RefOrTagged(int_fast64_t) noexcept;
    friend class Array;
};
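
// An illustrative sketch (not part of the original header) of the tagging
// round trip described above. A logical value v is stored as (v << 1) | 1,
// so the low bit distinguishes it from a ref, whose low bit is always clear:
//
//     RefOrTagged rot = RefOrTagged::make_tagged(42); // stores (42 << 1) | 1 == 85
//     REALM_ASSERT(rot.is_tagged());                  // least significant bit is set
//     uint_fast64_t v = rot.get_as_int();             // 85 >> 1 == 42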
template <class T>
class QueryStateFindAll : public QueryStateBase {
public:
    explicit QueryStateFindAll(T& keys, size_t limit = size_t(-1))
        : QueryStateBase(limit)
        , m_keys(keys)
    {
    }
    bool match(size_t index, Mixed) noexcept final;

private:
    T& m_keys;
};

class QueryStateFindFirst : public QueryStateBase {
public:
    size_t m_state = realm::not_found;
    QueryStateFindFirst()
        : QueryStateBase(1)
    {
    }
    bool match(size_t index, Mixed) noexcept final;
};
class Array : public Node, public ArrayParent {
public:
    /// Create an array accessor in the unattached state.
    explicit Array(Allocator& allocator) noexcept
        : Node(allocator)
    {
    }

    ~Array() noexcept override {}

    /// Create a new integer array of the specified type and size, filled with
    /// the specified value, and attach this accessor to it. This does not
    /// modify the parent reference information of this accessor.
    ///
    /// Note that the caller assumes ownership of the allocated underlying
    /// node. It is not owned by the accessor.
    void create(Type, bool context_flag = false, size_t size = 0, int_fast64_t value = 0);
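
    // Illustrative usage sketch (assumes an Allocator instance `alloc`): the
    // accessor attaches to a freshly allocated node, which the caller must
    // eventually free, e.g. via destroy() or destroy_deep():
    //
    //     Array arr(alloc);
    //     arr.create(Array::type_Normal); // caller now owns the underlying node
    //     arr.add(7);                     // see add()/insert() below
    //     arr.destroy();                  // free the node when done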
    /// Reinitialize this array accessor to point to the specified new
    /// underlying memory. This does not modify the parent reference information
    /// of this accessor.
    void init_from_ref(ref_type ref) noexcept
    {
        REALM_ASSERT_DEBUG(ref);
        char* header = m_alloc.translate(ref);
        init_from_mem(MemRef(header, ref, m_alloc));
    }

    /// Same as init_from_ref(ref_type), but avoids the mapping of 'ref' to a
    /// memory pointer.
    void init_from_mem(MemRef) noexcept;

    /// Same as `init_from_ref(get_ref_from_parent())`.
    void init_from_parent() noexcept
    {
        ref_type ref = get_ref_from_parent();
        init_from_ref(ref);
    }

    /// Called in the context of Group::commit() to ensure that attached
    /// accessors stay valid across a commit. Please note that this works only
    /// for non-transactional commits. Accessors obtained during a transaction
    /// are always detached when the transaction ends.
    void update_from_parent() noexcept;

    /// Change the type of an already attached array node.
    ///
    /// The effect of calling this function on an unattached accessor is
    /// undefined.
    void set_type(Type);

    /// Construct a complete copy of this array (including its subarrays) using
    /// the specified target allocator, and return just the reference to the
    /// underlying memory.
    MemRef clone_deep(Allocator& target_alloc) const;

    /// Construct an empty integer array of the specified type, and return just
    /// the reference to the underlying memory.
    static MemRef create_empty_array(Type, bool context_flag, Allocator&);

    /// Construct an integer array of the specified type and size, and return
    /// just the reference to the underlying memory. All elements will be
    /// initialized to the specified value.
    static MemRef create_array(Type, bool context_flag, size_t size, int_fast64_t value, Allocator&);

    Type get_type() const noexcept;

    /// The meaning of 'width' depends on the context in which this
    /// array is used.
    size_t get_width() const noexcept
    {
        REALM_ASSERT_3(m_width, ==, get_width_from_header(get_header()));
        return m_width;
    }

    void insert(size_t ndx, int_fast64_t value);
    void add(int_fast64_t value);

    // Used from ArrayBlob
    size_t blob_size() const noexcept;
    ref_type blob_replace(size_t begin, size_t end, const char* data, size_t data_size, bool add_zero_term);

    /// This function is guaranteed to not throw if the current width is
    /// sufficient for the specified value (e.g. if you have called
    /// ensure_minimum_width(value)) and get_alloc().is_read_only(get_ref())
    /// returns false (noexcept:array-set). Note that for a value of zero, the
    /// first criterion is trivially satisfied.
    void set(size_t ndx, int64_t value);

    void set_as_ref(size_t ndx, ref_type ref);

    template <size_t w>
    void set(size_t ndx, int64_t value);

    int64_t get(size_t ndx) const noexcept;

    template <size_t w>
    int64_t get(size_t ndx) const noexcept;

    void get_chunk(size_t ndx, int64_t res[8]) const noexcept;

    template <size_t w>
    void get_chunk(size_t ndx, int64_t res[8]) const noexcept;

    ref_type get_as_ref(size_t ndx) const noexcept;

    RefOrTagged get_as_ref_or_tagged(size_t ndx) const noexcept;
    void set(size_t ndx, RefOrTagged);
    void add(RefOrTagged);
    void ensure_minimum_width(RefOrTagged);

    int64_t front() const noexcept;
    int64_t back() const noexcept;

    void alloc(size_t init_size, size_t new_width)
    {
        REALM_ASSERT_3(m_width, ==, get_width_from_header(get_header()));
        REALM_ASSERT_3(m_size, ==, get_size_from_header(get_header()));
        Node::alloc(init_size, new_width);
        update_width_cache_from_header();
    }
    /// Remove the element at the specified index, and move elements at higher
    /// indexes to the next lower index.
    ///
    /// This function does **not** destroy removed subarrays. That is, if the
    /// erased element is a 'ref' pointing to a subarray, then that subarray
    /// will not be destroyed automatically.
    ///
    /// This function guarantees that no exceptions will be thrown if
    /// get_alloc().is_read_only(get_ref()) would return false before the
    /// call. This is automatically guaranteed if the array is used in a
    /// non-transactional context, or if the array has already been successfully
    /// modified within the current write transaction.
    void erase(size_t ndx);

    /// Same as erase(size_t), but remove all elements in the specified
    /// range.
    ///
    /// Please note that this function does **not** destroy removed subarrays.
    ///
    /// This function guarantees that no exceptions will be thrown if
    /// get_alloc().is_read_only(get_ref()) would return false before the call.
    void erase(size_t begin, size_t end);

    /// Reduce the size of this array to the specified number of elements. It is
    /// an error to specify a size that is greater than the current size of this
    /// array. The effect of doing so is undefined. This is just a shorthand for
    /// calling the ranged erase() function with appropriate arguments.
    ///
    /// Please note that this function does **not** destroy removed
    /// subarrays. See clear_and_destroy_children() for an alternative.
    ///
    /// This function guarantees that no exceptions will be thrown if
    /// get_alloc().is_read_only(get_ref()) would return false before the call.
    void truncate(size_t new_size);

    /// Reduce the size of this array to the specified number of elements. It is
    /// an error to specify a size that is greater than the current size of this
    /// array. The effect of doing so is undefined. Subarrays will be destroyed
    /// recursively, as if by a call to `destroy_deep(subarray_ref, alloc)`.
    ///
    /// This function is guaranteed not to throw if
    /// get_alloc().is_read_only(get_ref()) returns false.
    void truncate_and_destroy_children(size_t new_size);

    /// Remove every element from this array. This is just a shorthand for
    /// calling truncate(0).
    ///
    /// Please note that this function does **not** destroy removed
    /// subarrays. See clear_and_destroy_children() for an alternative.
    ///
    /// This function guarantees that no exceptions will be thrown if
    /// get_alloc().is_read_only(get_ref()) would return false before the call.
    void clear();

    /// Remove every element in this array. Subarrays will be destroyed
    /// recursively, as if by a call to `destroy_deep(subarray_ref,
    /// alloc)`. This is just a shorthand for calling
    /// truncate_and_destroy_children(0).
    ///
    /// This function guarantees that no exceptions will be thrown if
    /// get_alloc().is_read_only(get_ref()) would return false before the call.
    void clear_and_destroy_children();

    /// If necessary, expand the representation so that it can store the
    /// specified value.
    void ensure_minimum_width(int_fast64_t value);

    /// This one may change the representation of the array, so be careful if
    /// you call it after ensure_minimum_width().
    void set_all_to_zero();

    /// Add \a diff to the element at the specified index.
    void adjust(size_t ndx, int_fast64_t diff);

    /// Add \a diff to all the elements in the specified index range.
    void adjust(size_t begin, size_t end, int_fast64_t diff);
    //@{
    /// This is similar in spirit to std::move() from `<algorithm>`.
    /// \a dest_begin must not be in the range [`begin`,`end`)
    ///
    /// This function is guaranteed to not throw if
    /// `get_alloc().is_read_only(get_ref())` returns false.
    void move(size_t begin, size_t end, size_t dest_begin);
    //@}

    // Move elements from ndx and above to another array
    void move(Array& dst, size_t ndx);

    //@{
    /// Find the lower/upper bound of the specified value in a sequence of
    /// integers which must already be sorted in ascending order.
    ///
    /// For an integer value '`v`', lower_bound_int(v) returns the index '`l`'
    /// of the first element such that `get(l) >= v`, and upper_bound_int(v)
    /// returns the index '`u`' of the first element such that `get(u) >
    /// v`. In both cases, if no such element is found, the returned value is
    /// the number of elements in the array.
    ///
    ///     3 3 3 4 4 4 5 6 7 9 9 9
    ///     ^     ^     ^     ^     ^
    ///     |     |     |     |     |
    ///     |     |     |     |      -- Lower and upper bound of 15
    ///     |     |     |     |
    ///     |     |     |      -- Lower and upper bound of 8
    ///     |     |     |
    ///     |     |      -- Upper bound of 4
    ///     |     |
    ///     |      -- Lower bound of 4
    ///     |
    ///      -- Lower and upper bound of 1
    ///
    /// These functions are similar to std::lower_bound() and
    /// std::upper_bound().
    ///
    /// We currently use binary search. See for example
    /// http://www.tbray.org/ongoing/When/200x/2003/03/22/Binary.
    ///
    /// FIXME: It may be worth considering if overall efficiency can be improved
    /// by doing a linear search for short sequences.
    size_t lower_bound_int(int64_t value) const noexcept;
    size_t upper_bound_int(int64_t value) const noexcept;
    //@}
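
    // Worked example (matching the diagram above) for the sequence
    // 3 3 3 4 4 4 5 6 7 9 9 9, indexed 0-11:
    //
    //     lower_bound_int(4);  // == 3, index of the first element >= 4
    //     upper_bound_int(4);  // == 6, index of the first element > 4
    //     lower_bound_int(8);  // == 9, as is upper_bound_int(8)
    //     lower_bound_int(15); // == 12 (the array size; no such element)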
    int64_t get_sum(size_t start = 0, size_t end = size_t(-1)) const
    {
        return sum(start, end);
    }

    /// This information is guaranteed to be cached in the array accessor.
    bool is_inner_bptree_node() const noexcept;

    /// Returns true if the type is either type_HasRefs or type_InnerBptreeNode.
    ///
    /// This information is guaranteed to be cached in the array accessor.
    bool has_refs() const noexcept;
    void set_has_refs(bool) noexcept;

    /// This information is guaranteed to be cached in the array accessor.
    ///
    /// Columns and indexes can use the context bit to differentiate leaf types.
    bool get_context_flag() const noexcept;
    void set_context_flag(bool) noexcept;

    /// Recursively destroy children (as if calling
    /// clear_and_destroy_children()), then put this accessor into the detached
    /// state (as if calling detach()), then free the allocated memory. If this
    /// accessor is already in the detached state, this function has no effect
    /// (idempotency).
    void destroy_deep() noexcept;

    /// Shorthand for `destroy_deep(MemRef(ref, alloc), alloc)`.
    static void destroy_deep(ref_type ref, Allocator& alloc) noexcept;

    /// Destroy the specified array node and all of its children, recursively.
    ///
    /// This is done by freeing the specified array node after calling
    /// destroy_deep() for every contained 'ref' element.
    static void destroy_deep(MemRef, Allocator&) noexcept;

    // Clone deep
    static MemRef clone(MemRef, Allocator& from_alloc, Allocator& target_alloc);

    // Serialization

    /// Returns the ref (position in the target stream) of the written copy of
    /// this array, or the ref of the original array if \a only_if_modified is
    /// true, and this array is unmodified (Alloc::is_read_only()).
    ///
    /// The number of bytes that will be written by a non-recursive invocation
    /// of this function is exactly the number returned by get_byte_size().
    ///
    /// \param out The destination stream (writer).
    ///
    /// \param deep If true, recursively write out subarrays, but still subject
    /// to \a only_if_modified.
    ///
    /// \param only_if_modified Set to `false` to always write, or to `true` to
    /// only write the array if it has been modified.
    ref_type write(_impl::ArrayWriterBase& out, bool deep, bool only_if_modified) const;

    /// Same as non-static write() with `deep` set to true. This is for the
    /// cases where you do not already have an array accessor available.
    static ref_type write(ref_type, Allocator&, _impl::ArrayWriterBase&, bool only_if_modified);

    size_t find_first(int64_t value, size_t begin = 0, size_t end = size_t(-1)) const;

    // Wrappers for backwards compatibility and for simple use without
    // setting up state initialization etc. See the usage sketch below.
    template <class cond>
    size_t find_first(int64_t value, size_t start = 0, size_t end = size_t(-1)) const
    {
        REALM_ASSERT(start <= m_size && (end <= m_size || end == size_t(-1)) && start <= end);
        // TODO: it would be nice to avoid this in order to speed up find_first loops
        QueryStateFindFirst state;
        Finder finder = m_vtable->finder[cond::condition];
        (this->*finder)(value, start, end, 0, &state);
        return static_cast<size_t>(state.m_state);
    }
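
    // Usage sketch for the wrapper above, assuming the condition classes from
    // <realm/query_conditions.hpp> (e.g. Equal, Greater):
    //
    //     size_t ndx = arr.find_first<Equal>(42);  // index of first element == 42
    //     if (ndx != realm::not_found) { /* ... */ }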
    /// Get the specified element without the cost of constructing an
    /// array instance. If an array instance is already available, or
    /// you need to get multiple values, then this method will be
    /// slower.
    static int_fast64_t get(const char* header, size_t ndx) noexcept;

    /// Like get(const char*, size_t) but gets two consecutive
    /// elements.
    static std::pair<int64_t, int64_t> get_two(const char* header, size_t ndx) noexcept;

    static RefOrTagged get_as_ref_or_tagged(const char* header, size_t ndx) noexcept
    {
        return get(header, ndx);
    }
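
    // Illustrative sketch: reading directly from a node header without an
    // accessor (assumes `alloc` is the Allocator that owns `ref`):
    //
    //     const char* header = alloc.translate(ref); // map ref to memory
    //     int_fast64_t first = Array::get(header, 0);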
    /// Get the number of bytes currently in use by this array. This
    /// includes the array header, but it does not include allocated
    /// bytes corresponding to excess capacity. The result is
    /// guaranteed to be a multiple of 8 (i.e., 64-bit aligned).
    ///
    /// This number is exactly the number of bytes that will be
    /// written by a non-recursive invocation of write().
    size_t get_byte_size() const noexcept;

    /// Get the maximum number of bytes that can be written by a
    /// non-recursive invocation of write() on an array with the
    /// specified number of elements, that is, the maximum value that
    /// can be returned by get_byte_size().
    static size_t get_max_byte_size(size_t num_elems) noexcept;

    /// FIXME: Belongs in IntegerArray
    static size_t calc_aligned_byte_size(size_t size, int width);

#ifdef REALM_DEBUG
    class MemUsageHandler {
    public:
        virtual void handle(ref_type ref, size_t allocated, size_t used) = 0;
    };
    void report_memory_usage(MemUsageHandler&) const;
    void stats(MemStats& stats_dest) const noexcept;
#endif

    void verify() const;

    Array& operator=(const Array&) = delete; // not allowed
    Array(const Array&) = delete;            // not allowed

protected:
    // This returns the minimum value ("lower bound") of the representable values
    // for the given bit width. Valid widths are 0, 1, 2, 4, 8, 16, 32, and 64.
    static constexpr int_fast64_t lbound_for_width(size_t width) noexcept;

    // This returns the maximum value ("inclusive upper bound") of the representable values
    // for the given bit width. Valid widths are 0, 1, 2, 4, 8, 16, 32, and 64.
    static constexpr int_fast64_t ubound_for_width(size_t width) noexcept;

    // This will eventually have to be used; it is exposed here for testing.
    size_t count(int64_t value) const noexcept;

private:
    void update_width_cache_from_header() noexcept;
    void do_ensure_minimum_width(int_fast64_t);

    int64_t sum(size_t start, size_t end) const;

    template <size_t w>
    int64_t sum(size_t start, size_t end) const;

protected:
    /// It is an error to specify a non-zero value unless the width
    /// type is wtype_Bits. It is also an error to specify a non-zero
    /// size if the width type is wtype_Ignore.
    static MemRef create(Type, bool context_flag, WidthType, size_t size, int_fast64_t value, Allocator&);

    // Overriding method in ArrayParent
    void update_child_ref(size_t, ref_type) override;

    // Overriding method in ArrayParent
    ref_type get_child_ref(size_t) const noexcept override;

    void destroy_children(size_t offset = 0) noexcept;

protected:
    // Getters and Setters for adaptive-packed arrays
    typedef int64_t (Array::*Getter)(size_t) const; // Note: getters must not throw
    typedef void (Array::*Setter)(size_t, int64_t);
    typedef bool (Array::*Finder)(int64_t, size_t, size_t, size_t, QueryStateBase*) const;
    typedef void (Array::*ChunkGetter)(size_t, int64_t res[8]) const; // Note: getters must not throw

    struct VTable {
        Getter getter;
        ChunkGetter chunk_getter;
        Setter setter;
        Finder finder[cond_VTABLE_FINDER_COUNT]; // one for each active function pointer
    };
    template <size_t w>
    struct VTableForWidth;

    // This is the one installed into the m_vtable->finder slots.
    template <class cond, size_t bitwidth>
    bool find_vtable(int64_t value, size_t start, size_t end, size_t baseindex, QueryStateBase* state) const;

    template <size_t w>
    int64_t get_universal(const char* const data, const size_t ndx) const;

protected:
    /// Takes a 64-bit value and returns the minimum number of bits needed
    /// to fit the value. For alignment this is rounded up to the nearest
    /// power of two. Possible results: {0, 1, 2, 4, 8, 16, 32, 64}
    static size_t bit_width(int64_t value);

protected:
    Getter m_getter = nullptr; // cached to avoid indirection
    const VTable* m_vtable = nullptr;

    uint_least8_t m_width = 0;   // Size of an element (meaning depends on type of array).
    int64_t m_lbound;            // min number that can be stored with current m_width
    int64_t m_ubound;            // max number that can be stored with current m_width

    bool m_is_inner_bptree_node; // This array is an inner node of B+-tree.
    bool m_has_refs;             // Elements whose first bit is zero are refs to subarrays.
    bool m_context_flag;         // Meaning depends on context.

private:
    ref_type do_write_shallow(_impl::ArrayWriterBase&) const;
    ref_type do_write_deep(_impl::ArrayWriterBase&, bool only_if_modified) const;

#ifdef REALM_DEBUG
    void report_memory_usage_2(MemUsageHandler&) const;
#endif

    friend class Allocator;
    friend class SlabAlloc;
    friend class GroupWriter;
    friend class ArrayWithFind;
};
// Implementation:

constexpr inline int_fast64_t Array::lbound_for_width(size_t width) noexcept
{
    if (width == 32) {
        return -0x80000000LL;
    }
    else if (width == 16) {
        return -0x8000LL;
    }
    else if (width < 8) {
        return 0;
    }
    else if (width == 8) {
        return -0x80LL;
    }
    else if (width == 64) {
        return -0x8000000000000000LL;
    }
    else {
        REALM_UNREACHABLE();
    }
}

constexpr inline int_fast64_t Array::ubound_for_width(size_t width) noexcept
{
    if (width == 32) {
        return 0x7FFFFFFFLL;
    }
    else if (width == 16) {
        return 0x7FFFLL;
    }
    else if (width == 0) {
        return 0;
    }
    else if (width == 1) {
        return 1;
    }
    else if (width == 2) {
        return 3;
    }
    else if (width == 4) {
        return 15;
    }
    else if (width == 8) {
        return 0x7FLL;
    }
    else if (width == 64) {
        return 0x7FFFFFFFFFFFFFFFLL;
    }
    else {
        REALM_UNREACHABLE();
    }
}
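
// Summary of the representable ranges implied by the two functions above
// (widths below 8 store only non-negative values; widths of 8 or more are
// signed two's complement):
//
//     width  0: [0, 0]     width  8: [-2^7,  2^7  - 1]
//     width  1: [0, 1]     width 16: [-2^15, 2^15 - 1]
//     width  2: [0, 3]     width 32: [-2^31, 2^31 - 1]
//     width  4: [0, 15]    width 64: [-2^63, 2^63 - 1]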
inline bool RefOrTagged::is_ref() const noexcept
{
    return (m_value & 1) == 0;
}

inline bool RefOrTagged::is_tagged() const noexcept
{
    return !is_ref();
}

inline ref_type RefOrTagged::get_as_ref() const noexcept
{
    // to_ref() is defined in <alloc.hpp>
    return to_ref(m_value);
}

inline uint_fast64_t RefOrTagged::get_as_int() const noexcept
{
    // The bitwise AND is there in case uint_fast64_t is wider than 64 bits.
    return (uint_fast64_t(m_value) & 0xFFFFFFFFFFFFFFFFULL) >> 1;
}

inline RefOrTagged RefOrTagged::make_ref(ref_type ref) noexcept
{
    // from_ref() is defined in <alloc.hpp>
    int_fast64_t value = from_ref(ref);
    return RefOrTagged(value);
}

inline RefOrTagged RefOrTagged::make_tagged(uint_fast64_t i) noexcept
{
    REALM_ASSERT(i < (1ULL << 63));
    return RefOrTagged((i << 1) | 1);
}

inline RefOrTagged::RefOrTagged(int_fast64_t value) noexcept
    : m_value(value)
{
}
inline void Array::create(Type type, bool context_flag, size_t length, int_fast64_t value)
{
    MemRef mem = create_array(type, context_flag, length, value, m_alloc); // Throws
    init_from_mem(mem);
}

inline Array::Type Array::get_type() const noexcept
{
    if (m_is_inner_bptree_node) {
        REALM_ASSERT_DEBUG(m_has_refs);
        return type_InnerBptreeNode;
    }
    if (m_has_refs)
        return type_HasRefs;
    return type_Normal;
}

inline void Array::get_chunk(size_t ndx, int64_t res[8]) const noexcept
{
    REALM_ASSERT_DEBUG(ndx < m_size);
    (this->*(m_vtable->chunk_getter))(ndx, res);
}
template <size_t w>
int64_t Array::get_universal(const char* data, size_t ndx) const
{
    if (w == 0) {
        return 0;
    }
    else if (w == 1) {
        size_t offset = ndx >> 3;
        return (data[offset] >> (ndx & 7)) & 0x01;
    }
    else if (w == 2) {
        size_t offset = ndx >> 2;
        return (data[offset] >> ((ndx & 3) << 1)) & 0x03;
    }
    else if (w == 4) {
        size_t offset = ndx >> 1;
        return (data[offset] >> ((ndx & 1) << 2)) & 0x0F;
    }
    else if (w == 8) {
        return *reinterpret_cast<const signed char*>(data + ndx);
    }
    else if (w == 16) {
        size_t offset = ndx * 2;
        return *reinterpret_cast<const int16_t*>(data + offset);
    }
    else if (w == 32) {
        size_t offset = ndx * 4;
        return *reinterpret_cast<const int32_t*>(data + offset);
    }
    else if (w == 64) {
        size_t offset = ndx * 8;
        return *reinterpret_cast<const int64_t*>(data + offset);
    }
    else {
        REALM_ASSERT_DEBUG(false);
        return int64_t(-1);
    }
}
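
// Worked example of the bit arithmetic above for w == 2 (four 2-bit elements
// per byte): for ndx == 5, the element lives in byte 5 >> 2 == 1, at bit
// offset (5 & 3) << 1 == 2, and is isolated by the mask 0x03:
//
//     int64_t v = (data[1] >> 2) & 0x03;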
template <size_t w>
int64_t Array::get(size_t ndx) const noexcept
{
    return get_universal<w>(m_data, ndx);
}

inline int64_t Array::get(size_t ndx) const noexcept
{
    REALM_ASSERT_DEBUG(is_attached());
    REALM_ASSERT_DEBUG_EX(ndx < m_size, ndx, m_size);
    return (this->*m_getter)(ndx);

    // Two ideas that are not efficient but may be worth looking into again:
    /*
    // Assume the correct width is found early in REALM_TEMPEX, which is the case for B-tree
    // offsets that are probably 2^16 long. Turns out to be 25% faster if found immediately,
    // but 50-300% slower if found later.
    REALM_TEMPEX(return get, (ndx));
    */
    /*
    // Slightly slower in both of the if-cases. Also needs a match-count m_size check too, to
    // avoid reading beyond the array.
    if (m_width >= 8 && m_size > ndx + 7)
        return get<64>(ndx >> m_shift) & m_widthmask;
    else
        return (this->*(m_vtable->getter))(ndx);
    */
}
inline int64_t Array::front() const noexcept
{
    return get(0);
}

inline int64_t Array::back() const noexcept
{
    return get(m_size - 1);
}

inline ref_type Array::get_as_ref(size_t ndx) const noexcept
{
    REALM_ASSERT_DEBUG(is_attached());
    REALM_ASSERT_DEBUG_EX(m_has_refs, m_ref, ndx, m_size);
    int64_t v = get(ndx);
    return to_ref(v);
}

inline RefOrTagged Array::get_as_ref_or_tagged(size_t ndx) const noexcept
{
    REALM_ASSERT(has_refs());
    return RefOrTagged(get(ndx));
}

inline void Array::set(size_t ndx, RefOrTagged ref_or_tagged)
{
    REALM_ASSERT(has_refs());
    set(ndx, ref_or_tagged.m_value); // Throws
}

inline void Array::add(RefOrTagged ref_or_tagged)
{
    REALM_ASSERT(has_refs());
    add(ref_or_tagged.m_value); // Throws
}

inline void Array::ensure_minimum_width(RefOrTagged ref_or_tagged)
{
    REALM_ASSERT(has_refs());
    ensure_minimum_width(ref_or_tagged.m_value); // Throws
}
inline bool Array::is_inner_bptree_node() const noexcept
{
    return m_is_inner_bptree_node;
}

inline bool Array::has_refs() const noexcept
{
    return m_has_refs;
}

inline void Array::set_has_refs(bool value) noexcept
{
    if (m_has_refs != value) {
        REALM_ASSERT(!is_read_only());
        m_has_refs = value;
        set_hasrefs_in_header(value, get_header());
    }
}

inline bool Array::get_context_flag() const noexcept
{
    return m_context_flag;
}

inline void Array::set_context_flag(bool value) noexcept
{
    if (m_context_flag != value) {
        copy_on_write();
        m_context_flag = value;
        set_context_flag_in_header(value, get_header());
    }
}

inline void Array::destroy_deep() noexcept
{
    if (!is_attached())
        return;
    if (m_has_refs)
        destroy_children();
    char* header = get_header_from_data(m_data);
    m_alloc.free_(m_ref, header);
    m_data = nullptr;
}
inline ref_type Array::write(_impl::ArrayWriterBase& out, bool deep, bool only_if_modified) const
{
    REALM_ASSERT(is_attached());
    if (only_if_modified && m_alloc.is_read_only(m_ref))
        return m_ref;
    if (!deep || !m_has_refs)
        return do_write_shallow(out);            // Throws
    return do_write_deep(out, only_if_modified); // Throws
}

inline ref_type Array::write(ref_type ref, Allocator& alloc, _impl::ArrayWriterBase& out, bool only_if_modified)
{
    if (only_if_modified && alloc.is_read_only(ref))
        return ref;
    Array array(alloc);
    array.init_from_ref(ref);
    if (!array.m_has_refs)
        return array.do_write_shallow(out);            // Throws
    return array.do_write_deep(out, only_if_modified); // Throws
}

inline void Array::add(int_fast64_t value)
{
    insert(m_size, value);
}

inline void Array::erase(size_t ndx)
{
    // This can throw, but only if the array is currently in read-only
    // memory.
    move(ndx + 1, size(), ndx);

    // Update size (also in header)
    --m_size;
    set_header_size(m_size);
}

inline void Array::erase(size_t begin, size_t end)
{
    if (begin != end) {
        // This can throw, but only if the array is currently in read-only memory.
        move(end, size(), begin); // Throws

        // Update size (also in header)
        m_size -= end - begin;
        set_header_size(m_size);
    }
}

inline void Array::clear()
{
    truncate(0); // Throws
}

inline void Array::clear_and_destroy_children()
{
    truncate_and_destroy_children(0);
}

inline void Array::destroy_deep(ref_type ref, Allocator& alloc) noexcept
{
    destroy_deep(MemRef(ref, alloc), alloc);
}

inline void Array::destroy_deep(MemRef mem, Allocator& alloc) noexcept
{
    if (!get_hasrefs_from_header(mem.get_addr())) {
        alloc.free_(mem);
        return;
    }
    Array array(alloc);
    array.init_from_mem(mem);
    array.destroy_deep();
}

inline void Array::adjust(size_t ndx, int_fast64_t diff)
{
    REALM_ASSERT_3(ndx, <=, m_size);
    if (diff != 0) {
        // FIXME: Should be optimized
        int_fast64_t v = get(ndx);
        set(ndx, int64_t(v + diff)); // Throws
    }
}

inline void Array::adjust(size_t begin, size_t end, int_fast64_t diff)
{
    if (diff != 0) {
        // FIXME: Should be optimized
        for (size_t i = begin; i != end; ++i)
            adjust(i, diff); // Throws
    }
}
//-------------------------------------------------

inline size_t Array::get_byte_size() const noexcept
{
    const char* header = get_header_from_data(m_data);
    WidthType wtype = Node::get_wtype_from_header(header);
    size_t num_bytes = NodeHeader::calc_byte_size(wtype, m_size, m_width);

    REALM_ASSERT_7(m_alloc.is_read_only(m_ref), ==, true, ||, num_bytes, <=, get_capacity_from_header(header));

    return num_bytes;
}

//-------------------------------------------------

inline MemRef Array::clone_deep(Allocator& target_alloc) const
{
    char* header = get_header_from_data(m_data);
    return clone(MemRef(header, m_ref, m_alloc), m_alloc, target_alloc); // Throws
}

inline MemRef Array::create_empty_array(Type type, bool context_flag, Allocator& alloc)
{
    size_t size = 0;
    int_fast64_t value = 0;
    return create_array(type, context_flag, size, value, alloc); // Throws
}

inline MemRef Array::create_array(Type type, bool context_flag, size_t size, int_fast64_t value, Allocator& alloc)
{
    return create(type, context_flag, wtype_Bits, size, value, alloc); // Throws
}

inline size_t Array::get_max_byte_size(size_t num_elems) noexcept
{
    int max_bytes_per_elem = 8;
    return header_size + num_elems * max_bytes_per_elem;
}

inline void Array::update_child_ref(size_t child_ndx, ref_type new_ref)
{
    set(child_ndx, new_ref);
}

inline ref_type Array::get_child_ref(size_t child_ndx) const noexcept
{
    return get_as_ref(child_ndx);
}

inline void Array::ensure_minimum_width(int_fast64_t value)
{
    if (value >= m_lbound && value <= m_ubound)
        return;
    do_ensure_minimum_width(value);
}
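
// Illustrative sketch: per the width bounds summarized earlier, an array at
// width 4 can hold values in [0, 15], so ensure_minimum_width(100) must expand
// the representation (100 needs 7 bits, rounded up to a width of 8), whereas
// ensure_minimum_width(12) is a no-op:
//
//     arr.ensure_minimum_width(100); // may reallocate and widen to 8 bits
//     arr.set(0, 100);               // now guaranteed not to throw if the
//                                    // array is not in read-only memory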
} // namespace realm

#endif // REALM_ARRAY_HPP