array.hpp

/*************************************************************************
 *
 * Copyright 2016 Realm Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 **************************************************************************/

#ifndef REALM_ARRAY_HPP
#define REALM_ARRAY_HPP

#include <realm/node.hpp>
#include <realm/query_state.hpp>
#include <realm/column_fwd.hpp>
#include <realm/array_direct.hpp>

namespace realm {

// Pre-declarations
class GroupWriter;
namespace _impl {
class ArrayWriterBase;
}

struct MemStats {
    size_t allocated = 0;
    size_t used = 0;
    size_t array_count = 0;
};

// Stores a value obtained from Array::get(). It is a ref if the least
// significant bit is clear, otherwise it is a tagged integer. A tagged integer
// is obtained from a logical integer value by left shifting by one bit position
// (multiplying by two), and then setting the least significant bit to
// one. Clearly, this means that the maximum value that can be stored as a
// tagged integer is 2**63 - 1.
class RefOrTagged {
public:
    bool is_ref() const noexcept;
    bool is_tagged() const noexcept;
    ref_type get_as_ref() const noexcept;
    uint_fast64_t get_as_int() const noexcept;

    static RefOrTagged make_ref(ref_type) noexcept;
    static RefOrTagged make_tagged(uint_fast64_t) noexcept;

private:
    int_fast64_t m_value;
    RefOrTagged(int_fast64_t) noexcept;
    friend class Array;
};
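
// Illustrative sketch (not part of the original header): how the tagging
// scheme described above behaves, based on the inline definitions further
// down in this file.
//
//   RefOrTagged t = RefOrTagged::make_tagged(21);
//   // stored internally as (21 << 1) | 1 == 43; the set low bit marks a
//   // tagged integer rather than a ref
//   // t.is_tagged() == true, t.get_as_int() == 21
//
//   RefOrTagged r = RefOrTagged::make_ref(ref_type(128));
//   // refs are 8-byte aligned, so their low bit is always clear
//   // r.is_ref() == true, r.get_as_ref() == 128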

template <class T>
class QueryStateFindAll : public QueryStateBase {
public:
    explicit QueryStateFindAll(T& keys, size_t limit = -1)
        : QueryStateBase(limit)
        , m_keys(keys)
    {
    }
    bool match(size_t index, Mixed) noexcept final;
    bool match(size_t index) noexcept final;

private:
    T& m_keys;
};

class QueryStateFindFirst : public QueryStateBase {
public:
    size_t m_state = realm::not_found;
    QueryStateFindFirst()
        : QueryStateBase(1)
    {
    }
    bool match(size_t index, Mixed) noexcept final;
    bool match(size_t index) noexcept final;
};

class Array : public Node, public ArrayParent {
public:
    /// Create an array accessor in the unattached state.
    explicit Array(Allocator& allocator) noexcept
        : Node(allocator)
    {
    }

    ~Array() noexcept override {}

    /// Create a new integer array of the specified type and size, filled
    /// with the specified value, and attach this accessor to it. This does not
    /// modify the parent reference information of this accessor.
    ///
    /// Note that the caller assumes ownership of the allocated underlying
    /// node. It is not owned by the accessor.
    void create(Type, bool context_flag = false, size_t size = 0, int_fast64_t value = 0);

    /// Reinitialize this array accessor to point to the specified new
    /// underlying memory. This does not modify the parent reference information
    /// of this accessor.
    void init_from_ref(ref_type ref) noexcept
    {
        REALM_ASSERT_DEBUG(ref);
        char* header = m_alloc.translate(ref);
        init_from_mem(MemRef(header, ref, m_alloc));
    }

    /// Same as init_from_ref(ref_type), but avoids the mapping of 'ref' to a
    /// memory pointer.
    void init_from_mem(MemRef) noexcept;

    /// Same as `init_from_ref(get_ref_from_parent())`.
    void init_from_parent() noexcept
    {
        ref_type ref = get_ref_from_parent();
        init_from_ref(ref);
    }
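
    // Illustrative sketch (not part of the original header): a typical
    // accessor lifecycle. Allocator::get_default(), get_ref() and destroy()
    // come from other headers and are assumed here for demonstration only.
    //
    //   Array arr(Allocator::get_default());
    //   arr.create(Array::type_Normal);     // allocate a new empty leaf
    //   ref_type ref = arr.get_ref();       // the caller owns this node
    //   ...
    //   Array other(Allocator::get_default());
    //   other.init_from_ref(ref);           // attach a second accessor
    //   ...
    //   arr.destroy();                      // free the underlying memory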

    /// Called in the context of Group::commit() to ensure that attached
    /// accessors stay valid across a commit. Please note that this works only
    /// for non-transactional commits. Accessors obtained during a transaction
    /// are always detached when the transaction ends.
    void update_from_parent() noexcept;

    /// Change the type of an already attached array node.
    ///
    /// The effect of calling this function on an unattached accessor is
    /// undefined.
    void set_type(Type);

    /// Construct an empty integer array of the specified type, and return just
    /// the reference to the underlying memory.
    static MemRef create_empty_array(Type, bool context_flag, Allocator&);

    /// Construct an integer array of the specified type and size, and return
    /// just the reference to the underlying memory. All elements will be
    /// initialized to the specified value.
    static MemRef create_array(Type, bool context_flag, size_t size, int_fast64_t value, Allocator&);

    Type get_type() const noexcept;

    /// The meaning of 'width' depends on the context in which this
    /// array is used.
    size_t get_width() const noexcept
    {
        REALM_ASSERT_3(m_width, ==, get_width_from_header(get_header()));
        return m_width;
    }

    void insert(size_t ndx, int_fast64_t value);
    void add(int_fast64_t value);

    // Used from ArrayBlob
    size_t blob_size() const noexcept;
    ref_type blob_replace(size_t begin, size_t end, const char* data, size_t data_size, bool add_zero_term);

    /// This function is guaranteed to not throw if the current width is
    /// sufficient for the specified value (e.g. if you have called
    /// ensure_minimum_width(value)) and get_alloc().is_read_only(get_ref())
    /// returns false (noexcept:array-set). Note that for a value of zero, the
    /// first criterion is trivially satisfied.
    void set(size_t ndx, int64_t value);

    void set_as_ref(size_t ndx, ref_type ref);

    template <size_t w>
    void set(size_t ndx, int64_t value);

    int64_t get(size_t ndx) const noexcept;

    template <size_t w>
    int64_t get(size_t ndx) const noexcept;

    void get_chunk(size_t ndx, int64_t res[8]) const noexcept;

    template <size_t w>
    void get_chunk(size_t ndx, int64_t res[8]) const noexcept;

    ref_type get_as_ref(size_t ndx) const noexcept;

    RefOrTagged get_as_ref_or_tagged(size_t ndx) const noexcept;
    void set(size_t ndx, RefOrTagged);
    void add(RefOrTagged);
    void ensure_minimum_width(RefOrTagged);

    int64_t front() const noexcept;
    int64_t back() const noexcept;
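
    // Illustrative sketch (not part of the original header): basic element
    // access on a hypothetical attached accessor 'arr'.
    //
    //   arr.add(7);                     // append; may widen the array
    //   arr.insert(0, -1);              // shift elements up and insert at 0
    //   arr.ensure_minimum_width(1000); // widen up front so that, if the
    //   arr.set(0, 1000);               // array is not read-only, this
    //                                   // set() is guaranteed not to throw
    //   int64_t v = arr.get(0);         // v == 1000
    //   int64_t last = arr.back();      // same as arr.get(arr.size() - 1)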

    void alloc(size_t init_size, size_t new_width)
    {
        REALM_ASSERT_3(m_width, ==, get_width_from_header(get_header()));
        REALM_ASSERT_3(m_size, ==, get_size_from_header(get_header()));
        Node::alloc(init_size, new_width);
        update_width_cache_from_header();
    }

    /// Remove the element at the specified index, and move elements at higher
    /// indexes to the next lower index.
    ///
    /// This function does **not** destroy removed subarrays. That is, if the
    /// erased element is a 'ref' pointing to a subarray, then that subarray
    /// will not be destroyed automatically.
    ///
    /// This function guarantees that no exceptions will be thrown if
    /// get_alloc().is_read_only(get_ref()) would return false before the
    /// call. This is automatically guaranteed if the array is used in a
    /// non-transactional context, or if the array has already been successfully
    /// modified within the current write transaction.
    void erase(size_t ndx);

    /// Same as erase(size_t), but remove all elements in the specified
    /// range.
    ///
    /// Please note that this function does **not** destroy removed subarrays.
    ///
    /// This function guarantees that no exceptions will be thrown if
    /// get_alloc().is_read_only(get_ref()) would return false before the call.
    void erase(size_t begin, size_t end);

    /// Reduce the size of this array to the specified number of elements. It is
    /// an error to specify a size that is greater than the current size of this
    /// array. The effect of doing so is undefined. This is just a shorthand for
    /// calling the ranged erase() function with appropriate arguments.
    ///
    /// Please note that this function does **not** destroy removed
    /// subarrays. See clear_and_destroy_children() for an alternative.
    ///
    /// This function guarantees that no exceptions will be thrown if
    /// get_alloc().is_read_only(get_ref()) would return false before the call.
    void truncate(size_t new_size);

    /// Reduce the size of this array to the specified number of elements. It is
    /// an error to specify a size that is greater than the current size of this
    /// array. The effect of doing so is undefined. Subarrays will be destroyed
    /// recursively, as if by a call to `destroy_deep(subarray_ref, alloc)`.
    ///
    /// This function is guaranteed not to throw if
    /// get_alloc().is_read_only(get_ref()) returns false.
    void truncate_and_destroy_children(size_t new_size);

    /// Remove every element from this array. This is just a shorthand for
    /// calling truncate(0).
    ///
    /// Please note that this function does **not** destroy removed
    /// subarrays. See clear_and_destroy_children() for an alternative.
    ///
    /// This function guarantees that no exceptions will be thrown if
    /// get_alloc().is_read_only(get_ref()) would return false before the call.
    void clear();

    /// Remove every element in this array. Subarrays will be destroyed
    /// recursively, as if by a call to `destroy_deep(subarray_ref,
    /// alloc)`. This is just a shorthand for calling
    /// truncate_and_destroy_children(0).
    ///
    /// This function guarantees that no exceptions will be thrown if
    /// get_alloc().is_read_only(get_ref()) would return false before the call.
    void clear_and_destroy_children();
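
    // Illustrative sketch (not part of the original header): the difference
    // between the two clearing variants for a hypothetical type_HasRefs
    // array 'parent'.
    //
    //   parent.clear();                      // drops the refs; the child
    //                                        // nodes themselves are not freed
    //   parent.clear_and_destroy_children(); // also destroys every child
    //                                        // node, recursively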

    /// If necessary, expand the representation so that it can store the
    /// specified value.
    void ensure_minimum_width(int_fast64_t value);

    /// Add \a diff to the element at the specified index.
    void adjust(size_t ndx, int_fast64_t diff);

    /// Add \a diff to all the elements in the specified index range.
    void adjust(size_t begin, size_t end, int_fast64_t diff);

    //@{
    /// This is similar in spirit to std::move() from `<algorithm>`.
    /// \a dest_begin must not be in the range [`begin`,`end`)
    ///
    /// This function is guaranteed to not throw if
    /// `get_alloc().is_read_only(get_ref())` returns false.
    void move(size_t begin, size_t end, size_t dest_begin);
    //@}

    // Move elements from ndx and above to another array
    void move(Array& dst, size_t ndx);

    //@{
    /// Find the lower/upper bound of the specified value in a sequence of
    /// integers which must already be sorted in ascending order.
    ///
    /// For an integer value '`v`', lower_bound_int(v) returns the index '`l`'
    /// of the first element such that `get(l) >= v`, and upper_bound_int(v)
    /// returns the index '`u`' of the first element such that `get(u) >
    /// v`. In both cases, if no such element is found, the returned value is
    /// the number of elements in the array.
    ///
    ///     3 3 3 4 4 4 5 6 7 9 9 9
    ///     ^     ^     ^     ^     ^
    ///     |     |     |     |     |
    ///     |     |     |     |      -- Lower and upper bound of 15
    ///     |     |     |     |
    ///     |     |     |      -- Lower and upper bound of 8
    ///     |     |     |
    ///     |     |      -- Upper bound of 4
    ///     |     |
    ///     |      -- Lower bound of 4
    ///     |
    ///      -- Lower and upper bound of 1
    ///
    /// These functions are similar to std::lower_bound() and
    /// std::upper_bound().
    ///
    /// We currently use binary search. See for example
    /// http://www.tbray.org/ongoing/When/200x/2003/03/22/Binary.
    ///
    /// FIXME: It may be worth considering if overall efficiency can be improved
    /// by doing a linear search for short sequences.
    size_t lower_bound_int(int64_t value) const noexcept;
    size_t upper_bound_int(int64_t value) const noexcept;
    //@}
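
    // Illustrative sketch (not part of the original header): using the sorted
    // contents from the diagram above (3 3 3 4 4 4 5 6 7 9 9 9) in a
    // hypothetical array 'arr':
    //
    //   arr.lower_bound_int(4);  // == 3,  index of the first element >= 4
    //   arr.upper_bound_int(4);  // == 6,  index of the first element > 4
    //   arr.lower_bound_int(15); // == 12, i.e. arr.size(); no such element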

    int64_t get_sum(size_t start = 0, size_t end = size_t(-1)) const
    {
        return sum(start, end);
    }

    /// This information is guaranteed to be cached in the array accessor.
    bool is_inner_bptree_node() const noexcept;

    /// Returns true if type is either type_HasRefs or type_InnerBptreeNode.
    ///
    /// This information is guaranteed to be cached in the array accessor.
    bool has_refs() const noexcept;
    void set_has_refs(bool) noexcept;

    /// This information is guaranteed to be cached in the array accessor.
    ///
    /// Columns and indexes can use the context bit to differentiate leaf types.
    bool get_context_flag() const noexcept;
    void set_context_flag(bool) noexcept;

    /// Recursively destroy children (as if calling
    /// clear_and_destroy_children()), then put this accessor into the detached
    /// state (as if calling detach()), then free the allocated memory. If this
    /// accessor is already in the detached state, this function has no effect
    /// (idempotency).
    void destroy_deep() noexcept;

    /// Shorthand for `destroy_deep(MemRef(ref, alloc), alloc)`.
    static void destroy_deep(ref_type ref, Allocator& alloc) noexcept;

    /// Destroy the specified array node and all of its children, recursively.
    ///
    /// This is done by freeing the specified array node after calling
    /// destroy_deep() for every contained 'ref' element.
    static void destroy_deep(MemRef, Allocator&) noexcept;

    // Clone deep
    static MemRef clone(MemRef, Allocator& from_alloc, Allocator& target_alloc);

    // Serialization

    /// Returns the ref (position in the target stream) of the written copy of
    /// this array, or the ref of the original array if \a only_if_modified is
    /// true, and this array is unmodified (Alloc::is_read_only()).
    ///
    /// The number of bytes that will be written by a non-recursive invocation
    /// of this function is exactly the number returned by get_byte_size().
    ///
    /// \param out The destination stream (writer).
    ///
    /// \param deep If true, recursively write out subarrays, but still subject
    /// to \a only_if_modified.
    ///
    /// \param only_if_modified Set to `false` to always write, or to `true` to
    /// only write the array if it has been modified.
    ref_type write(_impl::ArrayWriterBase& out, bool deep, bool only_if_modified) const;

    /// Same as non-static write() with `deep` set to true. This is for the
    /// cases where you do not already have an array accessor available.
    static ref_type write(ref_type, Allocator&, _impl::ArrayWriterBase&, bool only_if_modified);

    size_t find_first(int64_t value, size_t begin = 0, size_t end = size_t(-1)) const;

    // Wrappers for backwards compatibility and for simple use without
    // setting up state initialization etc.
    template <class cond>
    size_t find_first(int64_t value, size_t start = 0, size_t end = size_t(-1)) const
    {
        REALM_ASSERT(start <= m_size && (end <= m_size || end == size_t(-1)) && start <= end);
        // todo, would be nice to avoid this in order to speed up find_first loops
        QueryStateFindFirst state;
        Finder finder = m_vtable->finder[cond::condition];
        (this->*finder)(value, start, end, 0, &state);
        return static_cast<size_t>(state.m_state);
    }
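
    // Illustrative sketch (not part of the original header): the templated
    // wrapper takes a condition class such as realm::Equal or realm::Greater
    // (declared elsewhere, in <realm/query_conditions.hpp>; assumed here).
    //
    //   size_t i = arr.find_first<Equal>(42);      // first element == 42
    //   size_t j = arr.find_first<Greater>(0, 5);  // first element > 0 at
    //                                              // index 5 or above
    //   // both return realm::not_found if there is no match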

    /// Get the specified element without the cost of constructing an
    /// array instance. If an array instance is already available, or
    /// you need to get multiple values, then this method will be
    /// slower.
    static int_fast64_t get(const char* header, size_t ndx) noexcept;

    /// Like get(const char*, size_t) but gets two consecutive
    /// elements.
    static std::pair<int64_t, int64_t> get_two(const char* header, size_t ndx) noexcept;

    static RefOrTagged get_as_ref_or_tagged(const char* header, size_t ndx) noexcept
    {
        return get(header, ndx);
    }
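
    // Illustrative sketch (not part of the original header): reading values
    // directly from a translated ref without attaching an accessor; 'alloc'
    // and 'ref' are hypothetical.
    //
    //   const char* header = alloc.translate(ref);
    //   int_fast64_t first = Array::get(header, 0);
    //   std::pair<int64_t, int64_t> two = Array::get_two(header, 0);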

    /// Get the number of bytes currently in use by this array. This
    /// includes the array header, but it does not include allocated
    /// bytes corresponding to excess capacity. The result is
    /// guaranteed to be a multiple of 8 (i.e., 64-bit aligned).
    ///
    /// This number is exactly the number of bytes that will be
    /// written by a non-recursive invocation of write().
    size_t get_byte_size() const noexcept;

    /// Get the maximum number of bytes that can be written by a
    /// non-recursive invocation of write() on an array with the
    /// specified number of elements, that is, the maximum value that
    /// can be returned by get_byte_size().
    static size_t get_max_byte_size(size_t num_elems) noexcept;
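
    // Illustrative arithmetic (not part of the original header): per the
    // inline definition later in this file, the bound is the header size
    // plus 8 bytes (the widest possible element) per element.
    //
    //   Array::get_max_byte_size(100); // == header_size + 100 * 8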

    /// FIXME: Belongs in IntegerArray
    static size_t calc_aligned_byte_size(size_t size, int width);

#ifdef REALM_DEBUG
    class MemUsageHandler {
    public:
        virtual void handle(ref_type ref, size_t allocated, size_t used) = 0;
    };

    void report_memory_usage(MemUsageHandler&) const;
    void stats(MemStats& stats_dest) const noexcept;
#endif

    void verify() const;

    Array& operator=(const Array&) = delete; // not allowed
    Array(const Array&) = delete;            // not allowed

protected:
    // This returns the minimum value ("lower bound") of the representable values
    // for the given bit width. Valid widths are 0, 1, 2, 4, 8, 16, 32, and 64.
    static constexpr int_fast64_t lbound_for_width(size_t width) noexcept;

    // This returns the maximum value ("inclusive upper bound") of the representable values
    // for the given bit width. Valid widths are 0, 1, 2, 4, 8, 16, 32, and 64.
    static constexpr int_fast64_t ubound_for_width(size_t width) noexcept;

    // This will have to be eventually used, exposing this here for testing.
    size_t count(int64_t value) const noexcept;

private:
    void update_width_cache_from_header() noexcept;

    void do_ensure_minimum_width(int_fast64_t);

    int64_t sum(size_t start, size_t end) const;

    template <size_t w>
    int64_t sum(size_t start, size_t end) const;

protected:
    /// It is an error to specify a non-zero value unless the width
    /// type is wtype_Bits. It is also an error to specify a non-zero
    /// size if the width type is wtype_Ignore.
    static MemRef create(Type, bool context_flag, WidthType, size_t size, int_fast64_t value, Allocator&);

    // Overriding method in ArrayParent
    void update_child_ref(size_t, ref_type) override;

    // Overriding method in ArrayParent
    ref_type get_child_ref(size_t) const noexcept override;

    void destroy_children(size_t offset = 0) noexcept;

protected:
    // Getters and Setters for adaptive-packed arrays
    typedef int64_t (Array::*Getter)(size_t) const; // Note: getters must not throw
    typedef void (Array::*Setter)(size_t, int64_t);
    typedef bool (Array::*Finder)(int64_t, size_t, size_t, size_t, QueryStateBase*) const;
    typedef void (Array::*ChunkGetter)(size_t, int64_t res[8]) const; // Note: getters must not throw

    struct VTable {
        Getter getter;
        ChunkGetter chunk_getter;
        Setter setter;
        Finder finder[cond_VTABLE_FINDER_COUNT]; // one for each active function pointer
    };
    template <size_t w>
    struct VTableForWidth;

    // This is the one installed into the m_vtable->finder slots.
    template <class cond, size_t bitwidth>
    bool find_vtable(int64_t value, size_t start, size_t end, size_t baseindex, QueryStateBase* state) const;

    template <size_t w>
    int64_t get_universal(const char* const data, const size_t ndx) const;

protected:
    /// Takes a 64-bit value and returns the minimum number of bits needed
    /// to fit the value. For alignment this is rounded up to nearest
    /// log2. Possible results: {0, 1, 2, 4, 8, 16, 32, 64}
    static size_t bit_width(int64_t value);
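
    // Illustrative sketch (not part of the original header): expected results
    // for the widths listed above. Widths below 8 can only hold non-negative
    // values (see lbound_for_width()), so any negative value needs at least
    // 8 bits.
    //
    //   bit_width(0);   // == 0
    //   bit_width(1);   // == 1
    //   bit_width(3);   // == 2
    //   bit_width(100); // == 8  (exceeds the 4-bit range 0..15)
    //   bit_width(-1);  // == 8  (negative, so a signed width is required)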

protected:
    Getter m_getter = nullptr; // cached to avoid indirection
    const VTable* m_vtable = nullptr;

    uint_least8_t m_width = 0;   // Size of an element (meaning depends on type of array).
    int64_t m_lbound;            // min number that can be stored with current m_width
    int64_t m_ubound;            // max number that can be stored with current m_width

    bool m_is_inner_bptree_node; // This array is an inner node of B+-tree.
    bool m_has_refs;             // Elements whose first bit is zero are refs to subarrays.
    bool m_context_flag;         // Meaning depends on context.

private:
    ref_type do_write_shallow(_impl::ArrayWriterBase&) const;
    ref_type do_write_deep(_impl::ArrayWriterBase&, bool only_if_modified) const;

#ifdef REALM_DEBUG
    void report_memory_usage_2(MemUsageHandler&) const;
#endif

    friend class Allocator;
    friend class SlabAlloc;
    friend class GroupWriter;
    friend class ArrayWithFind;
};

// Implementation:

constexpr inline int_fast64_t Array::lbound_for_width(size_t width) noexcept
{
    if (width == 32) {
        return -0x80000000LL;
    }
    else if (width == 16) {
        return -0x8000LL;
    }
    else if (width < 8) {
        return 0;
    }
    else if (width == 8) {
        return -0x80LL;
    }
    else if (width == 64) {
        return -0x8000000000000000LL;
    }
    else {
        REALM_UNREACHABLE();
    }
}

constexpr inline int_fast64_t Array::ubound_for_width(size_t width) noexcept
{
    if (width == 32) {
        return 0x7FFFFFFFLL;
    }
    else if (width == 16) {
        return 0x7FFFLL;
    }
    else if (width == 0) {
        return 0;
    }
    else if (width == 1) {
        return 1;
    }
    else if (width == 2) {
        return 3;
    }
    else if (width == 4) {
        return 15;
    }
    else if (width == 8) {
        return 0x7FLL;
    }
    else if (width == 64) {
        return 0x7FFFFFFFFFFFFFFFLL;
    }
    else {
        REALM_UNREACHABLE();
    }
}

inline bool RefOrTagged::is_ref() const noexcept
{
    return (m_value & 1) == 0;
}

inline bool RefOrTagged::is_tagged() const noexcept
{
    return !is_ref();
}

inline ref_type RefOrTagged::get_as_ref() const noexcept
{
    // to_ref() is defined in <alloc.hpp>
    return to_ref(m_value);
}

inline uint_fast64_t RefOrTagged::get_as_int() const noexcept
{
    // The bitwise AND is there in case uint_fast64_t is wider than 64 bits.
    return (uint_fast64_t(m_value) & 0xFFFFFFFFFFFFFFFFULL) >> 1;
}

inline RefOrTagged RefOrTagged::make_ref(ref_type ref) noexcept
{
    // from_ref() is defined in <alloc.hpp>
    int_fast64_t value = from_ref(ref);
    return RefOrTagged(value);
}

inline RefOrTagged RefOrTagged::make_tagged(uint_fast64_t i) noexcept
{
    REALM_ASSERT(i < (1ULL << 63));
    return RefOrTagged((i << 1) | 1);
}

inline RefOrTagged::RefOrTagged(int_fast64_t value) noexcept
    : m_value(value)
{
}

inline void Array::create(Type type, bool context_flag, size_t length, int_fast64_t value)
{
    MemRef mem = create_array(type, context_flag, length, value, m_alloc); // Throws
    init_from_mem(mem);
}

inline Array::Type Array::get_type() const noexcept
{
    if (m_is_inner_bptree_node) {
        REALM_ASSERT_DEBUG(m_has_refs);
        return type_InnerBptreeNode;
    }
    if (m_has_refs)
        return type_HasRefs;
    return type_Normal;
}

inline void Array::get_chunk(size_t ndx, int64_t res[8]) const noexcept
{
    REALM_ASSERT_DEBUG(ndx < m_size);
    (this->*(m_vtable->chunk_getter))(ndx, res);
}

template <size_t w>
int64_t Array::get_universal(const char* data, size_t ndx) const
{
    if (w == 0) {
        return 0;
    }
    else if (w == 1) {
        size_t offset = ndx >> 3;
        return (data[offset] >> (ndx & 7)) & 0x01;
    }
    else if (w == 2) {
        size_t offset = ndx >> 2;
        return (data[offset] >> ((ndx & 3) << 1)) & 0x03;
    }
    else if (w == 4) {
        size_t offset = ndx >> 1;
        return (data[offset] >> ((ndx & 1) << 2)) & 0x0F;
    }
    else if (w == 8) {
        return *reinterpret_cast<const signed char*>(data + ndx);
    }
    else if (w == 16) {
        size_t offset = ndx * 2;
        return *reinterpret_cast<const int16_t*>(data + offset);
    }
    else if (w == 32) {
        size_t offset = ndx * 4;
        return *reinterpret_cast<const int32_t*>(data + offset);
    }
    else if (w == 64) {
        size_t offset = ndx * 8;
        return *reinterpret_cast<const int64_t*>(data + offset);
    }
    else {
        REALM_ASSERT_DEBUG(false);
        return int64_t(-1);
    }
}
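
// Illustrative sketch (not part of the original header): how the sub-byte
// widths above unpack. With w == 2, each byte holds four 2-bit elements,
// least significant bits first, so for a data byte 0xE4 (binary 11 10 01 00):
//
//   get_universal<2>(data, 0) == 0  // bits 1..0
//   get_universal<2>(data, 1) == 1  // bits 3..2
//   get_universal<2>(data, 2) == 2  // bits 5..4
//   get_universal<2>(data, 3) == 3  // bits 7..6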

template <size_t w>
int64_t Array::get(size_t ndx) const noexcept
{
    return get_universal<w>(m_data, ndx);
}

inline int64_t Array::get(size_t ndx) const noexcept
{
    REALM_ASSERT_DEBUG(is_attached());
    REALM_ASSERT_DEBUG_EX(ndx < m_size, ndx, m_size);
    return (this->*m_getter)(ndx);

    // Two ideas that are not efficient but may be worth looking into again:
    /*
    // Assume correct width is found early in REALM_TEMPEX, which is the case for B tree offsets that
    // are probably either 2^16 long. Turns out to be 25% faster if found immediately, but 50-300% slower
    // if found later
    REALM_TEMPEX(return get, (ndx));
    */
    /*
    // Slightly slower in both of the if-cases. Also needs a matchcount/m_size check too, to avoid
    // reading beyond the array.
    if (m_width >= 8 && m_size > ndx + 7)
        return get<64>(ndx >> m_shift) & m_widthmask;
    else
        return (this->*(m_vtable->getter))(ndx);
    */
}

inline int64_t Array::front() const noexcept
{
    return get(0);
}

inline int64_t Array::back() const noexcept
{
    return get(m_size - 1);
}

inline ref_type Array::get_as_ref(size_t ndx) const noexcept
{
    REALM_ASSERT_DEBUG(is_attached());
    REALM_ASSERT_DEBUG_EX(m_has_refs, m_ref, ndx, m_size);
    int64_t v = get(ndx);
    return to_ref(v);
}

inline RefOrTagged Array::get_as_ref_or_tagged(size_t ndx) const noexcept
{
    REALM_ASSERT(has_refs());
    return RefOrTagged(get(ndx));
}

inline void Array::set(size_t ndx, RefOrTagged ref_or_tagged)
{
    REALM_ASSERT(has_refs());
    set(ndx, ref_or_tagged.m_value); // Throws
}

inline void Array::add(RefOrTagged ref_or_tagged)
{
    REALM_ASSERT(has_refs());
    add(ref_or_tagged.m_value); // Throws
}

inline void Array::ensure_minimum_width(RefOrTagged ref_or_tagged)
{
    REALM_ASSERT(has_refs());
    ensure_minimum_width(ref_or_tagged.m_value); // Throws
}

inline bool Array::is_inner_bptree_node() const noexcept
{
    return m_is_inner_bptree_node;
}

inline bool Array::has_refs() const noexcept
{
    return m_has_refs;
}

inline void Array::set_has_refs(bool value) noexcept
{
    if (m_has_refs != value) {
        REALM_ASSERT(!is_read_only());
        m_has_refs = value;
        set_hasrefs_in_header(value, get_header());
    }
}

inline bool Array::get_context_flag() const noexcept
{
    return m_context_flag;
}

inline void Array::set_context_flag(bool value) noexcept
{
    if (m_context_flag != value) {
        copy_on_write();
        m_context_flag = value;
        set_context_flag_in_header(value, get_header());
    }
}

inline void Array::destroy_deep() noexcept
{
    if (!is_attached())
        return;

    if (m_has_refs)
        destroy_children();

    char* header = get_header_from_data(m_data);
    m_alloc.free_(m_ref, header);
    m_data = nullptr;
}

inline ref_type Array::write(_impl::ArrayWriterBase& out, bool deep, bool only_if_modified) const
{
    REALM_ASSERT(is_attached());

    if (only_if_modified && m_alloc.is_read_only(m_ref))
        return m_ref;

    if (!deep || !m_has_refs)
        return do_write_shallow(out); // Throws

    return do_write_deep(out, only_if_modified); // Throws
}

inline ref_type Array::write(ref_type ref, Allocator& alloc, _impl::ArrayWriterBase& out, bool only_if_modified)
{
    if (only_if_modified && alloc.is_read_only(ref))
        return ref;

    Array array(alloc);
    array.init_from_ref(ref);

    if (!array.m_has_refs)
        return array.do_write_shallow(out); // Throws

    return array.do_write_deep(out, only_if_modified); // Throws
}

inline void Array::add(int_fast64_t value)
{
    insert(m_size, value);
}

inline void Array::erase(size_t ndx)
{
    // This can throw, but only if array is currently in read-only
    // memory.
    move(ndx + 1, size(), ndx);

    // Update size (also in header)
    --m_size;
    set_header_size(m_size);
}

inline void Array::erase(size_t begin, size_t end)
{
    if (begin != end) {
        // This can throw, but only if array is currently in read-only memory.
        move(end, size(), begin); // Throws

        // Update size (also in header)
        m_size -= end - begin;
        set_header_size(m_size);
    }
}

inline void Array::clear()
{
    truncate(0); // Throws
}

inline void Array::clear_and_destroy_children()
{
    truncate_and_destroy_children(0);
}

inline void Array::destroy_deep(ref_type ref, Allocator& alloc) noexcept
{
    destroy_deep(MemRef(ref, alloc), alloc);
}

inline void Array::destroy_deep(MemRef mem, Allocator& alloc) noexcept
{
    if (!get_hasrefs_from_header(mem.get_addr())) {
        alloc.free_(mem);
        return;
    }
    Array array(alloc);
    array.init_from_mem(mem);
    array.destroy_deep();
}

inline void Array::adjust(size_t ndx, int_fast64_t diff)
{
    REALM_ASSERT_3(ndx, <=, m_size);
    if (diff != 0) {
        // FIXME: Should be optimized
        int_fast64_t v = get(ndx);
        set(ndx, int64_t(v + diff)); // Throws
    }
}

inline void Array::adjust(size_t begin, size_t end, int_fast64_t diff)
{
    if (diff != 0) {
        // FIXME: Should be optimized
        for (size_t i = begin; i != end; ++i)
            adjust(i, diff); // Throws
    }
}

//-------------------------------------------------

inline size_t Array::get_byte_size() const noexcept
{
    const char* header = get_header_from_data(m_data);
    WidthType wtype = Node::get_wtype_from_header(header);
    size_t num_bytes = NodeHeader::calc_byte_size(wtype, m_size, m_width);

    REALM_ASSERT_7(m_alloc.is_read_only(m_ref), ==, true, ||, num_bytes, <=, get_capacity_from_header(header));

    return num_bytes;
}

//-------------------------------------------------

inline MemRef Array::create_empty_array(Type type, bool context_flag, Allocator& alloc)
{
    size_t size = 0;
    int_fast64_t value = 0;
    return create_array(type, context_flag, size, value, alloc); // Throws
}

inline MemRef Array::create_array(Type type, bool context_flag, size_t size, int_fast64_t value, Allocator& alloc)
{
    return create(type, context_flag, wtype_Bits, size, value, alloc); // Throws
}

inline size_t Array::get_max_byte_size(size_t num_elems) noexcept
{
    int max_bytes_per_elem = 8;
    return header_size + num_elems * max_bytes_per_elem;
}

inline void Array::update_child_ref(size_t child_ndx, ref_type new_ref)
{
    set(child_ndx, new_ref);
}

inline ref_type Array::get_child_ref(size_t child_ndx) const noexcept
{
    return get_as_ref(child_ndx);
}

inline void Array::ensure_minimum_width(int_fast64_t value)
{
    if (value >= m_lbound && value <= m_ubound)
        return;
    do_ensure_minimum_width(value);
}

} // namespace realm

#endif // REALM_ARRAY_HPP