/*************************************************************************
 *
 * Copyright 2016 Realm Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 **************************************************************************/
#ifndef REALM_ALLOC_HPP
#define REALM_ALLOC_HPP

#include <cstdint>
#include <cstddef>
#include <atomic>

#include <realm/util/features.h>
#include <realm/util/terminate.hpp>
#include <realm/util/assert.hpp>
#include <realm/util/file.hpp>
#include <realm/exceptions.hpp>
#include <realm/util/safe_int_ops.hpp>
#include <realm/node_header.hpp>
#include <realm/util/file_mapper.hpp>

// Temporary workaround for
// https://developercommunity.visualstudio.com/content/problem/994075/64-bit-atomic-load-ices-cl-1924-with-o2-ob1.html
#if defined REALM_ARCHITECTURE_X86_32 && defined REALM_WINDOWS
#define REALM_WORKAROUND_MSVC_BUG REALM_NOINLINE
#else
#define REALM_WORKAROUND_MSVC_BUG
#endif

namespace realm {

class Allocator;

using ref_type = size_t;

int_fast64_t from_ref(ref_type) noexcept;
ref_type to_ref(int_fast64_t) noexcept;
int64_t to_int64(size_t value) noexcept;


class MemRef {
public:
    MemRef() noexcept;
    ~MemRef() noexcept;

    MemRef(char* addr, ref_type ref, Allocator& alloc) noexcept;
    MemRef(ref_type ref, Allocator& alloc) noexcept;

    char* get_addr() const;
    ref_type get_ref() const;
    void set_ref(ref_type ref);
    void set_addr(char* addr);

private:
    char* m_addr;
    ref_type m_ref;
#if REALM_ENABLE_MEMDEBUG
    // Allocator that created m_ref. Used to verify that the ref is still valid
    // whenever get_ref()/get_addr() is called, e.g. that it has not been freed.
    const Allocator* m_alloc = nullptr;
#endif
};

/// The common interface for Realm allocators.
///
/// A Realm allocator must associate a 'ref' with each allocated
/// object and be able to efficiently map any 'ref' to the
/// corresponding memory address. The 'ref' is an integer and must
/// always be divisible by 8. A value of zero is used to indicate a
/// null reference, and must therefore never be returned by
/// Allocator::alloc().
///
/// The purpose of the 'refs' is to decouple the memory reference from
/// the actual address, thereby allowing objects to be relocated in
/// memory without having to modify stored references.
///
/// \sa SlabAlloc
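///
/// A minimal usage sketch (illustrative only; error handling omitted):
///
///     Allocator& alloc = Allocator::get_default();
///     MemRef mem = alloc.alloc(64);        // size: non-zero multiple of 8
///     ref_type ref = mem.get_ref();        // stable handle, divisible by 8
///     char* addr = alloc.translate(ref);   // same address as mem.get_addr()
///     alloc.free_(mem);                    // release the chunk again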
class Allocator {
public:
    /// The specified size must be divisible by 8, and must not be
    /// zero.
    ///
    /// \throw std::bad_alloc If insufficient memory was available.
    MemRef alloc(size_t size);

    /// Calls do_realloc().
    ///
    /// Note: The underscore has been added because the name `realloc`
    /// would conflict with a macro on the Windows platform.
    MemRef realloc_(ref_type, const char* addr, size_t old_size, size_t new_size);

    /// Calls do_free().
    ///
    /// Note: The underscore has been added because the name `free`
    /// would conflict with a macro on the Windows platform.
    void free_(ref_type, const char* addr) noexcept;

    /// Shorthand for free_(mem.get_ref(), mem.get_addr()).
    void free_(MemRef mem) noexcept;

    /// Calls do_translate().
    char* translate(ref_type ref) const noexcept;

    /// Returns true if, and only if, the object at the specified 'ref'
    /// is in the immutable part of the memory managed by this
    /// allocator. The method by which some objects become part of the
    /// immutable part is entirely up to the class that implements
    /// this interface.
    bool is_read_only(ref_type) const noexcept;

    void set_read_only(bool ro)
    {
        m_is_read_only = ro;
    }

    /// Returns a simple allocator that can be used with free-standing
    /// Realm objects (such as a free-standing table). A
    /// free-standing object is one that is not part of a Group, and
    /// therefore, is not part of an actual database.
    static Allocator& get_default() noexcept;

    virtual ~Allocator() noexcept;

    // Disable copying. Copying an allocator can produce double frees.
    Allocator(const Allocator&) = delete;
    Allocator& operator=(const Allocator&) = delete;

    virtual void verify() const = 0;

#ifdef REALM_DEBUG
    /// Terminate the program precisely when the specified 'ref' is
    /// freed (or reallocated). You can use this to detect whether the
    /// ref is freed (or reallocated), and even to get a stacktrace at
    /// the point where it happens. Call watch(0) to stop watching
    /// that ref.
    void watch(ref_type ref)
    {
        m_debug_watch = ref;
    }
#endif
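
    // Debugging sketch (illustrative, REALM_DEBUG builds only): arm the watch
    // with the suspect ref and run under a debugger; the process terminates
    // inside the exact free_()/realloc_() call that touches it:
    //
    //     alloc.watch(suspect_ref);  // watch(0) disarms again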

    struct MappedFile;

    static constexpr size_t section_size() noexcept
    {
        return 1 << section_shift;
    }

protected:
    constexpr static int section_shift = 26;

    std::atomic<size_t> m_baseline; // Separation line between immutable and mutable refs.

    ref_type m_debug_watch = 0;

    // The following logically belongs in the slab allocator, but is placed
    // here to optimize a critical path:
    // The ref translation splits the full ref-space (both below and above baseline)
    // into equal chunks.
    struct RefTranslation {
        char* mapping_addr;
        uint64_t cookie;
        std::atomic<size_t> lowest_possible_xover_offset = 0;

        // Member 'xover_mapping_addr' is used for memory synchronization of the fields
        // 'xover_mapping_base' and 'xover_encrypted_mapping'. It also imposes an ordering
        // on 'lowest_possible_xover_offset' such that once a non-null value of 'xover_mapping_addr'
        // has been acquired, 'lowest_possible_xover_offset' will never change.
        std::atomic<char*> xover_mapping_addr = nullptr;
        size_t xover_mapping_base = 0;
#if REALM_ENABLE_ENCRYPTION
        util::EncryptedFileMapping* encrypted_mapping = nullptr;
        util::EncryptedFileMapping* xover_encrypted_mapping = nullptr;
#endif

        explicit RefTranslation(char* addr)
            : mapping_addr(addr)
            , cookie(0x1234567890)
        {
        }

        RefTranslation()
            : RefTranslation(nullptr)
        {
        }

        ~RefTranslation()
        {
            cookie = 0xdeadbeefdeadbeef;
        }

        RefTranslation& operator=(const RefTranslation& from)
        {
            if (&from != this) {
                mapping_addr = from.mapping_addr;
#if REALM_ENABLE_ENCRYPTION
                encrypted_mapping = from.encrypted_mapping;
#endif
                const auto local_xover_mapping_addr = from.xover_mapping_addr.load(std::memory_order_acquire);
                // This must be loaded after xover_mapping_addr to ensure it isn't stale.
                lowest_possible_xover_offset.store(from.lowest_possible_xover_offset, std::memory_order_relaxed);
                if (local_xover_mapping_addr) {
                    xover_mapping_base = from.xover_mapping_base;
#if REALM_ENABLE_ENCRYPTION
                    xover_encrypted_mapping = from.xover_encrypted_mapping;
#endif
                    xover_mapping_addr.store(local_xover_mapping_addr, std::memory_order_release);
                }
            }
            return *this;
        }
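
        // Note on the copy above (explanatory, not normative): the acquire load
        // of 'from.xover_mapping_addr' pairs with the release store into this
        // object's 'xover_mapping_addr', so a reader that later acquires a
        // non-null 'xover_mapping_addr' also observes consistent values of
        // 'xover_mapping_base' and 'xover_encrypted_mapping'.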
    };

    // This pointer may be changed concurrently with access, so make sure it is
    // atomic!
    std::atomic<RefTranslation*> m_ref_translation_ptr;

    /// The specified size must be divisible by 8, and must not be
    /// zero.
    ///
    /// \throw std::bad_alloc If insufficient memory was available.
    virtual MemRef do_alloc(const size_t size) = 0;

    /// The specified size must be divisible by 8, and must not be
    /// zero.
    ///
    /// The default version of this function simply allocates a new
    /// chunk of memory, copies over the old contents, and then frees
    /// the old chunk.
    ///
    /// \throw std::bad_alloc If insufficient memory was available.
    virtual MemRef do_realloc(ref_type, char* addr, size_t old_size, size_t new_size) = 0;

    /// Release the specified chunk of memory.
    virtual void do_free(ref_type, char* addr) = 0;

    /// Map the specified \a ref to the corresponding memory
    /// address. Note that if is_read_only(ref) returns true, then the
    /// referenced object is to be considered immutable, and it is
    /// then entirely the responsibility of the caller that the memory
    /// is not modified by way of the returned memory pointer.
    virtual char* do_translate(ref_type ref) const noexcept = 0;

    char* translate_critical(RefTranslation*, ref_type ref) const noexcept;
    char* translate_less_critical(RefTranslation*, ref_type ref) const noexcept;
    virtual void get_or_add_xover_mapping(RefTranslation&, size_t, size_t, size_t) = 0;
    Allocator() noexcept;
    size_t get_section_index(size_t pos) const noexcept;
    inline size_t get_section_base(size_t index) const noexcept;

    // The following counters are used to ensure accessor refresh,
    // and allow us to report many errors related to attempts to
    // access data which is no longer current.
    //
    // * storage_versioning: monotonically increasing counter
    //   bumped whenever the underlying storage layout is changed,
    //   or if the owning accessor has been detached.
    // * content_versioning: monotonically increasing counter
    //   bumped whenever the data is changed. Used to detect
    //   if queries are stale.
    // * instance_versioning: monotonically increasing counter
    //   used to detect if the allocator (and owning structure, e.g. Table)
    //   is recycled. Mismatch on this counter will cause accessors
    //   lower in the hierarchy to throw if access is attempted.
    std::atomic<uint_fast64_t> m_content_versioning_counter;
    std::atomic<uint_fast64_t> m_storage_versioning_counter;
    std::atomic<uint_fast64_t> m_instance_versioning_counter;

    inline uint_fast64_t get_storage_version(uint64_t instance_version)
    {
        if (instance_version != m_instance_versioning_counter) {
            throw LogicError(LogicError::detached_accessor);
        }
        return m_storage_versioning_counter.load(std::memory_order_acquire);
    }
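
    // Illustrative use (hypothetical accessor code, not part of this header):
    // an accessor caches the instance version when it binds to the allocator
    // and passes it back on each access, so a recycled allocator surfaces as
    // LogicError::detached_accessor instead of silently stale data:
    //
    //     uint_fast64_t bound_instance = alloc.get_instance_version();
    //     // ... later, on each access:
    //     auto storage_ver = alloc.get_storage_version(bound_instance);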

public:
    inline uint_fast64_t get_storage_version()
    {
        return m_storage_versioning_counter.load(std::memory_order_acquire);
    }

protected:
    inline void bump_storage_version() noexcept
    {
        m_storage_versioning_counter.fetch_add(1, std::memory_order_acq_rel);
    }

public:
    REALM_WORKAROUND_MSVC_BUG inline uint_fast64_t get_content_version() noexcept
    {
        return m_content_versioning_counter.load(std::memory_order_acquire);
    }

protected:
    inline uint_fast64_t bump_content_version() noexcept
    {
        return m_content_versioning_counter.fetch_add(1, std::memory_order_acq_rel) + 1;
    }

    REALM_WORKAROUND_MSVC_BUG inline uint_fast64_t get_instance_version() noexcept
    {
        return m_instance_versioning_counter.load(std::memory_order_relaxed);
    }

    inline void bump_instance_version() noexcept
    {
        m_instance_versioning_counter.fetch_add(1, std::memory_order_relaxed);
    }

private:
    bool m_is_read_only = false; // prevent any alloc or free operations

    friend class Table;
    friend class ClusterTree;
    friend class Group;
    friend class WrappedAllocator;
    friend class Obj;
    template <class>
    friend class CollectionBaseImpl;
    friend class Dictionary;
};


class WrappedAllocator : public Allocator {
public:
    WrappedAllocator(Allocator& underlying_allocator)
        : m_alloc(&underlying_allocator)
    {
        m_baseline.store(m_alloc->m_baseline, std::memory_order_relaxed);
        m_debug_watch = 0;
        m_ref_translation_ptr.store(m_alloc->m_ref_translation_ptr);
    }

    ~WrappedAllocator() {}

    void switch_underlying_allocator(Allocator& underlying_allocator)
    {
        m_alloc = &underlying_allocator;
        m_baseline.store(m_alloc->m_baseline, std::memory_order_relaxed);
        m_debug_watch = 0;
        refresh_ref_translation();
    }

    void update_from_underlying_allocator(bool writable)
    {
        switch_underlying_allocator(*m_alloc);
        set_read_only(!writable);
    }

    void refresh_ref_translation()
    {
        m_ref_translation_ptr.store(m_alloc->m_ref_translation_ptr);
    }

protected:
    void get_or_add_xover_mapping(RefTranslation& txl, size_t index, size_t offset, size_t size) override
    {
        m_alloc->get_or_add_xover_mapping(txl, index, offset, size);
    }

private:
    Allocator* m_alloc;

    MemRef do_alloc(const size_t size) override
    {
        auto result = m_alloc->do_alloc(size);
        bump_storage_version();
        m_baseline.store(m_alloc->m_baseline, std::memory_order_relaxed);
        m_ref_translation_ptr.store(m_alloc->m_ref_translation_ptr);
        return result;
    }

    virtual MemRef do_realloc(ref_type ref, char* addr, size_t old_size, size_t new_size) override
    {
        auto result = m_alloc->do_realloc(ref, addr, old_size, new_size);
        bump_storage_version();
        m_baseline.store(m_alloc->m_baseline, std::memory_order_relaxed);
        m_ref_translation_ptr.store(m_alloc->m_ref_translation_ptr);
        return result;
    }

    virtual void do_free(ref_type ref, char* addr) noexcept override
    {
        return m_alloc->do_free(ref, addr);
    }

    virtual char* do_translate(ref_type ref) const noexcept override
    {
        return m_alloc->translate(ref);
    }

    virtual void verify() const override
    {
        m_alloc->verify();
    }
};
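
// Usage sketch (illustrative, with hypothetical names): a WrappedAllocator
// gives long-lived objects one stable Allocator& while the allocator actually
// backing them changes, e.g. when new state requires a different mapping:
//
//     WrappedAllocator wrapped{initial_alloc};
//     // ... hand `wrapped` to objects that must outlive `initial_alloc` ...
//     wrapped.switch_underlying_allocator(new_alloc); // rebinds translation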


// Implementation:

inline int_fast64_t from_ref(ref_type v) noexcept
{
    // Check that v is divisible by 8 (64-bit aligned).
    REALM_ASSERT_DEBUG(v % 8 == 0);
    static_assert(std::is_same<ref_type, size_t>::value,
                  "If ref_type changes, from_ref and to_ref should probably be updated");

    // Make sure that we preserve the bit pattern of the ref_type (without sign extension).
    return int_fast64_t(uint_fast64_t(v));
}

inline ref_type to_ref(int_fast64_t v) noexcept
{
    // Check that v is divisible by 8 (64-bit aligned).
    REALM_ASSERT_DEBUG(v % 8 == 0);

    // C++11 standard, paragraph 4.7.2 [conv.integral]:
    // If the destination type is unsigned, the resulting value is the least unsigned integer congruent to the source
    // integer (modulo 2^n where n is the number of bits used to represent the unsigned type). [ Note: In a two's
    // complement representation, this conversion is conceptual and there is no change in the bit pattern (if there is
    // no truncation). - end note ]
    static_assert(std::is_unsigned<ref_type>::value,
                  "If ref_type changes, from_ref and to_ref should probably be updated");
    return ref_type(v);
}
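
// Round-trip sketch (illustrative): a ref travels through the file format as a
// signed 64-bit payload and comes back with its bit pattern intact:
//
//     ref_type ref = 4096;                   // divisible by 8
//     int_fast64_t payload = from_ref(ref);  // no sign extension
//     REALM_ASSERT(to_ref(payload) == ref);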

inline int64_t to_int64(size_t value) noexcept
{
    int64_t res = static_cast<int64_t>(value);
    REALM_ASSERT_DEBUG(res >= 0);
    return res;
}


inline MemRef::MemRef() noexcept
    : m_addr(nullptr)
    , m_ref(0)
{
}

inline MemRef::~MemRef() noexcept {}

inline MemRef::MemRef(char* addr, ref_type ref, Allocator& alloc) noexcept
    : m_addr(addr)
    , m_ref(ref)
{
    static_cast<void>(alloc);
#if REALM_ENABLE_MEMDEBUG
    m_alloc = &alloc;
#endif
}

inline MemRef::MemRef(ref_type ref, Allocator& alloc) noexcept
    : m_addr(alloc.translate(ref))
    , m_ref(ref)
{
    static_cast<void>(alloc);
#if REALM_ENABLE_MEMDEBUG
    m_alloc = &alloc;
#endif
}

inline char* MemRef::get_addr() const
{
#if REALM_ENABLE_MEMDEBUG
    // Asserts if the ref has been freed
    m_alloc->translate(m_ref);
#endif
    return m_addr;
}

inline ref_type MemRef::get_ref() const
{
#if REALM_ENABLE_MEMDEBUG
    // Asserts if the ref has been freed
    m_alloc->translate(m_ref);
#endif
    return m_ref;
}

inline void MemRef::set_ref(ref_type ref)
{
#if REALM_ENABLE_MEMDEBUG
    // Asserts if the ref has been freed
    m_alloc->translate(ref);
#endif
    m_ref = ref;
}

inline void MemRef::set_addr(char* addr)
{
    m_addr = addr;
}

inline MemRef Allocator::alloc(size_t size)
{
    if (m_is_read_only)
        throw realm::LogicError(realm::LogicError::wrong_transact_state);
    return do_alloc(size);
}

inline MemRef Allocator::realloc_(ref_type ref, const char* addr, size_t old_size, size_t new_size)
{
#ifdef REALM_DEBUG
    if (ref == m_debug_watch)
        REALM_TERMINATE("Allocator watch: Ref was reallocated");
#endif
    if (m_is_read_only)
        throw realm::LogicError(realm::LogicError::wrong_transact_state);
    return do_realloc(ref, const_cast<char*>(addr), old_size, new_size);
}

inline void Allocator::free_(ref_type ref, const char* addr) noexcept
{
#ifdef REALM_DEBUG
    if (ref == m_debug_watch)
        REALM_TERMINATE("Allocator watch: Ref was freed");
#endif
    REALM_ASSERT(!m_is_read_only);
    return do_free(ref, const_cast<char*>(addr));
}

inline void Allocator::free_(MemRef mem) noexcept
{
    free_(mem.get_ref(), mem.get_addr());
}

inline size_t Allocator::get_section_base(size_t index) const noexcept
{
    return index << section_shift; // 64MB chunks
}

inline size_t Allocator::get_section_index(size_t pos) const noexcept
{
    return pos >> section_shift; // 64MB chunks
}
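
// Worked example (illustrative): with section_shift == 26, each section spans
// 2^26 bytes = 64 MiB. For ref == 0x4000008:
//
//     get_section_index(0x4000008) == 1          // 0x4000008 >> 26
//     get_section_base(1)          == 0x4000000  // 1 << 26
//     // offset within the section: 0x4000008 - 0x4000000 == 8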

inline bool Allocator::is_read_only(ref_type ref) const noexcept
{
    REALM_ASSERT_DEBUG(ref != 0);
    // REALM_ASSERT_DEBUG(m_baseline != 0); // Attached SlabAlloc
    return ref < m_baseline.load(std::memory_order_relaxed);
}

inline Allocator::Allocator() noexcept
{
    m_content_versioning_counter = 0;
    m_storage_versioning_counter = 0;
    m_instance_versioning_counter = 0;
    m_ref_translation_ptr = nullptr;
}

inline Allocator::~Allocator() noexcept {}

// Performance-critical part of the translation process; less critical code is
// in translate_less_critical().
inline char* Allocator::translate_critical(RefTranslation* ref_translation_ptr, ref_type ref) const noexcept
{
    size_t idx = get_section_index(ref);
    RefTranslation& txl = ref_translation_ptr[idx];
    if (REALM_LIKELY(txl.cookie == 0x1234567890)) {
        size_t offset = ref - get_section_base(idx);
        size_t lowest_possible_xover_offset = txl.lowest_possible_xover_offset.load(std::memory_order_relaxed);
        if (REALM_LIKELY(offset < lowest_possible_xover_offset)) {
            // The lowest possible xover offset may grow concurrently, but that will not affect this code path.
            char* addr = txl.mapping_addr + offset;
#if REALM_ENABLE_ENCRYPTION
            realm::util::encryption_read_barrier(addr, NodeHeader::header_size, txl.encrypted_mapping,
                                                 NodeHeader::get_byte_size_from_header);
#endif
            return addr;
        }
        else {
            // The lowest possible xover offset may grow concurrently, but that will be handled inside the call.
            return translate_less_critical(ref_translation_ptr, ref);
        }
    }
    realm::util::terminate("Invalid ref translation entry", __FILE__, __LINE__, txl.cookie, 0x1234567890);
    return nullptr;
}

inline char* Allocator::translate(ref_type ref) const noexcept
{
    auto ref_translation_ptr = m_ref_translation_ptr.load(std::memory_order_acquire);
    if (REALM_LIKELY(ref_translation_ptr)) {
        return translate_critical(ref_translation_ptr, ref);
    }
    else {
        return do_translate(ref);
    }
}

} // namespace realm

#endif // REALM_ALLOC_HPP