/*************************************************************************
 *
 * Copyright 2016 Realm Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 **************************************************************************/

#ifndef REALM_UTIL_THREAD_HPP
#define REALM_UTIL_THREAD_HPP

#include <exception>

#ifdef _WIN32
#include <thread>
#include <condition_variable> // for windows non-interprocess condvars we use std::condition_variable
#include <Windows.h>
#include <process.h> // _getpid()
#else
#include <pthread.h>
#endif

// Uncomment the include below to enable a thread bug detection tool. Note: it will slow down program execution.
// #include <../test/pthread_test.hpp>

#include <cerrno>
#include <cstddef>
#include <string>

#include <realm/util/features.h>
#include <realm/util/assert.hpp>
#include <realm/util/terminate.hpp>
#include <memory>
#include <stdexcept>
#include <atomic>

namespace realm {
namespace util {

/// A separate thread of execution.
///
/// This class is a C++03 compatible reproduction of a subset of std::thread
/// from C++11 (when discounting Thread::start(), Thread::set_name(), and
/// Thread::get_name()).
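///
/// Usage sketch (illustrative only, not part of the original header;
/// `compute()` is a hypothetical function):
///
///     util::Thread worker([] {
///         util::Thread::set_name("worker"); // best effort; silently clamped or ignored
///         compute();
///     });
///     // ... do other work ...
///     worker.join(); // a joinable Thread must be joined before destruction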
class Thread {
public:
    Thread();
    ~Thread() noexcept;

    template <class F>
    explicit Thread(F func);

    // Disable copying. It is an error to copy this Thread class.
    Thread(const Thread&) = delete;
    Thread& operator=(const Thread&) = delete;

    Thread(Thread&&) noexcept;

    /// This method is an extension of the API provided by
    /// std::thread. This method exists because proper move semantics
    /// is unavailable in C++03. If move semantics had been available,
    /// calling `start(func)` would have been equivalent to `*this =
    /// Thread(func)`. Please see std::thread::operator=() for
    /// details.
    template <class F>
    void start(F func);

    bool joinable() noexcept;

    void join();

    // If supported by the platform, set the name of the calling thread (mainly
    // for debugging purposes). The name will be silently clamped to whatever
    // limit the platform places on these names. Linux places a limit of 15
    // characters for these names.
    static void set_name(const std::string&);

    // If supported by the platform, this function assigns the name of the
    // calling thread to \a name, and returns true, otherwise it does nothing
    // and returns false.
    static bool get_name(std::string& name) noexcept;

private:
#ifdef _WIN32
    std::thread m_std_thread;
#else
    pthread_t m_id;
#endif
    bool m_joinable;

    typedef void* (*entry_func_type)(void*);

    void start(entry_func_type, void* arg);

    template <class>
    static void* entry_point(void*) noexcept;

    REALM_NORETURN static void create_failed(int);
    REALM_NORETURN static void join_failed(int);
};


/// Low-level mutual exclusion device.
class Mutex {
public:
    Mutex();
    ~Mutex() noexcept;

    struct process_shared_tag {
    };

    /// Initialize this mutex for use across multiple processes. When
    /// constructed this way, the instance may be placed in memory
    /// shared by multiple processes, as well as in a memory mapped
    /// file. Such a mutex remains valid even after the constructing
    /// process terminates. Deleting the instance (freeing the memory
    /// or deleting the file) without first calling the destructor is
    /// legal and will not cause any system resources to be leaked.
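    ///
    /// Placement sketch (illustrative only; `map_shared_region()` is a
    /// hypothetical helper that returns suitably aligned memory mapped into
    /// all participating processes):
    ///
    ///     void* addr = map_shared_region(sizeof(util::Mutex));
    ///     util::Mutex* m = new (addr) util::Mutex(util::Mutex::process_shared_tag());
    ///     m->lock();
    ///     // ... update the shared state ...
    ///     m->unlock();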
    Mutex(process_shared_tag);

    // Disable copying.
    Mutex(const Mutex&) = delete;
    Mutex& operator=(const Mutex&) = delete;

    friend class LockGuard;
    friend class UniqueLock;
    friend class InterprocessCondVar;

    void lock() noexcept;
    bool try_lock() noexcept;
    void unlock() noexcept;

protected:
#ifdef _WIN32
    // Used for the non-process-shared mutex. We only know at runtime whether or not to use it, depending on
    // whether Mutex::Mutex(process_shared_tag) was called.
    CRITICAL_SECTION m_critical_section;
#else
    pthread_mutex_t m_impl = PTHREAD_MUTEX_INITIALIZER;
#endif

    struct no_init_tag {
    };
    Mutex(no_init_tag)
    {
    }

    void init_as_regular();
    void init_as_process_shared(bool robust_if_available);

    REALM_NORETURN static void init_failed(int);
    REALM_NORETURN static void attr_init_failed(int);
    REALM_NORETURN static void destroy_failed(int) noexcept;
    REALM_NORETURN static void lock_failed(int) noexcept;

private:
    friend class CondVar;
    friend class RobustMutex;
};


/// A simple mutex ownership wrapper.
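///
/// Scope-based locking sketch (illustrative only; `shared_counter` is a
/// hypothetical variable protected by `mutex`):
///
///     util::Mutex mutex;
///     {
///         util::LockGuard lock(mutex); // locks in the constructor
///         ++shared_counter;
///     } // unlocks in the destructor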
class LockGuard {
public:
    LockGuard(Mutex&) noexcept;
    ~LockGuard() noexcept;

private:
    Mutex& m_mutex;
    friend class CondVar;
};


/// See UniqueLock.
struct defer_lock_tag {
};


/// A general-purpose mutex ownership wrapper supporting deferred
/// locking as well as repeated unlocking and relocking.
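///
/// Deferred-locking sketch (illustrative only):
///
///     util::Mutex mutex;
///     util::UniqueLock lock(mutex, util::defer_lock_tag()); // not locked yet
///     lock.lock();
///     // ... critical section ...
///     lock.unlock();
///     // lock.lock() may be called again later; the destructor unlocks only
///     // if the lock is still held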
class UniqueLock {
public:
    UniqueLock(Mutex&) noexcept;
    UniqueLock(Mutex&, defer_lock_tag) noexcept;
    ~UniqueLock() noexcept;

    void lock() noexcept;
    void unlock() noexcept;
    bool holds_lock() noexcept;

private:
    Mutex* m_mutex;
    bool m_is_locked;
};


/// A robust version of a process-shared mutex.
///
/// A robust mutex is one that detects whether a thread (or process)
/// has died while holding a lock on the mutex.
///
/// When the present platform does not offer support for robust
/// mutexes, this mutex class behaves as a regular process-shared
/// mutex, which means that if a thread dies while holding a lock, any
/// future attempt at locking will block indefinitely.
class RobustMutex : private Mutex {
public:
    RobustMutex();
    ~RobustMutex() noexcept;

#ifdef REALM_HAVE_ROBUST_PTHREAD_MUTEX
    constexpr static bool is_robust_on_this_platform = true;
#else
    constexpr static bool is_robust_on_this_platform = false;
#endif

    class NotRecoverable;

    /// \param recover_func If the present platform does not support
    /// robust mutexes, this function is never called. Otherwise it is
    /// called if, and only if, a thread has died while holding a
    /// lock. The purpose of the function is to reestablish a
    /// consistent shared state. If it fails to do this by throwing an
    /// exception, the mutex enters the 'unrecoverable' state where
    /// any future attempt at locking it will fail and cause
    /// NotRecoverable to be thrown. This function is advised to throw
    /// NotRecoverable when it fails, but it may throw any exception.
    ///
    /// \throw NotRecoverable If thrown by the specified recover
    /// function, or if the mutex has entered the 'unrecoverable'
    /// state due to a different thread throwing from its recover
    /// function.
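    ///
    /// Usage sketch (illustrative only; `robust_mutex` is an instance of this
    /// class and `repair_shared_state()` is a hypothetical function that
    /// restores the invariants of the protected data):
    ///
    ///     robust_mutex.lock([] {
    ///         repair_shared_state(); // called only if a previous lock holder died
    ///     });
    ///     // ... critical section ...
    ///     robust_mutex.unlock();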
    template <class Func>
    void lock(Func recover_func);

    template <class Func>
    bool try_lock(Func recover_func);

    void unlock() noexcept;

    /// Low-level locking of a robust mutex.
    ///
    /// If the present platform does not support robust mutexes, this
    /// function always returns true. Otherwise it returns false if,
    /// and only if, a thread has died while holding a lock.
    ///
    /// \note Most applications should never call this function
    /// directly. It is called automatically when using the ordinary
    /// lock() function.
    ///
    /// \throw NotRecoverable If this mutex has entered the "not
    /// recoverable" state. It enters this state if
    /// mark_as_consistent() is not called between a call to
    /// robust_lock() that returns false and the corresponding call to
    /// unlock().
    bool low_level_lock();

    /// Low-level try-lock of a robust mutex.
    ///
    /// If the present platform does not support robust mutexes, this
    /// function always returns 0 or 1. Otherwise it returns -1 if,
    /// and only if, a thread has died while holding a lock.
    ///
    /// Returns 1 if the lock is successfully obtained.
    /// Returns 0 if the lock is held by somebody else (not obtained).
    /// Returns -1 if a thread has died while holding a lock.
    ///
    /// \note Most applications should never call this function
    /// directly. It is called automatically when using the ordinary
    /// lock() function.
    ///
    /// \throw NotRecoverable If this mutex has entered the "not
    /// recoverable" state. It enters this state if
    /// mark_as_consistent() is not called between a call to
    /// robust_lock() that returns false and the corresponding call to
    /// unlock().
    int try_low_level_lock();

    /// Pull this mutex out of the 'inconsistent' state.
    ///
    /// Must be called only after low_level_lock() has returned false.
    ///
    /// \note Most applications should never call this function
    /// directly. It is called automatically when using the ordinary
    /// lock() function.
    void mark_as_consistent() noexcept;

    /// Attempt to check if this mutex is a valid object.
    ///
    /// This attempts to trylock() the mutex and returns false if the return
    /// value indicates that the low-level mutex is invalid (which is distinct
    /// from 'inconsistent'). Although pthread_mutex_trylock() may return
    /// EINVAL if the argument is not an initialized mutex object, merely
    /// attempting to check if an arbitrary blob of memory is a mutex object
    /// may involve undefined behavior, so it is only safe to assume that this
    /// function will run correctly when it is known that the mutex object is
    /// valid.
    bool is_valid() noexcept;

    friend class CondVar;
};


class RobustMutex::NotRecoverable : public std::exception {
public:
    const char* what() const noexcept override
    {
        return "Failed to recover consistent state of shared memory";
    }
};


/// A simple robust mutex ownership wrapper.
class RobustLockGuard {
public:
    /// \param m the mutex to guard
    /// \param func See RobustMutex::lock().
    template <class TFunc>
    RobustLockGuard(RobustMutex& m, TFunc func);
    ~RobustLockGuard() noexcept;

private:
    RobustMutex& m_mutex;
    friend class CondVar;
};


/// Condition variable for use in synchronization monitors.
class CondVar {
public:
    CondVar();
    ~CondVar() noexcept;

    struct process_shared_tag {
    };

    /// Initialize this condition variable for use across multiple
    /// processes. When constructed this way, the instance may be
    /// placed in memory shared by multiple processes, as well as in
    /// a memory mapped file. Such a condition variable remains valid
    /// even after the constructing process terminates. Deleting the
    /// instance (freeing the memory or deleting the file) without
    /// first calling the destructor is legal and will not cause any
    /// system resources to be leaked.
    CondVar(process_shared_tag);

    /// Wait for another thread to call notify() or notify_all().
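    ///
    /// Monitor-pattern sketch (illustrative only; `mutex`, `cond`, and `ready`
    /// are hypothetical variables shared between the threads involved):
    ///
    ///     util::LockGuard lock(mutex);
    ///     while (!ready)
    ///         cond.wait(lock); // wake-ups may be spurious, hence the loop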
    void wait(LockGuard& l) noexcept;

    template <class Func>
    void wait(RobustMutex& m, Func recover_func, const struct timespec* tp = nullptr);

    /// If any threads are waiting for this condition, wake up at least
    /// one.
    void notify() noexcept;

    /// Wake up every thread that is currently waiting on this
    /// condition.
    void notify_all() noexcept;

private:
#ifdef _WIN32
    CONDITION_VARIABLE m_condvar = CONDITION_VARIABLE_INIT;
#else
    pthread_cond_t m_impl;
#endif

    REALM_NORETURN static void init_failed(int);
    REALM_NORETURN static void attr_init_failed(int);
    REALM_NORETURN static void destroy_failed(int) noexcept;
    void handle_wait_error(int error);
};


/// Detects concurrent entry into a section of code that is expected to be
/// entered by only one thread at a time. Entering while another thread is
/// inside throws std::runtime_error instead of blocking.
class RaceDetector {
    std::atomic<bool> busy;

public:
    RaceDetector()
    {
        busy.store(false);
    }

    void enter()
    {
        bool already_busy = busy.exchange(true, std::memory_order_acq_rel);
        if (already_busy)
            throw std::runtime_error("Race detected - critical section busy on entry");
    }

    void leave()
    {
        busy.store(false, std::memory_order_release);
    }

    friend class CriticalSection;
};


/// RAII helper that calls RaceDetector::enter() on construction and
/// RaceDetector::leave() on destruction.
class CriticalSection {
    RaceDetector& rd;

public:
    CriticalSection(RaceDetector& race)
        : rd(race)
    {
        rd.enter();
    }

    ~CriticalSection()
    {
        rd.leave();
    }
};
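
// Usage sketch for RaceDetector and CriticalSection (illustrative only;
// `single_writer_step()` is a hypothetical function that must never run
// concurrently with itself):
//
//     RaceDetector writer_guard; // shared between the threads involved
//     {
//         CriticalSection cs(writer_guard); // throws if another thread is already inside
//         single_writer_step();
//     } // leaves the guarded section in the destructor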

// Implementation:

inline Thread::Thread()
    : m_joinable(false)
{
}

template <class F>
inline Thread::Thread(F func)
    : m_joinable(true)
{
    std::unique_ptr<F> func2(new F(func));       // Throws
    start(&Thread::entry_point<F>, func2.get()); // Throws
    func2.release();
}

inline Thread::Thread(Thread&& thread) noexcept
{
#ifndef _WIN32
    m_id = thread.m_id;
    m_joinable = thread.m_joinable;
    thread.m_joinable = false;
#endif
}

template <class F>
inline void Thread::start(F func)
{
    if (m_joinable)
        std::terminate();
    std::unique_ptr<F> func2(new F(func));       // Throws
    start(&Thread::entry_point<F>, func2.get()); // Throws
    func2.release();
    m_joinable = true;
}

inline Thread::~Thread() noexcept
{
    if (m_joinable)
        REALM_TERMINATE("Destruction of joinable thread");
}

inline bool Thread::joinable() noexcept
{
    return m_joinable;
}

inline void Thread::start(entry_func_type entry_func, void* arg)
{
#ifdef _WIN32
    m_std_thread = std::thread(entry_func, arg);
#else
    const pthread_attr_t* attr = nullptr; // Use default thread attributes
    int r = pthread_create(&m_id, attr, entry_func, arg);
    if (REALM_UNLIKELY(r != 0))
        create_failed(r); // Throws
#endif
}

template <class F>
inline void* Thread::entry_point(void* cookie) noexcept
{
    std::unique_ptr<F> func(static_cast<F*>(cookie));
    try {
        (*func)();
    }
    catch (...) {
        std::terminate();
    }
    return 0;
}
inline Mutex::Mutex()
{
    init_as_regular();
}

inline Mutex::Mutex(process_shared_tag)
{
    bool robust_if_available = false;
    init_as_process_shared(robust_if_available);
}

inline Mutex::~Mutex() noexcept
{
#ifndef _WIN32
    int r = pthread_mutex_destroy(&m_impl);
    if (REALM_UNLIKELY(r != 0))
        destroy_failed(r);
#else
    DeleteCriticalSection(&m_critical_section);
#endif
}

inline void Mutex::init_as_regular()
{
#ifndef _WIN32
    int r = pthread_mutex_init(&m_impl, 0);
    if (REALM_UNLIKELY(r != 0))
        init_failed(r);
#else
    InitializeCriticalSection(&m_critical_section);
#endif
}

inline void Mutex::lock() noexcept
{
#ifdef _WIN32
    EnterCriticalSection(&m_critical_section);
#else
    int r = pthread_mutex_lock(&m_impl);
    if (REALM_LIKELY(r == 0))
        return;
    lock_failed(r);
#endif
}

inline bool Mutex::try_lock() noexcept
{
#ifdef _WIN32
    return TryEnterCriticalSection(&m_critical_section);
#else
    int r = pthread_mutex_trylock(&m_impl);
    if (r == EBUSY) {
        return false;
    }
    else if (r == 0) {
        return true;
    }
    lock_failed(r);
#endif
}

inline void Mutex::unlock() noexcept
{
#ifdef _WIN32
    LeaveCriticalSection(&m_critical_section);
#else
    int r = pthread_mutex_unlock(&m_impl);
    REALM_ASSERT(r == 0);
#endif
}

inline LockGuard::LockGuard(Mutex& m) noexcept
    : m_mutex(m)
{
    m_mutex.lock();
}

inline LockGuard::~LockGuard() noexcept
{
    m_mutex.unlock();
}

inline UniqueLock::UniqueLock(Mutex& m) noexcept
    : m_mutex(&m)
{
    m_mutex->lock();
    m_is_locked = true;
}

inline UniqueLock::UniqueLock(Mutex& m, defer_lock_tag) noexcept
    : m_mutex(&m)
{
    m_is_locked = false;
}

inline UniqueLock::~UniqueLock() noexcept
{
    if (m_is_locked)
        m_mutex->unlock();
}

inline bool UniqueLock::holds_lock() noexcept
{
    return m_is_locked;
}

inline void UniqueLock::lock() noexcept
{
    m_mutex->lock();
    m_is_locked = true;
}

inline void UniqueLock::unlock() noexcept
{
    m_mutex->unlock();
    m_is_locked = false;
}
template <typename TFunc>
inline RobustLockGuard::RobustLockGuard(RobustMutex& m, TFunc func)
    : m_mutex(m)
{
    m_mutex.lock(func);
}

inline RobustLockGuard::~RobustLockGuard() noexcept
{
    m_mutex.unlock();
}

inline RobustMutex::RobustMutex()
    : Mutex(no_init_tag())
{
    bool robust_if_available = true;
    init_as_process_shared(robust_if_available);
}

inline RobustMutex::~RobustMutex() noexcept
{
}

template <class Func>
inline void RobustMutex::lock(Func recover_func)
{
    bool no_thread_has_died = low_level_lock(); // Throws
    if (REALM_LIKELY(no_thread_has_died))
        return;
    try {
        recover_func(); // Throws
        mark_as_consistent();
        // If we get this far, the protected memory has been
        // brought back into a consistent state, and the mutex has
        // been notified about this. This means that we can safely
        // enter the application's critical section.
    }
    catch (...) {
        // Unlocking without first calling mark_as_consistent()
        // means that the mutex enters the "not recoverable"
        // state, which will cause all future attempts at locking
        // to fail.
        unlock();
        throw;
    }
}

template <class Func>
inline bool RobustMutex::try_lock(Func recover_func)
{
    int lock_result = try_low_level_lock(); // Throws
    if (lock_result == 0)
        return false;
    bool no_thread_has_died = lock_result == 1;
    if (REALM_LIKELY(no_thread_has_died))
        return true;
    try {
        recover_func(); // Throws
        mark_as_consistent();
        // If we get this far, the protected memory has been
        // brought back into a consistent state, and the mutex has
        // been notified about this. This means that we can safely
        // enter the application's critical section.
    }
    catch (...) {
        // Unlocking without first calling mark_as_consistent()
        // means that the mutex enters the "not recoverable"
        // state, which will cause all future attempts at locking
        // to fail.
        unlock();
        throw;
    }
    return true;
}

inline void RobustMutex::unlock() noexcept
{
    Mutex::unlock();
}
inline CondVar::CondVar()
{
#ifndef _WIN32
    int r = pthread_cond_init(&m_impl, 0);
    if (REALM_UNLIKELY(r != 0))
        init_failed(r);
#endif
}

inline CondVar::~CondVar() noexcept
{
#ifndef _WIN32
    int r = pthread_cond_destroy(&m_impl);
    if (REALM_UNLIKELY(r != 0))
        destroy_failed(r);
#endif
}

inline void CondVar::wait(LockGuard& l) noexcept
{
#ifdef _WIN32
    SleepConditionVariableCS(&m_condvar, &l.m_mutex.m_critical_section, INFINITE);
#else
    int r = pthread_cond_wait(&m_impl, &l.m_mutex.m_impl);
    if (REALM_UNLIKELY(r != 0))
        REALM_TERMINATE("pthread_cond_wait() failed");
#endif
}

template <class Func>
inline void CondVar::wait(RobustMutex& m, Func recover_func, const struct timespec* tp)
{
    int r;

    if (!tp) {
#ifdef _WIN32
        if (!SleepConditionVariableCS(&m_condvar, &m.m_critical_section, INFINITE))
            r = GetLastError();
        else
            r = 0;
#else
        r = pthread_cond_wait(&m_impl, &m.m_impl);
#endif
    }
    else {
#ifdef _WIN32
        if (!SleepConditionVariableCS(&m_condvar, &m.m_critical_section, tp->tv_sec / 1000)) {
            r = GetLastError();
            if (r == ERROR_TIMEOUT)
                return;
        }
        else {
            r = 0;
        }
#else
        r = pthread_cond_timedwait(&m_impl, &m.m_impl, tp);
        if (r == ETIMEDOUT)
            return;
#endif
    }

    if (REALM_LIKELY(r == 0))
        return;

    handle_wait_error(r);

    try {
        recover_func(); // Throws
        m.mark_as_consistent();
        // If we get this far, the protected memory has been
        // brought back into a consistent state, and the mutex has
        // been notified about this. This means that we can safely
        // enter the application's critical section.
    }
    catch (...) {
        // Unlocking without first calling mark_as_consistent()
        // means that the mutex enters the "not recoverable"
        // state, which will cause all future attempts at locking
        // to fail.
        m.unlock();
        throw;
    }
}
inline void CondVar::notify() noexcept
{
#ifdef _WIN32
    WakeConditionVariable(&m_condvar);
#else
    int r = pthread_cond_signal(&m_impl);
    REALM_ASSERT(r == 0);
#endif
}

inline void CondVar::notify_all() noexcept
{
#ifdef _WIN32
    WakeAllConditionVariable(&m_condvar);
#else
    int r = pthread_cond_broadcast(&m_impl);
    REALM_ASSERT(r == 0);
#endif
}
// Helpers that can ensure atomic access to memory which has not itself been declared atomic.
// This can be used, for example, to ensure atomic access to members of a vector. Vectors cannot
// fully support atomic members because operations on a vector may relocate the underlying memory.
// Use with care!
template <typename T>
T load_atomic(T& t_ref, std::memory_order order)
{
    std::atomic<T>* t_ptr = reinterpret_cast<std::atomic<T>*>(&t_ref);
    T t = atomic_load_explicit(t_ptr, order);
    return t;
}

template <typename T>
void store_atomic(T& t_ref, T value, std::memory_order order)
{
    std::atomic<T>* t_ptr = reinterpret_cast<std::atomic<T>*>(&t_ref);
    atomic_store_explicit(t_ptr, value, order);
}
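
// Usage sketch for load_atomic()/store_atomic() (illustrative only; assumes
// the element is only ever accessed through these helpers and that the vector
// is not resized or relocated concurrently):
//
//     std::vector<uint64_t> counters(16);
//     store_atomic(counters[3], uint64_t(42), std::memory_order_release);
//     uint64_t value = load_atomic(counters[3], std::memory_order_acquire);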

} // namespace util
} // namespace realm

#endif // REALM_UTIL_THREAD_HPP