// bitcoin_sync/sync_impl.rs
1/*!
2 | The Simple Definition:
3 |
4 | RecursiveMutex mutex;
5 | std::recursive_mutex mutex;
6 |
7 | LOCK(mutex);
8 | std::unique_lock<std::recursive_mutex> criticalblock(mutex);
9 |
10 | LOCK2(mutex1, mutex2);
11 | std::unique_lock<std::recursive_mutex> criticalblock1(mutex1);
12 | std::unique_lock<std::recursive_mutex> criticalblock2(mutex2);
13 |
14 | TRY_LOCK(mutex, name);
15 | std::unique_lock<std::recursive_mutex> name(mutex, std::try_to_lock_t);
16 |
17 | ENTER_CRITICAL_SECTION(mutex); // no RAII
18 | mutex.lock();
19 |
20 | LEAVE_CRITICAL_SECTION(mutex); // no RAII
21 | mutex.unlock();
22 */
23
24crate::ix!();
25
26//-------------------------------------------[.cpp/bitcoin/src/sync.h]
27
28/* --------- THE ACTUAL IMPLEMENTATION --------- */
29
/// Mirror of Bitcoin Core's `AssertLockHeld(cs)`: intended to assert that
/// `$cs` is held by the current thread. Currently expands to nothing; the
/// C++ expansion is kept below as the translation reference.
#[macro_export] macro_rules! assert_lock_held {
    ($cs:ident) => {
        /*
        AssertLockHeldInternal(#cs, __FILE__, __LINE__, &cs)
        */
    }
}
37
/// Mirror of Bitcoin Core's `AssertLockNotHeld(cs)`: intended to assert that
/// `$cs` is NOT held by the current thread. Currently expands to nothing;
/// the C++ expansion is kept below as the translation reference.
#[macro_export] macro_rules! assert_lock_not_held {
    ($cs:ident) => {
        /*
        AssertLockNotHeldInternal(#cs, __FILE__, __LINE__, &cs)
        */
    }
}
45
46/**
47 | Template mixin that adds -Wthread-safety
48 | locking annotations and lock order
49 | checking to a subset of the mutex API.
50 |
51 */
52#[LOCKABLE]
53pub struct AnnotatedMixin<PARENT> {
54 base: PARENT,
55}
56
impl<PARENT> Drop for AnnotatedMixin<PARENT> {
    /// On destruction the C++ original removes this mutex from the global
    /// lock-order bookkeeping via `DeleteLock`; not yet translated.
    fn drop(&mut self) {
        todo!();
        /*
        DeleteLock((c_void*)this);
        */
    }
}
65
/// Mirrors the C++ nested type `AnnotatedMixin::UniqueLock` as a
/// module-scoped alias, so callers can name `annotated_mixin::UniqueLock`.
pub mod annotated_mixin {
    pub type UniqueLock<PARENT> = super::UniqueLock<PARENT>;
}
69
impl<PARENT> Not for AnnotatedMixin<PARENT> {
    type Output = AnnotatedMixin<PARENT>;

    /**
     | For negative capabilities in the Clang
     | Thread Safety Analysis.
     |
     | A negative requirement uses the
     | EXCLUSIVE_LOCKS_REQUIRED attribute, in
     | conjunction with the ! operator, to
     | indicate that a mutex should not be held.
     */
    // NOTE(review): the C++ `operator!` returns `*this` (a reference),
    // whereas `not(self)` consumes the mixin by value — confirm the intended
    // semantics before implementing.
    #[inline] fn not(self) -> Self::Output {
        todo!();
        /*
        return *this;
        */
    }
}
89
impl<PARENT> AnnotatedMixin<PARENT> {

    /// Acquire the underlying mutex, blocking until it is available
    /// (forwards to `PARENT::lock()` in the C++ original); not yet
    /// translated.
    #[EXCLUSIVE_LOCK_FUNCTION()]
    pub fn lock(&mut self) {
        todo!();
        /*
        PARENT::lock();
        */
    }

    /// Release the underlying mutex (`PARENT::unlock()`); not yet
    /// translated.
    #[UNLOCK_FUNCTION()]
    pub fn unlock(&mut self) {
        todo!();
        /*
        PARENT::unlock();
        */
    }

    /// Attempt to acquire without blocking; returns whether the lock was
    /// taken (`PARENT::try_lock()`); not yet translated.
    #[EXCLUSIVE_TRYLOCK_FUNCTION(true)]
    pub fn try_lock(&mut self) -> bool {
        todo!();
        /*
        return PARENT::try_lock();
        */
    }
}
119
120/**
121 | Wrapped mutex: supports recursive
122 | locking, but no waiting
123 |
124 | TODO: We should move away from using
125 | the recursive lock by default.
126 |
127 */
128pub type RecursiveMutex<T> = AnnotatedMixin<parking_lot::ReentrantMutex<T>>;
129
130/**
131 | Wrapped mutex: supports waiting but
132 | not recursive locking
133 |
134 */
135pub type Mutex = AnnotatedMixin<parking_lot::RawMutex>;
136
137pub type MutexUniqueLock = Broken;
138
139/**
140 | Wrapper around std::unique_lock style
141 | lock for Mutex.
142 |
143 */
144#[SCOPED_LOCKABLE]
145#[derive(Default)]
146pub struct UniqueLock<Base = MutexUniqueLock> {
147 base: Base,
148}
149
pub mod unique_lock {
    use super::*;

    /**
      | An RAII-style reverse lock. Unlocks
      | on construction and locks on destruction.
      |
      */
    pub struct ReverseLock<Base> {
        // The lock being temporarily released for this guard's lifetime.
        lock: Rc<RefCell<UniqueLock<Base>>>,
        // Holds the swapped-out lock state while released (see the C++
        // `swap` calls in the reference comments below).
        templock: UniqueLock<Base>,
        // Diagnostics captured at construction for lock-order bookkeeping.
        lockname: String,
        file: String,
        line: i32,
    }

    impl<Base> Drop for ReverseLock<Base> {
        /// Re-registers and re-acquires the original lock when the reverse
        /// lock goes out of scope; not yet translated.
        fn drop(&mut self) {
            todo!();
            /*
            templock.swap(lock);
            EnterCritical(lockname.c_str(), file.c_str(), line, lock.mutex());
            lock.lock();
            */
        }
    }

    impl<Base> ReverseLock<Base> {

        /// Releases `lock` for the lifetime of the returned guard, after
        /// checking (C++ `CheckLastCritical`) that it is the most recently
        /// acquired critical section; not yet translated.
        pub fn new(
            lock: &mut UniqueLock<Base>,
            guardname: *const u8,
            file: *const u8,
            line: i32) -> Self {

            todo!();
            /*
            : lock(_lock), file(_file), line(_line)
            CheckLastCritical((c_void*)lock.mutex(), lockname, _guardname, _file, _line);
            lock.unlock();
            LeaveCritical();
            lock.swap(templock);
            */
        }
    }
}
198
impl<Base> Drop for UniqueLock<Base> {

    /// If the lock is still owned when the guard is dropped, pop it from
    /// the lock-order tracking (C++ `LeaveCritical`); not yet translated.
    #[UNLOCK_FUNCTION()]
    fn drop(&mut self) {
        todo!();
        /*
        if (Base::owns_lock())
            LeaveCritical();
        */
    }
}
210
impl<Base> Into<bool> for UniqueLock<Base> {

    /// True iff the lock is currently owned — the C++ `operator bool`;
    /// not yet translated.
    #[inline] fn into(self) -> bool {
        todo!();
        /*
        return Base::owns_lock();
        */
    }
}
220
impl<Base> UniqueLock<Base> {

    /// Register the lock acquisition with the lock-order checker, take the
    /// fast path if `try_lock` succeeds, otherwise log the contention and
    /// block on `lock()`; not yet translated.
    pub fn enter(&mut self,
        psz_name: *const u8,
        psz_file: *const u8,
        n_line: i32) {

        todo!();
        /*
        EnterCritical(pszName, pszFile, nLine, Base::mutex());
        if (Base::try_lock()) return;
        LOG_TIME_MICROS_WITH_CATEGORY(strprintf("lock contention %s, %s:%d", pszName, pszFile, nLine), BCLog::LOCK);
        Base::lock();
        */
    }

    /// Non-blocking variant of `enter`: registers the attempt (with the
    /// try flag set), unregisters on failure, and returns whether the lock
    /// is now owned; not yet translated.
    pub fn try_enter(&mut self,
        psz_name: *const u8,
        psz_file: *const u8,
        n_line: i32) -> bool {

        todo!();
        /*
        EnterCritical(pszName, pszFile, nLine, Base::mutex(), true);
        Base::try_lock();
        if (!Base::owns_lock()) {
            LeaveCritical();
        }
        return Base::owns_lock();
        */
    }

    /// Construct a guard over `mutex_in` in deferred mode, then acquire it
    /// via `try_enter` when `try_` is `Some(true)` or via the blocking
    /// `enter` otherwise (default `false`); not yet translated.
    #[EXCLUSIVE_LOCK_FUNCTION(mutexIn)]
    pub fn new(
        mutex_in: &mut Mutex,
        psz_name: *const u8,
        psz_file: *const u8,
        n_line: i32,
        try_: Option<bool>) -> Self {
        let try_: bool = try_.unwrap_or(false);
        todo!();
        /*
        : base(mutexIn, std::defer_lock),

        if (fTry)
            TryEnter(pszName, pszFile, nLine);
        else
            Enter(pszName, pszFile, nLine);
        */
    }
}
272
/// Mirror of Bitcoin Core's `REVERSE_LOCK(g)`: temporarily releases guard
/// `$g` for the enclosing scope via a `ReverseLock`. Currently expands to
/// nothing; the C++ expansion is kept below as the reference.
#[macro_export] macro_rules! reverse_lock {
    ($g:ident) => {
        /*
        typename std::decay<decltype(g)>::type::reverse_lock PASTE2(revlock, __COUNTER__)(g, #g, __FILE__, __LINE__)
        */
    }
}
280
/// `UniqueLock` specialised on the de-referenced, de-pointered mutex type —
/// the Rust spelling of the C++ `DebugLock<MutexArg>` template alias used by
/// the `lock!`/`lock2!`/`try_lock!`/`wait_lock!` macros.
pub type DebugLock<MutexArg> = UniqueLock<RemoveReference<RemovePointer<MutexArg>>>;
282
/// Mirror of Bitcoin Core's `LOCK(cs)`: RAII-acquire `$cs` for the enclosing
/// scope (see the "Simple Definition" comment at the top of this file).
/// Currently expands to nothing; C++ expansion kept below as the reference.
#[macro_export] macro_rules! lock {
    ($cs:expr) => {
        /*
        DebugLock<decltype(cs)> PASTE2(criticalblock, __COUNTER__)(cs, #cs, __FILE__, __LINE__)
        */
    }
}
290
/// Mirror of Bitcoin Core's `LOCK2(cs1, cs2)`: RAII-acquire two mutexes, in
/// argument order, for the enclosing scope. Currently expands to nothing;
/// C++ expansion kept below as the reference.
#[macro_export] macro_rules! lock2 {
    ($cs1:expr, $cs2:expr) => {
        /*

        DebugLock<decltype(cs1)> criticalblock1(cs1, #cs1, __FILE__, __LINE__);
        DebugLock<decltype(cs2)> criticalblock2(cs2, #cs2, __FILE__, __LINE__);
        */
    }
}
300
/// Mirror of Bitcoin Core's `TRY_LOCK(cs, name)`: non-blocking acquire of
/// `$cs` into a guard named `$name` (note the trailing `true` try-flag in
/// the reference). Currently expands to nothing.
#[macro_export] macro_rules! try_lock {
    ($cs:expr, $name:expr) => {
        /*
        DebugLock<decltype(cs)> name(cs, #cs, __FILE__, __LINE__, true)
        */
    }
}
308
/// Mirror of Bitcoin Core's `WAIT_LOCK(cs, name)`: blocking acquire of `$cs`
/// into a guard named `$name`. Currently expands to nothing; C++ expansion
/// kept below as the reference.
#[macro_export] macro_rules! wait_lock {
    ($cs:expr, $name:expr) => {
        /*
        DebugLock<decltype(cs)> name(cs, #cs, __FILE__, __LINE__)
        */
    }
}
316
/// Mirror of Bitcoin Core's `ENTER_CRITICAL_SECTION(cs)`: lock `$cs`
/// WITHOUT RAII — the caller must pair it with `leave_critical_section!`.
/// Currently expands to nothing; C++ expansion kept below as the reference.
#[macro_export] macro_rules! enter_critical_section {
    ($cs:expr) => {
        /*

        {
            EnterCritical(#cs, __FILE__, __LINE__, &cs);
            (cs).lock();
        }
        */
    }
}
328
/// Mirror of Bitcoin Core's `LEAVE_CRITICAL_SECTION(cs)`: unlock `$cs`
/// previously taken with `enter_critical_section!` (no RAII), after checking
/// it was the most recently entered critical section. Currently expands to
/// nothing; C++ expansion kept below as the reference.
#[macro_export] macro_rules! leave_critical_section {
    ($cs:expr) => {
        /*

        {
            std::string lockname;
            CheckLastCritical((c_void*)(&cs), lockname, #cs, __FILE__, __LINE__);
            (cs).unlock();
            LeaveCritical();
        }
        */
    }
}
342
343/**
344 | Run code while locking a mutex.
345 |
346 | Examples:
347 |
348 -------------------------
349 |WITH_LOCK(cs, shared_val = shared_val + 1);
350 |
351 | int val = WITH_LOCK(cs, return shared_val);
352 |
353 |
354 -------------------------
355 | Note:
356 |
357 | Since the return type deduction follows that
358 | of decltype(auto), while the deduced type of:
359 |
360 | WITH_LOCK(cs, return {int i = 1; return i;});
361 |
362 | is int, the deduced type of:
363 |
364 | WITH_LOCK(cs, return {int j = 1; return (j);});
365 |
366 | is &int, a reference to a local variable
367 |
368 | The above is detectable at compile-time with
369 | the -Wreturn-local-addr flag in gcc and the
370 | -Wreturn-stack-address flag in clang, both
371 | enabled by default.
372 */
373#[macro_export] macro_rules! with_lock {
374 ($cs:expr, $code:expr) => {
375 /*
376 [&]() -> decltype(auto) { LOCK(cs); code; }()
377 */
378 }
379}
380
381///------------------------------
382pub struct Semaphore {
383 condition: std::sync::Condvar,
384 mutex: parking_lot::RawMutex,
385 value: i32,
386}
387
impl Semaphore {

    /// Construct a semaphore with an initial count of `init`; not yet
    /// translated.
    pub fn new(init: i32) -> Self {

        todo!();
        /*
        : value(init),


        */
    }

    /// Block until the count is at least 1, then decrement it; not yet
    /// translated.
    pub fn wait(&mut self) {

        todo!();
        /*
        std::unique_lock<std::mutex> lock(mutex);
        condition.wait(lock, [&]() { return value >= 1; });
        value--;
        */
    }

    /// Non-blocking wait: decrement and return `true` if the count is at
    /// least 1, otherwise return `false` without blocking; not yet
    /// translated.
    pub fn try_wait(&mut self) -> bool {

        todo!();
        /*
        std::lock_guard<std::mutex> lock(mutex);
        if (value < 1)
            return false;
        value--;
        return true;
        */
    }

    /// Increment the count and wake one waiter (C++ notifies outside the
    /// lock — see the brace scope in the reference); not yet translated.
    pub fn post(&mut self) {

        todo!();
        /*
        {
            std::lock_guard<std::mutex> lock(mutex);
            value++;
        }
        condition.notify_one();
        */
    }
}
434
435/**
436 | RAII-style semaphore lock
437 |
438 */
439#[derive(Clone)]
440pub struct SemaphoreGrant {
441 sem: Amo<Semaphore>,
442 have_grant: bool,
443}
444
impl Drop for SemaphoreGrant {
    /// Releases the held unit (if any) back to the semaphore via
    /// `Release()`; not yet translated.
    fn drop(&mut self) {
        todo!();
        /*
        Release();
        */
    }
}
453
impl Into<bool> for &SemaphoreGrant {

    /// True iff this grant currently holds a unit — the C++
    /// `operator bool`; not yet translated.
    #[inline] fn into(self) -> bool {
        todo!();
        /*
        return fHaveGrant;
        */
    }
}
463
impl Default for SemaphoreGrant {

    /// An empty grant: no semaphore attached (`nullptr` in C++) and nothing
    /// held; not yet translated.
    fn default() -> Self {
        todo!();
        /*
        : sem(nullptr),
        : have_grant(false),


        */
    }
}
476
impl SemaphoreGrant {

    /// Blocking-acquire one unit; no-op if this grant already holds one;
    /// not yet translated.
    pub fn acquire(&mut self) {

        todo!();
        /*
        if (fHaveGrant)
            return;
        sem->wait();
        fHaveGrant = true;
        */
    }

    /// Return the held unit to the semaphore; no-op if nothing is held;
    /// not yet translated.
    pub fn release(&mut self) {

        todo!();
        /*
        if (!fHaveGrant)
            return;
        sem->post();
        fHaveGrant = false;
        */
    }

    /// Non-blocking acquire; returns whether a unit is held afterwards;
    /// not yet translated.
    pub fn try_acquire(&mut self) -> bool {

        todo!();
        /*
        if (!fHaveGrant && sem->try_wait())
            fHaveGrant = true;
        return fHaveGrant;
        */
    }

    /// Transfer this grant's unit (and semaphore handle) into `grant`,
    /// releasing whatever `grant` previously held; not yet translated.
    pub fn move_to(&mut self, grant: &mut SemaphoreGrant) {

        todo!();
        /*
        grant.Release();
        grant.sem = sem;
        grant.fHaveGrant = fHaveGrant;
        fHaveGrant = false;
        */
    }

    /// Construct a grant over `sema`, acquiring on construction —
    /// non-blocking when `try_` is `Some(true)`, blocking otherwise
    /// (default `false`); not yet translated.
    pub fn new(
        sema: Amo<Semaphore>,
        try_: Option<bool>) -> Self {
        let try_:bool = try_.unwrap_or(false);
        todo!();
        /*
        : sem(&sema),
        : have_grant(false),

        if (fTry)
            TryAcquire();
        else
            Acquire();
        */
    }
}
539
540//-------------------------------------------[.cpp/bitcoin/src/sync.cpp]
541#[cfg(DEBUG_LOCKORDER)]
542pub use debug_lockorder::*;
543
544#[cfg(not(DEBUG_LOCKORDER))]
545pub use debug_lockorder_noop::*;
546
547#[cfg(not(DEBUG_LOCKORDER))]
548pub mod debug_lockorder_noop {
549 use super::*;
550
551 #[inline] pub fn enter_critical<MutexType>(
552 psz_name: *const u8,
553 psz_file: *const u8,
554 n_line: i32,
555 cs: *mut MutexType,
556 try_: bool) { }
557
558 #[inline] pub fn leave_critical() { }
559
560 #[inline] pub fn check_last_critical(
561 cs: *mut c_void,
562 lockname: &mut String,
563 guardname: *const u8,
564 file: *const u8,
565 line: i32) { }
566
567 #[EXCLUSIVE_LOCKS_REQUIRED(cs)]
568 #[inline] pub fn assert_lock_held_internal<MutexType>(
569 psz_name: *const u8,
570 psz_file: *const u8,
571 n_line: i32,
572 cs: *mut MutexType) { }
573
574 #[LOCKS_EXCLUDED(cs)]
575 pub fn assert_lock_not_held_internal<MutexType>(
576 psz_name: *const u8,
577 psz_file: *const u8,
578 n_line: i32,
579 cs: *mut MutexType) { }
580
581 #[inline] pub fn delete_lock(cs: *mut c_void) { }
582
583 #[inline] pub fn lock_stack_empty() -> bool {
584
585 todo!();
586 /*
587 return true;
588 */
589 }
590}
591
#[cfg(DEBUG_LOCKORDER)]
mod debug_lockorder {

    // Fixes applied relative to the original translation of sync.cpp:
    //   * added `use super::*;` — the module referenced `c_void`, `Pair`,
    //     `HashMap`, `HashSet` and `lazy_static!` without importing them;
    //   * removed a leading set of empty duplicate stubs for
    //     `check_last_critical`, `locks_held`, `assert_lock_held_internal`,
    //     `assert_lock_not_held_internal`, `delete_lock` and
    //     `lock_stack_empty` that collided (E0428) with the full
    //     definitions kept below;
    //   * `LockLocation::thread_name` was `&String` with no lifetime
    //     (E0106) — it now owns its `String`;
    //   * `get_lock_data` returned `&mut LockData` with no lifetime
    //     (E0106); the C++ intentionally leaks a static, so the honest
    //     return type is `&'static mut LockData`;
    //   * `enter_critical` called `.unwrap_or(false)` on its plain `bool`
    //     parameter (no such method on `bool`) — the call is removed,
    //     matching the no-op variant's signature.
    use super::*;

    /**
      | Call abort() if a potential lock order
      | deadlock bug is detected, instead of
      | just logging information and throwing
      | a logic_error.
      |
      | Defaults to true, and set to false in
      | DEBUG_LOCKORDER unit tests.
      |
      */
    lazy_static!{
        /*
        bool g_debug_lockorder_abort = true;
        */
    }

    /**
      | Early deadlock detection.
      | Problem being solved:
      | Thread 1 locks A, then B, then C
      | Thread 2 locks D, then C, then A
      | --> may result in deadlock between the two
      | threads, depending on when they run.
      |
      | Solution implemented here:
      |
      | Keep track of pairs of locks: (A before B), (A
      | before C), etc.
      |
      | Complain if any thread tries to lock in
      | a different order.
      */
    pub struct LockLocation {
        // Whether this acquisition used the try (non-blocking) path.
        try_: bool,
        mutex_name: String,
        source_file: String,
        // Owned copy of the acquiring thread's name (was `&String`, which
        // does not compile without a lifetime parameter).
        thread_name: String,
        source_line: i32,
    }

    impl LockLocation {

        /// Record where (and how) a lock was taken; not yet translated.
        pub fn new(
            psz_name: *const u8,
            psz_file: *const u8,
            n_line: i32,
            try_in: bool,
            thread_name: &String) -> Self {

            todo!();
            /*

            : fTry(fTryIn),
            mutexName(pszName),
            sourceFile(pszFile),
            m_thread_name(thread_name),
            sourceLine(nLine)
            */
        }

        /// "'name' in file:line (TRY) (in thread '...')" diagnostic string;
        /// not yet translated.
        pub fn to_string(&self) -> String {

            todo!();
            /*
            return strprintf(
                "'%s' in %s:%s%s (in thread '%s')",
                mutexName, sourceFile, sourceLine, (fTry ? " (TRY)" : ""), m_thread_name);
            */
        }

        /// The mutex name recorded at construction; not yet translated.
        pub fn name(&self) -> String {

            todo!();
            /*
            return mutexName;
            */
        }
    }

    pub type LockStackItem = Pair<*mut c_void,LockLocation>;
    pub type LockStack = Vec<LockStackItem>;
    pub type LockStacks = HashMap<std::thread::ThreadId,LockStack>;
    pub type LockPair = Pair<*mut c_void,*mut c_void>;
    pub type LockOrders = HashMap<LockPair,LockStack>;
    pub type InvLockOrders = HashSet<LockPair>;

    /// Global lock-order bookkeeping; all fields are guarded by `dd_mutex`.
    pub struct LockData {
        lock_stacks: LockStacks,
        lockorders: LockOrders,
        invlockorders: InvLockOrders,
        dd_mutex: parking_lot::RawMutex,
    }

    /// Accessor for the process-wide, intentionally leaked `LockData`
    /// singleton (hence the `'static` lifetime); not yet translated.
    pub fn get_lock_data() -> &'static mut LockData {

        todo!();
        /*
        // This approach guarantees that the object is not destroyed until after its last use.
        // The operating system automatically reclaims all the memory in a program's heap when that program exits.
        // Since the ~LockData() destructor is never called, the LockData class and all
        // its subclasses must have implicitly-defined destructors.
        static LockData& lock_data = *new LockData();
        return lock_data;
        */
    }

    /// Log both conflicting lock orders and abort (or throw) — called when
    /// `push_lock` sees the inverse ordering already recorded; does not
    /// return in the C++ original; not yet translated.
    pub fn potential_deadlock_detected(
        mismatch: &LockPair,
        s1: &LockStack,
        s2: &LockStack) {

        todo!();
        /*
        LogPrintf("POTENTIAL DEADLOCK DETECTED\n");
        LogPrintf("Previous lock order was:\n");
        for (const LockStackItem& i : s1) {
            std::string prefix{};
            if (i.first == mismatch.first) {
                prefix = " (1)";
            }
            if (i.first == mismatch.second) {
                prefix = " (2)";
            }
            LogPrintf("%s %s\n", prefix, i.second.ToString());
        }

        std::string mutex_a, mutex_b;
        LogPrintf("Current lock order is:\n");
        for (const LockStackItem& i : s2) {
            std::string prefix{};
            if (i.first == mismatch.first) {
                prefix = " (1)";
                mutex_a = i.second.Name();
            }
            if (i.first == mismatch.second) {
                prefix = " (2)";
                mutex_b = i.second.Name();
            }
            LogPrintf("%s %s\n", prefix, i.second.ToString());
        }
        if (g_debug_lockorder_abort) {
            tfm::format(std::cerr, "Assertion failed: detected inconsistent lock order for %s, details in debug log.\n", s2.back().second.ToString());
            abort();
        }
        throw std::logic_error(strprintf("potential deadlock detected: %s -> %s -> %s", mutex_b, mutex_a, mutex_b));
        */
    }

    /// Log the lock stack and abort (or throw) when a non-recursive mutex
    /// is locked twice by the same thread; does not return in the C++
    /// original; not yet translated.
    pub fn double_lock_detected(
        mutex: *const c_void,
        lock_stack: &LockStack) {

        todo!();
        /*
        LogPrintf("DOUBLE LOCK DETECTED\n");
        LogPrintf("Lock order:\n");
        for (const LockStackItem& i : lock_stack) {
            std::string prefix{};
            if (i.first == mutex) {
                prefix = " (*)";
            }
            LogPrintf("%s %s\n", prefix, i.second.ToString());
        }
        if (g_debug_lockorder_abort) {
            tfm::format(std::cerr,
                        "Assertion failed: detected double lock for %s, details in debug log.\n",
                        lock_stack.back().second.ToString());
            abort();
        }
        throw std::logic_error("double lock detected");
        */
    }

    /// Push `c` onto this thread's lock stack, detecting double locks of
    /// non-recursive mutexes and order inversions against previously
    /// recorded (first, second) pairs; not yet translated.
    pub fn push_lock<MutexType>(
        c: *mut MutexType,
        locklocation: &LockLocation) {

        todo!();
        /*
        constexpr bool is_recursive_mutex =
            std::is_base_of<RecursiveMutex, MutexType>::value ||
            std::is_base_of<std::recursive_mutex, MutexType>::value;

        LockData& lockdata = GetLockData();
        std::lock_guard<std::mutex> lock(lockdata.dd_mutex);

        LockStack& lock_stack = lockdata.m_lock_stacks[std::this_thread::get_id()];
        lock_stack.emplace_back(c, locklocation);
        for (size_t j = 0; j < lock_stack.size() - 1; ++j) {
            const LockStackItem& i = lock_stack[j];
            if (i.first == c) {
                if (is_recursive_mutex) {
                    break;
                }
                // It is not a recursive mutex and it appears in the stack two times:
                // at position `j` and at the end (which we added just before this loop).
                // Can't allow locking the same (non-recursive) mutex two times from the
                // same thread as that results in an undefined behavior.
                auto lock_stack_copy = lock_stack;
                lock_stack.pop_back();
                double_lock_detected(c, lock_stack_copy);
                // double_lock_detected() does not return.
            }

            const LockPair p1 = std::make_pair(i.first, c);
            if (lockdata.lockorders.count(p1))
                continue;

            const LockPair p2 = std::make_pair(c, i.first);
            if (lockdata.lockorders.count(p2)) {
                auto lock_stack_copy = lock_stack;
                lock_stack.pop_back();
                potential_deadlock_detected(p1, lockdata.lockorders[p2], lock_stack_copy);
                // potential_deadlock_detected() does not return.
            }

            lockdata.lockorders.emplace(p1, lock_stack);
            lockdata.invlockorders.insert(p2);
        }
        */
    }

    /// Pop the top of this thread's lock stack, dropping the per-thread
    /// entry entirely once empty; not yet translated.
    pub fn pop_lock() {

        todo!();
        /*
        LockData& lockdata = GetLockData();
        std::lock_guard<std::mutex> lock(lockdata.dd_mutex);

        LockStack& lock_stack = lockdata.m_lock_stacks[std::this_thread::get_id()];
        lock_stack.pop_back();
        if (lock_stack.empty()) {
            lockdata.m_lock_stacks.erase(std::this_thread::get_id());
        }
        */
    }

    /// Record a lock acquisition attempt (`push_lock` with a freshly built
    /// `LockLocation`); not yet translated.
    pub fn enter_critical<MutexType>(
        psz_name: *const u8,
        psz_file: *const u8,
        n_line: i32,
        cs: *mut MutexType,
        try_: bool) {

        todo!();
        /*
        push_lock(cs, CLockLocation(pszName, pszFile, nLine, fTry, util::ThreadGetInternalName()));
        */
    }

    /// Set `lockname` to the name of the most recent critical section if
    /// `cs` is on top of this thread's lock stack; otherwise log the
    /// inconsistent order and abort (or throw); not yet translated.
    pub fn check_last_critical(
        cs: *mut c_void,
        lockname: &mut String,
        guardname: *const u8,
        file: *const u8,
        line: i32) {

        todo!();
        /*
        LockData& lockdata = GetLockData();
        std::lock_guard<std::mutex> lock(lockdata.dd_mutex);

        const LockStack& lock_stack = lockdata.m_lock_stacks[std::this_thread::get_id()];
        if (!lock_stack.empty()) {
            const auto& lastlock = lock_stack.back();
            if (lastlock.first == cs) {
                lockname = lastlock.second.Name();
                return;
            }
        }

        LogPrintf("INCONSISTENT LOCK ORDER DETECTED\n");
        LogPrintf("Current lock order (least recent first) is:\n");
        for (const LockStackItem& i : lock_stack) {
            LogPrintf(" %s\n", i.second.ToString());
        }
        if (g_debug_lockorder_abort) {
            tfm::format(std::cerr, "%s:%s %s was not most recent critical section locked, details in debug log.\n", file, line, guardname);
            abort();
        }
        throw std::logic_error(strprintf("%s was not most recent critical section locked", guardname));
        */
    }

    /// Record a lock release (`pop_lock`); not yet translated.
    pub fn leave_critical() {

        todo!();
        /*
        pop_lock();
        */
    }

    /// Newline-separated description of every lock this thread holds;
    /// not yet translated.
    pub fn locks_held() -> String {

        todo!();
        /*
        LockData& lockdata = GetLockData();
        std::lock_guard<std::mutex> lock(lockdata.dd_mutex);

        const LockStack& lock_stack = lockdata.m_lock_stacks[std::this_thread::get_id()];
        std::string result;
        for (const LockStackItem& i : lock_stack)
            result += i.second.ToString() + std::string("\n");
        return result;
        */
    }

    /// Whether `mutex` appears anywhere on this thread's lock stack;
    /// not yet translated.
    pub fn lock_held(mutex: *mut c_void) -> bool {

        todo!();
        /*
        LockData& lockdata = GetLockData();
        std::lock_guard<std::mutex> lock(lockdata.dd_mutex);

        const LockStack& lock_stack = lockdata.m_lock_stacks[std::this_thread::get_id()];
        for (const LockStackItem& i : lock_stack) {
            if (i.first == mutex) return true;
        }

        return false;
        */
    }

    /// Abort with a diagnostic if `cs` is NOT held by this thread;
    /// not yet translated.
    #[EXCLUSIVE_LOCKS_REQUIRED(cs)]
    pub fn assert_lock_held_internal<MutexType>(
        psz_name: *const u8,
        psz_file: *const u8,
        n_line: i32,
        cs: *mut MutexType) {

        todo!();
        /*
        if (LockHeld(cs)) return;
        tfm::format(std::cerr, "Assertion failed: lock %s not held in %s:%i; locks held:\n%s", pszName, pszFile, nLine, LocksHeld());
        abort();
        */
    }

    /// Abort with a diagnostic if `cs` IS held by this thread;
    /// not yet translated.
    #[LOCKS_EXCLUDED(cs)]
    pub fn assert_lock_not_held_internal<MutexType>(
        psz_name: *const u8,
        psz_file: *const u8,
        n_line: i32,
        cs: *mut MutexType) {

        todo!();
        /*
        if (!LockHeld(cs)) return;
        tfm::format(std::cerr, "Assertion failed: lock %s held in %s:%i; locks held:\n%s", pszName, pszFile, nLine, LocksHeld());
        abort();
        */
    }

    /// Erase every recorded ordering involving `cs` (both directions) from
    /// the global bookkeeping — called when a mutex is destroyed;
    /// not yet translated.
    pub fn delete_lock(cs: *mut c_void) {

        todo!();
        /*
        LockData& lockdata = GetLockData();
        std::lock_guard<std::mutex> lock(lockdata.dd_mutex);
        const LockPair item = std::make_pair(cs, nullptr);
        LockOrders::iterator it = lockdata.lockorders.lower_bound(item);
        while (it != lockdata.lockorders.end() && it->first.first == cs) {
            const LockPair invitem = std::make_pair(it->first.second, it->first.first);
            lockdata.invlockorders.erase(invitem);
            lockdata.lockorders.erase(it++);
        }
        InvLockOrders::iterator invit = lockdata.invlockorders.lower_bound(item);
        while (invit != lockdata.invlockorders.end() && invit->first == cs) {
            const LockPair invinvitem = std::make_pair(invit->second, invit->first);
            lockdata.lockorders.erase(invinvitem);
            lockdata.invlockorders.erase(invit++);
        }
        */
    }

    /// Whether this thread currently holds no tracked locks;
    /// not yet translated.
    pub fn lock_stack_empty() -> bool {

        todo!();
        /*
        LockData& lockdata = GetLockData();
        std::lock_guard<std::mutex> lock(lockdata.dd_mutex);
        const auto it = lockdata.m_lock_stacks.find(std::this_thread::get_id());
        if (it == lockdata.m_lock_stacks.end()) {
            return true;
        }
        return it->second.empty();
        */
    }
}