Main Page   Reference Manual   Namespace List   Compound List   Namespace Members   Compound Members   File Members  

private_threading.h
Go to the documentation of this file.
1 // $Header$
2 //
3 // Copyright (C) 2001 - 2004, by
4 //
5 // Carlo Wood, Run on IRC <carlo@alinoe.com>
6 // RSA-1024 0x624ACAD5 1997-01-26 Sign & Encrypt
7 // Fingerprint16 = 32 EC A7 B6 AC DB 65 A6 F6 F6 55 DD 1C DC FF 61
8 //
9 // This file may be distributed under the terms of the Q Public License
10 // version 1.0 as appearing in the file LICENSE.QPL included in the
11 // packaging of this file.
12 //
13 
18 #ifndef LIBCWD_PRIVATE_THREADING_H
19 #define LIBCWD_PRIVATE_THREADING_H
20 
// Compile-time switch for very low-level debugging of the rwlock/mutex code
// itself.  Normally 0; set to 1 to get serialized raw CERR output.
#define LIBCWD_DEBUGDEBUGRWLOCK 0

#if LIBCWD_DEBUGDEBUGRWLOCK
#define LIBCWD_NO_INTERNAL_STRING
#include "raw_write.h"
#undef LIBCWD_NO_INTERNAL_STRING
// Mutex and message counter shared by all debug-debug output below, so that
// output from concurrent threads is not interleaved.
extern pthread_mutex_t LIBCWD_DEBUGDEBUGLOCK_CERR_mutex;
extern unsigned int LIBCWD_DEBUGDEBUGLOCK_CERR_count;
// Print `x' to the raw (malloc-free) debug channel while holding the output mutex.
#define LIBCWD_DEBUGDEBUGRWLOCK_CERR(x) \
    do { \
      pthread_mutex_lock(&LIBCWD_DEBUGDEBUGLOCK_CERR_mutex); \
      FATALDEBUGDEBUG_CERR(x); \
      pthread_mutex_unlock(&LIBCWD_DEBUGDEBUGLOCK_CERR_mutex); \
    } while(0)
// As above, but prefixed with a sequence number and the calling thread id.
// Only prints when used from code where `instance' is not static_tsd_instance
// (expects `instance' to be in scope at the expansion point).
#define LIBCWD_DEBUGDEBUGLOCK_CERR(x) \
    do { \
      if (instance != static_tsd_instance) \
      { \
	pthread_mutex_lock(&LIBCWD_DEBUGDEBUGLOCK_CERR_mutex); \
	++LIBCWD_DEBUGDEBUGLOCK_CERR_count; \
	FATALDEBUGDEBUG_CERR("[" << LIBCWD_DEBUGDEBUGLOCK_CERR_count << "] " << pthread_self() << ": " << x); \
	pthread_mutex_unlock(&LIBCWD_DEBUGDEBUGLOCK_CERR_mutex); \
      } \
    } while(0)
#else // !LIBCWD_DEBUGDEBUGRWLOCK
// Debug-debug output disabled: both macros compile to a no-op statement.
#define LIBCWD_DEBUGDEBUGRWLOCK_CERR(x) do { } while(0)
#define LIBCWD_DEBUGDEBUGLOCK_CERR(x) do { } while(0)
#endif // !LIBCWD_DEBUGDEBUGRWLOCK
49 
50 #ifndef LIBCWD_PRIVATE_SET_ALLOC_CHECKING_H
52 #endif
53 #ifndef LIBCWD_PRIVATE_STRUCT_TSD_H
54 #include "private_struct_TSD.h"
55 #endif
56 #ifndef LIBCWD_PRIVATE_MUTEX_INSTANCES_H
58 #endif
59 #ifndef LIBCWD_CORE_DUMP_H
60 #include "core_dump.h"
61 #endif
62 #ifndef LIBCW_CSTRING
63 #define LIBCW_CSTRING
64 #include <cstring> // Needed for std::memset and std::memcpy.
65 #endif
66 
67 #ifdef LIBCWD_HAVE_PTHREAD
68 #ifdef __linux
69 #ifndef _GNU_SOURCE
70 #error "You need to use define _GNU_SOURCE in order to make use of the extensions of Linux Threads."
71 #endif
72 #endif
73 #ifndef LIBCW_PTHREAD_H
74 #define LIBCW_PTHREAD_H
75 #include <pthread.h>
76 #endif
77 #if defined(PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP) && defined(PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP)
78 #define LIBCWD_USE_LINUXTHREADS 1
79 #else
80 #define LIBCWD_USE_POSIX_THREADS 1
81 #endif
82 #else
83 #if LIBCWD_THREAD_SAFE
84 #error Fatal error: thread support was not detected during configuration of libcwd (did you use --disable-threading?)! \
85  How come you are trying to compile a threaded program now? \
86  To fix this problem, either link with libcwd_r (install it), or when you are indeed compiling a \
87  single threaded application, then get rid of the -pthread (and/or -D_REENTRANT and/or -D_THREAD_SAFE) in your compile flags.
88 #endif
89 #endif // LIBCWD_HAVE_PTHREAD
90 
91 #ifndef LIBCWD_USE_LINUXTHREADS
92 #define LIBCWD_USE_LINUXTHREADS 0
93 #endif
94 #ifndef LIBCWD_USE_POSIX_THREADS
95 #define LIBCWD_USE_POSIX_THREADS 0
96 #endif
97 
// LibcwDebugThreads(x): execute `x' only when thread-debugging (CWDEBUG_DEBUGT)
// is compiled in; otherwise the whole expansion is a no-op statement.
#if CWDEBUG_DEBUGT
#define LibcwDebugThreads(x) do { x; } while(0)
#else
#define LibcwDebugThreads(x) do { } while(0)
#endif
103 
104 #if CWDEBUG_DEBUGT || CWDEBUG_DEBUG
105 #ifndef LIBCWD_PRIVATE_ASSERT_H
106 #include "private_assert.h"
107 #endif
108 #endif
109 
110 #if LIBCWD_THREAD_SAFE
111 
112 namespace libcwd {
113 
114 #if LIBCWD_DEBUGDEBUGRWLOCK
// Debug-only stream inserter: dumps the address and the internal state of a
// pthread_mutex_t to the raw (malloc-free) debug channel.
// NOTE(review): this reads glibc/NPTL-internal fields (__data.__lock etc.) and
// is therefore non-portable; it is only compiled when LIBCWD_DEBUGDEBUGRWLOCK
// is set, which is a glibc/Linux-only debugging mode.
inline
_private_::raw_write_nt const&
operator<<(_private_::raw_write_nt const& raw_write, pthread_mutex_t const& mutex)
{
  raw_write << "(pthread_mutex_t&)" << (void*)&mutex <<
      " = { __data = "
      "{ __lock = " << mutex.__data.__lock << ", "
      "__count = " << mutex.__data.__count << ", "
      "__owner = " << mutex.__data.__owner << ", "
      "__nusers = " << mutex.__data.__nusers << ", "
      "__kind = " << mutex.__data.__kind << "} }";
  return raw_write;
}
128 #endif
129 
130  namespace _private_ {
131 
132 extern void initialize_global_mutexes();
133 extern bool WST_multi_threaded;
134 
135 #if CWDEBUG_DEBUGT
136 extern void test_for_deadlock(size_t, struct TSD_st&, void const*);
// Forward an instance-id based deadlock test to the size_t implementation.
// Instance ids and object pointers share a single key space: values below
// 0x10000 are instance ids, values at or above it are pointer keys.
inline void test_for_deadlock(int instance, struct TSD_st& __libcwd_tsd, void const* from)
{
  assert(instance < 0x10000);
  size_t const key = static_cast<size_t>(instance);
  test_for_deadlock(key, __libcwd_tsd, from);
}
142 inline void test_for_deadlock(void const* ptr, struct TSD_st& __libcwd_tsd, void const* from)
143 {
144  assert(reinterpret_cast<size_t>(ptr) >= 0x10000);
145  test_for_deadlock(reinterpret_cast<size_t>(ptr), __libcwd_tsd, from);
146 }
147 #endif
148 
149 //===================================================================================================
150 //
151 // Mutex locking.
152 //
153 // template <int instance> This class may not use system calls (it may not call malloc(3)).
154 // class mutex_tct;
155 //
156 // Usage.
157 //
158 // Global mutexes can be initialized once, before using the mutex.
159 // mutex_tct<instance_id_const>::initialize();
160 //
161 // Static mutexes in functions (or templates) that can not globally
162 // be initialized need to call `initialize()' prior to *each* use
163 // (using -O2 this is at most a single test and nothing at all when
164 // Linuxthreads are being used.
165 //
166 
167 //========================================================================================================================================17"
168 // class mutex_tct
169 
170 #if LIBCWD_USE_POSIX_THREADS || LIBCWD_USE_LINUXTHREADS
171 // We have to use macros because pthread_cleanup_push and pthread_cleanup_pop
172 // are macros with an unmatched '{' and '}' respectively.
// We have to use macros because pthread_cleanup_push and pthread_cleanup_pop
// are macros with an unmatched '{' and '}' respectively.
//
// LIBCWD_DISABLE_CANCEL / LIBCWD_ENABLE_CANCEL bracket a region in which
// thread cancellation is fully disabled.  The *_NO_BRACE variants are for use
// where the caller supplies the surrounding braces itself.
#define LIBCWD_DISABLE_CANCEL \
    { \
      LIBCWD_DISABLE_CANCEL_NO_BRACE
#define LIBCWD_DISABLE_CANCEL_NO_BRACE \
      int __libcwd_oldstate; \
      pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &__libcwd_oldstate); \
      LibcwDebugThreads( ++__libcwd_tsd.cancel_explicitely_disabled )
#if CWDEBUG_ALLOC
#define LIBCWD_ASSERT_USERSPACE_OR_DEFERED_BEFORE_SETCANCELSTATE \
      /* pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL) will call, */ \
      /* and pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL) can call, */ \
      /* __pthread_do_exit() when the thread is cancelled in the meantime. */ \
      /* This might free allocations that are allocated in userspace. */ \
      LIBCWD_ASSERT( !__libcwd_tsd.internal || __libcwd_tsd.cancel_explicitely_disabled || __libcwd_tsd.cancel_explicitely_deferred )
#else
#define LIBCWD_ASSERT_USERSPACE_OR_DEFERED_BEFORE_SETCANCELSTATE
#endif
// Restore the cancel state saved by LIBCWD_DISABLE_CANCEL(_NO_BRACE).
#define LIBCWD_ENABLE_CANCEL_NO_BRACE \
    LibcwDebugThreads(\
      LIBCWD_ASSERT( __libcwd_tsd.cancel_explicitely_disabled > 0 ); \
      --__libcwd_tsd.cancel_explicitely_disabled; \
      LIBCWD_ASSERT_USERSPACE_OR_DEFERED_BEFORE_SETCANCELSTATE; \
    ); \
    pthread_setcancelstate(__libcwd_oldstate, NULL)
#define LIBCWD_ENABLE_CANCEL \
      LIBCWD_ENABLE_CANCEL_NO_BRACE; \
    }

// LIBCWD_DEFER_CANCEL / LIBCWD_RESTORE_CANCEL bracket a region in which
// cancellation is deferred (only honored at cancellation points).
#define LIBCWD_DEFER_CANCEL \
    { \
      LIBCWD_DEFER_CANCEL_NO_BRACE
#define LIBCWD_DEFER_CANCEL_NO_BRACE \
      int __libcwd_oldtype; \
      pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, &__libcwd_oldtype); \
      LibcwDebugThreads( ++__libcwd_tsd.cancel_explicitely_deferred )
#define LIBCWD_RESTORE_CANCEL_NO_BRACE \
    LibcwDebugThreads(\
      LIBCWD_ASSERT( __libcwd_tsd.cancel_explicitely_deferred > 0 ); \
      --__libcwd_tsd.cancel_explicitely_deferred; \
      LIBCWD_ASSERT_USERSPACE_OR_DEFERED_BEFORE_SETCANCELSTATE; \
    ); \
    pthread_setcanceltype(__libcwd_oldtype, NULL)
#define LIBCWD_RESTORE_CANCEL \
      LIBCWD_RESTORE_CANCEL_NO_BRACE; \
    }

// Combined "defer cancellation + install cleanup handler" and its inverse.
// On Linuxthreads the _np primitives do both operations atomically; the
// generic POSIX branch composes them from the macros above.
#if LIBCWD_USE_LINUXTHREADS
#define LIBCWD_DEFER_CLEANUP_PUSH(routine, arg) \
    pthread_cleanup_push_defer_np(reinterpret_cast<void(*)(void*)>(routine), reinterpret_cast<void*>(arg)); \
    LibcwDebugThreads( ++__libcwd_tsd.cancel_explicitely_deferred; ++__libcwd_tsd.cleanup_handler_installed )
#if CWDEBUG_ALLOC
#define LIBCWD_ASSERT_NONINTERNAL LIBCWD_ASSERT( !__libcwd_tsd.internal )
#else
#define LIBCWD_ASSERT_NONINTERNAL
#endif
#define LIBCWD_CLEANUP_POP_RESTORE(execute) \
    LibcwDebugThreads( --__libcwd_tsd.cleanup_handler_installed; \
	LIBCWD_ASSERT( __libcwd_tsd.cancel_explicitely_deferred > 0 ); \
	LIBCWD_ASSERT_NONINTERNAL; ); \
    pthread_cleanup_pop_restore_np(static_cast<int>(execute)); \
    LibcwDebugThreads( --__libcwd_tsd.cancel_explicitely_deferred; )
#else // !LIBCWD_USE_LINUXTHREADS
#define LIBCWD_DEFER_CLEANUP_PUSH(routine, arg) \
    LIBCWD_DEFER_CANCEL; \
    LibcwDebugThreads( ++__libcwd_tsd.cleanup_handler_installed ); \
    pthread_cleanup_push(reinterpret_cast<void(*)(void*)>(routine), reinterpret_cast<void*>(arg))
#define LIBCWD_CLEANUP_POP_RESTORE(execute) \
    LibcwDebugThreads( --__libcwd_tsd.cleanup_handler_installed ); \
    pthread_cleanup_pop(static_cast<int>(execute)); \
    LIBCWD_RESTORE_CANCEL
#endif // !LIBCWD_USE_LINUXTHREADS

// Push a cleanup handler and try/take the given mutex_tct instance; the
// matching POP_RESTORE passes __libcwd_lock_successful so the handler only
// unlocks what was actually locked.
#define LIBCWD_PUSH_DEFER_TRYLOCK_MUTEX(instance, unlock_routine) \
    LIBCWD_DEFER_CLEANUP_PUSH(static_cast<void (*)()>(unlock_routine), &::libcwd::_private_::mutex_tct<(instance)>::S_mutex); \
    bool __libcwd_lock_successful = ::libcwd::_private_::mutex_tct<(instance)>::try_lock()
#define LIBCWD_DEFER_PUSH_LOCKMUTEX(instance, unlock_routine) \
    LIBCWD_DEFER_CLEANUP_PUSH(static_cast<void (*)()>(unlock_routine), &::libcwd::_private_::mutex_tct<(instance)>::S_mutex); \
    ::libcwd::_private_::mutex_tct<(instance)>::lock(); \
    bool const __libcwd_lock_successful = true
#define LIBCWD_UNLOCKMUTEX_POP_RESTORE(instance) \
    LIBCWD_CLEANUP_POP_RESTORE(__libcwd_lock_successful)

#define LIBCWD_DEBUGDEBUG_ASSERT_CANCEL_DEFERRED \
    LibcwDebugThreads( \
	if (instance != static_tsd_instance) \
	{ \
	  /* When entering a critical area, make sure that we have explictely deferred cancellation of this */ \
	  /* thread (or disabled that) because when cancellation would happen in the middle of the critical */ \
	  /* area then the lock would stay locked. */ \
	  LIBCWD_ASSERT( __libcwd_tsd.cancel_explicitely_deferred || __libcwd_tsd.cancel_explicitely_disabled ); \
	} )
264 
// A process-wide mutex identified by a compile-time `instance' id.
// This class may not use system calls (it may not call malloc(3)): it is used
// from inside the memory-allocation bookkeeping itself.
// With Linuxthreads (and no thread-debugging) S_mutex is statically
// initialized and initialize() is a no-op; otherwise a lazy, idempotent
// runtime initialization is used.
template <int instance>
  class mutex_tct {
  public:
    static pthread_mutex_t S_mutex;     // The underlying pthread mutex.
#if !LIBCWD_USE_LINUXTHREADS || CWDEBUG_DEBUGT
  protected:
    static bool volatile S_initialized; // Set (only) after S_mutex is fully initialized.
    static void S_initialize();
#endif
  public:
    // Idempotent; must be called before first use unless a static initializer
    // is available (Linuxthreads without CWDEBUG_DEBUGT).
    static void initialize()
#if LIBCWD_USE_LINUXTHREADS && !CWDEBUG_DEBUGT
      { }
#else
      {
	if (S_initialized)	// Check if the static `S_mutex' already has been initialized.
	  return;		// No need to lock: `S_initialized' is only set after it is
				// really initialized.
	S_initialize();
      }
#endif
  public:
    // Non-blocking acquire.  Returns true when the lock was obtained.
    // Under CWDEBUG_DEBUG(T) also maintains instance_locked[]/locked_by[]
    // ownership bookkeeping used for deadlock detection.
    static bool try_lock()
    {
      LibcwDebugThreads( LIBCWD_ASSERT( S_initialized ) );
#if CWDEBUG_DEBUGT
      LIBCWD_TSD_DECLARATION;
#endif
      LIBCWD_DEBUGDEBUG_ASSERT_CANCEL_DEFERRED;
      LIBCWD_DEBUGDEBUGLOCK_CERR("Trying to lock mutex " << instance << " (" << (void*)&S_mutex << ") from " << __builtin_return_address(0) << " from " << __builtin_return_address(1));
      LIBCWD_DEBUGDEBUGLOCK_CERR("pthread_mutex_trylock(" << S_mutex << ").");
      bool success = (pthread_mutex_trylock(&S_mutex) == 0);
      LIBCWD_DEBUGDEBUGLOCK_CERR("Result = " << success << ". Mutex now " << S_mutex << ".");
#if CWDEBUG_DEBUG || CWDEBUG_DEBUGT
      if (success)
      {
#if CWDEBUG_DEBUGT
	_private_::test_for_deadlock(instance, __libcwd_tsd, __builtin_return_address(0));
#endif
	LIBCWD_DEBUGDEBUGLOCK_CERR("mutex_tct::try_lock(): instance_locked[" << instance << "] == " << instance_locked[instance] << "; incrementing it.");
	instance_locked[instance] += 1;
#if CWDEBUG_DEBUGT
	locked_by[instance] = pthread_self();
	locked_from[instance] = __builtin_return_address(0);
#endif
      }
#endif
      LibcwDebugThreads( if (success) { ++__libcwd_tsd.inside_critical_area; } );
      return success;
    }
    // Blocking acquire.  The CWDEBUG_DEBUGT branch records which lock this
    // thread is waiting for (waiting_for_lock) so test_for_deadlock() can
    // detect lock-order cycles; the TSD reference is deliberately left
    // dangling (tsd_ptr == 0) for the static_tsd_instance, which never uses it.
    static void lock()
    {
      LibcwDebugThreads( LIBCWD_ASSERT( S_initialized ) );
#if CWDEBUG_DEBUGT
      TSD_st* tsd_ptr = 0;
      if (instance != static_tsd_instance)
      {
	LIBCWD_TSD_DECLARATION;
	tsd_ptr = &__libcwd_tsd;
      }
      TSD_st& __libcwd_tsd(*tsd_ptr);
#endif
      LIBCWD_DEBUGDEBUG_ASSERT_CANCEL_DEFERRED;
      LibcwDebugThreads( if (instance != static_tsd_instance) { ++__libcwd_tsd.inside_critical_area; } );
      LIBCWD_DEBUGDEBUGLOCK_CERR("locking mutex " << instance << " (" << (void*)&S_mutex << ") from " << __builtin_return_address(0) << " from " << __builtin_return_address(1));
#if CWDEBUG_DEBUGT
      // The excluded range [2*reserved_instance_low, 3*reserved_instance_low)
      // holds internal helper instances that skip the deadlock bookkeeping.
      if (instance != static_tsd_instance && !(instance >= 2 * reserved_instance_low && instance < 3 * reserved_instance_low))
      {
	__libcwd_tsd.waiting_for_lock = instance;
	LIBCWD_DEBUGDEBUGLOCK_CERR("pthread_mutex_lock(" << S_mutex << ").");
	int res = pthread_mutex_lock(&S_mutex);
	LIBCWD_DEBUGDEBUGLOCK_CERR("Result = " << res << ". Mutex now " << S_mutex << ".");
#if LIBCWD_DEBUGDEBUGRWLOCK
	LIBCWD_ASSERT( res == 0 || res == EDEADLK );
#endif
	__libcwd_tsd.waiting_for_lock = 0;
	_private_::test_for_deadlock(instance, __libcwd_tsd, __builtin_return_address(0));
	LIBCWD_ASSERT( res == 0 );
      }
      else
      {
	LIBCWD_DEBUGDEBUGLOCK_CERR("pthread_mutex_lock(" << S_mutex << ").");
	int res = pthread_mutex_lock(&S_mutex);
	LIBCWD_DEBUGDEBUGLOCK_CERR("Result = " << res << ". Mutex now " << S_mutex << ".");
	LIBCWD_ASSERT( res == 0 );
      }
#else // !CWDEBUG_DEBUGT
      pthread_mutex_lock(&S_mutex);
#endif // !CWDEBUG_DEBUGT
      LIBCWD_DEBUGDEBUGLOCK_CERR("Lock " << instance << " obtained (" << (void*)&S_mutex << ").");
#if CWDEBUG_DEBUG || CWDEBUG_DEBUGT
      LIBCWD_DEBUGDEBUGLOCK_CERR("mutex_tct::lock(): instance_locked[" << instance << "] == " << instance_locked[instance] << "; incrementing it.");
      instance_locked[instance] += 1;
#if CWDEBUG_DEBUGT
      if (locked_by[instance] != 0 && locked_by[instance] != pthread_self())
      {
	LIBCWD_DEBUGDEBUGLOCK_CERR("mutex " << instance << " (" << (void*)&S_mutex << ") is already set by another thread (" << locked_by[instance] << ")!");
	core_dump();
      }
      locked_by[instance] = pthread_self();
      locked_from[instance] = __builtin_return_address(0);
#endif
#endif
    }
    // Release the mutex.  Debug builds verify this thread is the recorded
    // owner (core_dump() on violation) and undo the ownership bookkeeping
    // before the actual pthread_mutex_unlock().
    static void unlock()
    {
#if CWDEBUG_DEBUGT
      TSD_st* tsd_ptr = 0;
      if (instance != static_tsd_instance)
      {
	LIBCWD_TSD_DECLARATION;
	tsd_ptr = &__libcwd_tsd;
      }
      TSD_st& __libcwd_tsd(*tsd_ptr);
#endif
      LIBCWD_DEBUGDEBUG_ASSERT_CANCEL_DEFERRED;
#if CWDEBUG_DEBUG || CWDEBUG_DEBUGT
      LIBCWD_DEBUGDEBUGLOCK_CERR("mutex_tct::unlock(): instance_locked[" << instance << "] == " << instance_locked[instance] << "; decrementing it.");
      LIBCWD_ASSERT( instance_locked[instance] > 0 );
#if CWDEBUG_DEBUGT
      if (locked_by[instance] != pthread_self())
      {
	LIBCWD_DEBUGDEBUGLOCK_CERR("unlocking instance " << instance << " (" << (void*)&S_mutex << ") failed: locked_by[" << instance << "] == " << locked_by[instance] << ".");
	core_dump();
      }
#endif
      instance_locked[instance] -= 1;
#if CWDEBUG_DEBUGT
      if (instance_locked[instance] == 0)
      {
	locked_by[instance] = 0;
	LIBCWD_DEBUGDEBUGLOCK_CERR("mutex_tct::unlock(): locked_by[" << instance << "] was reset.");
      }
      else LIBCWD_DEBUGDEBUGLOCK_CERR("mutex_tct::unlock(): locked_by[" << instance << "] was not reset, it still is " << locked_by[instance] << ".");
#endif
#endif
      LIBCWD_DEBUGDEBUGLOCK_CERR("unlocking mutex " << instance << " (" << (void*)&S_mutex << ").");
      LIBCWD_DEBUGDEBUGLOCK_CERR("pthread_mutex_unlock(" << S_mutex << ").");
#if CWDEBUG_DEBUGT
      int res =
#endif
      pthread_mutex_unlock(&S_mutex);
#if CWDEBUG_DEBUGT
      LIBCWD_DEBUGDEBUGLOCK_CERR("Result = " << res << ". Mutex now " << S_mutex << ".");
      LIBCWD_ASSERT(res == 0);
#endif
      LIBCWD_DEBUGDEBUGLOCK_CERR("Lock " << instance << " released (" << (void*)&S_mutex << ").");
      LibcwDebugThreads( if (instance != static_tsd_instance) { --__libcwd_tsd.inside_critical_area; } );
    }
    // This is used as cleanup handler with LIBCWD_DEFER_CLEANUP_PUSH.
    static void cleanup(void*);
  };
417 
418 #if !LIBCWD_USE_LINUXTHREADS || CWDEBUG_DEBUGT
// False until S_initialize() has completed; written only after the mutex is
// fully usable, so readers of `true' never see a half-initialized S_mutex.
template <int instance>
  bool volatile mutex_tct<instance>::S_initialized = false;
421 
// One-time initialization of S_mutex (only compiled when no usable static
// initializer exists, or when thread-debugging needs runtime setup).
// The bootstrap instance (mutex_initialization_instance) is initialized
// directly; all other instances first ensure the bootstrap mutex exists.
// NOTE(review): the LIBCWD_DEFER_PUSH_LOCKMUTEX/UNLOCKMUTEX_POP_RESTORE pair
// guarding the double-check is commented out in this source, so the
// "now that we are locked" re-check below runs unlocked — confirm this is
// intentional (e.g. guaranteed single-threaded first call) before relying on it.
template <int instance>
  void mutex_tct<instance>::S_initialize()
  {
    if (instance == mutex_initialization_instance)	// Specialization.
    {
#if !LIBCWD_USE_LINUXTHREADS
      pthread_mutexattr_t mutex_attr;
      pthread_mutexattr_init(&mutex_attr);
#if CWDEBUG_DEBUGT
      // Error-checking mutexes let debug builds detect relock/foreign-unlock.
      pthread_mutexattr_settype(&mutex_attr, PTHREAD_MUTEX_ERRORCHECK);
#else
      pthread_mutexattr_settype(&mutex_attr, PTHREAD_MUTEX_NORMAL);
#endif
      pthread_mutex_init(&S_mutex, &mutex_attr);
      pthread_mutexattr_destroy(&mutex_attr);
#endif // !LIBCWD_USE_LINUXTHREADS
      S_initialized = true;
    }
    else	// General case.
    {
      mutex_tct<mutex_initialization_instance>::initialize();
      /* LIBCWD_DEFER_PUSH_LOCKMUTEX(mutex_initialization_instance, mutex_tct<mutex_initialization_instance>::unlock); */
      if (!S_initialized)	// Check again now that we are locked.
      {
#if !LIBCWD_USE_LINUXTHREADS
	pthread_mutexattr_t mutex_attr;
	pthread_mutexattr_init(&mutex_attr);
	// Low instance ids (< end_recursive_types) are recursive mutexes.
	if (instance < end_recursive_types)
	  pthread_mutexattr_settype(&mutex_attr, PTHREAD_MUTEX_RECURSIVE);
	else
	{
#if CWDEBUG_DEBUGT
	  pthread_mutexattr_settype(&mutex_attr, PTHREAD_MUTEX_ERRORCHECK);
#else
	  pthread_mutexattr_settype(&mutex_attr, PTHREAD_MUTEX_NORMAL);
#endif
	}
	pthread_mutex_init(&S_mutex, &mutex_attr);
	pthread_mutexattr_destroy(&mutex_attr);
#endif // !LIBCWD_USE_LINUXTHREADS
	S_initialized = true;
      }
      /* LIBCWD_UNLOCKMUTEX_POP_RESTORE(mutex_initialization_instance); */
    }
  }
467 #endif // !LIBCWD_USE_LINUXTHREADS || CWDEBUG_DEBUGT
468 
// Definition of the per-instance mutex.  With Linuxthreads a non-portable
// static initializer is available (error-checking under CWDEBUG_DEBUGT,
// adaptive otherwise); with generic POSIX threads the mutex is left
// default-constructed and set up at runtime by S_initialize().
template <int instance>
  pthread_mutex_t mutex_tct<instance>::S_mutex
#if LIBCWD_USE_LINUXTHREADS
      =
#if CWDEBUG_DEBUGT
      PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP;
#else
      PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP;
#endif
#else // !LIBCWD_USE_LINUXTHREADS
      ;
#endif // !LIBCWD_USE_LINUXTHREADS
481 
// Cancellation cleanup handler (installed via LIBCWD_DEFER_CLEANUP_PUSH):
// releases the mutex when the owning thread is cancelled inside a critical
// area, so the lock cannot stay held forever.  The void* argument is unused.
template <int instance>
  void mutex_tct<instance>::cleanup(void*)
  {
    unlock();
  }
487 
488 //========================================================================================================================================17"
489 // class cond_tct
490 
// A condition variable bound to the mutex of mutex_tct<instance> (its base).
// wait() must be called with the mutex held; it temporarily releases the
// mutex while blocked and re-acquires it before returning, mirroring that in
// the debug-build ownership bookkeeping (instance_locked[]/locked_by[]).
template <int instance>
  class cond_tct : public mutex_tct<instance> {
  private:
    static pthread_cond_t S_condition;
#if CWDEBUG_DEBUGT || !LIBCWD_USE_LINUXTHREADS
    static bool volatile S_initialized;  // Hides mutex_tct<instance>::S_initialized.
  private:
    static void S_initialize();
#endif
  public:
    // Idempotent lazy initialization (no-op when a static initializer exists).
    static void initialize()
#if CWDEBUG_DEBUGT || !LIBCWD_USE_LINUXTHREADS
    {
      if (S_initialized)
	return;
      S_initialize();
    }
#else
    { }
#endif
  public:
    // Block until signalled.  Caller must hold the mutex; the debug
    // bookkeeping is decremented before the wait (the mutex is released
    // inside pthread_cond_wait) and re-incremented after re-acquisition.
    void wait() {
#if CWDEBUG_DEBUG || CWDEBUG_DEBUGT
      LIBCWD_DEBUGDEBUGLOCK_CERR("cond_tct::wait(): instance_locked[" << instance << "] == " << instance_locked[instance] << "; decrementing it.");
      LIBCWD_ASSERT( instance_locked[instance] > 0 );
#if CWDEBUG_DEBUGT
      if (locked_by[instance] != pthread_self())
      {
	LIBCWD_DEBUGDEBUGLOCK_CERR("unlocking instance " << instance << " (" << (void*)&this->S_mutex << ") failed: locked_by[" << instance << "] == " << locked_by[instance] << ".");
	core_dump();
      }
#endif
      instance_locked[instance] -= 1;
#if CWDEBUG_DEBUGT
      if (instance_locked[instance] == 0)
      {
	locked_by[instance] = 0;
	LIBCWD_DEBUGDEBUGLOCK_CERR("cond_tct::wait(): locked_by[" << instance << "] was reset.");
      }
      else LIBCWD_DEBUGDEBUGLOCK_CERR("cond_tct::wait(): locked_by[" << instance << "] was not reset, it still is " << locked_by[instance] << ".");
#endif
#endif
      LIBCWD_DEBUGDEBUGLOCK_CERR("unlocking mutex " << instance << " (" << (void*)&this->S_mutex << ").");
      LIBCWD_DEBUGDEBUGLOCK_CERR("pthread_cond_wait(" << (void*)&S_condition << ", " << this->S_mutex << ").");
#if CWDEBUG_DEBUGT
      int res =
#endif
      pthread_cond_wait(&S_condition, &this->S_mutex);
#if CWDEBUG_DEBUGT
      LIBCWD_DEBUGDEBUGLOCK_CERR("Result = " << res << ". Mutex now " << this->S_mutex << ".");
      LIBCWD_ASSERT(res == 0);
#endif
      LIBCWD_DEBUGDEBUGLOCK_CERR("Lock " << instance << " obtained (" << (void*)&this->S_mutex << ").");
#if CWDEBUG_DEBUG || CWDEBUG_DEBUGT
      LIBCWD_DEBUGDEBUGLOCK_CERR("cond_tct::wait(): instance_locked[" << instance << "] == " << instance_locked[instance] << "; incrementing it.");
      instance_locked[instance] += 1;
#if CWDEBUG_DEBUGT
      if (locked_by[instance] != 0 && locked_by[instance] != pthread_self())
      {
	LIBCWD_DEBUGDEBUGLOCK_CERR("mutex " << instance << " (" << (void*)&this->S_mutex << ") is already set by another thread (" << locked_by[instance] << ")!");
	core_dump();
      }
      locked_by[instance] = pthread_self();
      locked_from[instance] = __builtin_return_address(0);
#endif
#endif
    }
    // Wake one / all threads blocked in wait().
    void signal() { pthread_cond_signal(&S_condition); }
    void broadcast() { pthread_cond_broadcast(&S_condition); }
  };
561 
562 #if CWDEBUG_DEBUGT || !LIBCWD_USE_LINUXTHREADS
563 template <int instance>
564  void cond_tct<instance>::S_initialize()
565  {
566 #if !LIBCWD_USE_LINUXTHREADS
567  mutex_tct<mutex_initialization_instance>::initialize();
568  LIBCWD_DEFER_PUSH_LOCKMUTEX(mutex_initialization_instance, mutex_tct<mutex_initialization_instance>::unlock);
569  if (!S_initialized) // Check again now that we are locked.
570  {
571  pthread_cond_init(&S_condition, NULL);
572  }
573  LIBCWD_UNLOCKMUTEX_POP_RESTORE(mutex_initialization_instance);
574 #endif
575  mutex_tct<instance>::S_initialize();
576  }
577 #endif // !LIBCWD_USE_LINUXTHREADS
578 
#if CWDEBUG_DEBUGT || !LIBCWD_USE_LINUXTHREADS
// False until cond_tct<instance>::S_initialize() has run; distinct from the
// S_initialized member it hides in the mutex_tct<instance> base class.
template <int instance>
  bool volatile cond_tct<instance>::S_initialized = false;
#endif
583 
// Definition of the per-instance condition variable: statically initialized
// with Linuxthreads, otherwise set up at runtime by S_initialize().
template <int instance>
  pthread_cond_t cond_tct<instance>::S_condition
#if LIBCWD_USE_LINUXTHREADS
      = PTHREAD_COND_INITIALIZER;
#else // !LIBCWD_USE_LINUXTHREADS
      ;
#endif // !LIBCWD_USE_LINUXTHREADS
591 
592 #endif // LIBCWD_USE_POSIX_THREADS || LIBCWD_USE_LINUXTHREADS
593 
594 //========================================================================================================================================17"
595 // class rwlock_tct
596 
597 //
598 // template <int instance> This class may not use system calls (it may not call malloc(3)).
599 // class rwlock_tct;
600 //
601 // Read/write mutex lock implementation. Readers can set arbitrary number of locks, only locking
602 // writers. Writers lock readers and writers.
603 //
604 // Examples.
605 //
606 // rwlock_tct<instance_id_const>::initialize();
607 // if (rwlock_tct<instance_id_const>::tryrdlock()) ...
608 // if (rwlock_tct<instance_id_const>::trywrlock()) ...
609 // rwlock_tct<instance_id_const>::rdlock(); // Readers lock.
610 // rwlock_tct<instance_id_const>::rdunlock();
611 // rwlock_tct<instance_id_const>::wrlock(); // Writers lock.
612 // rwlock_tct<instance_id_const>::wrunlock();
613 // rwlock_tct<instance_id_const>::rd2wrlock(); // Convert read lock into write lock.
614 // rwlock_tct<instance_id_const>::wr2rdlock(); // Convert write lock into read lock.
615 //
616 
617 template <int instance>
618  class rwlock_tct {
619  private:
620  static int const readers_instance = instance + reserved_instance_low;
621  static int const holders_instance = instance + 2 * reserved_instance_low;
622  typedef cond_tct<holders_instance> cond_t;
623  static cond_t S_no_holders_condition;
624  static int S_holders_count; // Number of readers or -1 if a writer locked this object.
625  static bool volatile S_writer_is_waiting;
626  static pthread_t S_writer_id;
627 #if CWDEBUG_DEBUGT || !LIBCWD_USE_LINUXTHREADS
628  static bool S_initialized; // Set when initialized.
629 #endif
630  public:
  // Idempotent initialization of the rwlock's two building blocks: the
  // readers-priority mutex and the no-holders condition variable.  A no-op
  // (beyond the guard) when static initializers make runtime setup unnecessary.
  static void initialize()
  {
#if CWDEBUG_DEBUGT || !LIBCWD_USE_LINUXTHREADS
    if (S_initialized)
      return;
    LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": Calling initialize() instance " << instance);
    mutex_tct<readers_instance>::initialize();
    S_no_holders_condition.initialize();
    LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": Leaving initialize() instance " << instance);
    S_initialized = true;
#endif
  }
  // Non-blocking read lock.  Returns true on success.  A thread that already
  // holds the write lock (recursive instances only) succeeds immediately
  // without touching the counters.  A waiting writer makes this fail, giving
  // writers priority over new readers.
  static bool tryrdlock()
  {
#if CWDEBUG_DEBUGT
    LIBCWD_TSD_DECLARATION;
#endif
    LibcwDebugThreads( LIBCWD_ASSERT( S_initialized ) );
    LIBCWD_DEBUGDEBUG_ASSERT_CANCEL_DEFERRED;
    LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": Calling rwlock_tct<" << instance << ">::tryrdlock()");
    if (instance < end_recursive_types && pthread_equal(S_writer_id, pthread_self()))
    {
      LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": Leaving rwlock_tct<" << instance << ">::tryrdlock() (skipped: thread has write lock)");
      return true;				// No error checking is done.
    }
    // Give a writer a higher priority (kinda fuzzy).
    if (S_writer_is_waiting || !S_no_holders_condition.try_lock())
      return false;
    bool success = (S_holders_count != -1);	// -1 means a writer holds the lock.
    if (success)
      ++S_holders_count;			// Add one reader.
    S_no_holders_condition.unlock();
    // Debug bookkeeping: a thread may hold at most two concurrent read locks
    // per instance (rdlocked_by1/rdlocked_by2); a third is a fatal error.
    LibcwDebugThreads(
	if (success)
	{
	  ++__libcwd_tsd.inside_critical_area;
	  _private_::test_for_deadlock(instance, __libcwd_tsd, __builtin_return_address(0));
	  __libcwd_tsd.instance_rdlocked[instance] += 1;
	  if (__libcwd_tsd.instance_rdlocked[instance] == 1)
	  {
	    __libcwd_tsd.rdlocked_by1[instance] = pthread_self();
	    __libcwd_tsd.rdlocked_from1[instance] = __builtin_return_address(0);
	  }
	  else if (__libcwd_tsd.instance_rdlocked[instance] == 2)
	  {
	    __libcwd_tsd.rdlocked_by2[instance] = pthread_self();
	    __libcwd_tsd.rdlocked_from2[instance] = __builtin_return_address(0);
	  }
	  else
	    core_dump();
	}
    );
    LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": Leaving rwlock_tct<" << instance << ">::tryrdlock()");
    return success;
  }
  // Non-blocking write lock.  Succeeds only when the readers mutex, the
  // holders condition mutex, and a zero holders count can all be obtained
  // without blocking; on success marks the lock writer-held (-1) and records
  // the writer's thread id.  All intermediate locks are released either way.
  static bool trywrlock()
  {
    LibcwDebugThreads( LIBCWD_ASSERT( S_initialized ) );
#if CWDEBUG_DEBUGT
    LIBCWD_TSD_DECLARATION;
#endif
    LIBCWD_DEBUGDEBUG_ASSERT_CANCEL_DEFERRED;
    LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": Calling rwlock_tct<" << instance << ">::trywrlock()");
    bool success;
    if ((success = mutex_tct<readers_instance>::try_lock()))
    {
      S_writer_is_waiting = true;		// Briefly blocks new readers while we try.
      if ((success = S_no_holders_condition.try_lock()))
      {
	if ((success = (S_holders_count == 0)))
	{
	  S_holders_count = -1;			// Mark that we have a writer.
	  if (instance < end_recursive_types)
	    S_writer_id = pthread_self();
#if CWDEBUG_DEBUG || CWDEBUG_DEBUGT
#if CWDEBUG_DEBUGT
	  _private_::test_for_deadlock(instance, __libcwd_tsd, __builtin_return_address(0));
#endif
	  LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": rwlock_tct::trywrlock(): instance_locked[" << instance << "] == " << instance_locked[instance] << "; incrementing it.");
	  instance_locked[instance] += 1;
#if CWDEBUG_DEBUGT
	  locked_by[instance] = pthread_self();
	  locked_from[instance] = __builtin_return_address(0);
#endif
#endif
	}
	S_no_holders_condition.unlock();
      }
      S_writer_is_waiting = false;
      mutex_tct<readers_instance>::unlock();
    }
    LibcwDebugThreads( if (success) { ++__libcwd_tsd.inside_critical_area; } );
    LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": Leaving rwlock_tct<" << instance << ">::trywrlock()");
    return success;
  }
  // Blocking read lock.  A thread already holding the write lock (recursive
  // instances) returns immediately.  Normal-priority readers yield to a
  // waiting writer by queueing on the readers mutex first; high_priority
  // readers bypass that (used to avoid a specific deadlock, see below).
  static void rdlock(bool high_priority = false)
  {
    LibcwDebugThreads( LIBCWD_ASSERT( S_initialized ) );
#if CWDEBUG_DEBUGT
    LIBCWD_TSD_DECLARATION;
#endif
    LIBCWD_DEBUGDEBUG_ASSERT_CANCEL_DEFERRED;
    LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": Calling rwlock_tct<" << instance << ">::rdlock()");
    if (instance < end_recursive_types && pthread_equal(S_writer_id, pthread_self()))
    {
      LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": Leaving rwlock_tct<" << instance << ">::rdlock() (skipped: thread has write lock)");
      return;					// No error checking is done.
    }
    // Give a writer a higher priority (kinda fuzzy).
    if (S_writer_is_waiting)			// If there is a writer interested,
    {
      if (!high_priority)
      {
	mutex_tct<readers_instance>::lock();	// then give it precedence and wait here.
	mutex_tct<readers_instance>::unlock();
      }
    }
#if CWDEBUG_DEBUGT
    __libcwd_tsd.waiting_for_rdlock = instance;
#endif
    S_no_holders_condition.lock();
    while (S_holders_count == -1)		// Writer locked it?
      S_no_holders_condition.wait();		// Wait for writer to finish.
#if CWDEBUG_DEBUGT
    __libcwd_tsd.waiting_for_rdlock = 0;
#endif
    ++S_holders_count;				// Add one reader.
    S_no_holders_condition.unlock();
    LibcwDebugThreads(
	++__libcwd_tsd.inside_critical_area;
	// Thread A: rdlock<1> ... mutex<2>
	// Thread B: mutex<2> ... rdlock<1>
	//                        ^--- current program counter.
	// can still lead to a deadlock when a third thread is trying to get the write lock
	// because trying to acquire a write lock immedeately blocks new read locks.
	// However, trying to acquire a write lock does not block high priority read locks,
	// therefore the following is allowed:
	// Thread A: rdlock<1> ... mutex<2>
	// Thread B: mutex<2> ... high priority rdlock<1>
	// provided that the write lock wrlock<1> is never used in combination with mutex<2>.
	// In order to take this into account, we need to pass the information that this is
	// a read lock to the test function.
	_private_::test_for_deadlock(instance + (high_priority ? high_priority_read_lock_offset : read_lock_offset), __libcwd_tsd, __builtin_return_address(0));
	// At most two concurrent read locks per instance per thread are tracked.
	__libcwd_tsd.instance_rdlocked[instance] += 1;
	if (__libcwd_tsd.instance_rdlocked[instance] == 1)
	{
	  __libcwd_tsd.rdlocked_by1[instance] = pthread_self();
	  __libcwd_tsd.rdlocked_from1[instance] = __builtin_return_address(0);
	}
	else if (__libcwd_tsd.instance_rdlocked[instance] == 2)
	{
	  __libcwd_tsd.rdlocked_by2[instance] = pthread_self();
	  __libcwd_tsd.rdlocked_from2[instance] = __builtin_return_address(0);
	}
	else
	  core_dump();
    );
    LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": Leaving rwlock_tct<" << instance << ">::rdlock()");
  }
  // Release one read lock.  A no-op for a thread that holds the write lock
  // (recursive instances).  The last reader out signals the no-holders
  // condition so a blocked writer can proceed.
  static void rdunlock()
  {
#if CWDEBUG_DEBUGT
    LIBCWD_TSD_DECLARATION;
#endif
    LIBCWD_DEBUGDEBUG_ASSERT_CANCEL_DEFERRED;
    LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": Calling rwlock_tct<" << instance << ">::rdunlock()");
    if (instance < end_recursive_types && pthread_equal(S_writer_id, pthread_self()))
    {
      LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": Leaving rwlock_tct<" << instance << ">::rdunlock() (skipped: thread has write lock)");
      return;					// No error checking is done.
    }
    LibcwDebugThreads( --__libcwd_tsd.inside_critical_area );
    S_no_holders_condition.lock();
    if (--S_holders_count == 0)			// Was this the last reader?
      S_no_holders_condition.signal();		// Tell waiting threads.
    S_no_holders_condition.unlock();
    // Debug bookkeeping: release slot 2 first when doubly rdlocked (LIFO).
    LibcwDebugThreads(
	if (__libcwd_tsd.instance_rdlocked[instance] == 2)
	  __libcwd_tsd.rdlocked_by2[instance] = 0;
	else
	  __libcwd_tsd.rdlocked_by1[instance] = 0;
	__libcwd_tsd.instance_rdlocked[instance] -= 1;
    );
    LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": Leaving rwlock_tct<" << instance << ">::rdunlock()");
  }
    // Acquire the write lock.
    //
    // Protocol: first take mutex_tct<readers_instance> and raise
    // S_writer_is_waiting so that no *new* readers can enter; then wait on
    // S_no_holders_condition until every current holder (reader or writer)
    // has released the lock; finally mark ourselves as the writer by setting
    // S_holders_count to -1.  The readers mutex is released again before we
    // proceed, so blocked readers queue up behind our write lock.
    static void wrlock()
    {
      LibcwDebugThreads( LIBCWD_ASSERT( S_initialized ) );
#if CWDEBUG_DEBUGT
      LIBCWD_TSD_DECLARATION;
#endif
      LIBCWD_DEBUGDEBUG_ASSERT_CANCEL_DEFERRED;
      LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": Calling rwlock_tct<" << instance << ">::wrlock()");
      mutex_tct<readers_instance>::lock();	// Block new readers,
      S_writer_is_waiting = true;		// from this moment on.
#if CWDEBUG_DEBUGT
      // Record which lock we are blocked on, for deadlock diagnosis.
      __libcwd_tsd.waiting_for_lock = instance;
#endif
      S_no_holders_condition.lock();
      while (S_holders_count != 0)		// Other readers or writers have this lock?
        S_no_holders_condition.wait();		// Wait until all current holders are done.
#if CWDEBUG_DEBUGT
      __libcwd_tsd.waiting_for_lock = 0;
#endif
      S_writer_is_waiting = false;		// Stop checking the lock for new readers.
      mutex_tct<readers_instance>::unlock();	// Release blocked readers.
      S_holders_count = -1;			// Mark that we have a writer.
      S_no_holders_condition.unlock();
      // For recursive-type instances remember the owner so that nested
      // rdlock()/rdunlock() calls by this thread become no-ops.
      if (instance < end_recursive_types)
        S_writer_id = pthread_self();
      LibcwDebugThreads( ++__libcwd_tsd.inside_critical_area );
#if CWDEBUG_DEBUG || CWDEBUG_DEBUGT
#if CWDEBUG_DEBUGT
      _private_::test_for_deadlock(instance, __libcwd_tsd, __builtin_return_address(0));
#endif
      LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": rwlock_tct::wrlock(): instance_locked[" << instance << "] == " << instance_locked[instance] << "; incrementing it.");
      // Debug bookkeeping: who holds this instance, and from where.
      instance_locked[instance] += 1;
#if CWDEBUG_DEBUGT
      locked_by[instance] = pthread_self();
      locked_from[instance] = __builtin_return_address(0);
#endif
#endif
      LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": Leaving rwlock_tct<" << instance << ">::wrlock()");
    }
855  static void wrunlock()
856  {
857 #if CWDEBUG_DEBUGT
858  LIBCWD_TSD_DECLARATION;
859 #endif
860  LIBCWD_DEBUGDEBUG_ASSERT_CANCEL_DEFERRED;
861 #if CWDEBUG_DEBUG || CWDEBUG_DEBUGT
862  LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": rwlock_tct::wrunlock(): instance_locked[" << instance << "] == " << instance_locked[instance] << "; decrementing it.");
863 #if CWDEBUG_DEBUGT
864  LIBCWD_ASSERT( instance_locked[instance] > 0 && locked_by[instance] == pthread_self() );
865 #endif
866  instance_locked[instance] -= 1;
867 #endif
868 #if CWDEBUG_DEBUGT
869  if (instance > end_recursive_types || instance_locked[instance] == 0)
870  {
871  locked_by[instance] = 0;
872  LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": rwlock_tct::unlock(): locked_by[" << instance << "] was reset.");
873  }
874  else
875  {
876  LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": rwlock_tct::wrunlock(): locked_by[" << instance << "] was not reset, it still is " << locked_by[instance] << ".");
877  }
878 #endif
879  LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": Calling rwlock_tct<" << instance << ">::wrunlock()");
880  LibcwDebugThreads( --__libcwd_tsd.inside_critical_area) ;
881  if (instance < end_recursive_types)
882  S_writer_id = 0;
883  S_no_holders_condition.lock();
884  S_holders_count = 0; // We have no writer anymore.
885  S_no_holders_condition.signal(); // No readers and no writers left.
886  S_no_holders_condition.unlock();
887  LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": Leaving rwlock_tct<" << instance << ">::wrunlock()");
888  }
    // Upgrade the read lock held by the calling thread to a write lock.
    //
    // We first give up our own read share (the decrement below); if other
    // readers remain we block new readers via mutex_tct<readers_instance>
    // and wait until the count drops to zero, exactly as wrlock() does.
    // NOTE(review): if two threads holding read locks call rd2wrlock()
    // concurrently, the second one can set S_holders_count to -1 while the
    // first is still waiting for it to become 0 — presumably callers are
    // required not to upgrade concurrently; confirm against call sites.
    static void rd2wrlock()
    {
#if CWDEBUG_DEBUGT
      LIBCWD_TSD_DECLARATION;
#endif
      LIBCWD_DEBUGDEBUG_ASSERT_CANCEL_DEFERRED;
      LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": Calling rwlock_tct<" << instance << ">::rd2wrlock()");
#if CWDEBUG_DEBUGT
      // Record which lock we are blocked on, for deadlock diagnosis.
      __libcwd_tsd.waiting_for_lock = instance;
#endif
      S_no_holders_condition.lock();
      if (--S_holders_count > 0)		// Drop our read share; other readers left?
      {
        mutex_tct<readers_instance>::lock();	// Block new readers.
        S_writer_is_waiting = true;
        while (S_holders_count != 0)
          S_no_holders_condition.wait();
        S_writer_is_waiting = false;
        mutex_tct<readers_instance>::unlock();	// Release blocked readers.
      }
#if CWDEBUG_DEBUGT
      __libcwd_tsd.waiting_for_lock = 0;
#endif
      S_holders_count = -1;			// We are a writer now.
      S_no_holders_condition.unlock();
      if (instance < end_recursive_types)
        S_writer_id = pthread_self();
#if CWDEBUG_DEBUG || CWDEBUG_DEBUGT
#if CWDEBUG_DEBUGT
      _private_::test_for_deadlock(instance, __libcwd_tsd, __builtin_return_address(0));
#endif
      LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": rwlock_tct::rd2wrlock(): instance_locked[" << instance << "] == " << instance_locked[instance] << "; incrementing it.");
      // Debug bookkeeping: who holds this instance, and from where.
      instance_locked[instance] += 1;
#if CWDEBUG_DEBUGT
      locked_by[instance] = pthread_self();
      locked_from[instance] = __builtin_return_address(0);
#endif
#endif
      // Debug-only: we no longer hold a read lock, so undo the per-thread
      // read-lock bookkeeping that rdlock() recorded (slot 2 first).
      LibcwDebugThreads(
          if (__libcwd_tsd.instance_rdlocked[instance] == 2)
            __libcwd_tsd.rdlocked_by2[instance] = 0;
          else
            __libcwd_tsd.rdlocked_by1[instance] = 0;
          __libcwd_tsd.instance_rdlocked[instance] -= 1;
      );
      LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": Leaving rwlock_tct<" << instance << ">::rd2wrlock()");
    }
    // Downgrade the write lock held by the calling thread to a read lock.
    //
    // The write-lock debug bookkeeping is undone first, then the holders
    // count is switched from -1 (writer) to 1 (one reader) under the
    // condition's mutex and waiting threads are signalled.
    static void wr2rdlock()
    {
#if CWDEBUG_DEBUGT
      LIBCWD_TSD_DECLARATION;
#endif
      LIBCWD_DEBUGDEBUG_ASSERT_CANCEL_DEFERRED;
#if CWDEBUG_DEBUG || CWDEBUG_DEBUGT
      LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": rwlock_tct::wr2rdlock(): instance_locked[" << instance << "] == " << instance_locked[instance] << "; decrementing it.");
#if CWDEBUG_DEBUGT
      // We must actually hold the write lock that we are about to downgrade.
      LIBCWD_ASSERT( instance_locked[instance] > 0 && locked_by[instance] == pthread_self() );
#endif
      instance_locked[instance] -= 1;
#if CWDEBUG_DEBUGT
      if (instance > end_recursive_types || instance_locked[instance] == 0)
      {
        locked_by[instance] = 0;
        LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": rwlock_tct::wr2rdlock(): locked_by[" << instance << "] was reset.");
      }
      else
      {
        LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": rwlock_tct::wr2rdlock(): locked_by[" << instance << "] was not reset, it still is " << locked_by[instance] << ".");
      }
#endif
#endif
      LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": Calling rwlock_tct<" << instance << ">::wr2rdlock()");
      if (instance < end_recursive_types)
        S_writer_id = 0;
      S_no_holders_condition.lock();
      S_holders_count = 1;			// Turn writer into a reader (atomic operation).
      S_no_holders_condition.signal();
      S_no_holders_condition.unlock();
      // Debug-only: register ourselves as a reader, mirroring rdlock()'s
      // bookkeeping.  More than two read locks per thread per instance is a
      // fatal error (core_dump), as is an out-of-range instance.
      LibcwDebugThreads(
          _private_::test_for_deadlock(instance, __libcwd_tsd, __builtin_return_address(0));
          if (instance >= instance_rdlocked_size)
            core_dump();
          __libcwd_tsd.instance_rdlocked[instance] += 1;
          if (__libcwd_tsd.instance_rdlocked[instance] == 1)
          {
            __libcwd_tsd.rdlocked_by1[instance] = pthread_self();
            __libcwd_tsd.rdlocked_from1[instance] = __builtin_return_address(0);
          }
          else if (__libcwd_tsd.instance_rdlocked[instance] == 2)
          {
            __libcwd_tsd.rdlocked_by2[instance] = pthread_self();
            __libcwd_tsd.rdlocked_from2[instance] = __builtin_return_address(0);
          }
          else
            core_dump();
      );
      LIBCWD_DEBUGDEBUGRWLOCK_CERR(pthread_self() << ": Leaving rwlock_tct<" << instance << ">::wr2rdlock()");
    }
987  // This is used as cleanup handler with LIBCWD_DEFER_CLEANUP_PUSH.
988  static void cleanup(void*);
989  };
990 
991 template <int instance>
992  int rwlock_tct<instance>::S_holders_count = 0;
993 
994 template <int instance>
995  bool volatile rwlock_tct<instance>::S_writer_is_waiting = 0;
996 
997 template <int instance>
998  pthread_t rwlock_tct<instance>::S_writer_id = 0;
999 
1000 #if CWDEBUG_DEBUGT || !LIBCWD_USE_LINUXTHREADS
1001 template <int instance>
1002  bool rwlock_tct<instance>::S_initialized = 0;
1003 #endif
1004 
1005 template <int instance>
1006  typename rwlock_tct<instance>::cond_t rwlock_tct<instance>::S_no_holders_condition;
1007 
1008 template <int instance>
1009  void rwlock_tct<instance>::cleanup(void*)
1010  {
1011  if (S_holders_count == -1)
1012  wrunlock();
1013  else
1014  rdunlock();
1015  }
1016 
1017 extern void fatal_cancellation(void*);
1018 
1019  } // namespace _private_
1020 } // namespace libcwd
1021 
#else // !LIBCWD_THREAD_SAFE
// Single-threaded build: every cancellation/locking annotation used
// throughout libcwd expands to nothing, so the same source compiles
// without pthread support.
#define LIBCWD_DISABLE_CANCEL
#define LIBCWD_DISABLE_CANCEL_NO_BRACE
#define LIBCWD_ENABLE_CANCEL_NO_BRACE
#define LIBCWD_ENABLE_CANCEL
#define LIBCWD_DEFER_CANCEL
#define LIBCWD_DEFER_CANCEL_NO_BRACE
#define LIBCWD_RESTORE_CANCEL_NO_BRACE
#define LIBCWD_RESTORE_CANCEL
#define LIBCWD_DEFER_CLEANUP_PUSH(routine, arg)
#define LIBCWD_CLEANUP_POP_RESTORE(execute)
#define LIBCWD_PUSH_DEFER_TRYLOCK_MUTEX(instance, unlock_routine)
#define LIBCWD_DEFER_PUSH_LOCKMUTEX(instance, unlock_routine)
#define LIBCWD_UNLOCKMUTEX_POP_RESTORE(instance)
#define LIBCWD_DEBUGDEBUG_ASSERT_CANCEL_DEFERRED
#endif // LIBCWD_THREAD_SAFE
#endif // LIBCWD_PRIVATE_THREADING_H
1039 
void core_dump()
Dump core of current thread.
Definition: debug.cc:806
namespace for libcwd.
Definition: debug.cc:87
std::ostream & operator<<(std::ostream &os, memblk_types_nt memblk_type)
Allow writing a memblk_types_nt directly to an ostream.
Definition: debugmalloc.cc:688
Copyright © 2001 - 2004 Carlo Wood.  All rights reserved.