Line | % of fetches | Source |
---|---|---|
1 | // <mutex> -*- C++ -*- | |
2 | ||
3 | // Copyright (C) 2003-2015 Free Software Foundation, Inc. | |
4 | // | |
5 | // This file is part of the GNU ISO C++ Library. This library is free | |
6 | // software; you can redistribute it and/or modify it under the | |
7 | // terms of the GNU General Public License as published by the | |
8 | // Free Software Foundation; either version 3, or (at your option) | |
9 | // any later version. | |
10 | ||
11 | // This library is distributed in the hope that it will be useful, | |
12 | // but WITHOUT ANY WARRANTY; without even the implied warranty of | |
13 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
14 | // GNU General Public License for more details. | |
15 | ||
16 | // Under Section 7 of GPL version 3, you are granted additional | |
17 | // permissions described in the GCC Runtime Library Exception, version | |
18 | // 3.1, as published by the Free Software Foundation. | |
19 | ||
20 | // You should have received a copy of the GNU General Public License and | |
21 | // a copy of the GCC Runtime Library Exception along with this program; | |
22 | // see the files COPYING3 and COPYING.RUNTIME respectively. If not, see | |
23 | // <http://www.gnu.org/licenses/>. | |
24 | ||
25 | /** @file include/mutex | |
26 | * This is a Standard C++ Library header. | |
27 | */ | |
28 | ||
29 | #ifndef _GLIBCXX_MUTEX | |
30 | #define _GLIBCXX_MUTEX 1 | |
31 | ||
32 | #pragma GCC system_header | |
33 | ||
34 | #if __cplusplus < 201103L | |
35 | # include <bits/c++0x_warning.h> | |
36 | #else | |
37 | ||
38 | #include <tuple> | |
39 | #include <chrono> | |
40 | #include <exception> | |
41 | #include <type_traits> | |
42 | #include <functional> | |
43 | #include <system_error> | |
44 | #include <bits/functexcept.h> | |
45 | #include <bits/gthr.h> | |
46 | #include <bits/move.h> // for std::swap | |
47 | #include <bits/cxxabi_forced.h> | |
48 | ||
49 | #ifdef _GLIBCXX_USE_C99_STDINT_TR1 | |
50 | ||
51 | namespace std _GLIBCXX_VISIBILITY(default) | |
52 | { | |
53 | _GLIBCXX_BEGIN_NAMESPACE_VERSION | |
54 | ||
55 | #ifdef _GLIBCXX_HAS_GTHREADS | |
  // Common base class for std::mutex and std::timed_mutex.
  // Wraps the raw gthreads mutex and handles the two initialization
  // schemes offered by the gthr abstraction layer.
  class __mutex_base
  {
  protected:
    typedef __gthread_mutex_t			__native_type;

#ifdef __GTHREAD_MUTEX_INIT
    // A static initializer exists: no runtime setup (and no explicit
    // destruction) is needed, so default construction can be constexpr.
    __native_type  _M_mutex = __GTHREAD_MUTEX_INIT;

    constexpr __mutex_base() noexcept = default;
#else
    // No static initializer: initialize in the constructor and
    // destroy in the destructor.
    __native_type  _M_mutex;

    __mutex_base() noexcept
    {
      // XXX EAGAIN, ENOMEM, EPERM, EBUSY(may), EINVAL(may)
      __GTHREAD_MUTEX_INIT_FUNCTION(&_M_mutex);
    }

    ~__mutex_base() noexcept { __gthread_mutex_destroy(&_M_mutex); }
#endif

    // Mutexes are neither copyable nor assignable.
    __mutex_base(const __mutex_base&) = delete;
    __mutex_base& operator=(const __mutex_base&) = delete;
  };
81 | ||
  // Common base class for std::recursive_mutex and
  // std::recursive_timed_mutex.  Same structure as __mutex_base but
  // wraps the recursive gthreads mutex type.
  class __recursive_mutex_base
  {
  protected:
    typedef __gthread_recursive_mutex_t		__native_type;

    // Mutexes are neither copyable nor assignable.
    __recursive_mutex_base(const __recursive_mutex_base&) = delete;
    __recursive_mutex_base& operator=(const __recursive_mutex_base&) = delete;

#ifdef __GTHREAD_RECURSIVE_MUTEX_INIT
    // Statically initializable: no runtime setup or destruction needed.
    __native_type  _M_mutex = __GTHREAD_RECURSIVE_MUTEX_INIT;

    __recursive_mutex_base() = default;
#else
    // No static initializer: initialize and destroy at runtime.
    __native_type  _M_mutex;

    __recursive_mutex_base()
    {
      // XXX EAGAIN, ENOMEM, EPERM, EBUSY(may), EINVAL(may)
      __GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION(&_M_mutex);
    }

    ~__recursive_mutex_base()
    { __gthread_recursive_mutex_destroy(&_M_mutex); }
#endif
  };
108 | ||
109 | /** | |
110 | * @defgroup mutexes Mutexes | |
111 | * @ingroup concurrency | |
112 | * | |
113 | * Classes for mutex support. | |
114 | * @{ | |
115 | */ | |
116 | ||
  /// The standard mutex type: a non-recursive mutual-exclusion device.
  /// A thread that already owns the mutex must not call lock() again.
  class mutex : private __mutex_base
  {
  public:
    typedef __native_type* 			native_handle_type;

#ifdef __GTHREAD_MUTEX_INIT
    // With a static initializer the default constructor is constexpr,
    // allowing constant initialization of namespace-scope mutexes.
    constexpr
#endif
    mutex() noexcept = default;
    ~mutex() = default;

    // Mutexes are neither copyable nor movable.
    mutex(const mutex&) = delete;
    mutex& operator=(const mutex&) = delete;

    /// Block until the mutex is acquired.
    /// @throws system_error if the underlying lock call fails.
    void
    lock()
    {
      int __e = __gthread_mutex_lock(&_M_mutex);

      // EINVAL, EAGAIN, EBUSY, EDEADLK(may)
      if (__e)
	__throw_system_error(__e);
    }

    /// Try to acquire the mutex without blocking.
    /// @return true if ownership was obtained.
    bool
    try_lock() noexcept
    {
      // XXX EINVAL, EAGAIN, EBUSY
      return !__gthread_mutex_trylock(&_M_mutex);
    }

    /// Release the mutex.  Precondition: the calling thread owns it.
    /// Errors from the underlying unlock are deliberately ignored.
    void
    unlock()
    {
      // XXX EINVAL, EAGAIN, EPERM
      __gthread_mutex_unlock(&_M_mutex);
    }

    /// Access the underlying gthreads mutex object.
    native_handle_type
    native_handle()
    { return &_M_mutex; }
  };
160 | ||
  /// A mutex that may be locked repeatedly by the owning thread; it
  /// must be unlocked the same number of times before another thread
  /// can acquire it.
  class recursive_mutex : private __recursive_mutex_base
  {
  public:
    typedef __native_type* 			native_handle_type;

    recursive_mutex() = default;
    ~recursive_mutex() = default;

    // Mutexes are neither copyable nor movable.
    recursive_mutex(const recursive_mutex&) = delete;
    recursive_mutex& operator=(const recursive_mutex&) = delete;

    /// Block until the mutex is acquired (or re-acquired by the owner).
    /// @throws system_error if the underlying lock call fails.
    void
    lock()
    {
      int __e = __gthread_recursive_mutex_lock(&_M_mutex);

      // EINVAL, EAGAIN, EBUSY, EDEADLK(may)
      if (__e)
	__throw_system_error(__e);
    }

    /// Try to acquire (or re-acquire) the mutex without blocking.
    /// @return true if ownership was obtained.
    bool
    try_lock() noexcept
    {
      // XXX EINVAL, EAGAIN, EBUSY
      return !__gthread_recursive_mutex_trylock(&_M_mutex);
    }

    /// Release one level of ownership.  Errors from the underlying
    /// unlock are deliberately ignored.
    void
    unlock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      __gthread_recursive_mutex_unlock(&_M_mutex);
    }

    /// Access the underlying gthreads mutex object.
    native_handle_type
    native_handle()
    { return &_M_mutex; }
  };
201 | ||
202 | #if _GTHREAD_USE_MUTEX_TIMEDLOCK | |
203 | template<typename _Derived> | |
204 | class __timed_mutex_impl | |
205 | { | |
206 | protected: | |
207 | typedef chrono::high_resolution_clock __clock_t; | |
208 | ||
209 | template<typename _Rep, typename _Period> | |
210 | bool | |
211 | _M_try_lock_for(const chrono::duration<_Rep, _Period>& __rtime) | |
212 | { | |
213 | using chrono::steady_clock; | |
214 | auto __rt = chrono::duration_cast<steady_clock::duration>(__rtime); | |
215 | if (ratio_greater<steady_clock::period, _Period>()) | |
216 | ++__rt; | |
217 | return _M_try_lock_until(steady_clock::now() + __rt); | |
218 | } | |
219 | ||
220 | template<typename _Duration> | |
221 | bool | |
222 | _M_try_lock_until(const chrono::time_point<__clock_t, | |
223 | _Duration>& __atime) | |
224 | { | |
225 | auto __s = chrono::time_point_cast<chrono::seconds>(__atime); | |
226 | auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s); | |
227 | ||
228 | __gthread_time_t __ts = { | |
229 | static_cast<std::time_t>(__s.time_since_epoch().count()), | |
230 | static_cast<long>(__ns.count()) | |
231 | }; | |
232 | ||
233 | auto __mutex = static_cast<_Derived*>(this)->native_handle(); | |
234 | return !__gthread_mutex_timedlock(__mutex, &__ts); | |
235 | } | |
236 | ||
237 | template<typename _Clock, typename _Duration> | |
238 | bool | |
239 | _M_try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime) | |
240 | { | |
241 | auto __rtime = __atime - _Clock::now(); | |
242 | return _M_try_lock_until(__clock_t::now() + __rtime); | |
243 | } | |
244 | }; | |
245 | ||
  /// A non-recursive mutex that additionally supports bounded waits:
  /// try_lock_for() and try_lock_until().
  class timed_mutex
  : private __mutex_base, public __timed_mutex_impl<timed_mutex>
  {
  public:
    typedef __native_type* 		  	native_handle_type;

    timed_mutex() = default;
    ~timed_mutex() = default;

    // Mutexes are neither copyable nor movable.
    timed_mutex(const timed_mutex&) = delete;
    timed_mutex& operator=(const timed_mutex&) = delete;

    /// Block until the mutex is acquired.
    /// @throws system_error if the underlying lock call fails.
    void
    lock()
    {
      int __e = __gthread_mutex_lock(&_M_mutex);

      // EINVAL, EAGAIN, EBUSY, EDEADLK(may)
      if (__e)
	__throw_system_error(__e);
    }

    /// Try to acquire the mutex without blocking.
    /// @return true if ownership was obtained.
    bool
    try_lock() noexcept
    {
      // XXX EINVAL, EAGAIN, EBUSY
      return !__gthread_mutex_trylock(&_M_mutex);
    }

    /// Try to acquire the mutex, waiting at most @a __rtime.
    /// @return true if ownership was obtained.
    template <class _Rep, class _Period>
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
      { return _M_try_lock_for(__rtime); }

    /// Try to acquire the mutex, waiting until the absolute time
    /// @a __atime at the latest.
    /// @return true if ownership was obtained.
    template <class _Clock, class _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
      { return _M_try_lock_until(__atime); }

    /// Release the mutex.  Precondition: the calling thread owns it.
    void
    unlock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      __gthread_mutex_unlock(&_M_mutex);
    }

    /// Access the underlying gthreads mutex object.
    native_handle_type
    native_handle()
    { return &_M_mutex; }
  };
297 | ||
  /// A recursive mutex that additionally supports bounded waits:
  /// try_lock_for() and try_lock_until().
  class recursive_timed_mutex
  : private __recursive_mutex_base,
    public __timed_mutex_impl<recursive_timed_mutex>
  {
  public:
    typedef __native_type* 			native_handle_type;

    recursive_timed_mutex() = default;
    ~recursive_timed_mutex() = default;

    // Mutexes are neither copyable nor movable.
    recursive_timed_mutex(const recursive_timed_mutex&) = delete;
    recursive_timed_mutex& operator=(const recursive_timed_mutex&) = delete;

    /// Block until the mutex is acquired (or re-acquired by the owner).
    /// @throws system_error if the underlying lock call fails.
    void
    lock()
    {
      int __e = __gthread_recursive_mutex_lock(&_M_mutex);

      // EINVAL, EAGAIN, EBUSY, EDEADLK(may)
      if (__e)
	__throw_system_error(__e);
    }

    /// Try to acquire (or re-acquire) the mutex without blocking.
    /// @return true if ownership was obtained.
    bool
    try_lock() noexcept
    {
      // XXX EINVAL, EAGAIN, EBUSY
      return !__gthread_recursive_mutex_trylock(&_M_mutex);
    }

    /// Try to acquire the mutex, waiting at most @a __rtime.
    /// @return true if ownership was obtained.
    template <class _Rep, class _Period>
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
      { return _M_try_lock_for(__rtime); }

    /// Try to acquire the mutex, waiting until the absolute time
    /// @a __atime at the latest.
    /// @return true if ownership was obtained.
    template <class _Clock, class _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
      { return _M_try_lock_until(__atime); }

    /// Release one level of ownership.
    void
    unlock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      __gthread_recursive_mutex_unlock(&_M_mutex);
    }

    /// Access the underlying gthreads mutex object.
    native_handle_type
    native_handle()
    { return &_M_mutex; }
  };
350 | #endif | |
351 | #endif // _GLIBCXX_HAS_GTHREADS | |
352 | ||
  /// Tag type: do not acquire ownership of the mutex.
  struct defer_lock_t { };

  /// Tag type: try to acquire ownership of the mutex without blocking.
  struct try_to_lock_t { };

  /// Tag type: assume the calling thread has already obtained mutex
  /// ownership and manage it.
  struct adopt_lock_t { };

  /// Tag object selecting the deferred-locking constructor.
  constexpr defer_lock_t	defer_lock { };
  /// Tag object selecting the non-blocking try_lock constructor.
  constexpr try_to_lock_t	try_to_lock { };
  /// Tag object selecting the lock-adopting constructor.
  constexpr adopt_lock_t	adopt_lock { };
366 | ||
367 | /// @brief Scoped lock idiom. | |
368 | // Acquire the mutex here with a constructor call, then release with | |
369 | // the destructor call in accordance with RAII style. | |
370 | template<typename _Mutex> | |
371 | class lock_guard | |
372 | { | |
373 | public: | |
374 | typedef _Mutex mutex_type; | |
375 | ||
376 | explicit lock_guard(mutex_type& __m) : _M_device(__m) | |
377 | { _M_device.lock(); } | |
378 | ||
379 | lock_guard(mutex_type& __m, adopt_lock_t) : _M_device(__m) | |
380 | { } // calling thread owns mutex | |
381 | ||
382 | ~lock_guard() | |
383 | { _M_device.unlock(); } | |
384 | ||
385 | lock_guard(const lock_guard&) = delete; | |
386 | lock_guard& operator=(const lock_guard&) = delete; | |
387 | ||
388 | private: | |
389 | mutex_type& _M_device; | |
390 | }; | |
391 | ||
  /** @brief A movable scoped lock type.
   *
   *  A unique_lock controls mutex ownership within a scope.  Locking
   *  may be deferred, attempted without blocking, bounded in time, or
   *  adopted from an already-owning thread, and ownership can be
   *  transferred between unique_lock objects by move.
   */
  template<typename _Mutex>
    class unique_lock
    {
    public:
      typedef _Mutex mutex_type;

      /// Default constructor: no associated mutex, owns nothing.
      unique_lock() noexcept
      : _M_device(0), _M_owns(false)
      { }

      /// Associate with @a __m and block until it is locked.
      explicit unique_lock(mutex_type& __m)
      : _M_device(std::__addressof(__m)), _M_owns(false)
      {
	lock();
	_M_owns = true;
      }

      /// Associate with @a __m without locking it.
      unique_lock(mutex_type& __m, defer_lock_t) noexcept
      : _M_device(std::__addressof(__m)), _M_owns(false)
      { }

      /// Associate with @a __m, owning it only if try_lock() succeeds.
      unique_lock(mutex_type& __m, try_to_lock_t)
      : _M_device(std::__addressof(__m)), _M_owns(_M_device->try_lock())
      { }

      /// Adopt @a __m, which the calling thread must already have locked.
      unique_lock(mutex_type& __m, adopt_lock_t)
      : _M_device(std::__addressof(__m)), _M_owns(true)
      {
	// XXX calling thread owns mutex
      }

      /// Associate with @a __m, owning it only if it can be locked
      /// before the absolute time @a __atime.
      template<typename _Clock, typename _Duration>
	unique_lock(mutex_type& __m,
		    const chrono::time_point<_Clock, _Duration>& __atime)
	: _M_device(std::__addressof(__m)),
	  _M_owns(_M_device->try_lock_until(__atime))
	{ }

      /// Associate with @a __m, owning it only if it can be locked
      /// within the relative time @a __rtime.
      template<typename _Rep, typename _Period>
	unique_lock(mutex_type& __m,
		    const chrono::duration<_Rep, _Period>& __rtime)
	: _M_device(std::__addressof(__m)),
	  _M_owns(_M_device->try_lock_for(__rtime))
	{ }

      /// Unlock the mutex if this object currently owns it.
      ~unique_lock()
      {
	if (_M_owns)
	  unlock();
      }

      unique_lock(const unique_lock&) = delete;
      unique_lock& operator=(const unique_lock&) = delete;

      /// Move constructor: steal @a __u's mutex and ownership state,
      /// leaving @a __u empty.
      unique_lock(unique_lock&& __u) noexcept
      : _M_device(__u._M_device), _M_owns(__u._M_owns)
      {
	__u._M_device = 0;
	__u._M_owns = false;
      }

      /// Move assignment: release any owned mutex, then take over
      /// @a __u's state via a temporary and swap.
      unique_lock& operator=(unique_lock&& __u) noexcept
      {
	if(_M_owns)
	  unlock();

	unique_lock(std::move(__u)).swap(*this);

	// Redundant after the move above (the move constructor already
	// cleared __u) but harmless.
	__u._M_device = 0;
	__u._M_owns = false;

	return *this;
      }

      /// Block until the associated mutex is locked.
      /// @throws system_error with operation_not_permitted if there is
      /// no associated mutex, or resource_deadlock_would_occur if this
      /// object already owns it.
      void
      lock()
      {
	if (!_M_device)
	  __throw_system_error(int(errc::operation_not_permitted));
	else if (_M_owns)
	  __throw_system_error(int(errc::resource_deadlock_would_occur));
	else
	  {
	    _M_device->lock();
	    _M_owns = true;
	  }
      }

      /// Try to lock the associated mutex without blocking.
      /// @return true if ownership was obtained.
      /// @throws system_error as for lock().
      bool
      try_lock()
      {
	if (!_M_device)
	  __throw_system_error(int(errc::operation_not_permitted));
	else if (_M_owns)
	  __throw_system_error(int(errc::resource_deadlock_would_occur));
	else
	  {
	    _M_owns = _M_device->try_lock();
	    return _M_owns;
	  }
      }

      /// Try to lock the associated mutex until the absolute time
      /// @a __atime.  @throws system_error as for lock().
      template<typename _Clock, typename _Duration>
	bool
	try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
	{
	  if (!_M_device)
	    __throw_system_error(int(errc::operation_not_permitted));
	  else if (_M_owns)
	    __throw_system_error(int(errc::resource_deadlock_would_occur));
	  else
	    {
	      _M_owns = _M_device->try_lock_until(__atime);
	      return _M_owns;
	    }
	}

      /// Try to lock the associated mutex for the relative time
      /// @a __rtime.  @throws system_error as for lock().
      template<typename _Rep, typename _Period>
	bool
	try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
	{
	  if (!_M_device)
	    __throw_system_error(int(errc::operation_not_permitted));
	  else if (_M_owns)
	    __throw_system_error(int(errc::resource_deadlock_would_occur));
	  else
	    {
	      _M_owns = _M_device->try_lock_for(__rtime);
	      return _M_owns;
	    }
	}

      /// Unlock the associated mutex.
      /// @throws system_error with operation_not_permitted if this
      /// object does not own the mutex.
      void
      unlock()
      {
	if (!_M_owns)
	  __throw_system_error(int(errc::operation_not_permitted));
	else if (_M_device)
	  {
	    _M_device->unlock();
	    _M_owns = false;
	  }
      }

      /// Exchange state with @a __u.
      void
      swap(unique_lock& __u) noexcept
      {
	std::swap(_M_device, __u._M_device);
	std::swap(_M_owns, __u._M_owns);
      }

      /// Disassociate from the mutex WITHOUT unlocking it; the caller
      /// becomes responsible for any held lock.
      mutex_type*
      release() noexcept
      {
	mutex_type* __ret = _M_device;
	_M_device = 0;
	_M_owns = false;
	return __ret;
      }

      /// Whether this object owns a lock on its mutex.
      bool
      owns_lock() const noexcept
      { return _M_owns; }

      /// Same as owns_lock().
      explicit operator bool() const noexcept
      { return owns_lock(); }

      /// The associated mutex, or null if there is none.
      mutex_type*
      mutex() const noexcept
      { return _M_device; }

    private:
      mutex_type*	_M_device;   // associated mutex (may be null)
      bool		_M_owns;     // true iff we hold _M_device's lock
				     // XXX use atomic_bool
    };
568 | ||
  /// Swap overload for unique_lock objects; exchanges the associated
  /// mutexes and ownership states of @a __x and @a __y.
  /// @relates unique_lock
  template<typename _Mutex>
    inline void
    swap(unique_lock<_Mutex>& __x, unique_lock<_Mutex>& __y) noexcept
    { __x.swap(__y); }
574 | ||
575 | template<typename _Lock> | |
576 | inline unique_lock<_Lock> | |
577 | __try_to_lock(_Lock& __l) | |
578 | { return unique_lock<_Lock>{__l, try_to_lock}; } | |
579 | ||
  // Recursive implementation of the variadic try_lock: try the lock at
  // index _Idx, then recurse into the rest of the tuple.  On complete
  // success __idx ends up -1 and every intermediate guard is released
  // (ownership passes to the caller); on any failure __idx is left at
  // the failing index and the guards unwind, unlocking everything that
  // was acquired along the way.
  template<int _Idx, bool _Continue = true>
    struct __try_lock_impl
    {
      template<typename... _Lock>
	static void
	__do_try_lock(tuple<_Lock&...>& __locks, int& __idx)
	{
	  __idx = _Idx;
	  auto __lock = std::__try_to_lock(std::get<_Idx>(__locks));
	  if (__lock.owns_lock())
	    {
	      // Recurse with _Continue=false once the next index is the
	      // last element of the tuple.
	      constexpr bool __cont = _Idx + 2 < sizeof...(_Lock);
	      using __try_locker = __try_lock_impl<_Idx + 1, __cont>;
	      __try_locker::__do_try_lock(__locks, __idx);
	      if (__idx == -1)
		__lock.release();
	    }
	}
    };
599 | ||
  // Terminal case of the recursion: _Idx is the last lock in the
  // tuple.  Success here means every lock was acquired, signalled by
  // setting __idx to -1 and keeping the lock via release().
  template<int _Idx>
    struct __try_lock_impl<_Idx, false>
    {
      template<typename... _Lock>
	static void
	__do_try_lock(tuple<_Lock&...>& __locks, int& __idx)
	{
	  __idx = _Idx;
	  auto __lock = std::__try_to_lock(std::get<_Idx>(__locks));
	  if (__lock.owns_lock())
	    {
	      __idx = -1;
	      __lock.release();
	    }
	}
    };
616 | ||
617 | /** @brief Generic try_lock. | |
618 | * @param __l1 Meets Mutex requirements (try_lock() may throw). | |
619 | * @param __l2 Meets Mutex requirements (try_lock() may throw). | |
620 | * @param __l3 Meets Mutex requirements (try_lock() may throw). | |
621 | * @return Returns -1 if all try_lock() calls return true. Otherwise returns | |
622 | * a 0-based index corresponding to the argument that returned false. | |
623 | * @post Either all arguments are locked, or none will be. | |
624 | * | |
625 | * Sequentially calls try_lock() on each argument. | |
626 | */ | |
627 | template<typename _Lock1, typename _Lock2, typename... _Lock3> | |
628 | int | |
629 | try_lock(_Lock1& __l1, _Lock2& __l2, _Lock3&... __l3) | |
630 | { | |
631 | int __idx; | |
632 | auto __locks = std::tie(__l1, __l2, __l3...); | |
633 | __try_lock_impl<0>::__do_try_lock(__locks, __idx); | |
634 | return __idx; | |
635 | } | |
636 | ||
637 | /** @brief Generic lock. | |
638 | * @param __l1 Meets Mutex requirements (try_lock() may throw). | |
639 | * @param __l2 Meets Mutex requirements (try_lock() may throw). | |
640 | * @param __l3 Meets Mutex requirements (try_lock() may throw). | |
641 | * @throw An exception thrown by an argument's lock() or try_lock() member. | |
642 | * @post All arguments are locked. | |
643 | * | |
644 | * All arguments are locked via a sequence of calls to lock(), try_lock() | |
645 | * and unlock(). If the call exits via an exception any locks that were | |
646 | * obtained will be released. | |
647 | */ | |
648 | template<typename _L1, typename _L2, typename... _L3> | |
649 | void | |
650 | lock(_L1& __l1, _L2& __l2, _L3&... __l3) | |
651 | { | |
652 | while (true) | |
653 | { | |
654 | using __try_locker = __try_lock_impl<0, sizeof...(_L3) != 0>; | |
655 | unique_lock<_L1> __first(__l1); | |
656 | int __idx; | |
657 | auto __locks = std::tie(__l2, __l3...); | |
658 | __try_locker::__do_try_lock(__locks, __idx); | |
659 | if (__idx == -1) | |
660 | { | |
661 | __first.release(); | |
662 | return; | |
663 | } | |
664 | } | |
665 | } | |
666 | ||
667 | #ifdef _GLIBCXX_HAS_GTHREADS | |
  /// Flag type used with call_once() to guarantee a callable runs
  /// exactly once.  Not copyable; statically initializable.
  struct once_flag
  {
  private:
    typedef __gthread_once_t __native_type;
    // Underlying gthreads once-control, statically initialized.
    __native_type  _M_once = __GTHREAD_ONCE_INIT;

  public:
    /// Constructor
    constexpr once_flag() noexcept = default;

    /// Deleted copy constructor
    once_flag(const once_flag&) = delete;
    /// Deleted assignment operator
    once_flag& operator=(const once_flag&) = delete;

    // call_once needs direct access to _M_once.
    template<typename _Callable, typename... _Args>
      friend void
      call_once(once_flag& __once, _Callable&& __f, _Args&&... __args);
  };
688 | ||
#ifdef _GLIBCXX_HAVE_TLS
  // With TLS, call_once publishes the callable through two
  // thread-local globals which __once_proxy reads.
  extern __thread void* __once_callable;
  extern __thread void (*__once_call)();

  // Trampoline: invokes the callable stashed in __once_callable,
  // cast back to its concrete type.
  template<typename _Callable>
    inline void
    __once_call_impl()
    {
      (*(_Callable*)__once_callable)();
    }
#else
  // Without TLS, a single global functor carries the callable,
  // serialized by the mutex from __get_once_mutex().
  extern function<void()> __once_functor;

  extern void
  __set_once_functor_lock_ptr(unique_lock<mutex>*);

  extern mutex&
  __get_once_mutex();
#endif

  // Function passed to __gthread_once; defined in the library, it
  // dispatches to the callable published above.
  extern "C" void __once_proxy(void);
710 | ||
  /// @brief Invoke a callable exactly once, even across threads.
  ///
  /// The first call through a given once_flag runs the bound callable;
  /// concurrent and subsequent calls do nothing.
  /// @throws system_error if the underlying __gthread_once call fails.
  template<typename _Callable, typename... _Args>
    void
    call_once(once_flag& __once, _Callable&& __f, _Args&&... __args)
    {
#ifdef _GLIBCXX_HAVE_TLS
      // Publish the bound callable via thread-local globals so
      // __once_proxy can invoke it without arguments.
      auto __bound_functor = std::__bind_simple(std::forward<_Callable>(__f),
						std::forward<_Args>(__args)...);
      __once_callable = std::__addressof(__bound_functor);
      __once_call = &__once_call_impl<decltype(__bound_functor)>;
#else
      // No TLS: publish through the single global functor while
      // holding the library-wide once mutex; the proxy releases the
      // lock (via the stored pointer) when it runs.
      unique_lock<mutex> __functor_lock(__get_once_mutex());
      auto __callable = std::__bind_simple(std::forward<_Callable>(__f),
					   std::forward<_Args>(__args)...);
      __once_functor = [&]() { __callable(); };
      __set_once_functor_lock_ptr(&__functor_lock);
#endif

      int __e = __gthread_once(&__once._M_once, &__once_proxy);

#ifndef _GLIBCXX_HAVE_TLS
      // If we still hold the lock the proxy never ran (another thread
      // won the race); withdraw the lock pointer before unlocking.
      if (__functor_lock)
	__set_once_functor_lock_ptr(0);
#endif

      if (__e)
	__throw_system_error(__e);
    }
739 | #endif // _GLIBCXX_HAS_GTHREADS | |
740 | ||
741 | // @} group mutexes | |
742 | _GLIBCXX_END_NAMESPACE_VERSION | |
743 | } // namespace | |
744 | #endif // _GLIBCXX_USE_C99_STDINT_TR1 | |
745 | ||
746 | #endif // C++11 | |
747 | ||
748 | #endif // _GLIBCXX_MUTEX | |
749 |
Copyright (c) 2006-2012 Rogue Wave Software, Inc. All Rights Reserved.
Patents pending.