thread_safe_adaptors.hpp
#ifndef LIBJMMCG_CORE_THREAD_SAFE_ADAPTORS_HPP
#define LIBJMMCG_CORE_THREAD_SAFE_ADAPTORS_HPP

/******************************************************************************
** Copyright © 2004 by J.M.McGuiness, coder@hussar.me.uk
**
** This library is free software; you can redistribute it and/or
** modify it under the terms of the GNU Lesser General Public
** License as published by the Free Software Foundation; either
** version 2.1 of the License, or (at your option) any later version.
**
** This library is distributed in the hope that it will be useful,
** but WITHOUT ANY WARRANTY; without even the implied warranty of
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
** Lesser General Public License for more details.
**
** You should have received a copy of the GNU Lesser General Public
** License along with this library; if not, write to the Free Software
** Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/

#include "exception.hpp"

#include <boost/mpl/assert.hpp>

namespace jmmcg { namespace LIBJMMCG_VER_NAMESPACE { namespace ppd {

	/// Atomically count the amount of work there is to do, and provide access to the lock on the containing collection.
	/**
		This class adds a constant-time counter to safe_colln, queue or funky_queue.
		This uses standard locks.
		\todo I suppose I could use some kind of enable_if to detect if the container has a size() member-method, and only use this if it doesn't. That's a micro-optimisation to do.
	*/
	template<class Lk>
	class no_signalling {
	public:
		typedef api_lock_traits<platform_api, sequential_mode>::anon_event_type atomic_t;
		typedef Lk locker_type;
		typedef typename locker_type::lock_traits lock_traits;

		/**
			To assist in allowing compile-time computation of the algorithmic order of the threading model.
		*/
		static constexpr generic_traits::memory_access_modes memory_access_mode=(
			locker_type::memory_access_mode==generic_traits::memory_access_modes::crew_memory_access
				? generic_traits::memory_access_modes::crew_memory_access
				: generic_traits::memory_access_modes::erew_memory_access
		);

		constexpr no_signalling() noexcept(true) FORCE_INLINE
			: lock_(), have_work_() {
		}
		explicit no_signalling(atomic_t &ev) noexcept(true) FORCE_INLINE
			: lock_(), have_work_(&ev) {
			assert(dynamic_cast<atomic_t *>(have_work_));
		}
		constexpr no_signalling(no_signalling const &s) noexcept(true) FORCE_INLINE
			: lock_(), have_work_(s.have_work_) {
		}

		atomic_t & __fastcall have_work() noexcept(true) FORCE_INLINE {
			assert(dynamic_cast<atomic_t *>(have_work_));
			return *have_work_;
		}
		locker_type & __fastcall locker() const noexcept(true) FORCE_INLINE {
			assert(dynamic_cast<locker_type const *>(&lock_));
			return lock_;
		}
		locker_type & __fastcall locker() noexcept(true) {
			assert(dynamic_cast<locker_type *>(&lock_));
			return lock_;
		}

		void __fastcall add() noexcept(true) FORCE_INLINE {
			if (have_work_) {
				have_work_->set();
			}
		}
		void __fastcall add(typename atomic_t::count_type const c) noexcept(true) FORCE_INLINE {
			if (have_work_) {
				for (typename atomic_t::count_type i=0; i<c; ++i) {
					have_work_->set();
				}
			}
		}
		typename atomic_t::atomic_state_type __fastcall remove() noexcept(true) FORCE_INLINE {
			if (have_work_) {
				return have_work_->lock();
			} else {
				return atomic_t::lock_traits::atom_unset;
			}
		}
		void __fastcall remove(typename atomic_t::count_type const c) noexcept(true) FORCE_INLINE {
			if (have_work_) {
				for (typename atomic_t::count_type i=0; i<c; ++i) {
					have_work_->lock();
				}
			}
		}
		typename atomic_t::atomic_state_type __fastcall try_remove() noexcept(true) FORCE_INLINE {
			assert(dynamic_cast<atomic_t *>(have_work_));
			return have_work_->try_lock();
		}
		static constexpr void clear() noexcept(true) FORCE_INLINE {
		}
		static constexpr typename atomic_t::count_type __fastcall count() noexcept(true) FORCE_INLINE {
			return 0;
		}

	private:
		mutable locker_type lock_;
		atomic_t *have_work_;
	};
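
	/**
		A minimal usage sketch of the no-signalling policy, not part of the library interface: "lock_t" is a placeholder for whichever of the library's lock types is chosen as Lk, an assumption rather than a prescribed name. It shows that add() and remove() merely forward to the optionally-registered event, and that count() is a constant-time no-op returning zero.

		\code
		typedef no_signalling<lock_t> sig_t;	// "lock_t" is an assumed, user-chosen lock type.

		sig_t::atomic_t exit_event;	// The event to signal when work arrives.
		sig_t sig(exit_event);
		sig.add();	// Record that one item of work arrived, i.e. set the event.
		sig.remove();	// Consume the item recorded by add().
		assert(sig_t::count()==0);	// The counter is always zero in this policy.
		\endcode
	*/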

	/// A flag to atomically signal whether the container contains work, and to count the amount of work there is to do.
	/**
		This uses standard locks.
	*/
	template<class Lk>
	class signalling {
	public:
		typedef Lk atomic_t;
		typedef typename atomic_t::lock_traits lock_traits;
		typedef typename atomic_t::locker_type locker_type;

		/**
			To assist in allowing compile-time computation of the algorithmic order of the threading model.
		*/
		static constexpr generic_traits::memory_access_modes memory_access_mode=atomic_t::memory_access_mode;

	private:
		atomic_t *have_work_;

	public:
		constexpr __stdcall signalling() noexcept(true) FORCE_INLINE
			: have_work_() {
		}
		explicit signalling(atomic_t &ev) noexcept(true) FORCE_INLINE
			: have_work_(&ev) {
			assert(dynamic_cast<atomic_t *>(have_work_));
		}
		__stdcall signalling(signalling const &s) noexcept(true) FORCE_INLINE
			: have_work_(s.have_work_) {
			assert(dynamic_cast<atomic_t *>(have_work_));
		}

		signalling(signalling &&)=delete;
		void operator=(signalling const &)=delete;
		void operator=(signalling &&)=delete;

		constexpr atomic_t & __fastcall have_work() const noexcept(true) FORCE_INLINE {
			assert(dynamic_cast<atomic_t *>(have_work_));
			return *have_work_;
		}
		atomic_t & __fastcall have_work() noexcept(true) FORCE_INLINE {
			assert(dynamic_cast<atomic_t *>(have_work_));
			return *have_work_;
		}
		constexpr locker_type & __fastcall locker() const noexcept(true) FORCE_INLINE {
			assert(dynamic_cast<atomic_t const *>(have_work_));
			return have_work_->locker();
		}
		locker_type & __fastcall locker() noexcept(true) FORCE_INLINE {
			assert(dynamic_cast<atomic_t *>(have_work_));
			return have_work_->locker();
		}

		void add() noexcept(false) FORCE_INLINE {
			assert(dynamic_cast<atomic_t *>(have_work_));
			typename lock_traits::atomic_state_type const ret=have_work_->set_nolk(atomic_t::states::new_work_arrived);
			if (ret!=lock_traits::atom_set) {
				throw exception_type(_T("Could not add more work to the atomic object."), info::function(__LINE__, __PRETTY_FUNCTION__, typeid(*this)), JMMCG_REVISION_HDR(_T(LIBJMMCG_VERSION_NUMBER)));
			}
		}
		void __fastcall add(typename atomic_t::count_type const c) noexcept(false) FORCE_INLINE {
			for (typename atomic_t::count_type i=0; i<c; ++i) {
				add();
			}
		}
		typename atomic_t::lock_result_type __fastcall remove() noexcept(noexcept(have_work_->lock(0))) FORCE_INLINE;
		void __fastcall remove(typename atomic_t::count_type const c) noexcept(false) FORCE_INLINE;
		typename atomic_t::lock_result_type __fastcall try_remove() noexcept(noexcept(have_work_->try_lock())) FORCE_INLINE;
		void clear() noexcept(noexcept(have_work_->clear())) FORCE_INLINE;
		typename atomic_t::count_type __fastcall count() const noexcept(noexcept(have_work_->count())) FORCE_INLINE;
	};
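
	/**
		A hedged sketch of the signalling policy, by contrast. "counted_event_t" is a placeholder, not a library name: it stands for a lock type supplying the set_nolk(), lock() and count() members this policy requires. Unlike no_signalling, add() throws if the work count cannot be raised, and count() tracks the amount of outstanding work.

		\code
		typedef signalling<counted_event_t> sig_t;	// "counted_event_t" is an assumed type.

		counted_event_t ev;
		sig_t sig(ev);
		sig.add();	// Throws exception_type if the underlying atomic set fails.
		assert(sig.count()==1);	// One item of work is now recorded.
		sig.remove();	// Consumes one item of work.
		\endcode
	*/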

	/// An adaptor for a container that attempts to add some thread-safety to assist in making thread-safe programs.
	/**
		By default the adapted container does not use a read-write lock.
		Note that if the read_lock_type and write_lock_type are the same, i.e. an exclusive lock is used, then the adaptor will exhibit EREW semantics. If a reader-writer lock is used for them, then it will exhibit CREW semantics.

		\see queue
	*/
	template<
		typename C,
		typename M,
		typename WL=typename M::write_lock_type,
		class Sig=no_signalling<M>,
		class MLk=typename lock::any_order::all<M::lock_traits::api_type, typename M::lock_traits::model_type, M, M>
	>
	class safe_colln : private C	///< We want to be able to pass it as a "C (container)", but we also don't want to expose the unprotected, base functionality.
	{
	public:
		typedef C container_type;	///< The container to be adapted.
		typedef Sig have_work_type;	///< Used to enable functionality to atomically signal if the container contains work or not.
		typedef M atomic_t;	///< The underlying lock object to use that will be locked in some (EREW or CREW or other) manner.
		typedef WL write_lock_type;	///< The type of write-lock to use. This allows the possibility of using a read-write lock.
		typedef typename atomic_t::read_lock_type read_lock_type;	///< The type of read-lock to use, by default the write lock. This allows the possibility of using a read-write lock.
		typedef MLk lock_all_type;	///< The multi-lock type to use to ensure that operations on combined safe_collns are thread-safe. Note that locking them in any order reduces the likelihood of deadlock at the cost of performance.
		typedef typename atomic_t::lock_traits lock_traits;
		typedef api_threading_traits<lock_traits::api_type, typename lock_traits::model_type> thread_traits;
		typedef typename container_type::reference reference;
		typedef typename container_type::size_type size_type;
		typedef typename container_type::value_type value_type;
		using exit_requested_type=typename have_work_type::atomic_t;

		/**
			To assist in allowing compile-time computation of the algorithmic order of the threading model.
		*/
		static constexpr generic_traits::memory_access_modes memory_access_mode=(
			have_work_type::memory_access_mode==generic_traits::memory_access_modes::crew_memory_access
			&& lock_all_type::memory_access_mode==generic_traits::memory_access_modes::crew_memory_access
// TODO: some do and some don't have this as a member... && value_type::memory_access_mode==generic_traits::memory_access_modes::crew_memory_access
				? generic_traits::memory_access_modes::crew_memory_access
				: generic_traits::memory_access_modes::erew_memory_access
		);

		BOOST_MPL_ASSERT((std::is_same<typename write_lock_type::atomic_t, atomic_t>));

		/// A flag to atomically signal whether the container contains work, how much work there is, and the underlying lock, to assist in writing thread-safe code.
		have_work_type have_work;

		/// The underlying lock, to assist in writing thread-safe code.
		atomic_t & __fastcall pop_lock() noexcept(true) FORCE_INLINE {
			return have_work.locker();
		}
		/// The underlying lock, to assist in writing thread-safe code.
		atomic_t & __fastcall pop_lock() const noexcept(true) FORCE_INLINE {
			return have_work.locker();
		}
		/// The underlying lock, to assist in writing thread-safe code.
		atomic_t & __fastcall push_lock() noexcept(true) FORCE_INLINE {
			return have_work.locker();
		}
		/// The underlying lock, to assist in writing thread-safe code.
		atomic_t & __fastcall push_lock() const noexcept(true) FORCE_INLINE {
			return have_work.locker();
		}

		__stdcall safe_colln() noexcept(noexcept(container_type()) && noexcept(have_work_type())) FORCE_INLINE;
		explicit safe_colln(typename have_work_type::atomic_t &) FORCE_INLINE;
		explicit safe_colln(std::initializer_list<value_type>) FORCE_INLINE;
		explicit __stdcall safe_colln(size_type const sz, value_type const &v=value_type()) FORCE_INLINE;
		template<class T1, class T2>
		__stdcall FORCE_INLINE
		safe_colln(size_type const sz, T1 const &, T2 const &);
		explicit __stdcall safe_colln(const container_type &) FORCE_INLINE;
		__stdcall safe_colln(const safe_colln &) noexcept(false) FORCE_INLINE;
		__stdcall ~safe_colln() FORCE_INLINE;
		safe_colln & __fastcall operator=(const safe_colln &) noexcept(false) FORCE_INLINE;

		bool __fastcall empty() const noexcept(true) FORCE_INLINE;
		size_type __fastcall sync_size() const noexcept(false) FORCE_INLINE;
		size_type __fastcall size() const noexcept(true) FORCE_INLINE;

		value_type __fastcall operator[](size_type s) const noexcept(false) FORCE_INLINE;

		void __fastcall push_back(value_type const &v) noexcept(false) FORCE_INLINE;
		void __fastcall push_back(value_type &&v) noexcept(false) FORCE_INLINE;

		void __fastcall push_front(const value_type &v) noexcept(false) FORCE_INLINE;
		void __fastcall push_front(value_type &&v) noexcept(false) FORCE_INLINE;

		size_type __fastcall erase(const value_type &v) noexcept(false) FORCE_INLINE;

		void __fastcall reserve(size_type sz) noexcept(false) FORCE_INLINE;

		void __fastcall clear() noexcept(false) FORCE_INLINE;

		void __fastcall swap(safe_colln &t) noexcept(false) FORCE_INLINE;

		/// Resize the container to the requested size, but try to minimise (re-)initialising or deleting any of the existing elements.
		/**
			Current C++03 & C++11 containers have an implicit sequential order of initialisation or re-initialisation of the elements they contain. This enforces an O(n) complexity on resize(). To minimise this (re-)initialisation of existing elements, only initialise new elements added to the container, or delete the excess.

			\todo Ideally I want to have an "uninitialized resize()" (reserve() does not set the size), so that I can initialise the elements of the container in the order I wish, using a parallel fill_n() for example.

			\see resize(), reserve()
		*/
		void __fastcall resize_noinit_nolk(const size_type sz) noexcept(false) FORCE_INLINE;

		/// Resize the container to the requested size.
		/**
			\see resize_noinit_nolk(), resize(), reserve()
		*/
		void __fastcall resize(const size_type sz) noexcept(false) FORCE_INLINE;

		bool __fastcall operator==(safe_colln const &) const noexcept(true) FORCE_INLINE;
		template<typename M1, typename WL1, class Sig1, class MLk1>
		bool __fastcall FORCE_INLINE
		operator==(safe_colln<C, M1, WL1, Sig1, MLk1> const &) const noexcept(true);

		container_type const &colln() const noexcept(true) FORCE_INLINE {
			return static_cast<container_type const &>(*this);
		}
		container_type &colln() noexcept(true) FORCE_INLINE {
			return static_cast<container_type &>(*this);
		}
	};
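
	/**
		A usage sketch for safe_colln, a sketch only: "lock_t" stands for a user-selected exclusive lock type from the library (an assumed name), so the adaptor runs with EREW semantics; supplying a reader-writer lock for the WL parameter would give CREW semantics instead. Each member operation takes the appropriate lock internally, so no external locking is needed for the calls shown.

		\code
		typedef safe_colln<std::vector<int>, lock_t> safe_vector_t;	// "lock_t" is an assumed lock type.

		safe_vector_t v;
		v.push_back(42);	// Taken under the write lock.
		v.push_back(68);
		assert(v.sync_size()==2);	// sync_size() locks, so the result is exact.
		int const i=v[0];	// operator[] returns a copy of the element, not a reference.
		v.erase(42);	// Removes matching elements under the write lock.
		v.clear();
		\endcode
	*/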

	/// An adaptor to add thread-safety assistance, specifically for queues.
	/**
		Note that this adaptor relies on the standardised behaviour of a sequence (or an adaptor thereof) with respect to invalidating iterators when items are added to or removed from the container. Basically only std::list is guaranteed to satisfy these requirements; std::queue often does, but that is implementation-dependent.
		This queue operates with one big, fat lock.
		The iterators are not exposed, to assist with writing thread-safe code.
		Note that if the read_lock_type and write_lock_type are the same, i.e. an exclusive lock is used, then the adaptor will exhibit EREW semantics. If a reader-writer lock is used for them, then it will exhibit CREW semantics.

		\see safe_colln, funky_queue
	*/
	template<
		typename QT,
		typename M,
		typename WL=typename M::write_lock_type,
		class Sig=no_signalling<M>,	///< \todo Should be a template type to ensure that M is a unique type.
		class ValRet=typename QT::value_type,
		class MLk=typename lock::any_order::all<M::lock_traits::api_type, typename M::lock_traits::model_type, M, M>
	>
	class queue : protected QT {
	public:
		typedef QT container_type;	///< The queue to be adapted, usually std::list or std::queue.
		typedef Sig have_work_type;	///< Used to enable functionality to atomically signal if the container contains work or not.
		typedef M atomic_t;	///< The underlying lock object to use.
		typedef WL write_lock_type;	///< The type of write-lock to use. This allows the possibility of using a read-write lock.
		typedef typename atomic_t::read_lock_type read_lock_type;	///< The type of read-lock to use, by default the write lock. This allows the possibility of using a read-write lock.
		typedef MLk lock_all_type;	///< The multi-lock type to use to ensure that operations on combined queues are thread-safe. Note that locking them in any order reduces the likelihood of deadlock at the cost of performance.
		typedef typename atomic_t::lock_traits lock_traits;
		typedef api_threading_traits<lock_traits::api_type, typename lock_traits::model_type> thread_traits;
		typedef typename container_type::reference reference;
		typedef typename container_type::size_type size_type;
		typedef typename container_type::value_type value_type;
		typedef ValRet value_ret_type;	///< The type to return when removing items from the queue.

		/**
			To assist in allowing compile-time computation of the algorithmic order of the threading model.
		*/
		static constexpr generic_traits::memory_access_modes memory_access_mode=(
			have_work_type::memory_access_mode==generic_traits::memory_access_modes::crew_memory_access
			&& lock_all_type::memory_access_mode==generic_traits::memory_access_modes::crew_memory_access
				? generic_traits::memory_access_modes::crew_memory_access
				: generic_traits::memory_access_modes::erew_memory_access
		);

		BOOST_MPL_ASSERT((std::is_same<typename write_lock_type::atomic_t, atomic_t>));

		/// A flag to atomically signal whether the container contains work, how much work there is, and the underlying lock, to assist in writing thread-safe code.
		have_work_type have_work;

		/// The underlying lock, to assist in writing thread-safe code.
		atomic_t & __fastcall pop_lock() noexcept(true) FORCE_INLINE {
			return have_work.locker();
		}
		/// The underlying lock, to assist in writing thread-safe code.
		atomic_t & __fastcall pop_lock() const noexcept(true) FORCE_INLINE {
			return have_work.locker();
		}
		/// The underlying lock, to assist in writing thread-safe code.
		atomic_t & __fastcall push_lock() noexcept(true) FORCE_INLINE {
			return have_work.locker();
		}
		/// The underlying lock, to assist in writing thread-safe code.
		atomic_t & __fastcall push_lock() const noexcept(true) FORCE_INLINE {
			return have_work.locker();
		}

		__stdcall queue() noexcept(noexcept(container_type()) && noexcept(have_work_type())) FORCE_INLINE;
		explicit queue(typename have_work_type::atomic_t &) FORCE_INLINE;
		__stdcall queue(queue const &) noexcept(false) FORCE_INLINE;
		__stdcall ~queue() noexcept(true) FORCE_INLINE;
		queue &__fastcall operator=(queue const &) noexcept(false) FORCE_INLINE;

		bool __fastcall empty() const noexcept(true) FORCE_INLINE;
		size_type __fastcall sync_size() const noexcept(false) FORCE_INLINE;
		size_type __fastcall size() const noexcept(true) FORCE_INLINE;

		value_type __fastcall front() const noexcept(false) FORCE_INLINE;

		void __fastcall push_back(value_type const &v) noexcept(false) FORCE_INLINE;
		void __fastcall push_back(value_type &&v) noexcept(false) FORCE_INLINE;

		value_ret_type __fastcall pop_front() noexcept(false) FORCE_INLINE;
		void __fastcall push_front(const value_type &v) noexcept(false) FORCE_INLINE;
		void __fastcall push_front(value_type &&v) noexcept(false) FORCE_INLINE;

		size_type __fastcall erase(const value_type &v) noexcept(false) FORCE_INLINE;

		void __fastcall clear() noexcept(false) FORCE_INLINE;

		container_type const &colln() const noexcept(true) FORCE_INLINE {
			return static_cast<container_type const &>(*this);
		}
		container_type &colln() noexcept(true) FORCE_INLINE {
			return static_cast<container_type &>(*this);
		}

		value_ret_type __fastcall pop_front_nolk() noexcept(false) FORCE_INLINE;

		value_type __fastcall pop_front_1_nochk_nolk() noexcept(noexcept(have_work.remove())) FORCE_INLINE;
		value_type __fastcall pop_front_1_nochk_nosig() noexcept(true) FORCE_INLINE;

	protected:
		virtual value_ret_type __fastcall pop_front_nochk_nolk() noexcept(false) FORCE_INLINE;
	};
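
	/**
		A usage sketch for the single-lock queue ("lock_t" is again an assumed, user-chosen lock type). Every operation serialises on the one lock, and pop_front() returns the removed element by value (value_ret_type), avoiding the separate front()-then-pop() dance that std::queue would require for thread-safety.

		\code
		typedef queue<std::list<int>, lock_t> work_queue_t;	// "lock_t" is an assumed lock type.

		work_queue_t q;
		q.push_back(1);	// Producer: takes the lock and signals have_work.
		q.push_back(2);
		int const head=q.front();	// A copy of the head element, taken under the lock.
		int const popped=q.pop_front();	// Consumer: removes and returns the head element.
		assert(head==popped);
		\endcode
	*/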

	/// An adaptor to add thread-safety assistance, specifically for queues.
	/**
		Note that this adaptor relies on the standardised behaviour of a sequence (or an adaptor thereof) with respect to invalidating iterators when items are added to or removed from the container. Basically only std::list is guaranteed to satisfy these requirements; std::queue often does, but that is implementation-dependent.
		This queue uses two locks, a pop lock and a push lock, which operate independently as long as the queue is long enough.
		The operations push() and push_back() take the push lock and are thus serialised with respect to each other.
		The operations pop() and pop_front() take the pop lock and are thus serialised with respect to each other.
		When the queue is too short, these pairs of operations are also mutually serialised.
		By default the adapted queue does not use a read-write lock.
		The iterators are not exposed, to assist with writing thread-safe code.
		Note that if the read_lock_type and write_lock_type are the same, i.e. an exclusive lock is used, then the adaptor will exhibit EREW semantics. If a reader-writer lock is used for them, then it will exhibit CREW semantics.

		\see safe_colln, queue
	*/
	template<
		typename QT,
		typename M,
		typename WL=typename M::write_lock_type,
		class Sig=no_signalling<M>,
		class ValRet=typename QT::value_type,
		class MLk=typename lock::any_order::all<M::lock_traits::api_type, typename M::lock_traits::model_type, M, M>
	>
	class funky_queue : private QT {
	public:
		typedef QT container_type;	///< The queue to be adapted, usually std::list or std::queue.
		typedef Sig have_work_type;	///< Used to enable functionality to atomically signal if the container contains work or not.
		typedef M atomic_t;	///< The underlying lock object to use.
		typedef WL write_lock_type;	///< The type of write-lock to use. This allows the possibility of using a read-write lock.
		typedef typename atomic_t::read_lock_type read_lock_type;	///< The type of read-lock to use, by default the write lock. This allows the possibility of using a read-write lock.
		typedef MLk lock_all_type;	///< The multi-lock type to use to ensure that operations on combined queues are thread-safe. Note that locking them in any order reduces the likelihood of deadlock at the cost of performance.
		typedef typename atomic_t::lock_traits lock_traits;
		template<class... T>
		using scoped_lock=typename lock_traits::template scoped_lock<T...>;
		typedef api_threading_traits<lock_traits::api_type, typename lock_traits::model_type> thread_traits;
		typedef typename container_type::reference reference;
		typedef typename container_type::size_type size_type;
		typedef typename container_type::value_type value_type;
		typedef ValRet value_ret_type;	///< The type to return when removing items from the queue.

		static constexpr size_type serialise_size=2;	///< The size of the queue, below which the push & pop operations serialise.

		/**
			To assist in allowing compile-time computation of the algorithmic order of the threading model.
		*/
		static constexpr generic_traits::memory_access_modes memory_access_mode=(
			have_work_type::memory_access_mode==generic_traits::memory_access_modes::crew_memory_access
			&& lock_all_type::memory_access_mode==generic_traits::memory_access_modes::crew_memory_access
				? generic_traits::memory_access_modes::crew_memory_access
				: generic_traits::memory_access_modes::erew_memory_access
		);

		BOOST_MPL_ASSERT((std::is_same<typename write_lock_type::atomic_t, atomic_t>));

		/// A flag to atomically signal whether the container contains work, and how much work.
		have_work_type have_work;

		/// The underlying pop lock, to assist in writing thread-safe code.
		atomic_t & __fastcall pop_lock() noexcept(true) FORCE_INLINE {
			return pop_lock_;
		}
		/// The underlying pop lock, to assist in writing thread-safe code.
		atomic_t & __fastcall pop_lock() const noexcept(true) FORCE_INLINE {
			return pop_lock_;
		}
		/// The underlying push lock, to assist in writing thread-safe code.
		atomic_t & __fastcall push_lock() noexcept(true) FORCE_INLINE {
			return push_lock_;
		}
		/// The underlying push lock, to assist in writing thread-safe code.
		atomic_t & __fastcall push_lock() const noexcept(true) FORCE_INLINE {
			return push_lock_;
		}

		__stdcall funky_queue() noexcept(noexcept(container_type()) && noexcept(have_work_type())) FORCE_INLINE;
		explicit funky_queue(typename have_work_type::atomic_t &) FORCE_INLINE;
		__stdcall funky_queue(funky_queue const &) noexcept(false) FORCE_INLINE;
		__stdcall ~funky_queue() FORCE_INLINE;
		funky_queue &__fastcall operator=(funky_queue const &) noexcept(false) FORCE_INLINE;

		bool __fastcall empty() const noexcept(true) FORCE_INLINE;
		size_type __fastcall sync_size() const noexcept(false) FORCE_INLINE;
		size_type __fastcall size() const noexcept(true) FORCE_INLINE;

		void __fastcall clear() noexcept(false) FORCE_INLINE;
		/**
			This function is provided to assist with writing thread-safe code.

			\return Returns true if a value was erased, otherwise false.
		*/
		bool __fastcall erase(value_type const &) noexcept(false) FORCE_INLINE;

		/**
			If the queue is long enough, then this function will not block the operation of push() or push_back().

			\return A copy of the value that is on the front of the queue.
		*/
		value_type __fastcall front() const noexcept(false) FORCE_INLINE;
		/**
			If the queue is long enough, then this function will not block the operation of pop() or pop_front().

			\return A copy of the value that is on the back of the queue.
		*/
		value_type __fastcall back() const noexcept(false) FORCE_INLINE;

		/**
			If the queue is long enough, then this function will not block the operation of pop() or pop_front().

			\param v The value to be added.
		*/
		void __fastcall push(value_type const &v) noexcept(false) FORCE_INLINE;
		/**
			If the queue is long enough, then this function will not block the operation of pop() or pop_front().

			\param v The value to be added.
		*/
		void __fastcall push(value_type &&v) noexcept(false) FORCE_INLINE;
		/**
			If the queue is long enough, then this function will not block the operation of pop() or pop_front().

			\param v The value to be added.
		*/
		void __fastcall push_back(value_type const &v) noexcept(false) FORCE_INLINE;
		/**
			If the queue is long enough, then this function will not block the operation of pop() or pop_front().

			\param v The value to be added.
		*/
		void __fastcall push_back(value_type &&v) noexcept(false) FORCE_INLINE;

		/**
			If the queue is long enough, then this function will not block the operation of push() or push_back().

			\return The value popped off the queue.
		*/
		value_ret_type __fastcall pop() noexcept(false) FORCE_INLINE;
		/**
			If the queue is long enough, then this function will not block the operation of push() or push_back().

			\return The value popped off the queue.
		*/
		value_ret_type __fastcall pop_front() noexcept(false) FORCE_INLINE;

		/**
			\param e The item to be removed from the container_type.
		*/
		void __fastcall remove(const value_type &e) noexcept(false) FORCE_INLINE;

		container_type const &colln() const noexcept(true) FORCE_INLINE {
			return static_cast<container_type const &>(*this);
		}
		container_type &colln() noexcept(true) FORCE_INLINE {
			return static_cast<container_type &>(*this);
		}

		value_ret_type __fastcall pop_front_nolk() noexcept(false) FORCE_INLINE;

		value_type __fastcall pop_front_1_nochk_nolk() noexcept(noexcept(have_work.remove())) FORCE_INLINE;
		value_type __fastcall pop_front_1_nochk_nosig() noexcept(true) FORCE_INLINE;

	protected:
		value_type &__fastcall back_nolk() noexcept(true) FORCE_INLINE;
		virtual value_ret_type __fastcall pop_front_nochk_nolk() noexcept(false) FORCE_INLINE;

	private:
		mutable atomic_t push_lock_;
		mutable atomic_t pop_lock_;

		value_ret_type __fastcall pop_nolk() noexcept(false) FORCE_INLINE;

		value_type const &__fastcall back_nolk() const noexcept(true) FORCE_INLINE;
		void __fastcall push_back_nolk(const value_type &e) noexcept(false) FORCE_INLINE;
		void __fastcall push_back_nolk(value_type &&e) noexcept(false) FORCE_INLINE;
		void __fastcall push_nolk(const value_type &e) noexcept(false) FORCE_INLINE;
		void __fastcall push_nolk(value_type &&e) noexcept(false) FORCE_INLINE;
		void __fastcall pop_nochk_nolk() noexcept(false) FORCE_INLINE;
	};
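
	/**
		A sketch of the two-lock queue in use ("lock_t" as before is an assumed lock type). Once more than serialise_size items are queued, a producer inside push_back() holds only the push lock and a consumer inside pop_front() holds only the pop lock, so the two proceed concurrently; at or below that threshold the operations take both locks and serialise.

		\code
		typedef funky_queue<std::list<int>, lock_t> mpmc_queue_t;	// "lock_t" is an assumed lock type.

		mpmc_queue_t q;
		q.push_back(1);
		q.push_back(2);
		q.push_back(3);	// The queue is now longer than serialise_size...
		int const popped=q.pop_front();	// ...so this pop need not contend with a concurrent push_back().
		\endcode
	*/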

} } }

#endif