libjmmcg  release_579_6_g8cffd
A C++ library containing an eclectic mix of useful, advanced components.
batch.hpp
Go to the documentation of this file.
1 /******************************************************************************
2 ** Copyright © 2004 by J.M.McGuiness, coder@hussar.me.uk
3 **
4 ** This library is free software; you can redistribute it and/or
5 ** modify it under the terms of the GNU Lesser General Public
6 ** License as published by the Free Software Foundation; either
7 ** version 2.1 of the License, or (at your option) any later version.
8 **
9 ** This library is distributed in the hope that it will be useful,
10 ** but WITHOUT ANY WARRANTY; without even the implied warranty of
11 ** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 ** Lesser General Public License for more details.
13 **
14 ** You should have received a copy of the GNU Lesser General Public
15 ** License along with this library; if not, write to the Free Software
16 ** Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18 
#include "config.h"

#include <bits/ios_base.h>	// NOTE(review): libstdc++-internal header; kept for compatibility, but prefer the public <ios> below.
#include <ios>	// Public header providing std::ios_base.
#include <utility>	// std::declval, used in the noexcept-specifications in this file.
22 
23 namespace jmmcg { namespace LIBJMMCG_VER_NAMESPACE {
24 
25  /// An adaptor for collections that batches up to I items being atomically removed from the collection, Colln.
26  /**
27  This means that as items are removed from the collection, they are potentially added to the last item that was removed from the collection. This implies that the Colln::value_type is some kind of collection to which that work may be added.
28  */
29  template<
30  class Colln, ///< The collection to be adapted.
31  unsigned long I ///< The maximum number of items per batch.
32  >
33  struct front_batch : public Colln {
34  static constexpr unsigned long max_size=I; ///< The maximum number of items to batch.
35  typedef Colln base_t;
36  typedef typename base_t::thread_traits thread_traits;
37  typedef Colln container_type;
40 
41  static_assert(max_size>1UL, "Wrong batch size.");
42 
43  constexpr front_batch() noexcept(noexcept(base_t()))
44  : base_t() {}
45  explicit front_batch(typename base_t::have_work_type::atomic_t &a) noexcept(noexcept(base_t(std::declval<base_t>())))
46  : base_t(a) {}
47  front_batch(front_batch const &fb) noexcept(noexcept(base_t(std::declval<front_batch>())))
48  : base_t(fb) {}
49  front_batch &__fastcall FORCE_INLINE operator=(front_batch const &fb) noexcept(noexcept(base_t::operator=(std::declval<front_batch>()))) {
50  base_t::operator=(fb);
51  return *this;
52  }
53 
54  /// The items are batched when popped from the queue.
55  /**
56  This is used to return a collection of items from the signalled_work_queue, in the order in which they were inserted. At least one item will be returned, and if there are sufficient items in the signalled_work_queue, then max_size items will be returned. This implies that the thread that extracts items from the queue does the work in batching them.
57 
58  \return A batch of either one or max_size items.
59  */
60  value_ret_type __fastcall pop_front_nochk_nolk() noexcept(true) FORCE_INLINE;
61  };
62 
/// Batch-sizes of zero aren't allowed.
/**
	NOTE(review): this specialisation is deliberately empty: it inherits from Colln but re-declares none of the batching interface of the primary template, so any attempt to use the batching members (e.g. pop_front_nochk_nolk()) with I==0 fails to compile.
*/
template<
	class Colln	///< The collection to be adapted.
>
class front_batch<Colln, 0UL> : public Colln {
};
69 
70  /// Batch-sizes of zero aren't allowed.
71  template<
72  class Colln ///< The collection to be adapted.
73  >
74  struct front_batch<Colln, 1UL> : public Colln {
75  static constexpr unsigned long max_size=1UL; ///< The maximum number of items to batch.
76  typedef Colln base_t;
77  typedef typename base_t::thread_traits thread_traits;
78  typedef Colln container_type;
80 
81  constexpr front_batch() noexcept(noexcept(base_t()))
82  : base_t() {}
83  explicit front_batch(typename base_t::have_work_type::atomic_t &a) noexcept(noexcept(base_t(a)))
84  : base_t(a) {}
85  front_batch(front_batch const &fb) noexcept(noexcept(base_t(std::declval<front_batch>())))
86  : base_t(fb) {}
87  front_batch &__fastcall FORCE_INLINE operator=(front_batch const &fb) noexcept(noexcept(base_t::operator=(std::declval<front_batch>()))) {
88  base_t::operator=(fb);
89  return *this;
90  }
91 
92  value_ret_type __fastcall pop_front_nochk_nolk() noexcept(true);
93  };
94 
/// An adaptor for collections that batches up to I items being both added to or removed from the collection, Colln.
/**
	This means that as items are added to the collection, they are potentially added to the last item that was added to the collection. This implies that the Colln::value_type is some kind of collection to which the new work may be added.
*/
template<
	class Colln,	///< The collection to be adapted.
	unsigned long I	///< The maximum number of items per batch.
>
struct back_batch final : public front_batch<Colln, I> {
	typedef front_batch<Colln, I> base_t;
	using base_t::max_size;	///< The maximum number of items to batch, inherited from the front_batch base.
	typedef typename base_t::thread_traits thread_traits;

	// Also asserted in the front_batch base; repeated here so a bad I is diagnosed against this template as well.
	static_assert(max_size>1UL, "Wrong batch size.");

	constexpr back_batch() noexcept(noexcept(base_t()))
	: base_t() {}
	/// \param a	Presumably the atomic used to signal that work is available - forwarded to the base; TODO confirm against have_work_type.
	explicit back_batch(typename base_t::have_work_type::atomic_t &a) noexcept(noexcept(base_t(a)))
	: base_t(a) {}
	/// The items are batched when pushed onto the queue.
	/**
		The items are batched as they are added to the queue. Therefore the thread that adds the items does the batching work, and that the queue contains a mix of batched and unbatched items, thus potentially reducing the number of items added to the queue, therefore the number of memory allocations done.
	*/
// TODO Implement this: void __fastcall push_back(const value_type &data_item) noexcept(false);

	/// The items are batched when pushed onto the queue.
	/**
		The items are batched as they are added to the queue. Therefore the thread that adds the items does the batching work, and that the queue contains a mix of batched and unbatched items, thus potentially reducing the number of items added to the queue, therefore the number of memory allocations done.
	*/
// TODO Implement this: void __fastcall push(const value_type &data_item) noexcept(false);
};
129 
/// Batch-sizes of zero aren't allowed.
/**
	NOTE(review): deliberately empty, mirroring front_batch<Colln, 0UL>: no batching interface is provided for a zero-sized batch, so uses of it fail to compile.
*/
template<
	class Colln	///< The collection to be adapted.
>
class back_batch<Colln, 0UL> final : public front_batch<Colln, 0UL> {
};
136 
137  /// If the batch-size is one, collapse this to an empty wrapper of the collection_type.
138  template<
139  class Colln ///< The collection to be adapted.
140  >
141  struct back_batch<Colln, 1UL> final : public front_batch<Colln, 1UL> {
142  typedef front_batch<Colln, 1UL> base_t;
143  using base_t::max_size;
144  typedef typename base_t::thread_traits thread_traits;
148 
149  constexpr back_batch() noexcept(noexcept(base_t()))
150  : base_t() {}
151  explicit back_batch(typename base_t::have_work_type::atomic_t &a) noexcept(noexcept(base_t(a)))
152  : base_t(a) {}
153  back_batch(back_batch const &bb) noexcept(noexcept(base_t(std::declval<back_batch>())))
154  : base_t(bb) {}
155  back_batch &__fastcall FORCE_INLINE operator=(back_batch const &bb) noexcept(noexcept(base_t::operator=(std::declval<back_batch>()))) {
156  base_t::operator=(bb);
157  return *this;
158  }
159 
160  void __fastcall push_back(const value_type &data_item) noexcept(false) FORCE_INLINE;
161  void __fastcall push_back(value_type &&data_item) noexcept(false) FORCE_INLINE;
162  void __fastcall push(const value_type &data_item) noexcept(false) FORCE_INLINE;
163  void __fastcall push(value_type &&data_item) noexcept(false) FORCE_INLINE;
164  };
165 
166 } }
167 
168 #include "batch_impl.hpp"