/// An adaptor for collections that batches up to I items as they are atomically removed from the collection, Colln.
/**
 This means that, as items are removed from the collection, they may be appended to the last item that was removed. This implies that Colln::value_type is itself some kind of collection to which that work may be added.
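 For example (an illustrative sketch; the concrete types are assumptions, and this adaptor is assumed to be the front_batch referenced below), the adapted collection might be a std::deque whose elements are themselves std::vectors of work items:
 \code
 typedef std::vector<int> batch_type;                           // Colln::value_type: itself a collection.
 typedef std::deque<batch_type> collection_type;                // Colln, the collection to be adapted.
 typedef front_batch<collection_type, 8UL> batched_collection;  // Batches of up to 8 items.
 \endcode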
 */
template<
	class Colln, ///< The collection to be adapted.
	unsigned long I ///< The maximum number of items per batch.
/// The items are batched when popped from the queue.
/**
 This is used to return a collection of items from the signalled_work_queue, in the order in which they were inserted. At least one item will be returned, and if there are sufficient items in the signalled_work_queue, then max_size items will be returned. This implies that the thread that extracts items from the queue does the work of batching them.

 \return A batch of at least one, and at most max_size, items.
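
 A minimal sketch of the technique (illustrative only, not this class's actual implementation; locking and signalling are omitted): the extracting thread takes the front batch, then greedily merges items from subsequent batches until max_size items have been collected or the queue is empty.
 \code
 template<class Queue, unsigned long max_size>
 typename Queue::value_type batched_pop(Queue &queue) {
 	// Precondition: at least one batch is present (the caller has been signalled).
 	typename Queue::value_type batch(queue.front());
 	queue.pop_front();
 	while (batch.size()<max_size && !queue.empty()) {
 		typename Queue::value_type &next=queue.front();
 		// Move items across, preserving insertion order.
 		while (!next.empty() && batch.size()<max_size) {
 			batch.push_back(next.front());	// Requires that Colln::value_type supports this.
 			next.erase(next.begin());
 		}
 		if (next.empty()) {
 			queue.pop_front();
 		}
 	}
 	return batch;
 }
 \endcode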
/// An adaptor for collections that batches up to I items as they are added to or removed from the collection, Colln.
/**
 This means that, as items are added to the collection, they may be appended to the last item that was added. This implies that Colln::value_type is itself some kind of collection to which the new work may be added. For example, with I=2, pushing the items a, b, c and d one at a time could leave the collection holding the two batches {a, b} and {c, d}: two entries rather than four.
 */
template<
	class Colln, ///< The collection to be adapted.
	unsigned long I ///< The maximum number of items per batch.
/// The items are batched when pushed onto the queue.
/**
 The items are batched as they are added to the queue, so the thread that adds the items does the batching work, and the queue may contain a mix of batched and unbatched items. This potentially reduces the number of items added to the queue, and therefore the number of memory allocations performed.
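
 A minimal sketch of the technique (illustrative only, not this member's actual implementation; locking is omitted): if the last batch in the collection has spare capacity, append the new item to it, otherwise start a new batch, so a new queue entry is created at most once per max_size items rather than once per item.
 \code
 template<class Queue, unsigned long max_size, class Item>
 void batched_push(Queue &queue, Item const &item) {
 	if (!queue.empty() && queue.back().size()<max_size) {
 		queue.back().push_back(item);	// Merge into the last, partially-full, batch.
 	} else {
 		typename Queue::value_type batch;
 		batch.push_back(item);	// Start a new batch containing one item.
 		queue.push_back(batch);
 	}
 }
 \endcode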
 */
// TODO Implement this: void __fastcall push_back(const value_type &data_item) noexcept(false);

/// The items are batched when pushed onto the queue.
/**
 The items are batched as they are added to the queue, so the thread that adds the items does the batching work, and the queue may contain a mix of batched and unbatched items. This potentially reduces the number of items added to the queue, and therefore the number of memory allocations performed.
 */
// TODO Implement this: void __fastcall push(const value_type &data_item) noexcept(false);
};

/// Batch-sizes of zero aren't allowed.
template<
	class Colln ///< The collection to be adapted.
>
class back_batch<Colln, 0UL> final : public front_batch<Colln, 0UL> {
};

/// If the batch-size is one, collapse this to an empty wrapper of the collection_type.
template<
	class Colln ///< The collection to be adapted.
>
struct back_batch<Colln, 1UL> final : public front_batch<Colln, 1UL> {