libjmmcg  release_579_6_g8cffd
A C++ library containing an eclectic mix of useful, advanced components.
thread_pool_base_impl.hpp
1 /******************************************************************************
2 ** Copyright © 2014 by J.M.McGuiness, coder@hussar.me.uk
3 **
4 ** This library is free software; you can redistribute it and/or
5 ** modify it under the terms of the GNU Lesser General Public
6 ** License as published by the Free Software Foundation; either
7 ** version 2.1 of the License, or (at your option) any later version.
8 **
9 ** This library is distributed in the hope that it will be useful,
10 ** but WITHOUT ANY WARRANTY; without even the implied warranty of
11 ** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 ** Lesser General Public License for more details.
13 **
14 ** You should have received a copy of the GNU Lesser General Public
15 ** License along with this library; if not, write to the Free Software
16 ** Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18 
19 namespace jmmcg { namespace LIBJMMCG_VER_NAMESPACE { namespace ppd { namespace private_ {
20 
21 template<
22  class DM,
23  class Ps,
24  typename PTT,
25  class Pt
26 >
27 template<class InpWk>
28 struct thread_pool_base<DM, Ps, PTT, Pt>::execution_context_stack final : public private_::execution_context_stack_type<work_distribution_mode, pool_traits_type::result_traits_, thread_pool_base, InpWk> {
29  typedef private_::execution_context_stack_type<work_distribution_mode, pool_traits_type::result_traits_, thread_pool_base, InpWk> base_t;
30 // TODO fails: BOOST_MPL_ASSERT((std::is_same<typename base_t::thread_wk_t::closure_t::argument_type, InpWk>));
31  execution_context_stack(thread_pool_base &pool, typename pool_traits_type::thread_wk_elem_type::cfg_details_type::params const &p, typename base_t::thread_wk_t::closure_t::argument_type &&wk) FORCE_INLINE
32  : base_t(pool, p, std::move(wk)) {
33  }
34  /**
35  This needs to be declared to be standards-compliant, but needn't be defined, as copy-constructor elision does not require the definition.
36  */
37  execution_context_stack(execution_context_stack const &) FORCE_INLINE;
38  ~execution_context_stack() noexcept(false) FORCE_INLINE {}
39 };
40 
41 template<
42  class DM,
43  class Ps,
44  typename PTT,
45  class Pt
46 >
47 template<
48  typename InpWk,
49  class PtrFnType
50 >
51 struct thread_pool_base<DM, Ps, PTT, Pt>::create_direct final : public private_::create_direct<pool_traits_type, InpWk, PtrFnType, &std::remove_reference<InpWk>::type::process> {
52  typedef private_::create_direct<pool_traits_type, InpWk, PtrFnType, &std::remove_reference<InpWk>::type::process> base_t;
54  using closure_t=typename base_t::closure_t;
56  /// This is a useful typedef to get at the execution_context.
57  /**
58  The execution_context is created by joinably transferring work into the pool. It has various uses, but is primarily used to atomically and synchronously wait on the results of the work on the closure_base-derived closure-derived object, as specified by the thread_wk_t object transferred into the pool. But it can also pass back specified exceptions that may be thrown by the work. It can also be used to asynchronously test if the work has been completed, and delete the work from the pool, if it has not been started.
59 
60  \see execution_context_stack_type
61  */
62  typedef typename thread_pool_base<DM, Ps, PTT, Pt>::template execution_context_stack<InpWk> execution_context_stack;
63 };
64 
65 template<
66  class DM,
67  class Ps,
68  typename PTT,
69  class Pt
70 >
71 template<
72  class Colln,
73  class Fn
74 >
75 class thread_pool_base<DM, Ps, PTT, Pt>::for_each_t {
76 private:
77  typedef Fn operation_type;
78  typedef alg_wk_wrap::for_each_work_type<operation_type> work_type;
79  typedef create_direct<work_type> creator_t;
80  typedef subdivide_n_gen_wk1<size_mode, thread_pool_base, typename creator_t::closure_t, one_container<Colln>, alg_wk_wrap::for_each_reduce> gen_wk_t;
81 
82 public:
83  /// This is a useful typedef to get at the execution_context.
84  /**
85  The execution_context is created by joinably transferring work into the pool. It has various uses, but is primarily used to atomically and synchronously wait on the results of the work on the closure_base-derived closure-derived object, as specified by the thread_wk_t object transferred into the pool. But it can also pass back specified exceptions that may be thrown by the work. It can also be used to asynchronously test if the work has been completed, and delete the work from the pool, if it has not been started.
86 
87  \see create_direct
88  \see execution_context_algo_buff_stack_type
89  \see joinable_t
90  \see thread_wk_t
91  \see closure_base
92  */
93  typedef execution_context_algo_buff_stack_type<work_distribution_mode, pool_traits_type::result_traits_, pool_traits_type::template algo_thread_wk_buffered, gen_wk_t, work_type> execution_context;
94 
95  /**
96  To assist in allowing compile-time computation of the algorithmic order of the threading model.
97  */
98  static constexpr ppd::generic_traits::memory_access_modes memory_access_mode=(
99  work_type::memory_access_mode==ppd::generic_traits::memory_access_modes::crew_memory_access
100  && gen_wk_t::memory_access_mode==ppd::generic_traits::memory_access_modes::crew_memory_access
101  && execution_context::memory_access_mode==ppd::generic_traits::memory_access_modes::crew_memory_access
102  && Colln::memory_access_mode==ppd::generic_traits::memory_access_modes::crew_memory_access
103  ? ppd::generic_traits::memory_access_modes::crew_memory_access
104  : ppd::generic_traits::memory_access_modes::erew_memory_access
105  );
106 
107  __stdcall for_each_t(thread_pool_base &p, Colln const &c, operation_type const &f) noexcept(true) FORCE_INLINE
108  : pool(p), colln(c), fn(f) {
109  }
110 
111  /// Joinably transfer the work to the pool.
112  /**
113  \see thread_wk_t, execution_context, cliques, cliques_t
114  */
115  execution_context
116  process(cliques::element_type const cliques, typename pool_traits_type::thread_wk_elem_type::cfg_details_type::params const &cfg_parms) const FORCE_INLINE {
117  return execution_context(
118  pool,
119  cfg_parms,
120  typename execution_context::thread_wk_t::closure_t::argument_type(fn),
121  init_num_jobs_par_alg_other,
122  typename gen_wk_t::alg_wrap_t::work_complete_t::containers_type(colln),
123  cliques,
124  default_num_subranges
125  );
126  }
127 
128 private:
129  thread_pool_base &pool;
130  Colln const &colln;
131  operation_type const &fn;
132 };
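/**
 A minimal usage sketch of for_each_t, using only the members visible above; `pool`, `vec`, `fn`, `cliq` and `parms` are illustrative placeholders, not identifiers defined by the library.

 \code
 for_each_t<colln_t, fn_t> alg(pool, vec, fn);
 // process() joinably transfers the sub-divided work into the pool.
 auto context(alg.process(cliq, parms));
 // Dereferencing the execution_context waits until every sub-range has
 // completed, re-throwing any exception raised by the work.
 *context;
 \endcode
*/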
133 
134 template<
135  class DM,
136  class Ps,
137  typename PTT,
138  class Pt
139 >
140 template<
141  class Colln,
142  typename Pred
143 >
144 class thread_pool_base<DM, Ps, PTT, Pt>::count_if_t {
145 public:
146  typedef Pred operation_type;
147 
148 private:
149  typedef typename os_traits::lock_traits::template atomic_counter_type<typename Colln::size_type> num_elems_ct_t;
150  typedef alg_wk_wrap::countor_work_type<operation_type, num_elems_ct_t> work_type;
151  typedef create_direct<work_type> creator_t;
152  typedef subdivide_n_gen_wk1<size_mode, thread_pool_base, typename creator_t::closure_t, one_container<Colln>, alg_wk_wrap::count_if_reduce> gen_wk_t;
153 
154 public:
155  /// A bit of syntactic sugar: allow the user to not have to double-dereference the execution_context to get at the result_type. This is a useful typedef to get at the execution_context.
156  /**
157  The execution_context is created by joinably transferring work into the pool. It has various uses, but is primarily used to atomically and synchronously wait on the results of the work on the closure_base-derived closure-derived object, as specified by the thread_wk_t object transferred into the pool. But it can also pass back specified exceptions that may be thrown by the work. It can also be used to asynchronously test if the work has been completed, and delete the work from the pool, if it has not been started.
158 
159  \see create_direct
160  \see execution_context_algo_buff_stack_type
161  \see joinable_t
162  \see thread_wk_t
163  \see closure_base
164  */
165  typedef execution_context_algo_buff_stack_type<work_distribution_mode, pool_traits_type::result_traits_, pool_traits_type::template algo_thread_wk_buffered, gen_wk_t, work_type, deref::extra_deref, core_work_result::to_zero> execution_context;
166 
167  /**
168  To assist in allowing compile-time computation of the algorithmic order of the threading model.
169  */
170  static constexpr ppd::generic_traits::memory_access_modes memory_access_mode=(
171  num_elems_ct_t::memory_access_mode==ppd::generic_traits::memory_access_modes::crew_memory_access
172  && work_type::memory_access_mode==ppd::generic_traits::memory_access_modes::crew_memory_access
173  && gen_wk_t::memory_access_mode==ppd::generic_traits::memory_access_modes::crew_memory_access
174  && execution_context::memory_access_mode==ppd::generic_traits::memory_access_modes::crew_memory_access
175  && Colln::memory_access_mode==ppd::generic_traits::memory_access_modes::crew_memory_access
176  ? ppd::generic_traits::memory_access_modes::crew_memory_access
177  : ppd::generic_traits::memory_access_modes::erew_memory_access
178  );
179 
180  /**
181  If this assertion fails, then the counter type in num_elems_ct_t needs changing.
182 
183  \see num_elems_ct_t
184  */
185  BOOST_MPL_ASSERT((std::is_same<typename execution_context::result_type::value_type, typename Colln::size_type>));
186 
187  __stdcall count_if_t(thread_pool_base &p, Colln const &c, operation_type const &pr) noexcept(true) FORCE_INLINE
188  : pool(p), colln(c), pred(pr) {
189  }
190 
191  /// Joinably transfer the predicate to the pool.
192  /**
193  \see create_direct, execution_context, cliques, cliques_t
194  */
195  execution_context
196  process(cliques::element_type const cliques, typename pool_traits_type::thread_wk_elem_type::cfg_details_type::params const &cfg_parms) const FORCE_INLINE {
197  return execution_context(
198  pool,
199  typename pool_traits_type::thread_wk_elem_type::cfg_details_type::params("count_if", cfg_parms),
200  typename execution_context::thread_wk_t::closure_t::argument_type(pred),
201  init_num_jobs_par_alg_other,
202  typename gen_wk_t::alg_wrap_t::work_complete_t::containers_type(colln),
203  cliques,
204  default_num_subranges
205  );
206  }
207 
208 private:
209  thread_pool_base &pool;
210  Colln const &colln;
211  operation_type const pred;
212 };
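/**
 A minimal usage sketch of count_if_t, assuming the single-dereference sugar described above; all lower-case names are illustrative placeholders only.

 \code
 count_if_t<colln_t, pred_t> alg(pool, vec, pred);
 auto context(alg.process(cliq, parms));
 // A single dereference waits for all sub-ranges and yields the accumulated
 // count; the assertion above ties its value_type to Colln::size_type.
 auto const matches=*context;
 \endcode
*/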
213 
214 template<
215  class DM,
216  class Ps,
217  typename PTT,
218  class Pt
219 >
220 template<
221  class Colln
222 >
223 struct thread_pool_base<DM, Ps, PTT, Pt>::count_t
224 : public thread_pool_base<DM, Ps, PTT, Pt>::template count_if_t<
225  Colln,
226  decltype(
227  std::bind(std::equal_to<typename Colln::value_type>(), typename Colln::value_type(), std::placeholders::_1)
228  )
229 > {
230  typedef count_if_t<
231  Colln,
232  decltype(
233  std::bind(std::equal_to<typename Colln::value_type>(), typename Colln::value_type(), std::placeholders::_1)
234  )
235  > base_t;
236  typedef typename base_t::execution_context execution_context;
237 
238  __stdcall count_t(thread_pool_base &p, Colln const &c, typename Colln::value_type const &v) noexcept(true) FORCE_INLINE
239  : base_t(p, c, typename base_t::operation_type(std::equal_to<typename Colln::value_type>(), v, std::placeholders::_1)) {
240  }
241 };
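/**
 count_t is count_if_t with the predicate pre-bound; a sketch of the equivalent predicate, with `v` a placeholder value of Colln::value_type:

 \code
 auto const pred=std::bind(std::equal_to<typename colln_t::value_type>(), v, std::placeholders::_1);
 \endcode
*/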
242 
243 template<
244  class DM,
245  class Ps,
246  typename PTT,
247  class Pt
248 >
249 template<
250  class Colln,
251  typename Pred
252 >
253 class thread_pool_base<DM, Ps, PTT, Pt>::find_if_t {
254 public:
255  typedef Pred operation_type;
256 
257 private:
258  typedef stl_functor_result_type<bool> found_t;
259  typedef alg_wk_wrap::countor_work_type<operation_type, found_t> work_type;
260  typedef create_direct<work_type> creator_t;
261  typedef subdivide_n_gen_wk1<size_mode, thread_pool_base, typename creator_t::closure_t, one_container<Colln>, alg_wk_wrap::find_if_reduce> gen_wk_t;
262 
263 public:
264  /// A bit of syntactic sugar: allow the user to not have to double-dereference the execution_context to get at the result_type. This is a useful typedef to get at the execution_context.
265  /**
266  The execution_context is created by joinably transferring work into the pool. It has various uses, but is primarily used to atomically and synchronously wait on the results of the work on the closure_base-derived closure-derived object, as specified by the thread_wk_t object transferred into the pool. But it can also pass back specified exceptions that may be thrown by the work. It can also be used to asynchronously test if the work has been completed, and delete the work from the pool, if it has not been started.
267 
268  \see create_direct
269  \see execution_context_algo_buff_stack_type
270  \see joinable_t
271  \see thread_wk_t
272  \see closure_base
273  */
274  typedef execution_context_algo_buff_stack_type<work_distribution_mode, pool_traits_type::result_traits_, pool_traits_type::template algo_thread_wk_buffered, gen_wk_t, work_type, deref::extra, core_work_result::to_false> execution_context;
275 
276  /**
277  To assist in allowing compile-time computation of the algorithmic order of the threading model.
278  */
279  static constexpr ppd::generic_traits::memory_access_modes memory_access_mode=(
280  work_type::memory_access_mode==ppd::generic_traits::memory_access_modes::crew_memory_access
281  && gen_wk_t::memory_access_mode==ppd::generic_traits::memory_access_modes::crew_memory_access
282  && execution_context::memory_access_mode==ppd::generic_traits::memory_access_modes::crew_memory_access
283  && Colln::memory_access_mode==ppd::generic_traits::memory_access_modes::crew_memory_access
284  ? ppd::generic_traits::memory_access_modes::crew_memory_access
285  : ppd::generic_traits::memory_access_modes::erew_memory_access
286  );
287 
288  __stdcall find_if_t(thread_pool_base &p, Colln const &c, operation_type const &pr) noexcept(true) FORCE_INLINE
289  : pool(p), colln(c), pred(pr) {
290  }
291 
292  /// Joinably transfer the predicate to the pool.
293  /**
294  \todo It would be nice if this algorithm checked if the item had been found and didn't subsequently blindly continue to add more work to search the other sub-ranges, i.e. terminated early.
295 
296  \see create_direct, execution_context, cliques, cliques_t
297  */
298  execution_context
299  process(cliques::element_type const cliques, typename pool_traits_type::thread_wk_elem_type::cfg_details_type::params const &cfg_parms) const FORCE_INLINE {
300  return execution_context(
301  pool,
302  typename pool_traits_type::thread_wk_elem_type::cfg_details_type::params("find_if", cfg_parms),
303  typename execution_context::thread_wk_t::closure_t::argument_type(pred),
304  init_num_jobs_par_alg_other,
305  typename gen_wk_t::alg_wrap_t::work_complete_t::containers_type(colln),
306  cliques,
307  default_num_subranges
308  );
309  }
310 
311 private:
312  thread_pool_base &pool;
313  Colln const &colln;
314  operation_type const pred;
315 };
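/**
 A minimal usage sketch of find_if_t; note the \todo above: the search does not yet terminate early, so every sub-range is still examined. All lower-case names are illustrative placeholders only.

 \code
 find_if_t<colln_t, pred_t> alg(pool, vec, pred);
 auto context(alg.process(cliq, parms));
 // The single dereference waits and yields a bool-like result, reduced across
 // the sub-ranges via find_if_reduce and defaulting to false.
 auto const found=*context;
 \endcode
*/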
316 
317 template<
318  class DM,
319  class Ps,
320  typename PTT,
321  class Pt
322 >
323 template<
324  class Colln
325 >
326 struct thread_pool_base<DM, Ps, PTT, Pt>::find_t
327 : public thread_pool_base<DM, Ps, PTT, Pt>::template find_if_t<
328  Colln,
329  decltype(
330  std::bind(std::equal_to<typename Colln::value_type>(), typename Colln::value_type(), std::placeholders::_1)
331  )
332 > {
333  typedef find_if_t<
334  Colln,
335  decltype(
336  std::bind(std::equal_to<typename Colln::value_type>(), typename Colln::value_type(), std::placeholders::_1)
337  )
338  > base_t;
339  typedef typename base_t::execution_context execution_context;
340 
341  __stdcall find_t(thread_pool_base &p, Colln const &c, typename Colln::value_type const &v) noexcept(true) FORCE_INLINE
342  : base_t(p, c, typename base_t::operation_type(std::equal_to<typename Colln::value_type>(), v, std::placeholders::_1)) {
343  }
344 };
345 
346 template<
347  class DM,
348  class Ps,
349  typename PTT,
350  class Pt
351 >
352 template<
353  typename CollnIn,
354  typename CollnOut,
355  typename UniOp
356 >
357 class thread_pool_base<DM, Ps, PTT, Pt>::transform_t {
358 public:
359  typedef UniOp operation_type;
360 
361 private:
362  typedef alg_wk_wrap::transform_work_type<operation_type> work_type;
363  typedef create_direct<work_type> creator_t;
364  typedef subdivide_n_gen_wk2<size_mode, thread_pool_base, typename creator_t::closure_t, two_containers<CollnIn, CollnOut>, alg_wk_wrap::transform_reduce> gen_wk_t;
365 
366 public:
367  /// This is a useful typedef to get at the execution_context.
368  /**
369  The execution_context is created by joinably transferring work into the pool. It has various uses, but is primarily used to atomically and synchronously wait on the results of the work on the closure_base-derived closure-derived object, as specified by the thread_wk_t object transferred into the pool. But it can also pass back specified exceptions that may be thrown by the work. It can also be used to asynchronously test if the work has been completed, and delete the work from the pool, if it has not been started.
370 
371  \see create_direct
372  \see execution_context_algo_buff_stack_type
373  \see joinable_t
374  \see thread_wk_t
375  \see closure_base
376  */
377  typedef execution_context_algo_buff_stack_type<work_distribution_mode, pool_traits_type::result_traits_, pool_traits_type::template algo_thread_wk_buffered, gen_wk_t, work_type> execution_context;
378 
379  /**
380  To assist in allowing compile-time computation of the algorithmic order of the threading model.
381  */
382  static constexpr ppd::generic_traits::memory_access_modes memory_access_mode=(
383  work_type::memory_access_mode==ppd::generic_traits::memory_access_modes::crew_memory_access
384  && gen_wk_t::memory_access_mode==ppd::generic_traits::memory_access_modes::crew_memory_access
385  && execution_context::memory_access_mode==ppd::generic_traits::memory_access_modes::crew_memory_access
386  && CollnIn::memory_access_mode==ppd::generic_traits::memory_access_modes::crew_memory_access
387  && CollnOut::memory_access_mode==ppd::generic_traits::memory_access_modes::crew_memory_access
388  ? ppd::generic_traits::memory_access_modes::crew_memory_access
389  : ppd::generic_traits::memory_access_modes::erew_memory_access
390  );
391 
392  __stdcall transform_t(thread_pool_base &p, CollnIn const &i, CollnOut &o, operation_type const &op) noexcept(true) FORCE_INLINE
393  : pool(p), in(i), out(o), uniop(op) {
394  }
395 
396  /// Joinably transfer the predicate to the pool.
397  /**
398  \see create_direct, execution_context, cliques, cliques_t
399  */
400  execution_context
401  process(cliques::element_type const cliques, typename pool_traits_type::thread_wk_elem_type::cfg_details_type::params const &cfg_parms) const FORCE_INLINE {
402  const typename gen_wk_t::alg_wrap_t::work_complete_t::containers_type containers(in, out);
403  return execution_context(
404  pool,
405  cfg_parms,
406  typename execution_context::thread_wk_t::closure_t::argument_type(uniop),
407  init_num_jobs_par_alg_other,
408  containers,
409  cliques,
410  default_num_subranges
411  );
412  }
413 
414 private:
415  thread_pool_base &pool;
416  CollnIn const &in;
417  CollnOut &out;
418  operation_type const &uniop;
419 };
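/**
 A minimal usage sketch of transform_t; the input and output containers are held by reference, so both must remain valid until the execution_context has been waited upon. All lower-case names are illustrative placeholders only.

 \code
 transform_t<colln_in_t, colln_out_t, uniop_t> alg(pool, in, out, op);
 auto context(alg.process(cliq, parms));
 // Wait: every sub-range of `in` has now been transformed into `out`.
 *context;
 \endcode
*/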
420 
421 template<
422  class DM,
423  class Ps,
424  typename PTT,
425  class Pt
426 >
427 template<
428  typename CollnIn,
429  typename CollnOut,
430  class IterIn,
431  typename UniOp
432 >
433 class thread_pool_base<DM, Ps, PTT, Pt>::transform_iter_t {
434 public:
435  typedef UniOp operation_type;
436 
437 protected:
438  typedef alg_wk_wrap::for_each_work_type<operation_type> work_type;
439  typedef create_direct<work_type> creator_t;
440  typedef subdivide_n_gen_wk2<size_mode, thread_pool_base, typename creator_t::closure_t, two_ranges<CollnIn, CollnOut, IterIn, typename CollnOut::container_type::iterator>, alg_wk_wrap::transform_reduce> gen_wk_t;
441 
442 public:
443  /// This is a useful typedef to get at the execution_context.
444  /**
445  The execution_context is created by joinably transferring work into the pool. It has various uses, but is primarily used to atomically and synchronously wait on the results of the work on the closure_base-derived closure-derived object, as specified by the thread_wk_t object transferred into the pool. But it can also pass back specified exceptions that may be thrown by the work. It can also be used to asynchronously test if the work has been completed, and delete the work from the pool, if it has not been started.
446 
447  \see create_direct
448  \see execution_context_algo_buff_stack_type
449  \see joinable_t
450  \see thread_wk_t
451  \see closure_base
452  */
453  typedef execution_context_algo_buff_stack_type<work_distribution_mode, pool_traits_type::result_traits_, pool_traits_type::template algo_thread_wk_buffered, gen_wk_t, work_type> execution_context;
454 
455  /**
456  To assist in allowing compile-time computation of the algorithmic order of the threading model.
457  */
458  static constexpr ppd::generic_traits::memory_access_modes memory_access_mode=(
459  work_type::memory_access_mode==ppd::generic_traits::memory_access_modes::crew_memory_access
460  && gen_wk_t::memory_access_mode==ppd::generic_traits::memory_access_modes::crew_memory_access
461  && execution_context::memory_access_mode==ppd::generic_traits::memory_access_modes::crew_memory_access
462  ? ppd::generic_traits::memory_access_modes::crew_memory_access
463  : ppd::generic_traits::memory_access_modes::erew_memory_access
464  );
465 
466  __stdcall transform_iter_t(thread_pool_base &p, IterIn b1, IterIn e1, typename CollnOut::container_type::iterator b2, operation_type const &op) noexcept(true) FORCE_INLINE
467  : pool(p), beg1(b1), end1(e1), beg2(b2), uniop(op) {
468  }
469 
470  /// Joinably transfer the predicate to the pool.
471  /**
472  \see create_direct, execution_context, cliques, cliques_t
473  */
474  execution_context
475  process(cliques::element_type const cliques, typename pool_traits_type::thread_wk_elem_type::cfg_details_type::params const &cfg_parms) const FORCE_INLINE {
476  return execution_context(
477  pool,
478  cfg_parms,
479  typename execution_context::thread_wk_t::closure_t::argument_type(uniop),
480  init_num_jobs_par_alg_other,
481  typename gen_wk_t::alg_wrap_t::work_complete_t::containers_type(beg1, end1, beg2),
482  cliques,
483  default_num_subranges
484  );
485  }
486 
487 private:
488  thread_pool_base &pool;
489  IterIn beg1;
490  IterIn end1;
491  typename CollnOut::container_type::iterator beg2;
492  operation_type const &uniop;
493 };
494 
495 template<
496  class DM,
497  class Ps,
498  typename PTT,
499  class Pt
500 >
501 template<
502  typename CollnIn,
503  typename CollnOut,
504  class IterIn
505 >
506 struct thread_pool_base<DM, Ps, PTT, Pt>::copy_iter_t : public thread_pool_base<DM, Ps, PTT, Pt>::template transform_iter_t<CollnIn, CollnOut, IterIn, noop<typename CollnOut::value_type> > {
507  typedef transform_iter_t<CollnIn, CollnOut, IterIn, noop<typename CollnOut::value_type> > base_t;
508 
509  __stdcall copy_iter_t(thread_pool_base &p, IterIn b1, IterIn e1, typename CollnOut::container_type::iterator b2) noexcept(true) FORCE_INLINE
510  : base_t(p, b1, e1, b2, typename base_t::operation_type()) {
511  }
512 };
513 
514 template<
515  class DM,
516  class Ps,
517  typename PTT,
518  class Pt
519 >
520 template<
521  typename CollnIn1,
522  typename CollnIn2,
523  typename CollnOut,
524  typename BinOp
525 >
526 class thread_pool_base<DM, Ps, PTT, Pt>::transform2_t {
527 public:
528  typedef BinOp operation_type;
529 
530 private:
531  typedef alg_wk_wrap::transform_work_type<operation_type> work_type;
532  typedef create_direct<work_type> creator_t;
533  typedef subdivide_n_gen_wk3<size_mode, thread_pool_base, typename creator_t::closure_t, three_containers<CollnIn1, CollnIn2, CollnOut>, alg_wk_wrap::transform2_reduce> gen_wk_t;
534 
535 public:
536  /// This is a useful typedef to get at the execution_context.
537  /**
538  The execution_context is created by joinably transferring work into the pool. It has various uses, but is primarily used to atomically and synchronously wait on the results of the work on the closure_base-derived closure-derived object, as specified by the thread_wk_t object transferred into the pool. But it can also pass back specified exceptions that may be thrown by the work. It can also be used to asynchronously test if the work has been completed, and delete the work from the pool, if it has not been started.
539 
540  \see create_direct
541  \see execution_context_algo_buff_stack_type
542  \see joinable_t
543  \see thread_wk_t
544  \see closure_base
545  */
546  typedef execution_context_algo_buff_stack_type<work_distribution_mode, pool_traits_type::result_traits_, pool_traits_type::template algo_thread_wk_buffered, gen_wk_t, work_type> execution_context;
547 
548  /**
549  To assist in allowing compile-time computation of the algorithmic order of the threading model.
550  */
551  static constexpr ppd::generic_traits::memory_access_modes memory_access_mode=(
552  work_type::memory_access_mode==ppd::generic_traits::memory_access_modes::crew_memory_access
553  && gen_wk_t::memory_access_mode==ppd::generic_traits::memory_access_modes::crew_memory_access
554  && execution_context::memory_access_mode==ppd::generic_traits::memory_access_modes::crew_memory_access
555  && CollnIn1::memory_access_mode==ppd::generic_traits::memory_access_modes::crew_memory_access
556  && CollnIn2::memory_access_mode==ppd::generic_traits::memory_access_modes::crew_memory_access
557  && CollnOut::memory_access_mode==ppd::generic_traits::memory_access_modes::crew_memory_access
558  ? ppd::generic_traits::memory_access_modes::crew_memory_access
559  : ppd::generic_traits::memory_access_modes::erew_memory_access
560  );
561 
562  __stdcall transform2_t(thread_pool_base &p, CollnIn1 const &i1, CollnIn2 const &i2, CollnOut &o, operation_type const &op) noexcept(true) FORCE_INLINE
563  : pool(p), in1(i1), in2(i2), out(o), binop(op) {
564  }
565 
566  /// Joinably transfer the predicate to the pool.
567  /**
568  \see create_direct, execution_context, cliques, cliques_t
569  */
570  execution_context
571  process(cliques::element_type const cliques, typename pool_traits_type::thread_wk_elem_type::cfg_details_type::params const &cfg_parms) const FORCE_INLINE {
572  const typename gen_wk_t::alg_wrap_t::work_complete_t::containers_type containers(in1, in2, out);
573  return execution_context(
574  pool,
575  cfg_parms,
576  typename execution_context::thread_wk_t::closure_t::argument_type(binop),
577  init_num_jobs_par_alg_other,
578  containers,
579  cliques,
580  default_num_subranges
581  );
582  }
583 
584 private:
585  thread_pool_base &pool;
586  CollnIn1 const &in1;
587  CollnIn2 const &in2;
588  CollnOut &out;
589  operation_type const binop;
590 };
591 
592 template<
593  class DM,
594  class Ps,
595  typename PTT,
596  class Pt
597 >
598 template<class Res>
599 struct thread_pool_base<DM, Ps, PTT, Pt>::map_red_initialiser : std::unary_function<void, Res> {
600  typedef std::unary_function<void, Res> base_t;
601  typedef typename base_t::argument_type argument_type;
602  typedef typename base_t::result_type result_type;
603 
604  result_type const val;
605 
606  explicit map_red_initialiser(result_type const &va) FORCE_INLINE
607  : val(va) {}
608 
609  result_type const &__fastcall operator()() const noexcept(true) FORCE_INLINE {
610  return val;
611  }
612 };
613 
614 template<
615  class DM,
616  class Ps,
617  typename PTT,
618  class Pt
619 >
620 template<
621  class Colln,
622  typename BinOp,
623  class V,
624  template<class, class> class Reduce,
625  class Init
626 >
627 class thread_pool_base<DM, Ps, PTT, Pt>::map_reduce_t {
628 public:
629  typedef BinOp operation_type;
630 
631 protected:
632  typedef Init initialiser;
633 
634 private:
635  typedef typename os_traits::lock_traits::template atomic_counter_type<V> accumulated_res_t;
636  typedef alg_wk_wrap::accumulator_work_type<operation_type, accumulated_res_t> work_type;
637  typedef create_direct<work_type> creator_t;
638  typedef subdivide_n_gen_wk1<size_mode, thread_pool_base, typename creator_t::closure_t, one_container<Colln>, Reduce> gen_wk_t;
639 
640 public:
641  /// A bit of syntactic sugar: allow the user to not have to double-dereference the execution_context to get at the result_type. This is a useful typedef to get at the execution_context.
642  /**
643  The execution_context is created by joinably transferring work into the pool. It has various uses, but is primarily used to atomically and synchronously wait on the results of the work on the closure_base-derived closure-derived object, as specified by the thread_wk_t object transferred into the pool. But it can also pass back specified exceptions that may be thrown by the work. It can also be used to asynchronously test if the work has been completed, and delete the work from the pool, if it has not been started.
644 
645  \see create_direct
646  \see execution_context_algo_buff_stack_type
647  \see joinable
648  \see thread_wk_t
649  \see closure_base
650  */
651  typedef execution_context_algo_buff_stack_type<work_distribution_mode, pool_traits_type::result_traits_, pool_traits_type::template algo_thread_wk_buffered, gen_wk_t, work_type, deref::extra_deref, core_work_result::to_op> execution_context;
652 
653  /**
654  To assist in allowing compile-time computation of the algorithmic order of the threading model.
655  */
656  static constexpr ppd::generic_traits::memory_access_modes memory_access_mode=(
657  accumulated_res_t::memory_access_mode==ppd::generic_traits::memory_access_modes::crew_memory_access
658  && work_type::memory_access_mode==ppd::generic_traits::memory_access_modes::crew_memory_access
659  && gen_wk_t::memory_access_mode==ppd::generic_traits::memory_access_modes::crew_memory_access
660  && execution_context::memory_access_mode==ppd::generic_traits::memory_access_modes::crew_memory_access
661  && Colln::memory_access_mode==ppd::generic_traits::memory_access_modes::crew_memory_access
662  ? ppd::generic_traits::memory_access_modes::crew_memory_access
663  : ppd::generic_traits::memory_access_modes::erew_memory_access
664  );
665 
666  map_reduce_t(thread_pool_base &p, Colln const &c, initialiser const &i, operation_type const &op, typename cfg_type::node_property_t::value_type const *n_d) noexcept(true) FORCE_INLINE
667  : pool(p), colln(c), init_val(i), binop(op), node_details(n_d) {
668  }
669 
670  /// Joinably transfer the predicate to the pool.
671  /**
672  \see create_direct, execution_context, cliques, cliques_t
673  */
674  execution_context
675  process(cliques::element_type const cliques, typename creator_t::closure_t::cfg_details_type::params const &cfg_parms) const FORCE_INLINE {
676  return execution_context(
677  pool,
678  typename pool_traits_type::thread_wk_elem_type::cfg_details_type::params(node_details, cfg_parms),
679  typename execution_context::thread_wk_t::closure_t::argument_type(binop, typename execution_context::thread_wk_t::closure_t::result_type(init_val.operator()())),
680  init_num_jobs_par_alg_other,
681  typename gen_wk_t::alg_wrap_t::work_complete_t::containers_type(colln),
682  cliques,
683  default_num_subranges
684  );
685  }
686 
687 private:
688  thread_pool_base &pool;
689  Colln const &colln;
690  initialiser const init_val;
691  operation_type const binop;
692  typename cfg_type::node_property_t::value_type const *node_details;
693 };
694 
695 template<
696  class DM,
697  class Ps,
698  typename PTT,
699  class Pt
700 >
701 template<class Colln>
702 struct thread_pool_base<DM, Ps, PTT, Pt>::max_element_initialiser : std::unary_function<void, typename Colln::value_type> {
703  typedef std::unary_function<void, typename Colln::value_type> base_t;
704  typedef typename base_t::result_type result_type;
705 
706  Colln const &colln;
707 
708  explicit max_element_initialiser(Colln const &c) noexcept(true) FORCE_INLINE
709  : colln(c) {}
710 
711  result_type __fastcall operator()() const noexcept(true) FORCE_INLINE {
712  return !colln.colln().empty() ? *colln.colln().begin() : std::numeric_limits<result_type>::min();
713  }
714 };
715 
716 template<
717  class DM,
718  class Ps,
719  typename PTT,
720  class Pt
721 >
722 template<class Colln>
723 struct thread_pool_base<DM, Ps, PTT, Pt>::min_element_initialiser : std::unary_function<void, typename Colln::value_type> {
724  typedef std::unary_function<void, typename Colln::value_type> base_t;
725  typedef typename base_t::result_type result_type;
726 
727  Colln const &colln;
728 
729  explicit min_element_initialiser(Colln const &c) noexcept(true) FORCE_INLINE
730  : colln(c) {}
731 
732  result_type __fastcall operator()() const noexcept(true) FORCE_INLINE {
733  return !colln.colln().empty() ? *colln.colln().begin() : std::numeric_limits<result_type>::max();
734  }
735 };
736 
737 template<
738  class DM,
739  class Ps,
740  typename PTT,
741  class Pt
742 >
743 template<
744  class Colln,
745  class BinOp
746 >
747 struct thread_pool_base<DM, Ps, PTT, Pt>::accumulate_op_processor : public thread_pool_base<DM, Ps, PTT, Pt>::template map_reduce_t<Colln, BinOp, typename BinOp::result_type, alg_wk_wrap::accumulate_reduce, map_red_initialiser<typename BinOp::result_type>> {
748  typedef map_reduce_t<Colln, BinOp, typename BinOp::result_type, alg_wk_wrap::accumulate_reduce, map_red_initialiser<typename BinOp::result_type>> base_t;
749 
750  __stdcall accumulate_op_processor(thread_pool_base &pool, Colln const &colln, typename BinOp::result_type const &v, typename base_t::operation_type const &binop, typename cfg_type::node_property_t::value_type const *n_d=node_details_acc_op) noexcept(true) FORCE_INLINE
751  : base_t(pool, colln, typename base_t::initialiser(v), binop, n_d) {
752  }
753 };
754 
755 template<
756  class DM,
757  class Ps,
758  typename PTT,
759  class Pt
760 >
761 template<
762  class Colln,
763  class V
764 >
765 struct thread_pool_base<DM, Ps, PTT, Pt>::accumulate_processor : public thread_pool_base<DM, Ps, PTT, Pt>::template accumulate_op_processor<Colln, std::plus<V>> {
766  typedef accumulate_op_processor<Colln, std::plus<V>> base_t;
767 
768  __stdcall accumulate_processor(thread_pool_base &pool, Colln const &colln, V const &v) noexcept(true) FORCE_INLINE
769  : base_t(pool, colln, v, typename base_t::base_t::operation_type(), node_details_acc) {
770  }
771 };
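/**
 A minimal usage sketch of the accumulate front-end to map_reduce_t above; all lower-case names are illustrative placeholders only.

 \code
 // Sum a container of longs, seeded with 0L; std::plus<long> is the reduction.
 accumulate_processor<colln_t, long> alg(pool, vec, 0L);
 auto context(alg.process(cliq, parms));
 // The single dereference waits for all sub-ranges and yields the reduced sum.
 auto const sum=*context;
 \endcode
*/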
772 
773 template<
774  class DM,
775  class Ps,
776  typename PTT,
777  class Pt
778 >
779 template<
780  class Colln,
781  class Comp
782 >
783 struct thread_pool_base<DM, Ps, PTT, Pt>::max_element_t : public thread_pool_base<DM, Ps, PTT, Pt>::template map_reduce_t<Colln, Comp, typename Colln::value_type, alg_wk_wrap::max_element_reduce, max_element_initialiser<Colln>> {
784  typedef map_reduce_t<Colln, Comp, typename Colln::value_type, alg_wk_wrap::max_element_reduce, max_element_initialiser<Colln>> base_t;
785  typedef typename base_t::execution_context execution_context;
786 
787  __stdcall max_element_t(thread_pool_base &pool, Colln const &colln, typename base_t::operation_type const &comp) noexcept(true) FORCE_INLINE
788  : base_t(pool, colln, typename base_t::initialiser(colln), comp, max_element_str) {
789  }
790 };
791 
792 template<
793  class DM,
794  class Ps,
795  typename PTT,
796  class Pt
797 >
798 template<
799  class Colln,
800  class Comp
801 >
802 struct thread_pool_base<DM, Ps, PTT, Pt>::min_element_t : public thread_pool_base<DM, Ps, PTT, Pt>::template map_reduce_t<Colln, Comp, typename Colln::value_type, alg_wk_wrap::min_element_reduce, min_element_initialiser<Colln>> {
803  typedef map_reduce_t<Colln, Comp, typename Colln::value_type, alg_wk_wrap::min_element_reduce, min_element_initialiser<Colln>> base_t;
804  typedef typename base_t::execution_context execution_context;
805 
806  __stdcall min_element_t(thread_pool_base &pool, Colln const &colln, typename base_t::operation_type const &comp) noexcept(true) FORCE_INLINE
807  : base_t(pool, colln, typename base_t::initialiser(colln), comp, min_element_str) {
808  }
809 };
810 
811 template<
812  class DM,
813  class Ps,
814  typename PTT,
815  class Pt
816 >
817 template<
818  class Colln
819 >
820 class thread_pool_base<DM, Ps, PTT, Pt>::fill_n_t {
821 private:
822  typedef alg_wk_wrap::pass_value<typename Colln::value_type const> work_type;
823  typedef create_direct<work_type> creator_t;
824  typedef subdivide_n_gen_wk1<size_mode, thread_pool_base, typename creator_t::closure_t, one_output_container_rw_lk<Colln>, alg_wk_wrap::fill_n_reduce> gen_wk_t;
825 
826 public:
827  /// This is a useful typedef to get at the execution_context.
828  /**
829  The execution_context is created by joinably transferring work into the pool. It has various uses, but is primarily used to atomically and synchronously wait on the results of the work on the closure_base-derived closure-derived object, as specified by the thread_wk_t object transferred into the pool. But it can also pass back specified exceptions that may be thrown by the work. It can also be used to asynchronously test if the work has been completed, and delete the work from the pool, if it has not been started.
830 
831  \see create_direct
832  \see execution_context_algo_buff_stack_type
833  \see joinable_t
834  \see thread_wk_t
835  \see closure_base
836  */
837  typedef execution_context_algo_buff_stack_type<work_distribution_mode, pool_traits_type::result_traits_, pool_traits_type::template algo_thread_wk_buffered, gen_wk_t, work_type> execution_context;
838 
839  /**
840  To assist in allowing compile-time computation of the algorithmic order of the threading model.
841  */
842  static constexpr ppd::generic_traits::memory_access_modes memory_access_mode=(
843  work_type::memory_access_mode==ppd::generic_traits::memory_access_modes::crew_memory_access
844  && gen_wk_t::memory_access_mode==ppd::generic_traits::memory_access_modes::crew_memory_access
845  && execution_context::memory_access_mode==ppd::generic_traits::memory_access_modes::crew_memory_access
846  && Colln::memory_access_mode==ppd::generic_traits::memory_access_modes::crew_memory_access
847  ? ppd::generic_traits::memory_access_modes::crew_memory_access
848  : ppd::generic_traits::memory_access_modes::erew_memory_access
849  );
850 
851  __stdcall fill_n_t(thread_pool_base &p, Colln &c, typename Colln::size_type sz, typename work_type::element_type &v) noexcept(true) FORCE_INLINE
852  : pool(p), colln(c), size(sz), val(v) {
853  }
854 
855  /// Joinably transfer the predicate to the pool.
856  /**
857  \see create_direct, execution_context, cliques, cliques_t
858  */
859  execution_context
860  process(cliques::element_type const cliques, typename pool_traits_type::thread_wk_elem_type::cfg_details_type::params const &cfg_parms) const FORCE_INLINE {
861  return execution_context(
862  pool,
863  typename pool_traits_type::thread_wk_elem_type::cfg_details_type::params("fill_n", cfg_parms),
864  typename execution_context::thread_wk_t::closure_t::argument_type(val),
865  init_num_jobs_par_alg_other,
866  typename gen_wk_t::alg_wrap_t::work_complete_t::containers_type(colln),
867  cliques,
868  default_num_subranges,
869  size
870  );
871  }
872 
873 private:
874  thread_pool_base &pool;
875  Colln &colln;
876  const typename Colln::size_type size;
877  typename work_type::element_type &val;
878 };
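/**
 A minimal usage sketch of fill_n_t; all lower-case names are illustrative placeholders only.

 \code
 fill_n_t<colln_t> alg(pool, vec, sz, v);
 auto context(alg.process(cliq, parms));
 // Wait: the first `sz` elements of `vec` have now been set to `v`.
 *context;
 \endcode
*/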
879 
880 template<
881  class DM,
882  class Ps,
883  typename PTT,
884  class Pt
885 >
886 template<
887  class Colln
888 >
889 class thread_pool_base<DM, Ps, PTT, Pt>::fill_t {
890 private:
891  typedef alg_wk_wrap::pass_value<typename Colln::value_type const> work_type;
892  typedef create_direct<work_type> creator_t;
893  typedef subdivide_n_gen_wk1<size_mode, thread_pool_base, typename creator_t::closure_t, one_output_container_simple_lk<Colln>, alg_wk_wrap::fill_reduce> gen_wk_t;
894 
895 public:
896  /// This is a useful typedef to get at the execution_context.
897  /**
898  The execution_context is created by joinably transferring work into the pool. It has various uses, but is primarily used to atomically and synchronously wait on the results of the work on the closure_base-derived closure-derived object, as specified by the thread_wk_t object transferred into the pool. But it can also pass back specified exceptions that may be thrown by the work. It can also be used to asynchronously test if the work has been completed, and delete the work from the pool, if it has not been started.
899 
900  \see create_direct
901  \see execution_context_algo_buff_stack_type
902  \see joinable_t
903  \see thread_wk_t
904  \see closure_base
905  */
906  typedef execution_context_algo_buff_stack_type<work_distribution_mode, pool_traits_type::result_traits_, pool_traits_type::template algo_thread_wk_buffered, gen_wk_t, work_type> execution_context;
907 
908  /**
909  To assist in allowing compile-time computation of the algorithmic order of the threading model.
910  */
911  static constexpr ppd::generic_traits::memory_access_modes memory_access_mode=(
912  work_type::memory_access_mode==ppd::generic_traits::memory_access_modes::crew_memory_access
913  && gen_wk_t::memory_access_mode==ppd::generic_traits::memory_access_modes::crew_memory_access
914  && execution_context::memory_access_mode==ppd::generic_traits::memory_access_modes::crew_memory_access
915  && Colln::memory_access_mode==ppd::generic_traits::memory_access_modes::crew_memory_access
916  ? ppd::generic_traits::memory_access_modes::crew_memory_access
917  : ppd::generic_traits::memory_access_modes::erew_memory_access
918  );
919 
920  __stdcall fill_t(thread_pool_base &p, Colln &c, typename work_type::element_type &v) noexcept(true) FORCE_INLINE
921  : pool(p), colln(c), val(v) {
922  }
923 
924  /// Joinably transfer the predicate to the pool.
925  /**
926  \see create_direct, execution_context, cliques, cliques_t
927  */
928  execution_context
929  process(cliques::element_type const cliques, typename pool_traits_type::thread_wk_elem_type::cfg_details_type::params const &cfg_parms) const FORCE_INLINE {
930  return execution_context(
931  pool,
932  typename pool_traits_type::thread_wk_elem_type::cfg_details_type::params("fill", cfg_parms),
933  typename execution_context::thread_wk_t::closure_t::argument_type(val),
934  init_num_jobs_par_alg_other,
935  typename gen_wk_t::alg_wrap_t::work_complete_t::containers_type(colln),
936  cliques,
937  default_num_subranges
938  );
939  }
940 
941 private:
942  thread_pool_base &pool;
943  Colln &colln;
944  typename work_type::element_type &val;
945 };
946 
947 template<
948  class DM,
949  class Ps,
950  typename PTT,
951  class Pt
952 >
953 template<
954  class Colln
955 >
956 class thread_pool_base<DM, Ps, PTT, Pt>::reverse_t {
957 private:
958  typedef Colln argument_type;
959  typedef alg_wk_wrap::reverse_work_type<argument_type> work_type;
960  typedef create_direct<work_type> creator_t;
961  typedef subdivide_n_gen_wk1<size_mode, thread_pool_base, typename creator_t::closure_t, one_output_container_simple_lk<Colln>, alg_wk_wrap::reverse_reduce> gen_wk_t;
962 
963 public:
964  /// This is a useful typedef to get at the execution_context.
965  /**
966  The execution_context is created by joinably transferring work into the pool. It has various uses, but is primarily used to atomically and synchronously wait on the results of the work on the closure_base-derived closure-derived object, as specified by the thread_wk_t object transferred into the pool. But it can also pass back specified exceptions that may be thrown by the work. It can also be used to asynchronously test if the work has been completed, and delete the work from the pool, if it has not been started.
967 
968  \see create_direct
969  \see execution_context_algo_buff_stack_type
970  \see joinable_t
971  \see thread_wk_t
972  \see closure_base
973  */
974  typedef execution_context_algo_buff_stack_type<work_distribution_mode, pool_traits_type::result_traits_, pool_traits_type::template algo_thread_wk_buffered, gen_wk_t, work_type> execution_context;
975 
976  /**
977  To assist in allowing compile-time computation of the algorithmic order of the threading model.
978  */
979  static constexpr ppd::generic_traits::memory_access_modes memory_access_mode=(
980  work_type::memory_access_mode==ppd::generic_traits::memory_access_modes::crew_memory_access
981  && gen_wk_t::memory_access_mode==ppd::generic_traits::memory_access_modes::crew_memory_access
982  && execution_context::memory_access_mode==ppd::generic_traits::memory_access_modes::crew_memory_access
983  && Colln::memory_access_mode==ppd::generic_traits::memory_access_modes::crew_memory_access
984  ? ppd::generic_traits::memory_access_modes::crew_memory_access
985  : ppd::generic_traits::memory_access_modes::erew_memory_access
986  );
987 
988  __stdcall reverse_t(thread_pool_base &p, Colln &c) noexcept(true) FORCE_INLINE
989  : pool(p), colln(c) {
990  }
991 
992  /// Joinably transfer the predicate to the pool.
993  /**
994  \see create_direct, execution_context, cliques, cliques_t
995  */
996  execution_context
997  process(cliques::element_type const cliques, typename pool_traits_type::thread_wk_elem_type::cfg_details_type::params const &cfg_parms) const FORCE_INLINE {
998  const unsigned short half_subrange=2;
999  typename gen_wk_t::alg_wrap_t::work_complete_t::containers_type containers(colln);
1000  return execution_context(
1001  pool,
1002  typename pool_traits_type::thread_wk_elem_type::cfg_details_type::params("reverse", cfg_parms),
1003  typename execution_context::thread_wk_t::closure_t::argument_type(work_type(containers.input1.begin(), containers.input1.end())),
1004  init_num_jobs_par_alg_other,
1005  containers,
1006  cliques,
1007  half_subrange
1008  );
1009  }
1010 
1011 private:
1012  thread_pool_base &pool;
1013  Colln &colln;
1014 };
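/**
 A minimal usage sketch of reverse_t; note that process() above passes half_subrange (2) where the other algorithms pass default_num_subranges, presumably because each generated job swaps an element with its mirror in the other half of the range. All lower-case names are illustrative placeholders only.

 \code
 reverse_t<colln_t> alg(pool, vec);
 auto context(alg.process(cliq, parms));
 // Wait: `vec` has now been reversed in place.
 *context;
 \endcode
*/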
1015 
1016 template<
1017  class DM,
1018  class Ps,
1019  typename PTT,
1020  class Pt
1021 >
1022 template<
1023  typename CollnIn1,
1024  typename CollnIn2,
1025  typename CollnOut,
1026  typename Compare
1027 >
1028 class thread_pool_base<DM, Ps, PTT, Pt>::merge_t {
1029 public:
1030  typedef Compare operation_type;
1031 
1032 private:
1033  typedef alg_wk_wrap::merge_work_type<operation_type, thread_pool_base> work_type;
1034  typedef create_direct<work_type> creator_t;
1035  typedef alg_wrapper3<pool_traits_type, alg_wk_wrap::batchers_bitonic_merge_reduce<three_containers<CollnIn1, CollnIn2, CollnOut>, typename creator_t::closure_t>, pool_traits_type::result_traits_> alg_wrap_t;
1036 
1037 public:
1038  /// This is a useful typedef to get at the execution_context.
1039  /**
1040  The execution_context is created by joinably transferring work into the pool. It has various uses, but is primarily used to atomically and synchronously wait on the results of the work on the closure_base-derived closure-derived object, as specified by the thread_wk_t object transferred into the pool. But it can also pass back specified exceptions that may be thrown by the work. It can also be used to asynchronously test if the work has been completed, and delete the work from the pool, if it has not been started.
1041 
1042  \see create_direct
1043  \see execution_context_algo_stack_type
1044  \see joinable
1045  \see thread_wk_t
1046  \see closure_base
1047  */
1048  typedef execution_context_algo_stack_type<work_distribution_mode, pool_traits_type::result_traits_, thread_pool_base, pool_traits_type::template algo_thread_wk, alg_wrap_t, work_type> execution_context;
1049 
1050  /**
1051  To assist in allowing compile-time computation of the algorithmic order of the threading model.
1052  */
1053  static constexpr ppd::generic_traits::memory_access_modes memory_access_mode=(
1054  work_type::memory_access_mode==ppd::generic_traits::memory_access_modes::crew_memory_access
1055  && alg_wrap_t::memory_access_mode==ppd::generic_traits::memory_access_modes::crew_memory_access
1056  && execution_context::memory_access_mode==ppd::generic_traits::memory_access_modes::crew_memory_access
1057  && CollnIn1::memory_access_mode==ppd::generic_traits::memory_access_modes::crew_memory_access
1058  && CollnIn2::memory_access_mode==ppd::generic_traits::memory_access_modes::crew_memory_access
1059  && CollnOut::memory_access_mode==ppd::generic_traits::memory_access_modes::crew_memory_access
1060  ? ppd::generic_traits::memory_access_modes::crew_memory_access
1061  : ppd::generic_traits::memory_access_modes::erew_memory_access
1062  );
1063 
1064  __stdcall merge_t(thread_pool_base &p, CollnIn1 const &i1, CollnIn2 const &i2, CollnOut &o, operation_type const &op) noexcept(true) FORCE_INLINE
1065  : pool(p), in1(i1), in2(i2), out(o), comp(op) {
1066  }
1067 
1068  /// Joinably transfer the predicate to the pool.
1069  /**
1070  \todo Move sorter to the pool.merge() call so that users can specify alternative underlying sort operations.
1071 
1072  \see create_direct, execution_context, cliques, cliques_t
1073  */
1074  execution_context __fastcall
1075  process(const typename thread_pool_base::pool_type::size_type clique, typename pool_traits_type::thread_wk_elem_type::cfg_details_type::params const &cfg_parms) const FORCE_INLINE {
1076  const typename alg_wrap_t::work_complete_t::containers_type containers(in1, in2, out);
1077  return execution_context(
1078  pool,
1079  typename pool_traits_type::thread_wk_elem_type::cfg_details_type::params("merge", cfg_parms),
1080  typename execution_context::thread_wk_t::closure_t::argument_type(comp, pool),
1081  init_num_jobs_par_alg_other,
1082  containers,
1083  clique
1084  );
1085  }
1086 
1087 private:
1088  thread_pool_base &pool;
1089  CollnIn1 const &in1;
1090  CollnIn2 const &in2;
1091  CollnOut &out;
1092  operation_type const &comp;
1093 };
1094 
1095 template<
1096  class DM,
1097  class Ps,
1098  typename PTT,
1099  class Pt
1100 >
1101 template<
1102  class Colln,
1103  typename Compare
1104 >
1105 class thread_pool_base<DM, Ps, PTT, Pt>::sort_t {
1106 public:
1107  typedef Compare operation_type;
1108 
1109 private:
1110  typedef alg_wk_wrap::sort_work_type<operation_type, thread_pool_base> work_type;
1111  typedef create_direct<work_type> creator_t;
1112  typedef alg_wrapper1<pool_traits_type, alg_wk_wrap::bitonic_sort_reduce<one_output_container_simple_lk<Colln>, typename creator_t::closure_t>, pool_traits_type::result_traits_> alg_wrap_t;
1113 
1114 public:
1115  /// This is a useful typedef to get at the execution_context.
1116  /**
1117  The execution_context is created by joinably transferring work into the pool. It has various uses, but is primarily used to atomically and synchronously wait on the results of the work on the closure_base-derived closure-derived object, as specified by the thread_wk_t object transferred into the pool. But it can also pass back specified exceptions that may be thrown by the work. It can also be used to asynchronously test if the work has been completed, and delete the work from the pool, if it has not been started.
1118 
1119  \see create_direct
1120  \see execution_context_algo_stack_type
1121  \see joinable_t
1122  \see thread_wk_t
1123  \see closure_base
1124  */
1125  typedef execution_context_algo_stack_type<work_distribution_mode, pool_traits_type::result_traits_, thread_pool_base, pool_traits_type::template algo_thread_wk, alg_wrap_t, work_type> execution_context;
1126 
1127  /**
1128  To assist in allowing compile-time computation of the algorithmic order of the threading model.
1129  */
1130  static constexpr ppd::generic_traits::memory_access_modes memory_access_mode=(
1131  work_type::memory_access_mode==ppd::generic_traits::memory_access_modes::crew_memory_access
1132  && alg_wrap_t::memory_access_mode==ppd::generic_traits::memory_access_modes::crew_memory_access
1133  && execution_context::memory_access_mode==ppd::generic_traits::memory_access_modes::crew_memory_access
1134  && Colln::memory_access_mode==ppd::generic_traits::memory_access_modes::crew_memory_access
1135  ? ppd::generic_traits::memory_access_modes::crew_memory_access
1136  : ppd::generic_traits::memory_access_modes::erew_memory_access
1137  );
1138 
1139  __stdcall sort_t(thread_pool_base &p, Colln &c, operation_type const &op) noexcept(true) FORCE_INLINE
1140  : pool(p), colln(c), comp(op) {
1141  }
1142 
1143  /// Joinably transfer the predicate to the pool.
1144  /**
1145  \see create_direct, execution_context, cliques, cliques_t
1146  */
1147  execution_context __fastcall
1148  process(const typename thread_pool_base::pool_type::size_type clique, typename pool_traits_type::thread_wk_elem_type::cfg_details_type::params const &cfg_parms) const FORCE_INLINE {
1149  return execution_context(
1150  pool,
1151  typename pool_traits_type::thread_wk_elem_type::cfg_details_type::params("sort", cfg_parms),
1152  typename execution_context::thread_wk_t::closure_t::argument_type(comp, pool),
1153  init_num_jobs_par_alg_other,
1154  typename alg_wrap_t::work_complete_t::containers_type(colln),
1155  clique
1156  );
1157  }
1158 
1159 private:
1160  thread_pool_base &pool;
1161  Colln &colln;
1162  operation_type const &comp;
1163 };
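/**
 A minimal usage sketch of sort_t; unlike the subdivide_n-based algorithms above, process() here takes a clique size (pool_type::size_type) for the bitonic-sort reduction. All lower-case names are illustrative placeholders only.

 \code
 typedef std::less<typename colln_t::value_type> comp_t;
 comp_t const comp;
 sort_t<colln_t, comp_t> alg(pool, vec, comp);
 auto context(alg.process(clique, parms));
 // Wait: `vec` is now sorted according to the comparator.
 *context;
 \endcode
*/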
1164 
1165 template<
1166  class DM,
1167  class Ps,
1168  typename PTT,
1169  class Pt
1170 >
1171 template<
1172  class Colln,
1173  class Pred
1174 >
1175 class thread_pool_base<DM, Ps, PTT, Pt>::swap_ranges_t {
1176 public:
1177  typedef Pred operation_type;
1178 
1179 private:
1180  typedef alg_wk_wrap::for_each_work_type<operation_type> work_type;
1181  typedef create_direct<work_type> creator_t;
1182  typedef subdivide_n_gen_wk2<size_mode, thread_pool_base, typename creator_t::closure_t, two_out_ranges<Colln, Colln, typename Colln::container_type::iterator, typename Colln::container_type::iterator>, alg_wk_wrap::swap_ranges_reduce> gen_wk_t;
1183 
1184 public:
1185  /// This is a useful typedef to get at the execution_context.
1186  /**
1187  The execution_context is created by joinably transferring work into the pool. It has various uses, but is primarily used to atomically and synchronously wait on the results of the work on the closure_base-derived closure-derived object, as specified by the thread_wk_t object transferred into the pool. But it can also pass back specified exceptions that may be thrown by the work. It can also be used to asynchronously test if the work has been completed, and delete the work from the pool, if it has not been started.
1188 
1189  \see create_direct
1190  \see execution_context_algo_buff_stack_type
1191  \see joinable_t
1192  \see thread_wk_t
1193  \see closure_base
1194  */
1195  typedef execution_context_algo_buff_stack_type<work_distribution_mode, pool_traits_type::result_traits_, pool_traits_type::template algo_thread_wk_buffered, gen_wk_t, work_type> execution_context;
1196 
1197  /**
1198  To assist in allowing compile-time computation of the algorithmic order of the threading model.
1199  */
1200  static constexpr ppd::generic_traits::memory_access_modes memory_access_mode=(
1201  work_type::memory_access_mode==ppd::generic_traits::memory_access_modes::crew_memory_access
1202  && gen_wk_t::memory_access_mode==ppd::generic_traits::memory_access_modes::crew_memory_access
1203  && execution_context::memory_access_mode==ppd::generic_traits::memory_access_modes::crew_memory_access
1204  ? ppd::generic_traits::memory_access_modes::crew_memory_access
1205  : ppd::generic_traits::memory_access_modes::erew_memory_access
1206  );
1207 
1208  __stdcall swap_ranges_t(thread_pool_base &p, typename Colln::container_type::iterator b1, typename Colln::container_type::iterator e1, typename Colln::container_type::iterator b2, Pred const &pr) noexcept(true) FORCE_INLINE
1209  : pool(p), beg1(b1), end1(e1), beg2(b2), pred(pr) {
1210  }
1211 
1212  /// Joinably transfer the predicate to the pool.
1213  /**
1214  \see create_direct, execution_context, cliques, cliques_t
1215  */
1216  execution_context
1217  process(cliques::element_type const cliques, typename pool_traits_type::thread_wk_elem_type::cfg_details_type::params const &cfg_parms) const FORCE_INLINE {
1218  return execution_context(
1219  pool,
1220  typename pool_traits_type::thread_wk_elem_type::cfg_details_type::params("swap_ranges", cfg_parms),
1221  typename execution_context::thread_wk_t::closure_t::argument_type(pred),
1222  init_num_jobs_par_alg_other,
1223  typename gen_wk_t::alg_wrap_t::work_complete_t::containers_type(beg1, end1, beg2),
1224  cliques,
1225  default_num_subranges
1226  );
1227  }
1228 
1229 private:
1230  thread_pool_base &pool;
1231  typename Colln::container_type::iterator beg1;
1232  typename Colln::container_type::iterator end1;
1233  typename Colln::container_type::iterator beg2;
1234  Pred const &pred;
1235 };
1236 
1237 template<
1238  class DM,
1239  class Ps,
1240  typename PTT,
1241  class Pt
1242 >
1243 template<
1244  class Colln,
1245  typename Fn
1246 >
1247 inline parallel_algorithm<typename thread_pool_base<DM, Ps, PTT, Pt>::template for_each_t<Colln, Fn> > __fastcall
1248 thread_pool_base<DM, Ps, PTT, Pt>::for_each(Colln const &c, Fn const &fn) {
1249  typedef parallel_algorithm<for_each_t<Colln, Fn> > reduction_t;
1250  this->set_statistics().update_colln_stats(c.size());
1251  return reduction_t(typename reduction_t::operation_type(*this, c, fn));
1252 }
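/**
 A minimal sketch of the user-facing wrapper above: for_each() records the collection statistics and returns the for_each_t wrapped in a parallel_algorithm; transferring that object into the pool (via the transfer operators defined elsewhere in the library, assumed here) yields the execution_context to wait upon. `pool`, `vec` and `fn` are illustrative placeholders only.

 \code
 auto const &alg=pool.for_each(vec, fn);
 \endcode
*/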
1253 
1254 template<
1255  class DM,
1256  pool_traits::size_mode_t Ps,
1257  typename PTT,
1258  class Pt
1259 >
1260 template<
1261  class Colln,
1262  class Pred
1263 >
1264 inline parallel_algorithm<typename thread_pool_base<DM, Ps, PTT, Pt>::template count_if_t<Colln, Pred> > __fastcall
1265 thread_pool_base<DM, Ps, PTT, Pt>::count_if(Colln const &c, Pred const &p) {
1266  typedef parallel_algorithm<count_if_t<Colln, Pred> > reduction_t;
1267  this->set_statistics().update_colln_stats(c.size());
1268  return reduction_t(typename reduction_t::operation_type(*this, c, p));
1269 }
1270 
1271 template<
1272  class DM,
1273  pool_traits::size_mode_t Ps,
1274  typename PTT,
1275  class Pt
1276 >
1277 template<
1278  class Colln
1279 >
1280 inline parallel_algorithm<typename thread_pool_base<DM, Ps, PTT, Pt>::template count_t<Colln> > __fastcall
1281 thread_pool_base<DM, Ps, PTT, Pt>::count(Colln const &c, typename Colln::value_type const &v) {
1282  typedef parallel_algorithm<count_t<Colln> > reduction_t;
1283  this->set_statistics().update_colln_stats(c.size());
1284  return reduction_t(typename reduction_t::operation_type(*this, c, v));
1285 }
1286 
1287 template<
1288  class DM,
1289  pool_traits::size_mode_t Ps,
1290  typename PTT,
1291  class Pt
1292 >
1293 template<
1294  class Colln,
1295  class Pred
1296 >
1297 inline parallel_algorithm<typename thread_pool_base<DM, Ps, PTT, Pt>::template find_if_t<Colln, Pred> > __fastcall
1298 thread_pool_base<DM, Ps, PTT, Pt>::find_if(Colln const &c, Pred const &p) {
1299  typedef parallel_algorithm<find_if_t<Colln, Pred> > reduction_t;
1300  this->set_statistics().update_colln_stats(c.size());
1301  return reduction_t(typename reduction_t::operation_type(*this, c, p));
1302 }
1303 
1304 template<
1305  class DM,
1306  pool_traits::size_mode_t Ps,
1307  typename PTT,
1308  class Pt
1309 >
1310 template<
1311  class Colln
1312 >
1313 inline parallel_algorithm<typename thread_pool_base<DM, Ps, PTT, Pt>::template find_t<Colln> > __fastcall
1314 thread_pool_base<DM, Ps, PTT, Pt>::find(Colln const &c, typename Colln::value_type const &v) {
1315  typedef parallel_algorithm<find_t<Colln> > reduction_t;
1316  this->set_statistics().update_colln_stats(c.size());
1317  return reduction_t(typename reduction_t::operation_type(*this, c, v));
1318 }
1319 
1320 template<
1321  class DM,
1322  pool_traits::size_mode_t Ps,
1323  typename PTT,
1324  class Pt
1325 >
1326 template<
1327  typename CollnIn,
1328  typename CollnOut,
1329  class UniOp
1330 >
1331 inline parallel_algorithm<typename thread_pool_base<DM, Ps, PTT, Pt>::template transform_t<CollnIn, CollnOut, UniOp> > __fastcall
1332 thread_pool_base<DM, Ps, PTT, Pt>::transform(CollnIn const &in, CollnOut &out, UniOp const &uniop) {
1333  typedef parallel_algorithm<transform_t<CollnIn, CollnOut, UniOp> > reduction_t;
1334  this->set_statistics().update_colln_stats(in.size());
1335  return reduction_t(typename reduction_t::operation_type(*this, in, out, uniop));
1336 }
1337 
1338 template<
1339  class DM,
1340  pool_traits::size_mode_t Ps,
1341  typename PTT,
1342  class Pt
1343 >
1344 template<
1345  typename CollnIn1,
1346  typename CollnIn2,
1347  typename CollnOut,
1348  class BinOp
1349 >
1350 inline parallel_algorithm<typename thread_pool_base<DM, Ps, PTT, Pt>::template transform2_t<CollnIn1, CollnIn2, CollnOut, BinOp> > __fastcall
1351 thread_pool_base<DM, Ps, PTT, Pt>::transform(CollnIn1 const &in1, CollnIn2 const &in2, CollnOut &out, BinOp const &binop) {
1352  typedef parallel_algorithm<transform2_t<CollnIn1, CollnIn2, CollnOut, BinOp> > reduction_t;
1353  this->set_statistics().update_colln_stats(in1.size());
1354  return reduction_t(typename reduction_t::operation_type(*this, in1, in2, out, binop));
1355 }
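/**
	Editorial example, a sketch under assumptions rather than library code: both transform() overloads follow the same pattern; the binary form combines two input collections element-wise into an output collection. Here in1, in2 and out stand for suitably adapted collections of int, pool for a suitably-instantiated thread_pool, and std::plus<int> requires <functional>.
	\code
	auto const &context=pool.transform(in1, in2, out, std::plus<int>());	// out[i]=in1[i]+in2[i], computed across the pool.
	\endcode
*/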
1356 
1357 template<
1358  class DM,
1359  pool_traits::size_mode_t Ps,
1360  typename PTT,
1361  class Pt
1362 >
1363 template<
1364  typename CollnIn,
1365  typename CollnOut
1366 >
1367 inline parallel_algorithm<typename thread_pool_base<DM, Ps, PTT, Pt>::template transform_t<CollnIn, CollnOut, noop<typename CollnOut::value_type> > > __fastcall
1368 thread_pool_base<DM, Ps, PTT, Pt>::copy(CollnIn const &in, CollnOut &out) {
1369  return transform(in, out, noop<typename CollnOut::value_type>());
1370 }
1371 
1372 template<
1373  class DM,
1374  pool_traits::size_mode_t Ps,
1375  typename PTT,
1376  class Pt
1377 >
1378 template<
1379  class Colln,
1380  typename BinOp
1381 >
1382 inline parallel_algorithm<typename thread_pool_base<DM, Ps, PTT, Pt>::template accumulate_op_processor<Colln, BinOp> > __fastcall
1383 thread_pool_base<DM, Ps, PTT, Pt>::accumulate(Colln const &c, typename BinOp::result_type const &v, BinOp const &binop) {
1384  typedef parallel_algorithm<accumulate_op_processor<Colln, BinOp> > reduction_t;
1385  this->set_statistics().update_colln_stats(c.size());
1386  return reduction_t(typename reduction_t::operation_type(*this, c, v, binop));
1387 }
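/**
	Editorial example, a sketch under assumptions rather than library code: accumulate() returns an object from which the reduced value may be obtained once the transferred work completes; it is assumed here that dereferencing waits for completion and yields that value. data stands for an adapted collection of int and pool for a suitably-instantiated thread_pool.
	\code
	auto const &sum=pool.accumulate(data, 0, std::plus<int>());	// Parallel reduction over data.
	int const total=*sum;	// Assumed: operator* joins with the work and returns the accumulated value.
	\endcode
*/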
1388 
1389 template<
1390  class DM,
1391  pool_traits::size_mode_t Ps,
1392  typename PTT,
1393  class Pt
1394 >
1395 template<
1396  class Colln,
1397  class V
1398 >
1399 inline parallel_algorithm<typename thread_pool_base<DM, Ps, PTT, Pt>::template accumulate_processor<Colln, V>> __fastcall
1400 thread_pool_base<DM, Ps, PTT, Pt>::accumulate(Colln const &c, V const &v) {
1401  typedef parallel_algorithm<accumulate_processor<Colln, V>> reduction_t;
1402  this->set_statistics().update_colln_stats(c.size());
1403  return reduction_t(typename reduction_t::operation_type(*this, c, v));
1404 }
1405 
1406 template<
1407  class DM,
1408  pool_traits::size_mode_t Ps,
1409  typename PTT,
1410  class Pt
1411 >
1412 template<
1413  class Colln
1414 >
1415 inline parallel_algorithm<typename thread_pool_base<DM, Ps, PTT, Pt>::template fill_n_t<Colln> > __fastcall
1416 thread_pool_base<DM, Ps, PTT, Pt>::fill_n(Colln &c, typename Colln::size_type sz, typename Colln::value_type const &v) {
1417  typedef parallel_algorithm<fill_n_t<Colln> > reduction_t;
1418  this->set_statistics().update_colln_stats(sz);
1419  return reduction_t(typename reduction_t::operation_type(*this, c, sz, v));
1420 }
1421 
1422 template<
1423  class DM,
1424  pool_traits::size_mode_t Ps,
1425  typename PTT,
1426  class Pt
1427 >
1428 template<
1429  class Colln
1430 >
1431 inline parallel_algorithm<typename thread_pool_base<DM, Ps, PTT, Pt>::template fill_t<Colln> > __fastcall
1432 thread_pool_base<DM, Ps, PTT, Pt>::fill(Colln &c, typename Colln::value_type const &v) {
1433  typedef parallel_algorithm<fill_t<Colln> > reduction_t;
1434  this->set_statistics().update_colln_stats(c.size());
1435  return reduction_t(typename reduction_t::operation_type(*this, c, v));
1436 }
1437 
1438 template<
1439  class DM,
1440  pool_traits::size_mode_t Ps,
1441  typename PTT,
1442  class Pt
1443 >
1444 template<
1445  typename Colln
1446 >
1447 inline parallel_algorithm<typename thread_pool_base<DM, Ps, PTT, Pt>::template reverse_t<Colln> > __fastcall
1448 thread_pool_base<DM, Ps, PTT, Pt>::reverse(Colln &c) {
1449  typedef parallel_algorithm<reverse_t<Colln> > reduction_t;
1450  this->set_statistics().update_colln_stats(c.size());
1451  return reduction_t(typename reduction_t::operation_type(*this, c));
1452 }
1453 
1454 template<
1455  class DM,
1456  pool_traits::size_mode_t Ps,
1457  typename PTT,
1458  class Pt
1459 >
1460 template<
1461  class Colln,
1462  class Comp
1463 >
1464 inline parallel_algorithm<typename thread_pool_base<DM, Ps, PTT, Pt>::template max_element_t<Colln, Comp>> __fastcall
1465 thread_pool_base<DM, Ps, PTT, Pt>::max_element(Colln const &c, Comp const &comp) {
1466  typedef parallel_algorithm<max_element_t<Colln, Comp>> reduction_t;
1467  this->set_statistics().update_colln_stats(c.size());
1468  return reduction_t(typename reduction_t::operation_type(*this, c, comp));
1469 }
1470 
1471 template<
1472  class DM,
1473  pool_traits::size_mode_t Ps,
1474  typename PTT,
1475  class Pt
1476 >
1477 template<
1478  typename Colln
1479 >
1480 inline parallel_algorithm<typename thread_pool_base<DM, Ps, PTT, Pt>::template max_element_t<Colln, std::less<typename Colln::value_type>>> __fastcall
1481 thread_pool_base<DM, Ps, PTT, Pt>::max_element(Colln const &c) {
1482  return max_element(c, std::less<typename Colln::value_type>());
1483 }
1484 
1485 template<
1486  class DM,
1487  pool_traits::size_mode_t Ps,
1488  typename PTT,
1489  class Pt
1490 >
1491 template<
1492  class Colln,
1493  class Comp
1494 >
1495 inline parallel_algorithm<typename thread_pool_base<DM, Ps, PTT, Pt>::template min_element_t<Colln, Comp>> __fastcall
1496 thread_pool_base<DM, Ps, PTT, Pt>::min_element(Colln const &c, Comp const &comp) {
1497  typedef parallel_algorithm<min_element_t<Colln, Comp>> reduction_t;
1498  this->set_statistics().update_colln_stats(c.size());
1499  return reduction_t(typename reduction_t::operation_type(*this, c, comp));
1500 }
1501 
1502 template<
1503  class DM,
1504  pool_traits::size_mode_t Ps,
1505  typename PTT,
1506  class Pt
1507 >
1508 template<
1509  typename Colln
1510 >
1511 inline parallel_algorithm<typename thread_pool_base<DM, Ps, PTT, Pt>::template min_element_t<Colln, std::less<typename Colln::value_type>>> __fastcall
1512 thread_pool_base<DM, Ps, PTT, Pt>::min_element(Colln const &c) {
1513  return min_element(c, std::less<typename Colln::value_type>());
1514 }
1515 
1516 template<
1517  class DM,
1518  pool_traits::size_mode_t Ps,
1519  typename PTT,
1520  class Pt
1521 >
1522 template<
1523  typename CollnIn1,
1524  typename CollnIn2,
1525  typename CollnOut,
1526  class Compare
1527 >
1528 inline parallel_algorithm<typename thread_pool_base<DM, Ps, PTT, Pt>::template merge_t<CollnIn1, CollnIn2, CollnOut, Compare> > __fastcall
1529 thread_pool_base<DM, Ps, PTT, Pt>::merge(CollnIn1 const &in1, CollnIn2 const &in2, CollnOut &out, Compare const &comp) {
1530  typedef parallel_algorithm<merge_t<CollnIn1, CollnIn2, CollnOut, Compare> > reduction_t;
1531  this->set_statistics().update_colln_stats(in1.size()+in2.size());
1532  return reduction_t(typename reduction_t::operation_type(*this, in1, in2, out, comp));
1533 }
1534 
1535 template<
1536  class DM,
1537  pool_traits::size_mode_t Ps,
1538  typename PTT,
1539  class Pt
1540 >
1541 template<
1542  typename CollnIn1,
1543  typename CollnIn2,
1544  typename CollnOut
1545 >
1546 inline parallel_algorithm<typename thread_pool_base<DM, Ps, PTT, Pt>::template merge_t<CollnIn1, CollnIn2, CollnOut, std::less<typename CollnOut::value_type> > > __fastcall
1547 thread_pool_base<DM, Ps, PTT, Pt>::merge(CollnIn1 const &in1, CollnIn2 const &in2, CollnOut &out) {
1548  return merge(in1, in2, out, std::less<typename CollnOut::value_type>());
1549 }
1550 
1551 template<
1552  class DM,
1553  pool_traits::size_mode_t Ps,
1554  typename PTT,
1555  class Pt
1556 >
1557 template<
1558  class Colln,
1559  typename Compare
1560 >
1561 inline parallel_algorithm<typename thread_pool_base<DM, Ps, PTT, Pt>::template sort_t<Colln, Compare> > __fastcall
1562 thread_pool_base<DM, Ps, PTT, Pt>::sort(Colln &c, Compare const &comp) {
1563  typedef parallel_algorithm<sort_t<Colln, Compare> > reduction_t;
1564  this->set_statistics().update_colln_stats(c.size());
1565  return reduction_t(typename reduction_t::operation_type(*this, c, comp));
1566 }
1567 
1568 template<
1569  class DM,
1570  pool_traits::size_mode_t Ps,
1571  typename PTT,
1572  class Pt
1573 >
1574 template<
1575  typename Colln
1576 >
1577 inline parallel_algorithm<typename thread_pool_base<DM, Ps, PTT, Pt>::template sort_t<Colln, std::less<typename Colln::value_type> > > __fastcall
1578 thread_pool_base<DM, Ps, PTT, Pt>::sort(Colln &in) {
1579  return sort(in, std::less<typename Colln::value_type>());
1580 }
1581 
1582 template<
1583  class DM,
1584  pool_traits::size_mode_t Ps,
1585  typename PTT,
1586  class Pt
1587 >
1588 template<
1589  class ArgT,
1590  class UniFn
1591 >
1592 inline typename thread_pool_base<DM, Ps, PTT, Pt>::template execution_context_stack<unary_fun_work_type<ArgT, UniFn, thread_pool_base<DM, Ps, PTT, Pt>>> __fastcall
1593 thread_pool_base<DM, Ps, PTT, Pt>::unary_fun(ArgT &&a, UniFn const &op) {
1594  typedef unary_fun_work_type<ArgT, UniFn, thread_pool_base> work_type;
1595 
1596  return *this<<joinable(this, unary_fun_str)<<work_type(std::forward<ArgT>(a), op, *this);
1597 }
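/**
	Editorial example, a sketch under assumptions rather than library code: unary_fun() joinably transfers the application of op to a into the pool and returns an execution_context_stack by value, relying upon copy-elision. It is assumed here that dereferencing the returned context waits for the work and yields its result; <cassert> is required for assert().
	\code
	auto context=pool.unary_fun(21, [](int const i) noexcept { return i*2; });	// Apply the functor inside the pool.
	assert(*context==42);	// Assumed: operator* joins with the work and returns its result.
	\endcode
*/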
1598 
1599 template<
1600  class DM,
1601  pool_traits::size_mode_t Ps,
1602  typename PTT,
1603  class Pt
1604 >
1605 template<
1606  class LHSArg,
1607  class RHSArg,
1608  class BinFn
1609 >
1610 inline typename thread_pool_base<DM, Ps, PTT, Pt>::template execution_context_stack<binary_fun_work_type<LHSArg, RHSArg, BinFn, thread_pool_base<DM, Ps, PTT, Pt>>> __fastcall
1611 thread_pool_base<DM, Ps, PTT, Pt>::binary_fun(LHSArg &&lhs, RHSArg &&rhs, BinFn const &op) {
1612  typedef binary_fun_work_type<LHSArg, RHSArg, BinFn, thread_pool_base> work_type;
1613 
1614  return *this<<joinable(this, binary_fun_str)<<work_type(std::forward<LHSArg>(lhs), std::forward<RHSArg>(rhs), op, *this);
1615 }
1616 
1617 template<
1618  class DM,
1619  pool_traits::size_mode_t Ps,
1620  typename PTT,
1621  class Pt
1622 >
1623 template<
1624  class T
1625 >
1626 inline typename thread_pool_base<DM, Ps, PTT, Pt>::template execution_context_stack<binary_fun_work_type<T const, T const, std::logical_and<bool>, thread_pool_base<DM, Ps, PTT, Pt>>> __fastcall
1627 thread_pool_base<DM, Ps, PTT, Pt>::logical_and(T &&lhs, T &&rhs) {
1628  return binary_fun<T const, T const, std::logical_and<bool>>(std::forward<T>(lhs), std::forward<T>(rhs));
1629 }
1630 
1631 template<
1632  class DM,
1633  pool_traits::size_mode_t Ps,
1634  typename PTT,
1635  class Pt
1636 >
1637 template<
1638  class T
1639 >
1640 inline typename thread_pool_base<DM, Ps, PTT, Pt>::template execution_context_stack<binary_fun_work_type<T const, T const, std::logical_or<bool>, thread_pool_base<DM, Ps, PTT, Pt>>> __fastcall
1641 thread_pool_base<DM, Ps, PTT, Pt>::logical_or(T &&lhs, T &&rhs) {
1642  return binary_fun<T const, T const, std::logical_or<bool>>(std::forward<T>(lhs), std::forward<T>(rhs));
1643 }
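/**
	Editorial note with a standard-C++ analogue, not the library implementation: logical_and() and logical_or() above receive both operands as ordinary arguments, so both are evaluated; unlike the built-in operators there is no short-circuiting. The hypothetical helper below illustrates that semantic using only the standard library.
	\code
	#include <functional>
	#include <future>

	inline bool parallel_and(std::function<bool()> const &lhs, std::function<bool()> const &rhs) {
		auto l=std::async(std::launch::async, lhs);	// Start both operands...
		auto r=std::async(std::launch::async, rhs);	// ...before either result is inspected.
		bool const lv=l.get();
		bool const rv=r.get();	// rhs has been evaluated even when lv is false.
		return lv && rv;
	}
	\endcode
*/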
1644 
1645 template<
1646  class DM,
1647  pool_traits::size_mode_t Ps,
1648  typename PTT,
1649  class Pt
1650 >
1651 template<
1652  class Colln,
1653  typename Pred
1654 >
1655 inline parallel_algorithm<typename thread_pool_base<DM, Ps, PTT, Pt>::template swap_ranges_t<Colln, Pred> > __fastcall
1656 thread_pool_base<DM, Ps, PTT, Pt>::swap_ranges(typename Colln::container_type::iterator b1, typename Colln::container_type::iterator e1, typename Colln::container_type::iterator b2, Pred const &p) {
1657  typedef parallel_algorithm<swap_ranges_t<Colln, Pred> > reduction_t;
1658  this->set_statistics().update_colln_stats(std::distance(b1, e1));
1659  return reduction_t(typename reduction_t::operation_type(*this, b1, e1, b2, p));
1660 }
1661 
1662 template<
1663  class DM,
1664  pool_traits::size_mode_t Ps,
1665  typename PTT,
1666  class Pt
1667 >
1668 template<
1669  typename CollnIn,
1670  typename CollnOut,
1671  class IterIn,
1672  class UniOp
1673 >
1674 inline parallel_algorithm<typename thread_pool_base<DM, Ps, PTT, Pt>::template transform_iter_t<CollnIn, CollnOut, IterIn, UniOp> > __fastcall
1675 thread_pool_base<DM, Ps, PTT, Pt>::transform(IterIn b1, IterIn e1, typename CollnOut::container_type::iterator b2, UniOp const &uniop) {
1676  typedef parallel_algorithm<transform_iter_t<CollnIn, CollnOut, IterIn, UniOp> > reduction_t;
1677  this->set_statistics().update_colln_stats(std::distance(b1, e1));
1678  return reduction_t(typename reduction_t::operation_type(*this, b1, e1, b2, uniop));
1679 }
1680 
1681 template<
1682  class DM,
1683  pool_traits::size_mode_t Ps,
1684  typename PTT,
1685  class Pt
1686 >
1687 template<
1688  class CollnIn,
1689  class CollnOut,
1690  class IterIn
1691 >
1692 inline parallel_algorithm<typename thread_pool_base<DM, Ps, PTT, Pt>::template copy_iter_t<CollnIn, CollnOut, IterIn> > __fastcall
1693 thread_pool_base<DM, Ps, PTT, Pt>::copy(IterIn b1, IterIn e1, typename CollnOut::container_type::iterator b2) {
1694  typedef parallel_algorithm<copy_iter_t<CollnIn, CollnOut, IterIn> > reduction_t;
1695  this->set_statistics().update_colln_stats(std::distance(b1, e1));
1696  return reduction_t(typename reduction_t::operation_type(*this, b1, e1, b2));
1697 }
1698 
1699 template<
1700  class DM1,
1701  pool_traits::size_mode_t Ps1,
1702  typename PTT1,
1703  class Pt1
1704 >
1705 inline tostream &__fastcall
1706 operator<<(tostream &os, thread_pool_base<DM1, Ps1, PTT1, Pt1> const &t) {
1707  os
1708  <<_T("Pool=0x")<<&t
1709  <<_T(", type: ")<<thread_pool_base<DM1, Ps1, PTT1, Pt1>::thread_traits::demangle_name(typeid(t))
1710  <<_T(", size_mode=")<<t.size_mode
1711  <<_T(", memory_access_mode=")<<t.memory_access_mode
1712  <<_T(", max_num_threads_in_pool=")<<t.max_num_threads_in_pool;
1713  return os;
1714 }
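/**
	Editorial example, a sketch under assumptions: the inserter above permits a pool to be streamed for diagnostics. Assuming pool is a suitably-instantiated thread_pool and tostream resolves to a narrow std::ostream in this build:
	\code
	std::cout<<pool<<std::endl;	// e.g. "Pool=0x..., type: ..., size_mode=..., memory_access_mode=..., max_num_threads_in_pool=..."
	\endcode
*/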
1715 
1716 } } } }