// Constructor (fragment): wraps a pthread_condattr_t, initialising it and
// applying the process-shared setting (`shared` is presumably
// PTHREAD_PROCESS_PRIVATE/SHARED — TODO confirm against callers).
// NOTE(review): this is a partial extract; the closing brace and the
// enclosing class are not visible here.
23 explicit __stdcall attr(
const int shared)
noexcept(
true)
FORCE_INLINE {
// Return codes are deliberately discarded ([[maybe_unused]]) — init
// failure is apparently treated as non-fatal here; TODO confirm.
24 [[maybe_unused]]
const int ret1=pthread_condattr_init(&attr_);
26 [[maybe_unused]]
const int ret2=pthread_condattr_setpshared(&attr_, shared);
// Copy construction is deleted: the wrapper owns a live
// pthread_condattr_t, which must not be duplicated.
29 attr(attr
const &)=
delete;
// Destructor fragment: tears the attribute object down; the
// pthread_condattr_destroy return code is intentionally ignored.
31 [[maybe_unused]]
const int ret=pthread_condattr_destroy(&attr_);
// Implicit conversion to const pthread_condattr_t* (fragment) so the
// wrapper can be handed straight to pthread_cond_init(). Body not
// visible in this extract — presumably `return &attr_;`.
35 operator pthread_condattr_t
const *()
const noexcept(
true)
FORCE_INLINE {
// Stream-insertion helper (fragment): prints the attribute via its
// const-pointer conversion operator (i.e. the underlying address).
43 operator<<(
tostream &os, attr
const &a) {
44 os<<
static_cast<pthread_condattr_t
const *>(a);
// The wrapped POSIX condition-variable attribute object.
49 pthread_condattr_t attr_;
// Condition-variable setup (fragment): pthread_cond_init result is
// cast to the traits' state type; success is presumably
// lock_traits::atom_set (i.e. 0) — TODO confirm against lock_traits.
55 const atomic_state_type pth_err=
static_cast<atomic_state_type>(pthread_cond_init(&cond_var,
static_cast<
const pthread_condattr_t *>(attrs)));
// Teardown fragment: destroy result only checked in debug builds via
// the assert below; release builds discard it ([[maybe_unused]]).
68 [[maybe_unused]]
const atomic_state_type pth_err=
static_cast<atomic_state_type>(pthread_cond_destroy(&cond_var));
69 assert(pth_err==lock_traits::atom_set);
// Mutex-attribute constructor (fragment): initialises a
// pthread_mutexattr_t, then applies the process-shared setting and the
// mutex type (`err_chk` — e.g. PTHREAD_MUTEX_ERRORCHECK, judging by the
// caller visible further down). Closing brace not visible here.
74 __stdcall attr(
const int shared,
const int err_chk)
noexcept(
true)
FORCE_INLINE {
// All three return codes are intentionally discarded; presumably
// attribute-setup failure is considered non-fatal — TODO confirm.
75 [[maybe_unused]]
const int ret1=pthread_mutexattr_init(&attr_);
77 [[maybe_unused]]
const int ret2=pthread_mutexattr_setpshared(&attr_,shared);
79 [[maybe_unused]]
const int ret3=pthread_mutexattr_settype(&attr_,err_chk);
// Copy construction deleted — the wrapper owns a live
// pthread_mutexattr_t that must not be duplicated.
82 attr(attr
const &)=
delete;
// Destructor fragment: pthread_mutexattr_destroy result is ignored.
84 [[maybe_unused]]
const int ret=pthread_mutexattr_destroy(&attr_);
// Implicit conversion to const pthread_mutexattr_t* (fragment) for
// passing directly to pthread_mutex_init(). Body not visible here.
88 operator pthread_mutexattr_t
const *()
const noexcept(
true)
FORCE_INLINE {
// Stream-insertion helper (fragment) for the mutex-attribute wrapper;
// prints via the const-pointer conversion (the underlying address).
// Declared noexcept(false) — stream insertion may throw.
96 operator<<(
tostream &os, attr
const &a)
noexcept(
false) {
97 os<<
static_cast<pthread_mutexattr_t
const *>(a);
// The wrapped POSIX mutex attribute object.
102 pthread_mutexattr_t attr_;
// Mutex setup (fragment): builds a process-private, error-checking
// attribute set, then initialises the mutex with it. NOTE(review): the
// conversion here casts away the const that pthread_mutex_init does not
// need — the attrs wrapper's operator yields a const pointer elsewhere;
// confirm the non-const cast compiles as intended in the full file.
107 attr attrs(PTHREAD_PROCESS_PRIVATE, PTHREAD_MUTEX_ERRORCHECK);
108 const atomic_state_type pth_err=
static_cast<atomic_state_type>(pthread_mutex_init(&mutex,
static_cast<pthread_mutexattr_t *>(attrs)));
// Error path begins here; the handling body is not visible in this extract.
109 if (pth_err!=lock_traits::atom_set) {
// Teardown fragment: destroy result checked only by the debug assert.
118 [[maybe_unused]]
const atomic_state_type pth_err=
static_cast<atomic_state_type>(pthread_mutex_destroy(&mutex));
119 assert(pth_err==lock_traits::atom_set);
// Blocking lock (fragment): result of pthread_mutex_lock mapped onto
// the traits' state type (0 presumably == atom_set — TODO confirm).
129 return static_cast<atomic_state_type>(pthread_mutex_lock(&mutex));
// Timed-lock fragment: an infinite timeout presumably falls through to
// the blocking path above; otherwise a non-blocking trylock is used.
134 if (timeout==lock_traits::infinite_timeout()) {
137 return static_cast<atomic_state_type>(pthread_mutex_trylock(&mutex));
// Unlock fragment: a zero (success) result is reported as atom_unset.
// NOTE(review): the static_cast on `ret` below looks redundant (`ret`
// is already atomic_state_type) — confirm against the full file.
143 const atomic_state_type ret=
static_cast<atomic_state_type>(pthread_mutex_unlock(&mutex));
144 return ret ?
static_cast<atomic_state_type>(ret) : lock_traits::atom_unset;
// Parameterised mutex setup (fragment): same shape as the
// process-private initialiser above, but `shared` and `err_chk` are
// caller-supplied. Error-handling body not visible in this extract.
153 attr attrs(shared, err_chk);
154 const atomic_state_type pth_err=
static_cast<atomic_state_type>(pthread_mutex_init(&mutex,
static_cast<pthread_mutexattr_t *>(attrs)));
155 if (pth_err!=lock_traits::atom_set) {
// Semaphore setup (fragment): initial count is 1 when the requested
// state is atom_set, else 0 — i.e. the semaphore models a binary lock.
// First variant is process-private (pshared == 0).
168 const atomic_state_type err=
static_cast<atomic_state_type>(sem_init(&sem, 0, state==lock_traits::atom_set));
169 if (err!=lock_traits::atom_set) {
// Second variant: caller controls the pshared flag.
178 const atomic_state_type err=
static_cast<atomic_state_type>(sem_init(&sem, shared, state==lock_traits::atom_set));
179 if (err!=lock_traits::atom_set) {
// Debug-only success check (context line; surrounding code not visible).
193 assert(err==lock_traits::atom_set);
// Non-blocking acquire (fragment): sem_trywait returns 0 on success,
// so success maps to atom_set and failure (EAGAIN etc.) to atom_unset.
218 if (!sem_trywait(&sem)) {
219 return lock_traits::atom_set;
221 return lock_traits::atom_unset;
// Blocking acquire (fragment): sem_wait returning non-zero (e.g. EINTR)
// is reported as atom_abandoned — TODO confirm that callers treat an
// interrupted wait as abandonment rather than retrying.
227 if (!sem_wait(&sem)) {
228 return lock_traits::atom_set;
230 return lock_traits::atom_abandoned;
// Drain loop (fragment): repeatedly try_lock() until it stops
// succeeding — presumably empties the semaphore's count down to zero;
// TODO confirm intent against the enclosing method.
236 while (try_lock()==lock_traits::atom_set);
// Error-path and invariant fragments; bodies not visible here.
243 if (err!=lock_traits::atom_set) {
// Event-count invariants checked in debug builds only.
253 assert(event_.count()==1);
264 assert(event_.count()>=0);
271 assert(event_.count()>=0);
277 assert(event_.count()>=0);
279 assert(event_.count()>=0);
// Out-of-line definition of nonrecursive_anon_mutex::decay() (fragment):
// declared non-throwing; the body runs past the end of this extract.
284 nonrecursive_anon_mutex::
decay()
noexcept(
true) {