/**
 * Try locking the specified Micro-Lock, returning immediately if it can't be acquired.
 * Recursive locking is supported, up to a maximum limit.
 *
 * @param ml
 *     Micro-Lock data
 *
 * @return
 *     boolean indicating success.
 *     On error, the following error codes are set:
 *     - EAGAIN (maximum number of recursive locks reached or non-recursive lock)
 *     - EBUSY (lock already held by a thread)
 */
bool rig_mlock_trylock(RIG_MLOCK ml) {
	NULLCHECK_EXIT(ml);

	size_t tid = rig_thread_id();

	// I own the mutex lock, take the fast path
	if (atomic_ops_uint_load(&ml->owner_id, ATOMIC_OPS_FENCE_NONE) == tid) {
		if (TEST_BITFIELD(ml->flags, RIG_MLOCK_RECURSIVE)) {
			// I'm the owner, recursive locking, if possible!
			// mlock holds the recursion depth; SIZE_MAX is the hard limit
			if (atomic_ops_uint_load(&ml->mlock, ATOMIC_OPS_FENCE_NONE) == SIZE_MAX) {
				ERRET(EAGAIN, false);
			}

			atomic_ops_uint_inc(&ml->mlock, ATOMIC_OPS_FENCE_ACQUIRE);

			return (true);
		}
		else {
			// Owner re-acquiring a non-recursive lock is an error
			ERRET(EAGAIN, false);
		}
	}

	// Not the owner: try to take the lock via a 0 -> 1 CAS on the lock
	// word, then record ourselves as the owner. The cheap load first
	// avoids a CAS attempt when the lock is visibly busy.
	if ((atomic_ops_uint_load(&ml->mlock, ATOMIC_OPS_FENCE_NONE) == 0)
	 && (atomic_ops_uint_cas(&ml->mlock, 0, 1, ATOMIC_OPS_FENCE_NONE))) {
		atomic_ops_uint_store(&ml->owner_id, tid, ATOMIC_OPS_FENCE_ACQUIRE);

		return (true);
	}

	// Someone else holds the lock right now
	ERRET(EBUSY, false);
}
/**
 * Lock the specified Micro-Read/Write (MRW) lock for reading, waiting on it if it can't be acquired.
 * Recursive locking of read locks is supported, up to a maximum limit.
 * Downgrading a write lock to a read lock is not supported.
 *
 * @param mrwl
 *     Micro-Read/Write lock data
 *
 * @return
 *     boolean indicating success.
 *     On error, the following error codes are set:
 *     - EAGAIN (maximum number of recursive read locks reached or non-recursive lock)
 *     - EDEADLK (thread already holds lock for writing)
 */
bool rig_mrwlock_rdlock(RIG_MRWLOCK mrwl) {
	NULLCHECK_EXIT(mrwl);

	// Get thread-local lock status data
	// (encodes this thread's hold depth; the write flag lives in
	// RIG_MRWLOCK_WRLOCK_BIT)
	void *owned = rig_tls_get(&mrwl->owned);

	// If the thread already holds a write-lock, attempting a read-lock deadlocks
	// NO DOWNGRADE SUPPORTED!
	if ((size_t)owned & RIG_MRWLOCK_WRLOCK_BIT) {
		ERRET(EDEADLK, false);
	}

	// The thread doesn't hold a write-lock at this point, so, if it instead
	// already holds a read-lock, we can recursively read-lock right away
	if ((size_t)owned > 0) {
		if (TEST_BITFIELD(mrwl->flags, RIG_MRWLOCK_RECURSIVE_READ)) {
			// Read depth is capped just below the write-lock bit
			if ((size_t)owned == (RIG_MRWLOCK_WRLOCK_BIT - 1)) {
				ERRET(EAGAIN, false);
			}

			rig_tls_set(&mrwl->owned, (void *)((size_t)owned + 1));

			return (true);
		}
		else {
			ERRET(EAGAIN, false);
		}
	}

	// The thread held nothing, so we must obtain the lock fully
	size_t spin = 0;
	size_t mrwlock;

	while (true) {
		mrwlock = atomic_ops_uint_load(&mrwl->mrwlock, ATOMIC_OPS_FENCE_NONE);

		// Reader count below its ceiling: try to bump it by one via CAS
		if ((mrwlock < (RIG_MRWLOCK_WRLOCK_BIT - 1))
		 && (atomic_ops_uint_cas(&mrwl->mrwlock, mrwlock, mrwlock + 1, ATOMIC_OPS_FENCE_NONE))) {
			// First read-hold for this thread
			rig_tls_set(&mrwl->owned, (void *)((size_t)1));

			atomic_ops_fence(ATOMIC_OPS_FENCE_ACQUIRE);

			return (true);
		}
		else if (mrwlock == (RIG_MRWLOCK_WRLOCK_BIT - 1)) {
			// Global maximum number of concurrent readers reached
			ERRET(EAGAIN, false);
		}
		else {
			// CAS lost a race or a writer holds the lock: spin, and
			// yield the CPU every RIG_MRWLOCK_SPIN_MAX attempts
			spin++;

			if (spin == RIG_MRWLOCK_SPIN_MAX) {
				spin = 0;
				rig_thread_yield();
			}
		}
	}
}
/**
 * Try locking the specified Micro-Read/Write (MRW) lock for writing, returning immediately if it can't be acquired.
 * Recursive locking of a write lock is supported, up to a maximum limit.
 * Upgrading a read lock to a write lock is not supported, you must first
 * release all read locks that are being held.
 *
 * @param mrwl
 *     Micro-Read/Write lock data
 *
 * @return
 *     boolean indicating success.
 *     On error, the following error codes are set:
 *     - EAGAIN (maximum number of recursive write locks reached or non-recursive lock)
 *     - EBUSY (lock already held by a thread)
 *     - EDEADLK (thread already holds lock for reading)
 */
bool rig_mrwlock_trywrlock(RIG_MRWLOCK mrwl) {
	NULLCHECK_EXIT(mrwl);

	// Get thread-local lock status data
	// (write-holds are encoded as RIG_MRWLOCK_WRLOCK_BIT + depth)
	void *owned = rig_tls_get(&mrwl->owned);

	// If the thread already holds a read-lock, attempting a write-lock deadlocks
	// NO UPGRADE SUPPORTED!
	if ((!((size_t)owned & RIG_MRWLOCK_WRLOCK_BIT)) && ((size_t)owned != 0)) {
		ERRET(EDEADLK, false);
	}

	// The thread doesn't hold a read-lock at this point, so, if it instead
	// already holds a write-lock, we can recursively write-lock right away
	if ((size_t)owned > RIG_MRWLOCK_WRLOCK_BIT) {
		if (TEST_BITFIELD(mrwl->flags, RIG_MRWLOCK_RECURSIVE_WRITE)) {
			// Recursion depth exhausted the counter
			if ((size_t)owned == SIZE_MAX) {
				ERRET(EAGAIN, false);
			}

			rig_tls_set(&mrwl->owned, (void *)((size_t)owned + 1));

			return (true);
		}
		else {
			ERRET(EAGAIN, false);
		}
	}

	// The thread held nothing, so we must obtain the lock fully:
	// the lock word must go 0 -> RIG_MRWLOCK_WRLOCK_BIT in one CAS
	size_t mrwlock = atomic_ops_uint_load(&mrwl->mrwlock, ATOMIC_OPS_FENCE_NONE);

	if ((mrwlock == 0)
	 && (atomic_ops_uint_cas(&mrwl->mrwlock, 0, RIG_MRWLOCK_WRLOCK_BIT, ATOMIC_OPS_FENCE_NONE))) {
		// Record first write-hold (WRLOCK_BIT flag + depth of 1)
		rig_tls_set(&mrwl->owned, (void *)(RIG_MRWLOCK_WRLOCK_BIT + 1));

		atomic_ops_fence(ATOMIC_OPS_FENCE_ACQUIRE);

		return (true);
	}

	// Readers or another writer currently hold the lock
	ERRET(EBUSY, false);
}
/**
 * Start the threads in the specified Thread-group and assign them a task.
 * A task is specified by a function of the form 'void *func(void *arg)',
 * taking one void * argument and returning a void *.
 *
 * @param thr
 *     Thread-group data
 * @param *start_routine
 *     pointer to function, this is the task that is going to be executed
 * @param arg
 *     the argument passed to the start_routine specified above, can be NULL
 *
 * @return
 *     boolean indicating success.
 *     On error, the following error codes are set:
 *     - EAGAIN (insufficient resources, other than memory)
 *     - EALREADY (Thread-group already running)
 *     - ENOMEM (insufficient memory)
 */
bool rig_thread_start(RIG_THREAD thr, void *(*start_routine)(void *arg), void *arg) {
	NULLCHECK_EXIT(thr);
	NULLCHECK_EXIT(start_routine);

	// A Thread-group can only be started once
	if (TEST_BITFIELD(thr->flags, RIG_THREAD_STARTED)) {
		ERRET(EALREADY, false);
	}

	// Record the task before spawning, so the new threads can pick it up
	thr->arg = arg;
	thr->start_routine = start_routine;

	if (thread_ops_start(thr)) {
		SET_BITFIELD(thr->flags, RIG_THREAD_STARTED);

		return (true);
	}

	// Spawning failed, propagate the platform layer's errno
	ERRET(errno, false);
}
/** * Initialize and return a Read/Write (RW) lock. * * @return * Read/Write lock data, NULL on error. * On error, the following error codes are set: * - EAGAIN (insufficient resources, other than memory) * - ENOMEM (insufficient memory) */ RIG_RWLOCK rig_rwlock_init(void) { RIG_RWLOCK rwl = rig_mem_alloc(sizeof(*rwl), 0); NULLCHECK_ERRET(rwl, ENOMEM, NULL); if (!thread_ops_rwlock_init(rwl)) { rig_mem_free(rwl); ERRET(errno, NULL); } return (rwl); }
/** * Initialize and return a Condition Variable. * * @return * Condition Variable data, NULL on error. * On error, the following error codes are set: * - EAGAIN (insufficient resources, other than memory) * - ENOMEM (insufficient memory) */ RIG_CONDVAR rig_condvar_init(void) { RIG_CONDVAR cond = rig_mem_alloc(sizeof(*cond), 0); NULLCHECK_ERRET(cond, ENOMEM, NULL); if (!thread_ops_condvar_init(cond)) { rig_mem_free(cond); ERRET(errno, NULL); } return (cond); }
/** * Initialize and return a Mutual Exclusion (Mutex) lock. * * @return * Mutex Lock data, NULL on error. * On error, the following error codes are set: * - EAGAIN (insufficient resources, other than memory) * - ENOMEM (insufficient memory) */ RIG_MXLOCK rig_mxlock_init(void) { RIG_MXLOCK mxl = rig_mem_alloc(sizeof(*mxl), 0); NULLCHECK_ERRET(mxl, ENOMEM, NULL); if (!thread_ops_mxlock_init(mxl)) { rig_mem_free(mxl); ERRET(errno, NULL); } return (mxl); }
/** * Initialize and return a TLS (Thread Local Storage) key. * * @return * TLS key data, NULL on error. * On error, the following error codes are set: * - EAGAIN (insufficient resources, other than memory) * - ENOMEM (insufficient memory) */ RIG_TLS rig_tls_init(void) { RIG_TLS tls = rig_mem_alloc(sizeof(*tls), 0); NULLCHECK_ERRET(tls, ENOMEM, NULL); if (!thread_ops_tls_init(tls)) { rig_mem_free(tls); ERRET(errno, NULL); } return (tls); }
/**
 * Detach the running threads in the specified Thread-group.
 * Once detached, threads cannot be made joinable again!
 *
 * @param thr
 *     Thread-group data
 *
 * @return
 *     boolean indicating success.
 *     On error, the following error codes are set:
 *     - EALREADY (Thread-group already detached)
 *     - EINVAL (Thread-group not running or not joinable)
 */
bool rig_thread_detach(RIG_THREAD thr) {
	NULLCHECK_EXIT(thr);

	uint16_t state = thr->flags;

	// Only a running Thread-group can be detached ...
	if (!TEST_BITFIELD(state, RIG_THREAD_STARTED)) {
		ERRET(EINVAL, false);
	}

	// ... and only once
	if (TEST_BITFIELD(state, RIG_THREAD_DETACHED)) {
		ERRET(EALREADY, false);
	}

	if (thread_ops_detach(thr)) {
		SET_BITFIELD(thr->flags, RIG_THREAD_DETACHED);

		return (true);
	}

	// Only EINVAL is an expected failure from the platform layer
	VERIFY_ERRET(errno == EINVAL);
	ERRET(errno, false);
}
/**
 * Join the running threads in the specified Thread-group, and optionally get
 * their return values. After a successful join, the Thread-group's state
 * changes to not running.
 *
 * @param thr
 *     Thread-group data
 * @param retval[]
 *     array in which to put pointers returned by a successful join,
 *     NULL means to ignore and discard the returned values
 *
 * @return
 *     boolean indicating success.
 *     On error, the following error codes are set:
 *     - EDEADLK (deadlock, join with each other or join to oneself)
 *     - EINVAL (Thread-group not running or not joinable anymore)
 */
bool rig_thread_join(RIG_THREAD thr, void *retval[]) {
	NULLCHECK_EXIT(thr);

	uint16_t state = thr->flags;

	// Joining requires a started, non-detached Thread-group
	if (!TEST_BITFIELD(state, RIG_THREAD_STARTED)) {
		ERRET(EINVAL, false);
	}

	if (TEST_BITFIELD(state, RIG_THREAD_DETACHED)) {
		ERRET(EINVAL, false);
	}

	if (thread_ops_join(thr, retval)) {
		// The group is no longer running after a successful join
		RESET_BITFIELD(thr->flags, RIG_THREAD_STARTED);

		return (true);
	}

	// Only EDEADLK and EINVAL are expected failures here
	VERIFY_ERRET((errno == EDEADLK) || (errno == EINVAL));
	ERRET(errno, false);
}
/**
 * Unlock the specified Micro-Lock.
 *
 * @param ml
 *     Micro-Lock data
 *
 * @return
 *     boolean indicating success.
 *     On error, the following error codes are set:
 *     - EPERM (current thread doesn't hold the lock or not locked at all)
 */
bool rig_mlock_unlock(RIG_MLOCK ml) {
	NULLCHECK_EXIT(ml);

	size_t mlock = atomic_ops_uint_load(&ml->mlock, ATOMIC_OPS_FENCE_NONE);

	// Not locked at all
	if (mlock == 0) {
		ERRET(EPERM, false);
	}

	// Locked, but not by the calling thread
	if (atomic_ops_uint_load(&ml->owner_id, ATOMIC_OPS_FENCE_NONE) != rig_thread_id()) {
		ERRET(EPERM, false);
	}

	// Last (or only) hold: clear the owner first, so ownership is gone
	// before the lock word is released below
	if (mlock == 1) {
		atomic_ops_uint_store(&ml->owner_id, 0, ATOMIC_OPS_FENCE_NONE);
	}

	// Drop one level of (possibly recursive) locking; the release fence
	// publishes all writes made while holding the lock
	atomic_ops_uint_dec(&ml->mlock, ATOMIC_OPS_FENCE_RELEASE);

	return (true);
}
/**
 * Unlock the specified Micro-Read/Write (MRW) lock.
 *
 * @param mrwl
 *     Micro-Read/Write lock data
 *
 * @return
 *     boolean indicating success.
 *     On error, the following error codes are set:
 *     - EPERM (current thread doesn't hold the lock or not locked at all)
 */
bool rig_mrwlock_unlock(RIG_MRWLOCK mrwl) {
	NULLCHECK_EXIT(mrwl);

	size_t mrwlock = atomic_ops_uint_load(&mrwl->mrwlock, ATOMIC_OPS_FENCE_NONE);

	// Not locked at all
	if (mrwlock == 0) {
		ERRET(EPERM, false);
	}

	// Get thread-local lock status data
	void *owned = rig_tls_get(&mrwl->owned);

	// The calling thread holds no part of this lock
	if ((size_t)owned == 0) {
		ERRET(EPERM, false);
	}

	if (mrwlock == RIG_MRWLOCK_WRLOCK_BIT) {
		// Write-locked. owned == WRLOCK_BIT + 1 is the outermost
		// write-hold: clear our TLS record, then release the lock word
		if ((size_t)owned == (RIG_MRWLOCK_WRLOCK_BIT + 1)) {
			rig_tls_set(&mrwl->owned, (void *)((size_t)0));
			atomic_ops_uint_store(&mrwl->mrwlock, 0, ATOMIC_OPS_FENCE_RELEASE);
		}
		else {
			// Recursive write-hold: just drop one level locally
			rig_tls_set(&mrwl->owned, (void *)((size_t)owned - 1));
		}
	}
	else {
		// Read-locked. owned == 1 is this thread's last read-hold:
		// clear TLS, then remove ourselves from the reader count
		if ((size_t)owned == 1) {
			rig_tls_set(&mrwl->owned, (void *)((size_t)0));
			atomic_ops_uint_dec(&mrwl->mrwlock, ATOMIC_OPS_FENCE_RELEASE);
		}
		else {
			// Recursive read-hold: drop one level locally
			rig_tls_set(&mrwl->owned, (void *)((size_t)owned - 1));
		}
	}

	return (true);
}
/**
 * Destroy specified Read/Write (RW) lock and set pointer to NULL.
 *
 * @param *rwl
 *     pointer to Read/Write lock data
 *
 * @return
 *     boolean indicating success.
 *     On error, the following error codes are set:
 *     - EBUSY (lock still in use)
 */
bool rig_rwlock_destroy(RIG_RWLOCK *rwl) {
	NULLCHECK_EXIT(rwl);

	RIG_RWLOCK lock = *rwl;

	// A pointer that already contains NULL leaves nothing to destroy
	if (lock == NULL) {
		return (true);
	}

	// The platform layer refuses to destroy a lock still in use
	if (!thread_ops_rwlock_destroy(lock)) {
		ERRET(EBUSY, false);
	}

	rig_mem_free(lock);
	*rwl = NULL;

	return (true);
}
/**
 * Destroy specified Condition Variable and set pointer to NULL.
 *
 * @param *cond
 *     pointer to Condition Variable data
 *
 * @return
 *     boolean indicating success.
 *     On error, the following error codes are set:
 *     - EBUSY (condition variable still in use)
 */
bool rig_condvar_destroy(RIG_CONDVAR *cond) {
	NULLCHECK_EXIT(cond);

	RIG_CONDVAR cv = *cond;

	// Nothing to do for an already-NULL condition variable
	if (cv == NULL) {
		return (true);
	}

	// Destruction fails if the condition variable is still in use
	if (!thread_ops_condvar_destroy(cv)) {
		ERRET(EBUSY, false);
	}

	rig_mem_free(cv);
	*cond = NULL;

	return (true);
}
/**
 * Initialize and return a Micro-Read/Write (MRW) lock.
 *
 * @param flags
 *     modify lock behavior.
 *     Supported flags are:
 *     - RIG_MRWLOCK_RECURSIVE_READ: support recursive read-side lock acquisition
 *     - RIG_MRWLOCK_RECURSIVE_WRITE: support recursive write-side lock acquisition
 *     - RIG_MRWLOCK_RECURSIVE: support recursive lock acquisition (read & write)
 *
 * @return
 *     Micro-Read/Write (MRW) lock data, NULL on error.
 *     On error, the following error codes are set:
 *     - EAGAIN (insufficient resources, other than memory)
 *     - ENOMEM (insufficient memory)
 */
RIG_MRWLOCK rig_mrwlock_init(uint16_t flags) {
	CHECK_PERMITTED_FLAGS(flags, RIG_MRWLOCK_RECURSIVE_READ | RIG_MRWLOCK_RECURSIVE_WRITE);

	// Cache-line aligned allocation for the lock structure
	RIG_MRWLOCK mrwl = rig_mem_alloc_aligned(sizeof(*mrwl), 0, CACHELINE_SIZE, RIG_MEM_ALLOC_ALIGN_PAD);
	NULLCHECK_ERRET(mrwl, ENOMEM, NULL);

	// Lock word starts out free, behavior flags are remembered as given
	atomic_ops_uint_store(&mrwl->mrwlock, 0, ATOMIC_OPS_FENCE_NONE);
	mrwl->flags = flags;

	// Per-thread ownership bookkeeping lives in a TLS key
	if (!thread_ops_tls_init(&mrwl->owned)) {
		rig_mem_free_aligned(mrwl);
		ERRET(errno, NULL);
	}

	// Publish the fully initialized lock
	atomic_ops_fence(ATOMIC_OPS_FENCE_RELEASE);

	return (mrwl);
}
/**
 * Initialize and return a Thread-group.
 * A Thread-group is Rig's way to manage threads: it keeps them together by
 * groups, each executing the same, specified task (see rig_thread_start()).
 *
 * @param flags
 *     flags to influence Thread-group behavior, currently valid are:
 *     - RIG_THREAD_DETACHED (starts threads as detached right away)
 * @param nr_threads
 *     number of threads in this Thread-group, must be > 0
 *
 * @return
 *     Thread-group data, NULL on error.
 *     On error, the following error codes are set:
 *     - EINVAL (invalid arguments passed)
 *     - ENOMEM (insufficient memory)
 */
RIG_THREAD rig_thread_init(uint16_t flags, size_t nr_threads) {
	CHECK_PERMITTED_FLAGS(flags, RIG_THREAD_DETACHED);

	// An empty Thread-group makes no sense
	if (nr_threads == 0) {
		ERRET(EINVAL, NULL);
	}

	// Allocate the group header plus one trailing thread slot per thread
	RIG_THREAD thr = rig_mem_alloc(sizeof(*thr), sizeof(RIG_THREAD_TYPE) * nr_threads);
	NULLCHECK_ERRET(thr, ENOMEM, NULL);

	thr->nr_threads = nr_threads;
	thr->flags = flags;

	// No task assigned until rig_thread_start() is called
	thr->arg = NULL;
	thr->start_routine = NULL;

	return (thr);
}
/**
 * Destroy specified Micro-Lock and set pointer to NULL.
 *
 * @param *ml
 *     pointer to Micro-Lock data
 *
 * @return
 *     boolean indicating success.
 *     On error, the following error codes are set:
 *     - EBUSY (lock still in use)
 */
bool rig_mlock_destroy(RIG_MLOCK *ml) {
	NULLCHECK_EXIT(ml);

	RIG_MLOCK lock = *ml;

	// A pointer already containing NULL leaves nothing to destroy
	if (lock == NULL) {
		return (true);
	}

	// Refuse to free a lock that is still held somewhere
	if (atomic_ops_uint_load(&lock->mlock, ATOMIC_OPS_FENCE_ACQUIRE) != 0) {
		ERRET(EBUSY, false);
	}

	rig_mem_free_aligned(lock);
	*ml = NULL;

	return (true);
}
/*
 * Write the in-memory assign environment out to the global assign
 * environment file, then truncate the file to the length of the data
 * just written.
 *
 * gfile	global assign environment file (open for update)
 * aep		assign environment to externalize
 *
 * Returns 0 on success, -1 on a seek/tell/truncate failure;
 * ERRET(ERAS_WRERR) on a stdio write error.
 */
int
_ae_externalize_file(
	FILE *gfile,		/* global assign environment file. */
	assign_environment *aep)
{
	int i;
	int ss;
	long bytecnt;
	assign_record *arp;

	rewind(gfile);

	for (i = 0; i < aep->rec_cnt; i++) {
		arp = &aep->ar[i];

		/*
		 * fprintf() reports errors with any negative value,
		 * not necessarily -1, so test with '< 0'.
		 */
		ss = fprintf(gfile, "assign %s", arp->attr);
		if (ss < 0) {
			ERRET(ERAS_WRERR);
		}

		switch (arp->id.type) {
		case BYFILE:
		case BYGLOBAL:
		case BYPATTERN:
			ss = fprintf(gfile, " %c:%s\n", arp->id.type, arp->id.u.str);
			break;

		case BYUNIT:
			ss = fprintf(gfile, " u:%ld\n", arp->id.u.unit);
			break;

		default:
			/* Unknown id type: nothing more written for this record */
			break;
		}

		if (ss < 0) {
			ERRET(ERAS_WRERR);
		}
	}

	/*
	 * We truncate the file after the end of the data written.
	 */
	bytecnt = ftell(gfile);	/* get file size */
	if (bytecnt == -1L) {	/* ftell() can fail; don't seek to -1 */
		return(-1);
	}

	if (fflush(gfile) == EOF) {
		ERRET(ERAS_WRERR);
	}

	/*
	 * Re-sync the underlying file descriptor with the stream position
	 * before operating on it directly. lseek() returns an off_t, so
	 * compare against (off_t)-1 instead of narrowing into an int.
	 */
	if (lseek(fileno(gfile), (off_t)bytecnt, SEEK_SET) == (off_t)-1) {
		return(-1);
	}

#ifdef _UNICOS
	ss = trunc(fileno(gfile));
#else
	ss = ftruncate(fileno(gfile), bytecnt);
#endif
	if (ss == -1) {
		return(-1);
	}

	return(0);
}