/**
 * start a thread pool.
 *
 * @param[in] pool  thread pool object
 *
 * @return operation status
 *  @retval 0 success
 *  @retval AFS_TP_ERROR thread create failure
 */
int
afs_tp_start(struct afs_thread_pool * pool)
{
    int code, ret = 0;
    struct afs_thread_pool_worker * worker;
    afs_uint32 i;

    MUTEX_ENTER(&pool->lock);
    if (pool->state != AFS_TP_STATE_INIT) {
        ret = AFS_TP_ERROR;
        goto done_sync;
    }
    pool->state = AFS_TP_STATE_STARTING;
    MUTEX_EXIT(&pool->lock);

    for (i = 0; i < pool->max_threads; i++) {
        code = _afs_tp_worker_start(pool, &worker);
        if (code) {
            ret = code;
        }
    }

    MUTEX_ENTER(&pool->lock);
    pool->state = AFS_TP_STATE_RUNNING;

 done_sync:
    MUTEX_EXIT(&pool->lock);

    return ret;
}
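A minimal usage sketch of the pool lifecycle. Only afs_tp_start() and afs_tp_shutdown() appear in this section; the constructor afs_tp_create() and its argument order are assumptions made for illustration.

/* Hedged sketch: afs_tp_create() is assumed; afs_tp_start() and
 * afs_tp_shutdown() are the routines shown in this section. */
static int
run_pool_example(struct afs_work_queue *wq)
{
    struct afs_thread_pool *pool = NULL;
    int code;

    code = afs_tp_create(&pool, wq);   /* assumed constructor */
    if (code)
        return code;

    code = afs_tp_start(pool);         /* spawns pool->max_threads workers */
    if (code)
        return code;

    /* ... queue work on wq ... */

    return afs_tp_shutdown(pool, 1);   /* block until all workers have exited */
}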
/**
 * low-level thread entry point.
 *
 * @param[in] rock  opaque pointer to thread worker object
 *
 * @return opaque return pointer from pool entry function
 *
 * @internal
 */
static void *
_afs_tp_worker_run(void * rock)
{
    struct afs_thread_pool_worker * worker = rock;
    struct afs_thread_pool * pool = worker->pool;

    /* register worker with pool */
    MUTEX_ENTER(&pool->lock);
    queue_Append(&pool->thread_list, worker);
    pool->nthreads++;
    MUTEX_EXIT(&pool->lock);

    /* call high-level entry point */
    worker->ret = (*pool->entry)(pool, worker, pool->work_queue, pool->rock);

    /* adjust pool live thread count */
    MUTEX_ENTER(&pool->lock);
    osi_Assert(pool->nthreads);
    queue_Remove(worker);
    pool->nthreads--;
    if (!pool->nthreads) {
        CV_BROADCAST(&pool->shutdown_cv);
        pool->state = AFS_TP_STATE_STOPPED;
    }
    MUTEX_EXIT(&pool->lock);

    _afs_tp_worker_free(worker);

    return NULL;
}
/*
 * thread to combine salvager child logs
 * back into the main salvageserver log
 */
static void *
SalvageLogCleanupThread(void * arg)
{
    struct log_cleanup_node * cleanup;

    MUTEX_ENTER(&worker_lock);

    while (1) {
        while (queue_IsEmpty(&log_cleanup_queue)) {
            CV_WAIT(&log_cleanup_queue.queue_change_cv, &worker_lock);
        }

        while (queue_IsNotEmpty(&log_cleanup_queue)) {
            cleanup = queue_First(&log_cleanup_queue, log_cleanup_node);
            queue_Remove(cleanup);
            MUTEX_EXIT(&worker_lock);
            SalvageLogCleanup(cleanup->pid);
            free(cleanup);
            MUTEX_ENTER(&worker_lock);
        }
    }

    MUTEX_EXIT(&worker_lock);
    return NULL;
}
/**
 * lock a file on disk for the process.
 *
 * @param[in] lf       the struct VLockFile representing the file to lock
 * @param[in] offset   the offset in the file to lock
 * @param[in] locktype READ_LOCK or WRITE_LOCK
 * @param[in] nonblock 0 to wait for conflicting locks to clear before
 *                     obtaining the lock; 1 to fail immediately if a
 *                     conflicting lock is held by someone else
 *
 * @return operation status
 *  @retval 0 success
 *  @retval EBUSY someone else is holding a conflicting lock and nonblock=1
 *                was specified
 *  @retval EIO error acquiring file lock
 *
 * @note DAFS only
 *
 * @note do not try to lock/unlock the same offset in the same file from
 * different threads; use VGetDiskLock to protect threads from each other in
 * addition to other processes
 */
int
VLockFileLock(struct VLockFile *lf, afs_uint32 offset, int locktype, int nonblock)
{
    int code;

    osi_Assert(locktype == READ_LOCK || locktype == WRITE_LOCK);

    MUTEX_ENTER(&lf->mutex);

    if (lf->fd == INVALID_FD) {
        lf->fd = _VOpenPath(lf->path);
        if (lf->fd == INVALID_FD) {
            MUTEX_EXIT(&lf->mutex);
            return EIO;
        }
    }

    lf->refcount++;

    MUTEX_EXIT(&lf->mutex);

    code = _VLockFd(lf->fd, offset, locktype, nonblock);

    if (code) {
        MUTEX_ENTER(&lf->mutex);
        if (--lf->refcount < 1) {
            _VCloseFd(lf->fd);
            lf->fd = INVALID_FD;
        }
        MUTEX_EXIT(&lf->mutex);
    }

    return code;
}
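A short, hedged sketch of the non-blocking path. Only VLockFileLock() is shown above; the unlock counterpart VLockFileUnlock() and its signature are assumed here.

/* Hedged sketch: VLockFileUnlock() is assumed to exist with this shape. */
static int
try_lock_header(struct VLockFile *lf, afs_uint32 offset)
{
    int code = VLockFileLock(lf, offset, WRITE_LOCK, /* nonblock */ 1);

    if (code == EBUSY) {
        /* someone else holds a conflicting lock; caller can retry or back off */
        return code;
    }
    if (code) {
        return code;                 /* EIO: could not open or lock the file */
    }

    /* ... on-disk update protected by the lock ... */

    VLockFileUnlock(lf, offset);     /* assumed counterpart */
    return 0;
}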
/**
 * remove a node from a list.
 *
 * @param[in] node        node object
 * @param[in] next_state  node state following successful dequeue
 *
 * @return operation status
 *  @retval 0 success
 *  @retval AFS_WQ_ERROR in any of the following conditions:
 *            - node not associated with a work queue
 *            - node was not on a linked list (e.g. RUNNING state)
 *            - we raced another thread
 *
 * @pre node->lock held
 *
 * @post node removed from node list
 *
 * @note node->lock may be dropped internally
 *
 * @internal
 */
static int
_afs_wq_node_list_remove(struct afs_work_queue_node * node,
                         afs_wq_work_state_t next_state)
{
    int code, ret = 0;
    struct afs_work_queue_node_list * list = NULL;

    _afs_wq_node_state_wait_busy(node);

    if (!node->queue) {
        ret = AFS_WQ_ERROR;
        goto error;
    }
    switch (node->qidx) {
    case AFS_WQ_NODE_LIST_READY:
        list = &node->queue->ready_list;
        break;

    case AFS_WQ_NODE_LIST_BLOCKED:
        list = &node->queue->blocked_list;
        break;

    case AFS_WQ_NODE_LIST_DONE:
        list = &node->queue->done_list;
        break;

    default:
        ret = AFS_WQ_ERROR;
    }

    if (list) {
        code = MUTEX_TRYENTER(&list->lock);
        if (!code) {
            /* contended */
            _afs_wq_node_state_change(node, AFS_WQ_NODE_STATE_BUSY);
            MUTEX_EXIT(&node->lock);
            MUTEX_ENTER(&list->lock);
            MUTEX_ENTER(&node->lock);

            if (node->qidx == AFS_WQ_NODE_LIST_NONE) {
                /* raced */
                ret = AFS_WQ_ERROR;
                goto done_sync;
            }
        }

        queue_Remove(node);
        node->qidx = AFS_WQ_NODE_LIST_NONE;
        _afs_wq_node_state_change(node, next_state);

    done_sync:
        MUTEX_EXIT(&list->lock);
    }

 error:
    return ret;
}
void
OMR::Monitor::enter()
{
#ifdef WIN32
    MUTEX_ENTER(_monitor);
#else
    int32_t rc = MUTEX_ENTER(_monitor);
    TR_ASSERT(rc == 0, "error locking monitor\n");
#endif
}
void
OMR::Monitor::enter()
{
#if defined(OMR_OS_WINDOWS)
    MUTEX_ENTER(_monitor);
#else
    int32_t rc = MUTEX_ENTER(_monitor);
    TR_ASSERT(rc == 0, "error locking monitor\n");
#endif /* defined(OMR_OS_WINDOWS) */
}
/*
 * The event handling process.
 */
static void *
event_handler(void *argp)
{
    unsigned long rx_pthread_n_event_expired = 0;
    unsigned long rx_pthread_n_event_waits = 0;
    long rx_pthread_n_event_woken = 0;
    unsigned long rx_pthread_n_event_error = 0;
    struct timespec rx_pthread_next_event_time = { 0, 0 };
    int error;

    MUTEX_ENTER(&event_handler_mutex);

    for (;;) {
        struct clock cv;
        struct clock next;

        MUTEX_EXIT(&event_handler_mutex);

        next.sec = 30;          /* Time to sleep if there are no events scheduled */
        next.usec = 0;
        clock_GetTime(&cv);
        rxevent_RaiseEvents(&next);

        MUTEX_ENTER(&event_handler_mutex);
        if (rx_pthread_event_rescheduled) {
            rx_pthread_event_rescheduled = 0;
            continue;
        }

        clock_Add(&cv, &next);
        rx_pthread_next_event_time.tv_sec = cv.sec;
        rx_pthread_next_event_time.tv_nsec = cv.usec * 1000;
        rx_pthread_n_event_waits++;
        error = CV_TIMEDWAIT(&rx_event_handler_cond, &event_handler_mutex,
                             &rx_pthread_next_event_time);
        if (error == 0) {
            rx_pthread_n_event_woken++;
        }
#ifdef AFS_NT40_ENV
        else if (error == ETIMEDOUT) {
            rx_pthread_n_event_expired++;
        } else {
            rx_pthread_n_event_error++;
        }
#else
        else if (errno == ETIMEDOUT) {
            rx_pthread_n_event_expired++;
        } else {
            rx_pthread_n_event_error++;
        }
#endif
        rx_pthread_event_rescheduled = 0;
    }
}
static void *
SalvageChildReaperThread(void * args)
{
    int slot, pid, status;
    struct log_cleanup_node * cleanup;

    MUTEX_ENTER(&worker_lock);

    /* loop reaping our children */
    while (1) {
        /* wait() won't block unless we have children, so
         * block on the cond var if we're childless */
        while (current_workers == 0) {
            CV_WAIT(&worker_cv, &worker_lock);
        }

        MUTEX_EXIT(&worker_lock);

        cleanup = (struct log_cleanup_node *)
            malloc(sizeof(struct log_cleanup_node));

        while (Reap_Child("salvageserver", &pid, &status) < 0) {
            /* try to prevent livelock if something goes wrong */
            sleep(1);
        }

        VOL_LOCK;
        for (slot = 0; slot < Parallel; slot++) {
            if (child_slot[slot] == pid)
                break;
        }
        osi_Assert(slot < Parallel);
        child_slot[slot] = 0;
        VOL_UNLOCK;

        SALVSYNC_doneWorkByPid(pid, status);

        MUTEX_ENTER(&worker_lock);

        if (cleanup) {
            cleanup->pid = pid;
            queue_Append(&log_cleanup_queue, cleanup);
            CV_SIGNAL(&log_cleanup_queue.queue_change_cv);
        }

        /* ok, we've reaped a child */
        current_workers--;
        CV_BROADCAST(&worker_cv);
    }

    return NULL;
}
void valgrindFreeObject(MM_GCExtensionsBase *extensions, uintptr_t baseAddress)
{
    int objSize;
    if (MM_ForwardedHeader((omrobjectptr_t)baseAddress).isForwardedPointer()) {
        /* In the scavenger, an object may act as a pointer to another object
           (its replica in another region). In that case,
           getConsumedSizeInBytesWithHeader returns a junk value, so instead we
           calculate the size of the object (replica) it points to and use that
           when freeing the original object. */
        omrobjectptr_t fwObject = MM_ForwardedHeader((omrobjectptr_t)baseAddress).getForwardedObject();
        objSize = (int)((GC_ObjectModel)extensions->objectModel).getConsumedSizeInBytesWithHeader(fwObject);
    } else {
        objSize = (int)((GC_ObjectModel)extensions->objectModel).getConsumedSizeInBytesWithHeader((omrobjectptr_t)baseAddress);
    }

#if defined(VALGRIND_REQUEST_LOGS)
    VALGRIND_PRINTF_BACKTRACE("Clearing an object at 0x%lx of size %d\n", baseAddress, objSize);
#endif /* defined(VALGRIND_REQUEST_LOGS) */

    VALGRIND_CHECK_MEM_IS_DEFINED(baseAddress, objSize);
    VALGRIND_MEMPOOL_FREE(extensions->valgrindMempoolAddr, baseAddress);

    MUTEX_ENTER(extensions->memcheckHashTableMutex);
    hashTableRemove(extensions->memcheckHashTable, &baseAddress);
    MUTEX_EXIT(extensions->memcheckHashTableMutex);
}
/* Return the user's connection index of the most recently ready call; that
 * is, a call that has received at least one reply packet */
int
multi_Select(struct multi_handle *mh)
{
    int index;
    SPLVAR;
    NETPRI;
#ifdef RX_ENABLE_LOCKS
    MUTEX_ENTER(&mh->lock);
#endif /* RX_ENABLE_LOCKS */
    while (mh->nextReady == mh->firstNotReady) {
        if (mh->nReady == mh->nConns) {
#ifdef RX_ENABLE_LOCKS
            MUTEX_EXIT(&mh->lock);
#endif /* RX_ENABLE_LOCKS */
            USERPRI;
            return -1;
        }
#ifdef RX_ENABLE_LOCKS
        CV_WAIT(&mh->cv, &mh->lock);
#else /* RX_ENABLE_LOCKS */
        osi_rxSleep(mh);
#endif /* RX_ENABLE_LOCKS */
    }

    index = *(mh->nextReady);
    (mh->nextReady) += 1;
#ifdef RX_ENABLE_LOCKS
    MUTEX_EXIT(&mh->lock);
#endif /* RX_ENABLE_LOCKS */
    USERPRI;
    return index;
}
/**
 * wait for a node to complete; dequeue from done list.
 *
 * @param[in]  node     work queue node
 * @param[out] retcode  return code from work unit
 *
 * @return operation status
 *  @retval 0 success
 *
 * @pre ref held on node
 */
int
afs_wq_node_wait(struct afs_work_queue_node * node,
                 int * retcode)
{
    int ret = 0;

    MUTEX_ENTER(&node->lock);
    if (node->state == AFS_WQ_NODE_STATE_INIT) {
        /* not sure what to do in this case */
        goto done_sync;
    }

    while ((node->state != AFS_WQ_NODE_STATE_DONE) &&
           (node->state != AFS_WQ_NODE_STATE_ERROR)) {
        CV_WAIT(&node->state_cv, &node->lock);
    }
    if (retcode) {
        *retcode = node->retcode;
    }

    if (node->queue == NULL) {
        /* nothing we can do */
        goto done_sync;
    }

    ret = _afs_wq_node_list_remove(node, AFS_WQ_NODE_STATE_INIT);

 done_sync:
    MUTEX_EXIT(&node->lock);

    return ret;
}
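A hedged sketch of waiting on a previously queued node. The enqueue step is elided because no public enqueue routine appears in this section; only afs_wq_node_wait() from above is used.

/* Hedged sketch: the node is assumed to have been queued already by an
 * enqueue path not shown here. */
static int
wait_for_unit(struct afs_work_queue_node *node)
{
    int work_rc = 0;
    int code = afs_wq_node_wait(node, &work_rc);   /* blocks until DONE or ERROR */

    if (code) {
        return code;     /* e.g. dequeue from the done list failed */
    }
    return work_rc;      /* return code produced by the work unit itself */
}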
/**
 * unblock a work node for execution.
 *
 * this can be used to allow external events to influence work queue flow.
 *
 * @param[in] node  work queue node to be unblocked
 *
 * @return operation status
 *  @retval 0 success
 *
 * @post external block count decremented
 */
int
afs_wq_node_unblock(struct afs_work_queue_node * node)
{
    int ret = 0;
    int end;

    MUTEX_ENTER(&node->lock);
    ret = _afs_wq_node_state_wait_busy(node);
    if (ret) {
        goto error_sync;
    }

    end = --node->block_count;
    if (!end && (node->qidx == AFS_WQ_NODE_LIST_BLOCKED)) {
        /* blocked->unblocked transition, and we're ready to be scheduled */
        ret = _afs_wq_node_list_remove(node, AFS_WQ_NODE_STATE_BUSY);
        if (ret) {
            goto error_sync;
        }
        ret = _afs_wq_node_list_enqueue(&node->queue->ready_list, node,
                                        AFS_WQ_NODE_STATE_SCHEDULED);
    }

 error_sync:
    MUTEX_EXIT(&node->lock);

    return ret;
}
/**
 * wait for all pending nodes to finish.
 *
 * @param[in] queue  work queue
 *
 * @return operation status
 *  @retval 0 success
 *
 * @post the specified queue was empty at some point; it may not be empty by
 * the time this function returns, but at some point after the function was
 * called, there were no nodes in the ready queue or blocked queue.
 */
int
afs_wq_wait_all(struct afs_work_queue *queue)
{
    int ret = 0;

    MUTEX_ENTER(&queue->lock);

    while (queue->pend_count > 0 && !queue->shutdown) {
        CV_WAIT(&queue->empty_cv, &queue->lock);
    }

    if (queue->shutdown) {
        /* queue has been shut down, but there may still be some threads
         * running e.g. in the middle of their callback. ensure they have
         * stopped before we return. */
        while (queue->running_count > 0) {
            CV_WAIT(&queue->running_cv, &queue->lock);
        }
        ret = EINTR;
        goto done;
    }

 done:
    MUTEX_EXIT(&queue->lock);

    /* technically this doesn't really guarantee that the work queue is empty
     * after we return, but we do guarantee that it was empty at some point */

    return ret;
}
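A hedged sketch of a drain-then-continue pattern built on afs_wq_wait_all(); the EINTR return signals that the queue was shut down while waiting.

/* Hedged sketch: only afs_wq_wait_all() from above is used here. */
static int
drain_queue(struct afs_work_queue *queue)
{
    int code = afs_wq_wait_all(queue);

    if (code == EINTR) {
        /* the queue was shut down while we waited; running callbacks have
         * finished, but no further work will be scheduled */
        return code;
    }
    /* at some point after the call began, the ready and blocked lists were
     * empty; new work may have been queued since */
    return code;
}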
void _tdispInit(void) {
    MUTEX_INIT();

    MUTEX_ENTER();
    tdisp_lld_init();
    MUTEX_LEAVE();
}
/**
 * block a work node from execution.
 *
 * this can be used to allow external events to influence work queue flow.
 *
 * @param[in] node  work queue node to be blocked
 *
 * @return operation status
 *  @retval 0 success
 *
 * @post external block count incremented
 */
int
afs_wq_node_block(struct afs_work_queue_node * node)
{
    int ret = 0;
    int start;

    MUTEX_ENTER(&node->lock);
    ret = _afs_wq_node_state_wait_busy(node);
    if (ret) {
        goto error_sync;
    }

    start = node->block_count++;
    if (!start && (node->qidx == AFS_WQ_NODE_LIST_READY)) {
        /* unblocked->blocked transition, and we're already scheduled */
        ret = _afs_wq_node_list_remove(node, AFS_WQ_NODE_STATE_BUSY);
        if (ret) {
            goto error_sync;
        }
        ret = _afs_wq_node_list_enqueue(&node->queue->blocked_list, node,
                                        AFS_WQ_NODE_STATE_BLOCKED);
    }

 error_sync:
    MUTEX_EXIT(&node->lock);

    return ret;
}
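A hedged sketch of gating a node on an external event using the block/unblock pair above; wait_for_external_event() is a hypothetical placeholder, while afs_wq_node_block() and afs_wq_node_unblock() are the routines shown in this section.

/* Hedged sketch: wait_for_external_event() is hypothetical. */
static int
gate_node_on_event(struct afs_work_queue_node *node)
{
    int code = afs_wq_node_block(node);    /* bump the external block count */
    if (code)
        return code;

    wait_for_external_event();             /* hypothetical external dependency */

    return afs_wq_node_unblock(node);      /* last unblock makes it schedulable */
}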
void
hxge_hw_init_niu_common(p_hxge_t hxgep)
{
    p_hxge_hw_list_t hw_p;

    HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_hw_init_niu_common"));

    if ((hw_p = hxgep->hxge_hw_p) == NULL) {
        return;
    }

    MUTEX_ENTER(&hw_p->hxge_cfg_lock);
    if (hw_p->flags & COMMON_INIT_DONE) {
        HXGE_DEBUG_MSG((hxgep, MOD_CTL, "hxge_hw_init_niu_common"
            " already done for dip $%p exiting", hw_p->parent_devp));
        MUTEX_EXIT(&hw_p->hxge_cfg_lock);
        return;
    }

    hw_p->flags = COMMON_INIT_START;
    HXGE_DEBUG_MSG((hxgep, MOD_CTL,
        "hxge_hw_init_niu_common Started for device id %x",
        hw_p->parent_devp));

    (void) hxge_pfc_hw_reset(hxgep);
    hw_p->flags = COMMON_INIT_DONE;
    MUTEX_EXIT(&hw_p->hxge_cfg_lock);

    HXGE_DEBUG_MSG((hxgep, MOD_CTL,
        "hxge_hw_init_niu_common Done for device id %x",
        hw_p->parent_devp));
    HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_hw_init_niu_common"));
}
afs_int32
canWrite(int fid)
{
#ifndef AFS_PTHREAD_ENV
    afs_int32 code = 0;
#endif
    extern dumpSyncP dumpSyncPtr;

    ObtainWriteLock(&dumpSyncPtr->ds_lock);

    /* let the pipe drain */
    while (dumpSyncPtr->ds_bytes > 0) {
        if (dumpSyncPtr->ds_readerStatus == DS_WAITING) {
            dumpSyncPtr->ds_readerStatus = 0;
#ifdef AFS_PTHREAD_ENV
            CV_BROADCAST(&dumpSyncPtr->ds_readerStatus_cond);
#else
            code = LWP_SignalProcess(&dumpSyncPtr->ds_readerStatus);
            if (code)
                LogError(code, "canWrite: Signal delivery failed\n");
#endif
        }
        dumpSyncPtr->ds_writerStatus = DS_WAITING;
        ReleaseWriteLock(&dumpSyncPtr->ds_lock);
#ifdef AFS_PTHREAD_ENV
        MUTEX_ENTER(&dumpSyncPtr->ds_writerStatus_mutex);
        CV_WAIT(&dumpSyncPtr->ds_writerStatus_cond,
                &dumpSyncPtr->ds_writerStatus_mutex);
        MUTEX_EXIT(&dumpSyncPtr->ds_writerStatus_mutex);
#else
        LWP_WaitProcess(&dumpSyncPtr->ds_writerStatus);
#endif
        ObtainWriteLock(&dumpSyncPtr->ds_lock);
    }
    return (1);
}
void
afs_cv_timedwait(afs_kcondvar_t * cv, afs_kmutex_t * l, int waittime)
{
    int seq, isAFSGlocked = ISAFS_GLOCK();
    long t = waittime * HZ / 1000;
#ifdef DECLARE_WAITQUEUE
    DECLARE_WAITQUEUE(wait, current);
#else
    struct wait_queue wait = { current, NULL };
#endif

    seq = cv->seq;

    set_current_state(TASK_INTERRUPTIBLE);
    add_wait_queue(&cv->waitq, &wait);

    if (isAFSGlocked)
        AFS_GUNLOCK();
    MUTEX_EXIT(l);

    while (seq == cv->seq) {
        t = schedule_timeout(t);
        if (!t)         /* timeout */
            break;
    }

    remove_wait_queue(&cv->waitq, &wait);
    set_current_state(TASK_RUNNING);

    if (isAFSGlocked)
        AFS_GLOCK();
    MUTEX_ENTER(l);
}
void tdispSetBacklight(uint16_t percentage) {
    if (percentage > 100)
        percentage = 100;

    MUTEX_ENTER();
    tdisp_lld_set_backlight(percentage);
    MUTEX_LEAVE();
}
void valgrindClearRange(MM_GCExtensionsBase *extensions, uintptr_t baseAddress, uintptr_t size)
{
    if (size == 0) {
        return;
    }
    uintptr_t topInclusiveAddr = baseAddress + size - 1;

#if defined(VALGRIND_REQUEST_LOGS)
    VALGRIND_PRINTF_BACKTRACE("Clearing objects in range b/w 0x%lx and 0x%lx\n",
                              baseAddress, topInclusiveAddr);
#endif /* defined(VALGRIND_REQUEST_LOGS) */

    MUTEX_ENTER(extensions->memcheckHashTableMutex);
    GC_HashTableIterator it(extensions->memcheckHashTable);
    uintptr_t *currentSlotPointer = (uintptr_t *)it.nextSlot();
    while (currentSlotPointer != NULL) {
        if (baseAddress <= *currentSlotPointer && topInclusiveAddr >= *currentSlotPointer) {
            valgrindFreeObjectDirect(extensions, *currentSlotPointer);
            it.removeSlot();
        }
        currentSlotPointer = (uintptr_t *)it.nextSlot();
    }
    MUTEX_EXIT(extensions->memcheckHashTableMutex);

    /* Valgrind automatically marks freed objects as noaccess; we still mark
       the entire region as noaccess to cover any left-out areas. */
    valgrindMakeMemNoaccess(baseAddress, size);
}
/**
 * wakeup all threads waiting in dequeue.
 *
 * @param[in] list  list object
 *
 * @return operation status
 *  @retval 0 success
 *
 * @internal
 */
static int
_afs_wq_node_list_shutdown(struct afs_work_queue_node_list * list)
{
    int ret = 0;
    struct afs_work_queue_node *node, *nnode;

    MUTEX_ENTER(&list->lock);
    list->shutdown = 1;

    for (queue_Scan(&list->list, node, nnode, afs_work_queue_node)) {
        _afs_wq_node_state_change(node, AFS_WQ_NODE_STATE_ERROR);
        queue_Remove(node);
        node->qidx = AFS_WQ_NODE_LIST_NONE;
        node->queue = NULL;

        if (node->detached) {
            /* if we are detached, we hold the reference on the node;
             * otherwise, it is some other caller that holds the reference.
             * So don't put the node if we are not detached; the node will
             * get freed when someone else calls afs_wq_node_put */
            afs_wq_node_put(node);
        }
    }

    CV_BROADCAST(&list->cv);
    MUTEX_EXIT(&list->lock);

    return ret;
}
static int
ipf_read_random(void *dest, int length)
{
    if (length > inpot)
        return 0;

    MUTEX_ENTER(&arc4_mtx);
    if (pothead + length > pot + sizeof(pot)) {
        int left, numbytes;

        left = length;
        numbytes = pot + sizeof(pot) - pothead;
        bcopy(pothead, dest, numbytes);
        left -= numbytes;
        pothead = pot;
        bcopy(pothead, dest + length - left, left);
        pothead += left;
    } else {
        bcopy(pothead, dest, length);
        pothead += length;
    }
    inpot -= length;
    if (inpot == 0)
        pothead = pottail = pot;
    MUTEX_EXIT(&arc4_mtx);

    return length;
}
/**
 * append to a node list object.
 *
 * @param[in] list   list object
 * @param[in] node   node object
 * @param[in] state  new node state
 *
 * @return operation status
 *  @retval 0 success
 *  @retval AFS_WQ_ERROR raced to enqueue node
 *
 * @pre
 *   - node lock held
 *   - node is not on a list
 *   - node is either not busy, or it is marked as busy by the calling thread
 *
 * @post
 *   - enqueued on list
 *   - node lock dropped
 *
 * @internal
 */
static int
_afs_wq_node_list_enqueue(struct afs_work_queue_node_list * list,
                          struct afs_work_queue_node * node,
                          afs_wq_work_state_t state)
{
    int code, ret = 0;

    if (node->qidx != AFS_WQ_NODE_LIST_NONE) {
        /* raced */
        ret = AFS_WQ_ERROR;
        goto error;
    }

    /* deal with lock inversion */
    code = MUTEX_TRYENTER(&list->lock);
    if (!code) {
        /* contended */
        _afs_wq_node_state_change(node, AFS_WQ_NODE_STATE_BUSY);
        MUTEX_EXIT(&node->lock);
        MUTEX_ENTER(&list->lock);
        MUTEX_ENTER(&node->lock);

        /* assert state of the world (we set busy, so this should never happen) */
        osi_Assert(queue_IsNotOnQueue(node));
    }

    if (list->shutdown) {
        ret = AFS_WQ_ERROR;
        goto error_unlock;
    }

    osi_Assert(node->qidx == AFS_WQ_NODE_LIST_NONE);
    if (queue_IsEmpty(&list->list)) {
        /* wakeup a dequeue thread */
        CV_SIGNAL(&list->cv);
    }
    queue_Append(&list->list, node);
    node->qidx = list->qidx;
    _afs_wq_node_state_change(node, state);

 error_unlock:
    MUTEX_EXIT(&node->lock);
    MUTEX_EXIT(&list->lock);

 error:
    return ret;
}
void tdispCreateChar(uint8_t address, uint8_t *charmap) {
    /* make sure we don't write somewhere we're not supposed to */
    if (address < TDISP.maxCustomChars) {
        MUTEX_ENTER();
        tdisp_lld_create_char(address, charmap);
        MUTEX_LEAVE();
    }
}
void tdispSetCursor(coord_t col, coord_t row) {
    /* Keep the input range valid */
    if (row >= TDISP.rows)
        row = TDISP.rows - 1;

    MUTEX_ENTER();
    tdisp_lld_set_cursor(col, row);
    MUTEX_LEAVE();
}
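A hedged usage sketch for the TDISP calls above. It assumes the display subsystem has already been initialized through its normal startup path (the internal _tdispInit() shown earlier is not called directly), and the custom-character bitmap values are illustrative only.

/* Hedged sketch: only tdispSetBacklight(), tdispCreateChar() and
 * tdispSetCursor() from above are used; the bitmap is illustrative. */
static uint8_t smiley[8] = {
    0x00, 0x0A, 0x0A, 0x00, 0x11, 0x0E, 0x00, 0x00
};

static void setup_display(void) {
    tdispSetBacklight(80);        /* values above 100 are clamped internally */
    tdispCreateChar(0, smiley);   /* ignored if address >= TDISP.maxCustomChars */
    tdispSetCursor(0, 0);         /* row is clamped to TDISP.rows - 1 */
}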
/*ARGSUSED*/
static int
hxge_mmac_stat_update(kstat_t *ksp, int rw)
{
    p_hxge_t hxgep;
    p_hxge_mmac_kstat_t mmac_kstatsp;

    hxgep = (p_hxge_t)ksp->ks_private;
    if (hxgep == NULL)
        return (-1);

    HXGE_DEBUG_MSG((hxgep, KST_CTL, "==> hxge_mmac_stat_update"));

    if (rw == KSTAT_WRITE) {
        cmn_err(CE_WARN, "Can not write mmac stats");
    } else {
        MUTEX_ENTER(hxgep->genlock);
        mmac_kstatsp = (p_hxge_mmac_kstat_t)ksp->ks_data;
        mmac_kstatsp->mmac_max_addr_cnt.value.ul = hxgep->mmac.total;
        mmac_kstatsp->mmac_avail_addr_cnt.value.ul = hxgep->mmac.available;
        mmac_kstatsp->mmac_addr1.value.ul =
            hxge_mac_octet_to_u64(hxgep->mmac.addrs[0].addr);
        mmac_kstatsp->mmac_addr2.value.ul =
            hxge_mac_octet_to_u64(hxgep->mmac.addrs[1].addr);
        mmac_kstatsp->mmac_addr3.value.ul =
            hxge_mac_octet_to_u64(hxgep->mmac.addrs[2].addr);
        mmac_kstatsp->mmac_addr4.value.ul =
            hxge_mac_octet_to_u64(hxgep->mmac.addrs[3].addr);
        mmac_kstatsp->mmac_addr5.value.ul =
            hxge_mac_octet_to_u64(hxgep->mmac.addrs[4].addr);
        mmac_kstatsp->mmac_addr6.value.ul =
            hxge_mac_octet_to_u64(hxgep->mmac.addrs[5].addr);
        mmac_kstatsp->mmac_addr7.value.ul =
            hxge_mac_octet_to_u64(hxgep->mmac.addrs[6].addr);
        mmac_kstatsp->mmac_addr8.value.ul =
            hxge_mac_octet_to_u64(hxgep->mmac.addrs[7].addr);
        mmac_kstatsp->mmac_addr9.value.ul =
            hxge_mac_octet_to_u64(hxgep->mmac.addrs[8].addr);
        mmac_kstatsp->mmac_addr10.value.ul =
            hxge_mac_octet_to_u64(hxgep->mmac.addrs[9].addr);
        mmac_kstatsp->mmac_addr11.value.ul =
            hxge_mac_octet_to_u64(hxgep->mmac.addrs[10].addr);
        mmac_kstatsp->mmac_addr12.value.ul =
            hxge_mac_octet_to_u64(hxgep->mmac.addrs[11].addr);
        mmac_kstatsp->mmac_addr13.value.ul =
            hxge_mac_octet_to_u64(hxgep->mmac.addrs[12].addr);
        mmac_kstatsp->mmac_addr14.value.ul =
            hxge_mac_octet_to_u64(hxgep->mmac.addrs[13].addr);
        mmac_kstatsp->mmac_addr15.value.ul =
            hxge_mac_octet_to_u64(hxgep->mmac.addrs[14].addr);
        mmac_kstatsp->mmac_addr16.value.ul =
            hxge_mac_octet_to_u64(hxgep->mmac.addrs[15].addr);
        MUTEX_EXIT(hxgep->genlock);
    }
    HXGE_DEBUG_MSG((hxgep, KST_CTL, "<== hxge_mmac_stat_update"));
    return (0);
}
/**
 * shut down all threads in pool.
 *
 * @param[in] pool   thread pool object
 * @param[in] block  wait for all threads to terminate, if asserted
 *
 * @return operation status
 *  @retval 0 success
 */
int
afs_tp_shutdown(struct afs_thread_pool * pool, int block)
{
    int ret = 0;
    struct afs_thread_pool_worker * worker, *nn;

    MUTEX_ENTER(&pool->lock);
    if (pool->state == AFS_TP_STATE_STOPPED
        || pool->state == AFS_TP_STATE_STOPPING) {
        goto done_stopped;
    }
    if (pool->state != AFS_TP_STATE_RUNNING) {
        ret = AFS_TP_ERROR;
        goto done_sync;
    }
    pool->state = AFS_TP_STATE_STOPPING;

    for (queue_Scan(&pool->thread_list, worker, nn, afs_thread_pool_worker)) {
        worker->req_shutdown = 1;
    }
    if (!pool->nthreads) {
        pool->state = AFS_TP_STATE_STOPPED;
    }
    /* need to drop lock to get a membar here */
    MUTEX_EXIT(&pool->lock);

    ret = afs_wq_shutdown(pool->work_queue);
    if (ret) {
        goto error;
    }

    MUTEX_ENTER(&pool->lock);

 done_stopped:
    if (block) {
        while (pool->nthreads) {
            CV_WAIT(&pool->shutdown_cv, &pool->lock);
        }
    }
 done_sync:
    MUTEX_EXIT(&pool->lock);

 error:
    return ret;
}
/**
 * get a reference to a work node.
 *
 * @param[in] node  work queue node
 *
 * @return operation status
 *  @retval 0 success
 */
int
afs_wq_node_get(struct afs_work_queue_node * node)
{
    MUTEX_ENTER(&node->lock);
    node->refcount++;
    MUTEX_EXIT(&node->lock);

    return 0;
}
/**
 * detach work node.
 *
 * @param[in] node  work queue node
 *
 * @return operation status
 *  @retval 0 success
 */
int
afs_wq_node_set_detached(struct afs_work_queue_node * node)
{
    MUTEX_ENTER(&node->lock);
    node->detached = 1;
    MUTEX_EXIT(&node->lock);

    return 0;
}
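A hedged sketch of a fire-and-forget submission: the node is detached so the queue side drops its reference when the work finishes, as described in the _afs_wq_node_list_shutdown() comment above. The release call afs_wq_node_put() is referenced there but not defined in this section, so its exact signature is an assumption, as is the elided enqueue step.

/* Hedged sketch: afs_wq_node_put() is the release call referenced in
 * _afs_wq_node_list_shutdown(); its signature is assumed here. */
static void
submit_and_forget(struct afs_work_queue_node *node)
{
    afs_wq_node_get(node);            /* hold a ref while we touch the node */
    afs_wq_node_set_detached(node);   /* queue side frees the node when done */

    /* ... hand the node to an enqueue path not shown in this section ... */

    afs_wq_node_put(node);            /* drop our ref; assumed counterpart */
}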