Example 1
/*
 * thread to combine salvager child logs
 * back into the main salvageserver log
 */
static void *
SalvageLogCleanupThread(void * arg)
{
    struct log_cleanup_node * cleanup;

    MUTEX_ENTER(&worker_lock);

    while (1) {
	while (queue_IsEmpty(&log_cleanup_queue)) {
	    CV_WAIT(&log_cleanup_queue.queue_change_cv, &worker_lock);
	}

	while (queue_IsNotEmpty(&log_cleanup_queue)) {
	    cleanup = queue_First(&log_cleanup_queue, log_cleanup_node);
	    queue_Remove(cleanup);
	    MUTEX_EXIT(&worker_lock);
	    SalvageLogCleanup(cleanup->pid);
	    free(cleanup);
	    MUTEX_ENTER(&worker_lock);
	}
    }

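    /* not reached: the loop above runs for the life of the server */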
    MUTEX_EXIT(&worker_lock);
    return NULL;
}
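A rough sketch of the same consumer loop in plain POSIX threads, for readers unfamiliar with the MUTEX_*/CV_* wrappers; the queue here is a hypothetical minimal list, not the OpenAFS one:

#include <pthread.h>
#include <stdlib.h>

struct work {
    struct work *next;
    int payload;
};

static pthread_mutex_t worker_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t queue_change_cv = PTHREAD_COND_INITIALIZER;
static struct work *queue_head;          /* singly-linked work queue */

static void *
cleanup_thread(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&worker_lock);
    for (;;) {
        while (queue_head == NULL)       /* wait for work to arrive */
            pthread_cond_wait(&queue_change_cv, &worker_lock);
        struct work *w = queue_head;
        queue_head = w->next;
        pthread_mutex_unlock(&worker_lock);  /* do the slow part unlocked */
        /* ... process w->payload here ... */
        free(w);
        pthread_mutex_lock(&worker_lock);
    }
    return NULL;                         /* not reached */
}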
Example 2
/**
 * start a thread pool.
 *
 * @param[in] pool  thread pool object
 *
 * @return operation status
 *    @retval 0 success
 *    @retval AFS_TP_ERROR thread create failure
 */
int
afs_tp_start(struct afs_thread_pool * pool)
{
    int code, ret = 0;
    struct afs_thread_pool_worker * worker;
    afs_uint32 i;

    MUTEX_ENTER(&pool->lock);
    if (pool->state != AFS_TP_STATE_INIT) {
        ret = AFS_TP_ERROR;
        goto done_sync;
    }
    pool->state = AFS_TP_STATE_STARTING;
    MUTEX_EXIT(&pool->lock);

    for (i = 0; i < pool->max_threads; i++) {
        code = _afs_tp_worker_start(pool, &worker);
        if (code) {
            ret = code;
        }
    }
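    /* note: a failed worker start is recorded in ret, but the loop
     * continues and the pool is still marked RUNNING below */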

    MUTEX_ENTER(&pool->lock);
    pool->state = AFS_TP_STATE_RUNNING;
done_sync:
    MUTEX_EXIT(&pool->lock);

    return ret;
}
Example 3
/**
 * lock a file on disk for the process.
 *
 * @param[in] lf       the struct VLockFile representing the file to lock
 * @param[in] offset   the offset in the file to lock
 * @param[in] locktype READ_LOCK or WRITE_LOCK
 * @param[in] nonblock 0 to wait for conflicting locks to clear before
 *                     obtaining the lock; 1 to fail immediately if a
 *                     conflicting lock is held by someone else
 *
 * @return operation status
 *  @retval 0 success
 *  @retval EBUSY someone else is holding a conflicting lock and nonblock=1 was
 *                specified
 *  @retval EIO   error acquiring file lock
 *
 * @note DAFS only
 *
 * @note do not try to lock/unlock the same offset in the same file from
 * different threads; use VGetDiskLock to protect threads from each other in
 * addition to other processes
 */
int
VLockFileLock(struct VLockFile *lf, afs_uint32 offset, int locktype, int nonblock)
{
    int code;

    osi_Assert(locktype == READ_LOCK || locktype == WRITE_LOCK);

    MUTEX_ENTER(&lf->mutex);

    if (lf->fd == INVALID_FD) {
	lf->fd = _VOpenPath(lf->path);
	if (lf->fd == INVALID_FD) {
	    MUTEX_EXIT(&lf->mutex);
	    return EIO;
	}
    }

    lf->refcount++;

    MUTEX_EXIT(&lf->mutex);

    code = _VLockFd(lf->fd, offset, locktype, nonblock);

    if (code) {
	MUTEX_ENTER(&lf->mutex);
	if (--lf->refcount < 1) {
	    _VCloseFd(lf->fd);
	    lf->fd = INVALID_FD;
	}
	MUTEX_EXIT(&lf->mutex);
    }

    return code;
}
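_VOpenPath and _VLockFd are not shown here; on POSIX systems a byte-range lock with these semantics is typically built on fcntl record locks. A minimal sketch under that assumption (not the actual OpenAFS implementation):

#include <errno.h>
#include <fcntl.h>
#include <string.h>
#include <sys/types.h>
#include <unistd.h>

/* sketch: lock one byte of 'fd' at 'offset'; type is F_RDLCK or F_WRLCK.
 * Returns 0, EBUSY, or EIO, mirroring the contract documented above. */
static int
lock_fd_range(int fd, off_t offset, short type, int nonblock)
{
    struct flock fl;

    memset(&fl, 0, sizeof(fl));
    fl.l_type = type;
    fl.l_whence = SEEK_SET;
    fl.l_start = offset;
    fl.l_len = 1;               /* a single byte is enough to serialize */

    if (fcntl(fd, nonblock ? F_SETLK : F_SETLKW, &fl) < 0) {
        if (nonblock && (errno == EACCES || errno == EAGAIN))
            return EBUSY;       /* conflicting lock held by someone else */
        return EIO;
    }
    return 0;
}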
Example 4
/**
 * low-level thread entry point.
 *
 * @param[in] rock opaque pointer to thread worker object
 *
 * @return opaque return pointer from pool entry function
 *
 * @internal
 */
static void *
_afs_tp_worker_run(void * rock)
{
    struct afs_thread_pool_worker * worker = rock;
    struct afs_thread_pool * pool = worker->pool;

    /* register worker with pool */
    MUTEX_ENTER(&pool->lock);
    queue_Append(&pool->thread_list, worker);
    pool->nthreads++;
    MUTEX_EXIT(&pool->lock);

    /* call high-level entry point */
    worker->ret = (*pool->entry)(pool, worker, pool->work_queue, pool->rock);

    /* adjust pool live thread count */
    MUTEX_ENTER(&pool->lock);
    osi_Assert(pool->nthreads);
    queue_Remove(worker);
    pool->nthreads--;
    if (!pool->nthreads) {
        CV_BROADCAST(&pool->shutdown_cv);
        pool->state = AFS_TP_STATE_STOPPED;
    }
    MUTEX_EXIT(&pool->lock);

    _afs_tp_worker_free(worker);

    return NULL;
}
Example 5
/* Return the user's connection index of the most recently ready call;
 * that is, a call that has received at least one reply packet */
int
multi_Select(struct multi_handle *mh)
{
    int index;
    SPLVAR;
    NETPRI;
#ifdef RX_ENABLE_LOCKS
    MUTEX_ENTER(&mh->lock);
#endif /* RX_ENABLE_LOCKS */
    while (mh->nextReady == mh->firstNotReady) {
	if (mh->nReady == mh->nConns) {
#ifdef RX_ENABLE_LOCKS
	    MUTEX_EXIT(&mh->lock);
#endif /* RX_ENABLE_LOCKS */
	    USERPRI;
	    return -1;
	}
#ifdef RX_ENABLE_LOCKS
	CV_WAIT(&mh->cv, &mh->lock);
#else /* RX_ENABLE_LOCKS */
	osi_rxSleep(mh);
#endif /* RX_ENABLE_LOCKS */
    }
    index = *(mh->nextReady);
    (mh->nextReady) += 1;
#ifdef RX_ENABLE_LOCKS
    MUTEX_EXIT(&mh->lock);
#endif /* RX_ENABLE_LOCKS */
    USERPRI;
    return index;
}
Example 6
void
hxge_hw_init_niu_common(p_hxge_t hxgep)
{
	p_hxge_hw_list_t hw_p;

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_hw_init_niu_common"));

	if ((hw_p = hxgep->hxge_hw_p) == NULL) {
		return;
	}

	MUTEX_ENTER(&hw_p->hxge_cfg_lock);
	if (hw_p->flags & COMMON_INIT_DONE) {
		HXGE_DEBUG_MSG((hxgep, MOD_CTL, "hxge_hw_init_niu_common"
		    " already done for dip $%p exiting", hw_p->parent_devp));
		MUTEX_EXIT(&hw_p->hxge_cfg_lock);
		return;
	}

	hw_p->flags = COMMON_INIT_START;
	HXGE_DEBUG_MSG((hxgep, MOD_CTL,
	    "hxge_hw_init_niu_common Started for device id %x",
	    hw_p->parent_devp));

	(void) hxge_pfc_hw_reset(hxgep);
	hw_p->flags = COMMON_INIT_DONE;
	MUTEX_EXIT(&hw_p->hxge_cfg_lock);

	HXGE_DEBUG_MSG((hxgep, MOD_CTL,
	    "hxge_hw_init_niu_common Done for device id %x",
	    hw_p->parent_devp));
	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_hw_init_niu_common"));
}
Example 7
/**
 * remove a dependency from a work node.
 *
 * @param[in] child  node which was dependent upon completion of parent
 * @param[in] parent node whose completion gated child's execution
 *
 * @return operation status
 *    @retval 0 success
 */
int
afs_wq_node_dep_del(struct afs_work_queue_node * child,
		    struct afs_work_queue_node * parent)
{
    int code, ret = 0;
    struct afs_work_queue_dep_node * dep, * ndep;
    struct afs_work_queue_node_multilock ml;
    int held = 0;

    memset(&ml, 0, sizeof(ml));
    ml.nodes[0].node = parent;
    ml.nodes[1].node = child;
    code = _afs_wq_node_multilock(&ml);
    if (code) {
	goto error;
    }
    held = 1;

    /* only permit changes while child is in init state
     * or running state (e.g. do a dep del when in callback func) */
    if ((child->state != AFS_WQ_NODE_STATE_INIT) &&
	(child->state != AFS_WQ_NODE_STATE_RUNNING)) {
	ret = AFS_WQ_ERROR;
	goto error;
    }

    /* locate node linking parent and child */
    for (queue_Scan(&parent->dep_children,
		    dep,
		    ndep,
		    afs_work_queue_dep_node)) {
	if ((dep->child == child) &&
	    (dep->parent == parent)) {

	    /* no need to grab an extra ref on dep->child here; the caller
	     * should already have a ref on dep->child */
	    code = _afs_wq_dep_unlink_r(dep);
	    if (code) {
		ret = code;
		goto error;
	    }

	    code = _afs_wq_dep_free(dep);
	    if (code) {
		ret = code;
		goto error;
	    }
	    break;
	}
    }

 error:
    if (held) {
	MUTEX_EXIT(&child->lock);
	MUTEX_EXIT(&parent->lock);
    }
    return ret;
}
Example 8
/**
 * remove a node from a list.
 *
 * @param[in] node        node object
 * @param[in] next_state  node state following successful dequeue
 *
 * @return operation status
 *    @retval 0 success
 *    @retval AFS_WQ_ERROR in any of the following conditions:
 *              - node not associated with a work queue
 *              - node was not on a linked list (e.g. RUNNING state)
 *              - we raced another thread
 *
 * @pre node->lock held
 *
 * @post node removed from node list
 *
 * @note node->lock may be dropped internally
 *
 * @internal
 */
static int
_afs_wq_node_list_remove(struct afs_work_queue_node * node,
			 afs_wq_work_state_t next_state)
{
    int code, ret = 0;
    struct afs_work_queue_node_list * list = NULL;

    _afs_wq_node_state_wait_busy(node);

    if (!node->queue) {
	ret = AFS_WQ_ERROR;
	goto error;
    }
    switch (node->qidx) {
    case AFS_WQ_NODE_LIST_READY:
	list = &node->queue->ready_list;
	break;

    case AFS_WQ_NODE_LIST_BLOCKED:
	list = &node->queue->blocked_list;
	break;

    case AFS_WQ_NODE_LIST_DONE:
	list = &node->queue->done_list;
	break;

    default:
	ret = AFS_WQ_ERROR;
    }

    if (list) {
	code = MUTEX_TRYENTER(&list->lock);
	if (!code) {
	    /* contended */
	    _afs_wq_node_state_change(node,
					   AFS_WQ_NODE_STATE_BUSY);
	    MUTEX_EXIT(&node->lock);
	    MUTEX_ENTER(&list->lock);
	    MUTEX_ENTER(&node->lock);

	    if (node->qidx == AFS_WQ_NODE_LIST_NONE) {
		/* raced */
		ret = AFS_WQ_ERROR;
		goto done_sync;
	    }
	}

	queue_Remove(node);
	node->qidx = AFS_WQ_NODE_LIST_NONE;
	_afs_wq_node_state_change(node, next_state);

    done_sync:
	MUTEX_EXIT(&list->lock);
    }

 error:
    return ret;
}
Example 9
int32_t
OMR::Monitor::exit()
   {
#ifdef WIN32
   MUTEX_EXIT(_monitor);
   return 0;
#else
   int32_t rc = MUTEX_EXIT(_monitor);
   TR_ASSERT(rc == 0, "error unlocking monitor\n");
   return rc;
#endif
   }
Example 10
int32_t
OMR::Monitor::exit()
   {
#if defined(OMR_OS_WINDOWS)
   MUTEX_EXIT(_monitor);
   return 0;
#else
   int32_t rc = MUTEX_EXIT(_monitor);
   TR_ASSERT(rc == 0, "error unlocking monitor\n");
   return rc;
#endif /* defined(OMR_OS_WINDOWS) */
   }
Example 11
void valgrindClearRange(MM_GCExtensionsBase *extensions, uintptr_t baseAddress, uintptr_t size)
{
    if (size == 0)
    {
        return;
    }
    uintptr_t topInclusiveAddr = baseAddress + size - 1;

#if defined(VALGRIND_REQUEST_LOGS)
    VALGRIND_PRINTF_BACKTRACE("Clearing objects in range b/w 0x%lx and  0x%lx\n", baseAddress, topInclusiveAddr);
#endif /* defined(VALGRIND_REQUEST_LOGS) */

    MUTEX_ENTER(extensions->memcheckHashTableMutex);
    GC_HashTableIterator it(extensions->memcheckHashTable);
    uintptr_t *currentSlotPointer = (uintptr_t *)it.nextSlot();
    while (currentSlotPointer != NULL)
    {
        if (baseAddress <= *currentSlotPointer && topInclusiveAddr >= *currentSlotPointer)
        {
            valgrindFreeObjectDirect(extensions, *currentSlotPointer);
            it.removeSlot();
        }
        currentSlotPointer = (uintptr_t *)it.nextSlot();
    }
    MUTEX_EXIT(extensions->memcheckHashTableMutex);

    /* Valgrind automatically marks freed objects as no-access;
       we still mark the entire region no-access to cover any leftover areas */
    valgrindMakeMemNoaccess(baseAddress, size);
}
Example 12
static int
ipf_read_random(void *dest, int length)
{
	if (length > inpot)
		return 0;

	MUTEX_ENTER(&arc4_mtx);
	if (pothead + length > pot + sizeof(pot)) {
		int left, numbytes;

		/* the request wraps past the end of the pot: copy the
		 * tail of the ring buffer, then wrap to the start for
		 * the remainder */
		left = length;
		numbytes = pot + sizeof(pot) - pothead;
		bcopy(pothead, dest, numbytes);
		left -= numbytes;
		pothead = pot;
		bcopy(pothead, (char *)dest + length - left, left);
		pothead += left;
	} else {
		bcopy(pothead, dest, length);
		pothead += length;
	}
	inpot -= length;
	if (inpot == 0)
		pothead = pottail = pot;
	MUTEX_EXIT(&arc4_mtx);

	return length;
}
Example 13
void valgrindFreeObject(MM_GCExtensionsBase *extensions, uintptr_t baseAddress)
{
    int objSize;
    if (MM_ForwardedHeader((omrobjectptr_t)baseAddress).isForwardedPointer())
    {
        /* In the scavenger, an object may act as a pointer to another
           object (its replica in another region). In that case,
           getConsumedSizeInBytesWithHeader returns a junk value, so we
           instead compute the size of the replica being pointed to and
           use that to free the original object. */
        omrobjectptr_t fwObject = MM_ForwardedHeader((omrobjectptr_t)baseAddress).getForwardedObject();
        objSize = (int)((GC_ObjectModel)extensions->objectModel).getConsumedSizeInBytesWithHeader(fwObject);
    }
    else
    {
        objSize = (int)((GC_ObjectModel)extensions->objectModel).getConsumedSizeInBytesWithHeader((omrobjectptr_t)baseAddress);
    }

#if defined(VALGRIND_REQUEST_LOGS)
    VALGRIND_PRINTF_BACKTRACE("Clearing an object at 0x%lx of size %d\n", baseAddress, objSize);
#endif /* defined(VALGRIND_REQUEST_LOGS) */

    VALGRIND_CHECK_MEM_IS_DEFINED(baseAddress, objSize);
    VALGRIND_MEMPOOL_FREE(extensions->valgrindMempoolAddr, baseAddress);

    MUTEX_ENTER(extensions->memcheckHashTableMutex);
    hashTableRemove(extensions->memcheckHashTable, &baseAddress);
    MUTEX_EXIT(extensions->memcheckHashTableMutex);
}
Example 14
afs_int32
canWrite(int fid)
{
#ifndef AFS_PTHREAD_ENV
    afs_int32 code = 0;
#endif
    extern dumpSyncP dumpSyncPtr;

    ObtainWriteLock(&dumpSyncPtr->ds_lock);

    /* let the pipe drain */
    while (dumpSyncPtr->ds_bytes > 0) {
	if (dumpSyncPtr->ds_readerStatus == DS_WAITING) {
	    dumpSyncPtr->ds_readerStatus = 0;
#ifdef AFS_PTHREAD_ENV
	    CV_BROADCAST(&dumpSyncPtr->ds_readerStatus_cond);
#else
	    code = LWP_SignalProcess(&dumpSyncPtr->ds_readerStatus);
	    if (code)
		LogError(code, "canWrite: Signal delivery failed\n");
#endif
	}
	dumpSyncPtr->ds_writerStatus = DS_WAITING;
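	/* drop ds_lock before sleeping so the reader can take it and drain the pipe */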
	ReleaseWriteLock(&dumpSyncPtr->ds_lock);
#ifdef AFS_PTHREAD_ENV
	MUTEX_ENTER(&dumpSyncPtr->ds_writerStatus_mutex);
	CV_WAIT(&dumpSyncPtr->ds_writerStatus_cond, &dumpSyncPtr->ds_writerStatus_mutex);
	MUTEX_EXIT(&dumpSyncPtr->ds_writerStatus_mutex);
#else
	LWP_WaitProcess(&dumpSyncPtr->ds_writerStatus);
#endif
	ObtainWriteLock(&dumpSyncPtr->ds_lock);
    }
    return (1);
}
Example 15
/**
 * wait for a node to complete; dequeue from done list.
 *
 * @param[in]  node     work queue node
 * @param[out] retcode  return code from work unit
 *
 * @return operation status
 *    @retval 0 success
 *
 * @pre ref held on node
 */
int
afs_wq_node_wait(struct afs_work_queue_node * node,
		 int * retcode)
{
    int ret = 0;

    MUTEX_ENTER(&node->lock);
    if (node->state == AFS_WQ_NODE_STATE_INIT) {
	/* not sure what to do in this case */
	goto done_sync;
    }

    while ((node->state != AFS_WQ_NODE_STATE_DONE) &&
	   (node->state != AFS_WQ_NODE_STATE_ERROR)) {
	CV_WAIT(&node->state_cv, &node->lock);
    }
    if (retcode) {
	*retcode = node->retcode;
    }

    if (node->queue == NULL) {
	/* nothing we can do */
	goto done_sync;
    }

    ret = _afs_wq_node_list_remove(node,
					AFS_WQ_NODE_STATE_INIT);

 done_sync:
    MUTEX_EXIT(&node->lock);

    return ret;
}
Example 16
/**
 * wakeup all threads waiting in dequeue.
 *
 * @param[in] list list object
 *
 * @return operation status
 *    @retval 0 success
 *
 * @internal
 */
static int
_afs_wq_node_list_shutdown(struct afs_work_queue_node_list * list)
{
    int ret = 0;
    struct afs_work_queue_node *node, *nnode;

    MUTEX_ENTER(&list->lock);
    list->shutdown = 1;

    for (queue_Scan(&list->list, node, nnode, afs_work_queue_node)) {
	_afs_wq_node_state_change(node, AFS_WQ_NODE_STATE_ERROR);
	queue_Remove(node);
	node->qidx = AFS_WQ_NODE_LIST_NONE;
	node->queue = NULL;

	if (node->detached) {
	    /* if we are detached, we hold the reference on the node;
	     * otherwise, it is some other caller that holds the reference.
	     * So don't put the node if we are not detached; the node will
	     * get freed when someone else calls afs_wq_node_put */
	    afs_wq_node_put(node);
	}
    }

    CV_BROADCAST(&list->cv);
    MUTEX_EXIT(&list->lock);

    return ret;
}
Example 17
/**
 * unblock a work node for execution.
 *
 * this can be used to allow external events to influence work queue flow.
 *
 * @param[in] node  work queue node to be blocked
 *
 * @return operation status
 *    @retval 0 success
 *
 * @post external block count decremented
 */
int
afs_wq_node_unblock(struct afs_work_queue_node * node)
{
    int ret = 0;
    int end;

    MUTEX_ENTER(&node->lock);
    ret = _afs_wq_node_state_wait_busy(node);
    if (ret) {
	goto error_sync;
    }

    end = --node->block_count;

    if (!end &&
	(node->qidx == AFS_WQ_NODE_LIST_BLOCKED)) {
	/* blocked->unblock transition, and we're ready to be scheduled */
	ret = _afs_wq_node_list_remove(node,
					    AFS_WQ_NODE_STATE_BUSY);
	if (ret) {
	    goto error_sync;
	}

	ret = _afs_wq_node_list_enqueue(&node->queue->ready_list,
					     node,
					     AFS_WQ_NODE_STATE_SCHEDULED);
    }

 error_sync:
    MUTEX_EXIT(&node->lock);

    return ret;
}
Example 18
/**
 * wait for all pending nodes to finish.
 *
 * @param[in] queue  work queue
 *
 * @return operation status
 *   @retval 0 success
 *
 * @post the specified queue was empty at some point; it may not be empty by
 * the time this function returns, but at some point after the function was
 * called, there were no nodes in the ready queue or blocked queue.
 */
int
afs_wq_wait_all(struct afs_work_queue *queue)
{
    int ret = 0;

    MUTEX_ENTER(&queue->lock);

    while (queue->pend_count > 0 && !queue->shutdown) {
	CV_WAIT(&queue->empty_cv, &queue->lock);
    }

    if (queue->shutdown) {
	/* queue has been shut down, but there may still be some threads
	 * running e.g. in the middle of their callback. ensure they have
	 * stopped before we return. */
	while (queue->running_count > 0) {
	    CV_WAIT(&queue->running_cv, &queue->lock);
	}
	ret = EINTR;
	goto done;
    }

 done:
    MUTEX_EXIT(&queue->lock);

    /* technically this doesn't really guarantee that the work queue is empty
     * after we return, but we do guarantee that it was empty at some point */

    return ret;
}
Example 19
void
afs_cv_timedwait(afs_kcondvar_t * cv, afs_kmutex_t * l, int waittime)
{
    int seq, isAFSGlocked = ISAFS_GLOCK();
    long t = waittime * HZ / 1000;
#ifdef DECLARE_WAITQUEUE
    DECLARE_WAITQUEUE(wait, current);
#else
    struct wait_queue wait = { current, NULL };
#endif
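    /* snapshot the sequence number: a signal/broadcast bumps cv->seq, so
     * observing a change below means we were genuinely woken rather than
     * merely timing out */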
    seq = cv->seq;

    set_current_state(TASK_INTERRUPTIBLE);
    add_wait_queue(&cv->waitq, &wait);

    if (isAFSGlocked)
	AFS_GUNLOCK();
    MUTEX_EXIT(l);

    while(seq == cv->seq) {
	t = schedule_timeout(t);
	if (!t)         /* timeout */
	    break;
    }
    
    remove_wait_queue(&cv->waitq, &wait);
    set_current_state(TASK_RUNNING);

    if (isAFSGlocked)
	AFS_GLOCK();
    MUTEX_ENTER(l);
}
Example 20
/**
 * block a work node from execution.
 *
 * this can be used to allow external events to influence work queue flow.
 *
 * @param[in] node  work queue node to be blocked
 *
 * @return operation status
 *    @retval 0 success
 *
 * @post external block count incremented
 */
int
afs_wq_node_block(struct afs_work_queue_node * node)
{
    int ret = 0;
    int start;

    MUTEX_ENTER(&node->lock);
    ret = _afs_wq_node_state_wait_busy(node);
    if (ret) {
	goto error_sync;
    }

    start = node->block_count++;

    if (!start &&
	(node->qidx == AFS_WQ_NODE_LIST_READY)) {
	/* unblocked->blocked transition, and we're already scheduled */
	ret = _afs_wq_node_list_remove(node,
					    AFS_WQ_NODE_STATE_BUSY);
	if (ret) {
	    goto error_sync;
	}

	ret = _afs_wq_node_list_enqueue(&node->queue->blocked_list,
					     node,
					     AFS_WQ_NODE_STATE_BLOCKED);
    }

 error_sync:
    MUTEX_EXIT(&node->lock);

    return ret;
}
Example 21
/**
 * append to a node list object.
 *
 * @param[in] list  list object
 * @param[in] node  node object
 * @param[in] state new node state
 *
 * @return operation status
 *    @retval 0 success
 *    @retval AFS_WQ_ERROR raced to enqueue node
 *
 * @pre
 *   - node lock held
 *   - node is not on a list
 *   - node is either not busy, or it is marked as busy by the calling thread
 *
 * @post
 *   - enqueued on list
 *   - node lock dropped
 *
 * @internal
 */
static int
_afs_wq_node_list_enqueue(struct afs_work_queue_node_list * list,
			  struct afs_work_queue_node * node,
			  afs_wq_work_state_t state)
{
    int code, ret = 0;

    if (node->qidx != AFS_WQ_NODE_LIST_NONE) {
	/* raced */
	ret = AFS_WQ_ERROR;
	goto error;
    }

    /* deal with lock inversion */
    code = MUTEX_TRYENTER(&list->lock);
    if (!code) {
	/* contended */
	_afs_wq_node_state_change(node, AFS_WQ_NODE_STATE_BUSY);
	MUTEX_EXIT(&node->lock);
	MUTEX_ENTER(&list->lock);
	MUTEX_ENTER(&node->lock);

	/* assert state of the world: we marked the node BUSY before dropping
	 * its lock, so it cannot have been re-queued behind our back */
	osi_Assert(queue_IsNotOnQueue(node));
    }

    if (list->shutdown) {
	ret = AFS_WQ_ERROR;
	goto error_unlock;
    }

    osi_Assert(node->qidx == AFS_WQ_NODE_LIST_NONE);
    if (queue_IsEmpty(&list->list)) {
	/* wakeup a dequeue thread */
	CV_SIGNAL(&list->cv);
    }
    queue_Append(&list->list, node);
    node->qidx = list->qidx;
    _afs_wq_node_state_change(node, state);

 error_unlock:
    MUTEX_EXIT(&node->lock);
    MUTEX_EXIT(&list->lock);

 error:
    return ret;
}
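The "deal with lock inversion" trylock-then-back-out dance above (the same dance appears in _afs_wq_node_list_remove earlier) is a general pattern; a minimal pthread sketch of the idea, with hypothetical names:

#include <pthread.h>

/* canonical order is list_lock before node_lock; this helper takes
 * list_lock while node_lock is already held, without deadlocking
 * against threads that lock in the canonical order */
static void
lock_list_from_node(pthread_mutex_t *node_lock, pthread_mutex_t *list_lock)
{
    if (pthread_mutex_trylock(list_lock) != 0) {
        /* contended: back out and retake both in canonical order */
        pthread_mutex_unlock(node_lock);
        pthread_mutex_lock(list_lock);
        pthread_mutex_lock(node_lock);
        /* caller must re-validate any node state observed before the
         * window in which node_lock was dropped */
    }
}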
Example 22
/*ARGSUSED*/
static int
hxge_mmac_stat_update(kstat_t *ksp, int rw)
{
	p_hxge_t		hxgep;
	p_hxge_mmac_kstat_t	mmac_kstatsp;

	hxgep = (p_hxge_t)ksp->ks_private;
	if (hxgep == NULL)
		return (-1);

	HXGE_DEBUG_MSG((hxgep, KST_CTL, "==> hxge_mmac_stat_update"));

	if (rw == KSTAT_WRITE) {
		cmn_err(CE_WARN, "Cannot write mmac stats");
	} else {
		MUTEX_ENTER(hxgep->genlock);
		mmac_kstatsp = (p_hxge_mmac_kstat_t)ksp->ks_data;
		mmac_kstatsp->mmac_max_addr_cnt.value.ul = hxgep->mmac.total;
		mmac_kstatsp->mmac_avail_addr_cnt.value.ul =
		    hxgep->mmac.available;
		mmac_kstatsp->mmac_addr1.value.ul =
		    hxge_mac_octet_to_u64(hxgep->mmac.addrs[0].addr);
		mmac_kstatsp->mmac_addr2.value.ul =
		    hxge_mac_octet_to_u64(hxgep->mmac.addrs[1].addr);
		mmac_kstatsp->mmac_addr3.value.ul =
		    hxge_mac_octet_to_u64(hxgep->mmac.addrs[2].addr);
		mmac_kstatsp->mmac_addr4.value.ul =
		    hxge_mac_octet_to_u64(hxgep->mmac.addrs[3].addr);
		mmac_kstatsp->mmac_addr5.value.ul =
		    hxge_mac_octet_to_u64(hxgep->mmac.addrs[4].addr);
		mmac_kstatsp->mmac_addr6.value.ul =
		    hxge_mac_octet_to_u64(hxgep->mmac.addrs[5].addr);
		mmac_kstatsp->mmac_addr7.value.ul =
		    hxge_mac_octet_to_u64(hxgep->mmac.addrs[6].addr);
		mmac_kstatsp->mmac_addr8.value.ul =
		    hxge_mac_octet_to_u64(hxgep->mmac.addrs[7].addr);
		mmac_kstatsp->mmac_addr9.value.ul =
		    hxge_mac_octet_to_u64(hxgep->mmac.addrs[8].addr);
		mmac_kstatsp->mmac_addr10.value.ul =
		    hxge_mac_octet_to_u64(hxgep->mmac.addrs[9].addr);
		mmac_kstatsp->mmac_addr11.value.ul =
		    hxge_mac_octet_to_u64(hxgep->mmac.addrs[10].addr);
		mmac_kstatsp->mmac_addr12.value.ul =
		    hxge_mac_octet_to_u64(hxgep->mmac.addrs[11].addr);
		mmac_kstatsp->mmac_addr13.value.ul =
		    hxge_mac_octet_to_u64(hxgep->mmac.addrs[12].addr);
		mmac_kstatsp->mmac_addr14.value.ul =
		    hxge_mac_octet_to_u64(hxgep->mmac.addrs[13].addr);
		mmac_kstatsp->mmac_addr15.value.ul =
		    hxge_mac_octet_to_u64(hxgep->mmac.addrs[14].addr);
		mmac_kstatsp->mmac_addr16.value.ul =
		    hxge_mac_octet_to_u64(hxgep->mmac.addrs[15].addr);
		MUTEX_EXIT(hxgep->genlock);
	}

	HXGE_DEBUG_MSG((hxgep, KST_CTL, "<== hxge_mmac_stat_update"));
	return (0);
}
Example 23
int
rxkad_InitKeytabDecrypt(const char *csdb, const char *ktname)
{
    int code;
    static int keytab_init;
    INIT_PTHREAD_LOCKS;
    MUTEX_ENTER(&krb5_lock);
    if (keytab_init) {
	MUTEX_EXIT(&krb5_lock);
	return 0;
    }
    checkfile_path = strdup(csdb);
    if (checkfile_path == NULL) {
	code = ENOMEM;
	goto cleanup;
    }
    k5ctx = NULL;
    keytab_name = NULL;
    code = krb5_init_context(&k5ctx);
    if (code != 0)
	goto cleanup;
    if (ktname != NULL) {
	keytab_name = strdup(ktname);
	if (keytab_name == NULL) {
	    code = KRB5_KT_BADNAME;
	    goto cleanup;
	}
    }
    keytab_init=1;
    reload_keys();
    MUTEX_EXIT(&krb5_lock);
    return 0;
cleanup:
    if (checkfile_path != NULL) {
	free(checkfile_path);
    }
    if (keytab_name != NULL) {
	free(keytab_name);
    }
    if (k5ctx != NULL) {
	krb5_free_context(k5ctx);
    }
    MUTEX_EXIT(&krb5_lock);
    return code;
}
Example 24
/**
 * shut down all threads in pool.
 *
 * @param[in] pool  thread pool object
 * @param[in] block wait for all threads to terminate, if asserted
 *
 * @return operation status
 *    @retval 0 success
 */
int
afs_tp_shutdown(struct afs_thread_pool * pool,
                int block)
{
    int ret = 0;
    struct afs_thread_pool_worker * worker, *nn;

    MUTEX_ENTER(&pool->lock);
    if (pool->state == AFS_TP_STATE_STOPPED
            || pool->state == AFS_TP_STATE_STOPPING) {
        goto done_stopped;
    }
    if (pool->state != AFS_TP_STATE_RUNNING) {
        ret = AFS_TP_ERROR;
        goto done_sync;
    }
    pool->state = AFS_TP_STATE_STOPPING;

    for (queue_Scan(&pool->thread_list, worker, nn, afs_thread_pool_worker)) {
        worker->req_shutdown = 1;
    }
    if (!pool->nthreads) {
        pool->state = AFS_TP_STATE_STOPPED;
    }
    /* drop the lock so the req_shutdown stores become visible to the
     * workers (the lock acts as the memory barrier here) */
    MUTEX_EXIT(&pool->lock);

    ret = afs_wq_shutdown(pool->work_queue);
    if (ret) {
        goto error;
    }

    MUTEX_ENTER(&pool->lock);
done_stopped:
    if (block) {
        while (pool->nthreads) {
            CV_WAIT(&pool->shutdown_cv, &pool->lock);
        }
    }
done_sync:
    MUTEX_EXIT(&pool->lock);

error:
    return ret;
}
Example 25
/*!
 * Return the internal statistics collected by rx
 *
 * @return
 * 	A statistics structure which must be freed using rx_FreeStatistics
 * @note
 * 	Takes and releases rx_stats_mutex
 */
struct rx_statistics *
rx_GetStatistics(void) {
    struct rx_statistics *stats = rxi_Alloc(sizeof(struct rx_statistics));
    MUTEX_ENTER(&rx_stats_mutex);
    memcpy(stats, &rx_stats, sizeof(struct rx_statistics));
    MUTEX_EXIT(&rx_stats_mutex);

    return stats;
}
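A hedged usage sketch: per the doc comment, the result must be released with rx_FreeStatistics, whose exact signature is assumed here.

struct rx_statistics *stats = rx_GetStatistics();
/* ... read counters from *stats ... */
rx_FreeStatistics(&stats);   /* assumption: takes &stats; check rx.h */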
Example 26
/**
 * detach work node.
 *
 * @param[in] node  work queue node
 *
 * @return operation status
 *    @retval 0 success
 */
int
afs_wq_node_set_detached(struct afs_work_queue_node * node)
{
    MUTEX_ENTER(&node->lock);
    node->detached = 1;
    MUTEX_EXIT(&node->lock);

    return 0;
}
Example 27
/**
 * get a reference to a work node.
 *
 * @param[in] node  work queue node
 *
 * @return operation status
 *    @retval 0 success
 */
int
afs_wq_node_get(struct afs_work_queue_node * node)
{
    MUTEX_ENTER(&node->lock);
    node->refcount++;
    MUTEX_EXIT(&node->lock);

    return 0;
}
Example 28
void SAMP_ThreadComplete()
{
	// release the mutex so other threads can proceed
	MUTEX_EXIT(MultiThreadMutex);
	// give them a moment to run
	SLEEP(5);
	// re-acquire the mutex before returning
	MUTEX_ENTER(MultiThreadMutex);
}
Example 29
/**
 * check whether thread pool is online.
 *
 * @param[in] pool  thread pool object
 *
 * @return whether pool is online
 *    @retval 1 pool is online
 *    @retval 0 pool is not online
 */
int
afs_tp_is_online(struct afs_thread_pool * pool)
{
    int ret;

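    /* note: the answer can go stale the moment the lock is dropped */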
    MUTEX_ENTER(&pool->lock);
    ret = (pool->state == AFS_TP_STATE_RUNNING);
    MUTEX_EXIT(&pool->lock);

    return ret;
}
Example 30
bool valgrindCheckObjectInPool(MM_GCExtensionsBase *extensions, uintptr_t baseAddress)
{
#if defined(VALGRIND_REQUEST_LOGS)
    VALGRIND_PRINTF("Checking for an object at 0x%lx\n", baseAddress);
#endif /* defined(VALGRIND_REQUEST_LOGS) */

    MUTEX_ENTER(extensions->memcheckHashTableMutex);
    bool exists = (hashTableFind(extensions->memcheckHashTable, &baseAddress) != NULL);
    MUTEX_EXIT(extensions->memcheckHashTableMutex);
    return exists;
}