Example #1
/*
 * igb_free_rcb_lists - Free the receive control blocks of one ring.
 */
static void
igb_free_rcb_lists(igb_rx_data_t *rx_data)
{
	igb_t *igb;
	rx_control_block_t *rcb;
	uint32_t rcb_count;
	uint32_t ref_cnt;
	int i;

	igb = rx_data->rx_ring->igb;

	mutex_enter(&igb->rx_pending_lock);

	rcb = rx_data->rcb_area;
	rcb_count = rx_data->ring_size + rx_data->free_list_size;

	for (i = 0; i < rcb_count; i++, rcb++) {
		ASSERT(rcb != NULL);

		ref_cnt = atomic_dec_32_nv(&rcb->ref_cnt);
		if (ref_cnt == 0) {
			if (rcb->mp != NULL) {
				freemsg(rcb->mp);
				rcb->mp = NULL;
			}
			igb_free_dma_buffer(&rcb->rx_buf);
		} else {
			atomic_inc_32(&rx_data->rcb_pending);
			atomic_inc_32(&igb->rcb_pending);
		}
	}

	mutex_exit(&igb->rx_pending_lock);
}
Example #2
void
dpfree(devplcy_t *dp)
{
    ASSERT(dp->dp_ref != 0xdeadbeef && dp->dp_ref != 0);
    if (atomic_dec_32_nv(&dp->dp_ref) == 0)
        kmem_free(dp, sizeof (*dp));
}
Example #3
 void weak_release() // nothrow
 {
     if( atomic_dec_32_nv( &weak_count_ ) == 0 )
     {
         destroy();
     }
 }
Example #4
 void release() // nothrow
 {
     if( atomic_dec_32_nv( &use_count_ ) == 0 )
     {
         dispose();
         weak_release();
     }
 }
Example #5
inline uint32_t dec_and_fetch(volatile uint32_t *ptr)
{
#if ELEVELDB_IS_SOLARIS
    return atomic_dec_32_nv(ptr);
#else
    return __sync_sub_and_fetch(ptr, 1);
#endif
}
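For comparison, here is a minimal sketch of the same "decrement and return the new value" helper written against C11 <stdatomic.h> instead of the Solaris or GCC primitives; the name dec_and_fetch_c11 and the memory ordering are assumptions, not part of the original eleveldb code. Note that atomic_fetch_sub returns the old value, so the new value is recovered by subtracting one more time.

#include <stdatomic.h>
#include <stdint.h>

/* Hypothetical C11 variant of dec_and_fetch(); not from the original source. */
static inline uint32_t dec_and_fetch_c11(_Atomic uint32_t *ptr)
{
    /* atomic_fetch_sub returns the previous value; subtract 1 for the new one. */
    return atomic_fetch_sub_explicit(ptr, 1, memory_order_acq_rel) - 1;
}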
Example #6
/**
 * Destroy a thread-safe global variable
 *
 * It is the caller's responsibility to ensure that no thread is using
 * this var and that none will use it again.
 *
 * @param [in] var The thread-safe global variable to destroy
 */
void
pthread_var_destroy_np(pthread_var_np_t vp)
{
    if (vp == 0)
        return;
    if (atomic_dec_32_nv(&vp->slots_in_use) > 0)
        return;     /* defer to last reader slot release via thread key dtor */
    destroy_var(vp);/* we're the last, destroy now */
}
Example #7
/*
 * Our version of vfs_rele() that stops at 1 instead of 0, and calls
 * freelfsnode() instead of kmem_free().
 */
static void
lfs_rele(struct lfsnode *lfs, struct loinfo *li)
{
    vfs_t *vfsp = &lfs->lfs_vfs;

    ASSERT(MUTEX_HELD(&li->li_lfslock));
    ASSERT(vfsp->vfs_count > 1);
    if (atomic_dec_32_nv(&vfsp->vfs_count) == 1)
        freelfsnode(lfs, li);
}
Example #8
void
crklpd_rele(credklpd_t *crkpd)
{
	if (atomic_dec_32_nv(&crkpd->crkl_ref) == 0) {
		if (crkpd->crkl_reg != NULL)
			klpd_rele(crkpd->crkl_reg);
		mutex_destroy(&crkpd->crkl_lock);
		kmem_free(crkpd, sizeof (*crkpd));
	}
}
Example #9
static void
wrapper_free(struct vwrapper *wrapper)
{
    if (wrapper == NULL)
        return;
    if (atomic_dec_32_nv(&wrapper->nref) > 0)
        return;
    if (wrapper->dtor != NULL)
        wrapper->dtor(wrapper->ptr);
    free(wrapper);
}
Example #10
void
klpd_rele(klpd_reg_t *p)
{
	if (atomic_dec_32_nv(&p->klpd_ref) == 0) {
		if (p->klpd_refp != NULL)
			klpd_unlink(p);
		if (p->klpd_cred != NULL)
			crfree(p->klpd_cred);
		door_ki_rele(p->klpd_door);
		kmem_free(p, sizeof (*p));
	}
}
Example #11
void
zrl_remove(zrlock_t *zrl)
{
	uint32_t n;

	n = atomic_dec_32_nv((uint32_t *)&zrl->zr_refcount);
	ASSERT((int32_t)n >= 0);
#ifdef	ZFS_DEBUG
	if (zrl->zr_owner == curthread) {
		zrl->zr_owner = NULL;
		zrl->zr_caller = NULL;
	}
#endif
}
Example #12
/*
 * prop_object_release_emergency
 *	A direct free with prop_object_release failed.
 *	Walk down the tree until a leaf is found and
 *	free that. Do not recurse to avoid stack overflows.
 *
 *	This is a slow edge condition, but necessary to
 *	guarantee that an object can always be freed.
 */
static void
prop_object_release_emergency(prop_object_t obj)
{
	struct _prop_object *po;
	void (*unlock)(void);
	prop_object_t parent = NULL;
	uint32_t ocnt;

	for (;;) {
		po = obj;
		_PROP_ASSERT(obj);

		if (po->po_type->pot_lock != NULL)
			po->po_type->pot_lock();

		/* Save pointer to unlock function */
		unlock = po->po_type->pot_unlock;
		
		/* Dance a bit to make sure we always get the non-racy ocnt */
		ocnt = atomic_dec_32_nv(&po->po_refcnt);
		ocnt++;
		_PROP_ASSERT(ocnt != 0);

		if (ocnt != 1) {
			if (unlock != NULL)
				unlock();
			break;
		}
		
		_PROP_ASSERT(po->po_type);		
		if ((po->po_type->pot_free)(NULL, &obj) ==
		    _PROP_OBJECT_FREE_DONE) {
			if (unlock != NULL)
				unlock();
			break;
		}

		if (unlock != NULL)
			unlock();
		
		parent = po;
		atomic_inc_32(&po->po_refcnt);
	}
	_PROP_ASSERT(parent);
	/* One object was just freed. */
	po = parent;
	(*po->po_type->pot_emergency_free)(parent);
}
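The "dance" above is worth a note: this atomics API has no "decrement and return the old value" primitive, so the pre-decrement count is reconstructed by adding one to the value returned by atomic_dec_32_nv(); reading po_refcnt separately and then decrementing would be racy. A minimal stand-alone sketch of that idiom follows (the helper name is hypothetical; Solaris declares atomic_dec_32_nv() in <atomic.h>, NetBSD in <sys/atomic.h>).

#include <stdint.h>
#include <atomic.h>     /* Solaris-style atomic_dec_32_nv() */

/* Return the counter's value as it was just before this atomic decrement. */
static inline uint32_t refcnt_dec_fetch_old(volatile uint32_t *cntp)
{
    return (atomic_dec_32_nv(cntp) + 1);
}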
Example #13
/*
 * Free rxbuf.
 */
static void
vmxnet3s_free_rxbuf(vmxnet3s_softc_t *dp, vmxnet3s_rxbuf_t *rxbuf)
{

	vmxnet3s_free(&rxbuf->dma);
	kmem_free(rxbuf, sizeof (vmxnet3s_rxbuf_t));

#ifndef DEBUG
	atomic_dec_32(&dp->rxnumbufs);
#else
	{
		uint32_t nv = atomic_dec_32_nv(&dp->rxnumbufs);
		ASSERT(nv != (uint32_t)-1);
	}
#endif
}
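The #ifndef DEBUG / #else split above is a common idiom: non-DEBUG builds use the cheaper atomic_dec_32(), while DEBUG builds pay for the new-value form so the returned count can be checked for underflow. A minimal sketch of that idiom factored into a helper (the name is hypothetical, not part of the vmxnet3s driver):

#include <sys/types.h>
#include <sys/atomic.h>
#include <sys/debug.h>

/* Hypothetical helper: assert against underflow in DEBUG builds only. */
static inline void
rxbuf_count_dec(volatile uint32_t *cntp)
{
#ifdef DEBUG
	uint32_t nv = atomic_dec_32_nv(cntp);
	ASSERT(nv != (uint32_t)-1);	/* would indicate a decrement past zero */
#else
	atomic_dec_32(cntp);
#endif
}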
Example #14
/*
 * prop_object_release --
 *	Decrement the reference count on an object.
 *
 *	Free the object if we are releasing the final
 *	reference.
 */
void
prop_object_release(prop_object_t obj)
{
	struct _prop_object *po;
	struct _prop_stack stack;
	void (*unlock)(void); 
	int ret;
	uint32_t ocnt;

	_prop_stack_init(&stack);

	do {
		do {
			po = obj;
			_PROP_ASSERT(obj);

			if (po->po_type->pot_lock != NULL)
				po->po_type->pot_lock();

			/* Save pointer to object unlock function */
			unlock = po->po_type->pot_unlock;
			
			ocnt = atomic_dec_32_nv(&po->po_refcnt);
			ocnt++;
			_PROP_ASSERT(ocnt != 0);

			if (ocnt != 1) {
				ret = 0;
				if (unlock != NULL)
					unlock();
				break;
			}
			
			ret = (po->po_type->pot_free)(&stack, &obj);

			if (unlock != NULL)
				unlock();

			if (ret == _PROP_OBJECT_FREE_DONE)
				break;
			
			atomic_inc_32(&po->po_refcnt);
		} while (ret == _PROP_OBJECT_FREE_RECURSE);
		if (ret == _PROP_OBJECT_FREE_FAILED)
			prop_object_release_emergency(obj);
	} while (_prop_stack_pop(&stack, &obj, NULL, NULL, NULL));
}
Example #15
int do_umount(vfs_t *vfs, boolean_t force)
{
	VFS_SYNC(vfs, 0, kcred);

	int ret = VFS_UNMOUNT(vfs, force ? MS_FORCE : 0, kcred);
	if(ret != 0)
		return ret;

	ASSERT(force || vfs->vfs_count == 1);
	VFS_RELE(vfs);

#ifdef DEBUG
	fprintf(stderr, "mounted filesystems: %i\n", atomic_dec_32_nv(&mounted));
#endif

	return 0;
}
Example #16
/* Thread specific key destructor for handling thread exit */
void
release_slot(void *data)
{
    struct slot *slot = data;

    if (slot == NULL)
        return;

    /* Release value */
    atomic_write_ptr((volatile void **)&slot->value, NULL);

    /* Release slot */
    atomic_write_32(&slot->in_use, 0);

    /*
     * If the thread-safe global was destroyed while we held the last
     * slot then it falls to us to complete the destruction.
     */
    if (atomic_dec_32_nv(&slot->vp->slots_in_use) == 0)
        destroy_var(slot->vp);
}
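Examples #6 and #16 are two halves of one pattern: pthread_var_destroy_np() and release_slot() both decrement slots_in_use, and whichever caller observes the count reach zero performs the deferred destruction. A generic sketch of that shape, with hypothetical types and the Solaris-style atomics used throughout these examples (not the actual pthread_var implementation):

#include <stdlib.h>
#include <stdint.h>
#include <atomic.h>     /* Solaris-style atomic_dec_32_nv() */

struct shared {
    volatile uint32_t users;    /* one per slot holder, plus one for the owner */
    void *payload;
};

/* Owner and slot holders all call this; the last one down tears the object apart. */
static void
shared_put(struct shared *sp)
{
    if (atomic_dec_32_nv(&sp->users) == 0) {
        free(sp->payload);      /* stand-in for destroy_var() */
        free(sp);
    }
}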
Example #17
/*
 * Drop the prom lock if it is held by the current CPU.  If the lock is held
 * recursively, return without clearing prom_cpu.  If the hold count is now
 * zero, clear prom_cpu and cv_signal any waiting CPU.
 */
void
kern_postprom(void)
{
	processorid_t cpuid = getprocessorid();
	cpu_t *cp = cpu[cpuid];

	if (panicstr)
		return; /* do not modify lock further if we have panicked */

	if (prom_cpu != cp)
		panic("kern_postprom: not owner, cp=%p owner=%p",
		    (void *)cp, (void *)prom_cpu);

	if (prom_holdcnt == 0)
		panic("kern_postprom: prom_holdcnt == 0, owner=%p",
		    (void *)prom_cpu);

	if (atomic_dec_32_nv(&prom_holdcnt) != 0)
		return; /* prom lock is held recursively by this CPU */

	if ((boothowto & RB_DEBUG) && prom_exit_enter_debugger)
		kmdb_enter();

	prom_thread = NULL;
	membar_producer();

	prom_cpu = NULL;
	membar_producer();

	if (CPU_IN_SET(cpu_ready_set, cpuid) && cp->cpu_m.mutex_ready) {
		mutex_enter(&prom_mutex);
		cv_signal(&prom_cv);
		mutex_exit(&prom_mutex);
		kpreempt_enable();
	}
}
Example #18
 long operator--()
 {
     return atomic_dec_32_nv( &value_ );
 }
Example #19
/**
 * Get the most up to date value of the given cf var.
 *
 * @param [in] var Pointer to a cf var
 * @param [out] res Pointer to location where the variable's value will be output
 * @param [out] version Pointer (may be NULL) to 64-bit integer where the current version will be output
 *
 * @return Zero on success, a system error code otherwise
 */
int
pthread_var_get_np(pthread_var_np_t vp, void **res, uint64_t *version)
{
    int err = 0;
    int err2 = 0;
    uint32_t nref;
    struct var *v;
    uint64_t vers, vers2;
    struct vwrapper *wrapper;
    int got_both_slots = 0; /* Whether we incremented both slots' nreaders */
    int do_signal_writer = 0;

    if (version == NULL)
        version = &vers;
    *version = 0;

    *res = NULL;

    if ((wrapper = pthread_getspecific(vp->tkey)) != NULL &&
        wrapper->version == atomic_read_64(&vp->next_version) - 1) {

        /* Fast path */
        *version = wrapper->version;
        *res = wrapper->ptr;
        return 0;
    }

    /* Get the current next version */
    *version = atomic_read_64(&vp->next_version);
    if (*version == 0) {
        /* Not set yet */
        assert(*version == 0 || *res != NULL);
        return 0;
    }
    (*version)--; /* make it the current version */

    /* Get what we hope is still the current slot */
    v = &vp->vars[(*version) & 0x1];

    /*
     * We picked a slot, but we could just have lost against one or more
     * writers.  So far nothing we've done would block any number of
     * them.
     *
     * We increment nreaders for the slot we picked to keep out
     * subsequent writers; we can then lose one more race at most.
     *
     * But we still need to learn whether we lost the race.
     */
    (void) atomic_inc_32_nv(&v->nreaders);

    /* See if we won any race */
    if ((vers2 = atomic_read_64(&vp->next_version)) == *version) {
        /*
         * We won, or didn't race at all.  We can now safely
         * increment nref for the wrapped value in the current slot.
         *
         * We can still have lost one race, but this slot is now ours.
         *
         * The key here is that we updated nreaders for one slot,
         * which might not keep the one writer we might have been
         * racing with from making the then current slot the now
         * previous slot, but because writers are serialized it will
         * keep the next writer from touching the slot we thought
         * was the current slot.  Thus here we either have the
         * current slot or the previous slot, and either way it's OK
         * for us to grab a reference to the wrapped value in the
         * slot we took.
         */
        goto got_a_slot;
    }

    /*
     * We may have incremented nreaders for the wrong slot.  Any number
     * of writers could have written between our getting
     * vp->next_version the first time, and our incrementing nreaders
     * for the corresponding slot.  We can't incref the nref of the
     * value wrapper found at the slot we picked.  We first have to find
     * the correct current slot, or ensure that no writer will release
     * the other slot.
     *
     * We increment the reader count on the other slot, but we do it
     * *before* decrementing the reader count on this one.  This should
     * guarantee that we find the other one present by keeping
     * subsequent writers (subsequent to the second writer we might be
     * racing with) out of both slots for the time between the update of
     * one slot's nreaders and the other's.
     *
     * We then have to repeat the race loss detection.  We need only do
     * this at most once.
     */
    atomic_inc_32_nv(&v->other->nreaders);

    /* We hold both slots */
    got_both_slots = 1;

    /*
     * vp->next_version can now increment by at most one, and we're
     * guaranteed to have one usable slot (whichever one we _now_ see as
     * the current slot, and which can still become the previous slot).
     */
    vers2 = atomic_read_64(&vp->next_version);
    assert(vers2 > *version);
    *version = vers2 - 1;

    /* Select a slot that looks current in this thread */
    v = &vp->vars[(*version) & 0x1];

got_a_slot:
    if (v->wrapper == NULL) {
        /* Whoa, nothing there; shouldn't happen; assert? */
        assert(*version == 0);
        assert(*version == 0 || *res != NULL);
        if (got_both_slots && atomic_dec_32_nv(&v->other->nreaders) == 0) {
            /* Last reader of a slot -> signal writer. */
            do_signal_writer = 1;
        }
        /*
         * Optimization TODO:
         *
         *    If vp->next_version hasn't changed since earlier then we
         *    should be able to avoid having to signal a writer when we
         *    decrement what we know is the current slot's nreaders to
         *    zero.  This should read:
         *
         *    if ((atomic_dec_32_nv(&v->nreaders) == 0 &&
         *         atomic_read_64(&vp->next_version) == vers2) ||
         *        do_signal_writer)
         *        err2 = signal_writer(vp);
         */
        if (atomic_dec_32_nv(&v->nreaders) == 0 || do_signal_writer)
            err2 = signal_writer(vp);
        return (err2 == 0) ? err : err2;
    }

    assert(vers2 == atomic_read_64(&vp->next_version) ||
           (vers2 + 1) == atomic_read_64(&vp->next_version));

    /* Take the wrapped value for the slot we chose */
    nref = atomic_inc_32_nv(&v->wrapper->nref);
    assert(nref > 1);
    *version = v->wrapper->version;
    *res = atomic_read_ptr((volatile void **)&v->wrapper->ptr);
    assert(*res != NULL);

    /*
     * We'll release the previous wrapper and save the new one in
     * vp->tkey below, after releasing the slot it came from.
     */
    wrapper = v->wrapper;

    /*
     * Release the slot(s) and signal any possible waiting writer if
     * either slot's nreaders drops to zero (that's what the writer will
     * be waiting for).
     *
     * The one blocking operation done by readers happens in
     * signal_writer(), but that one blocking operation is for a lock
     * that the writer will have or will soon have released, so it's
     * a practically uncontended blocking operation.
     */
    if (got_both_slots && atomic_dec_32_nv(&v->other->nreaders) == 0)
        do_signal_writer = 1;
    if (atomic_dec_32_nv(&v->nreaders) == 0 || do_signal_writer)
        err2 = signal_writer(vp);

    /*
     * Release the value previously read in this thread, if any.
     *
     * Note that we call free() here, which means that we might take a
     * lock in free().  The application's value destructor also can do
     * the same.
     *
     * TODO We could use a lock-less queue/stack to queue up wrappers
     *      for destruction by writers, then readers could be even more
     *      light-weight.
     */
    if (*res != pthread_getspecific(vp->tkey))
        pthread_var_release_np(vp);

    /* Recall this value we just read */
    err = pthread_setspecific(vp->tkey, wrapper);
    return (err2 == 0) ? err : err2;
}
Example #20
template<typename T> static T decrease_nv(T *ptr) { return atomic_dec_32_nv(ptr); }
Example #21
/*
 * e1000g_rxfree_func - the call-back function to reclaim rx buffer
 *
 * This function is called when an mp is freed by the user thru
 * freeb call (Only for mp constructed through desballoc call)
 * It returns back the freed buffer to the freelist
 */
void
e1000g_rxfree_func(p_rx_sw_packet_t packet)
{
	e1000g_rx_data_t *rx_data;
	private_devi_list_t *devi_node;
	struct e1000g *Adapter;
	uint32_t ring_cnt;
	uint32_t ref_cnt;
	unsigned char *address;

	if (packet->ref_cnt == 0) {
		/*
		 * This case only happens when rx buffers are being freed
		 * in e1000g_stop() and freemsg() is called.
		 */
		return;
	}

	rx_data = (e1000g_rx_data_t *)(uintptr_t)packet->rx_data;

	if (packet->mp == NULL) {
		/*
		 * Allocate a mblk that binds to the data buffer
		 */
		address = (unsigned char *)packet->rx_buf->address;
		if (address != NULL) {
			packet->mp = desballoc((unsigned char *)
			    address, packet->rx_buf->size,
			    BPRI_MED, &packet->free_rtn);
		}
	}

	/*
	 * Enqueue the recycled packets in a recycle queue. When freelist
	 * dries up, move the entire chain of packets from recycle queue
	 * to freelist. This helps in avoiding per packet mutex contention
	 * around freelist.
	 */
	mutex_enter(&rx_data->recycle_lock);
	QUEUE_PUSH_TAIL(&rx_data->recycle_list, &packet->Link);
	rx_data->recycle_freepkt++;
	mutex_exit(&rx_data->recycle_lock);

	ref_cnt = atomic_dec_32_nv(&packet->ref_cnt);
	if (ref_cnt == 0) {
		mutex_enter(&e1000g_rx_detach_lock);
		e1000g_free_rx_sw_packet(packet, B_FALSE);

		atomic_dec_32(&rx_data->pending_count);
		atomic_dec_32(&e1000g_mblks_pending);

		if ((rx_data->pending_count == 0) &&
		    (rx_data->flag & E1000G_RX_STOPPED)) {
			devi_node = rx_data->priv_devi_node;

			if (devi_node != NULL) {
				ring_cnt = atomic_dec_32_nv(
				    &devi_node->pending_rx_count);
				if ((ring_cnt == 0) &&
				    (devi_node->flag &
				    E1000G_PRIV_DEVI_DETACH)) {
					e1000g_free_priv_devi_node(
					    devi_node);
				}
			} else {
				Adapter = rx_data->rx_ring->adapter;
				atomic_dec_32(
				    &Adapter->pending_rx_count);
			}

			e1000g_free_rx_pending_buffers(rx_data);
			e1000g_free_rx_data(rx_data);
		}
		mutex_exit(&e1000g_rx_detach_lock);
	}
}
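The header comment of e1000g_rxfree_func() notes that the callback only fires for mblks built with desballoc(). For context, a minimal sketch of how such a callback gets attached: the driver fills in a frtn_t and hands it to desballoc(), and STREAMS invokes free_func when the loaned-up buffer is finally freed, which is where the atomic_dec_32_nv() accounting above runs. The rx_pkt type and rx_pkt_bind() helper are hypothetical, not part of the e1000g driver.

#include <sys/types.h>
#include <sys/stream.h>

struct rx_pkt {
	frtn_t		free_rtn;	/* free routine descriptor for desballoc() */
	uchar_t		*buf;		/* receive data buffer loaned upstream */
	size_t		buf_size;
};

/* Bind a recycle callback to the buffer; cb runs when the mblk is freed. */
static mblk_t *
rx_pkt_bind(struct rx_pkt *pkt, void (*cb)(struct rx_pkt *))
{
	pkt->free_rtn.free_func = (void (*)())cb;
	pkt->free_rtn.free_arg = (caddr_t)pkt;
	return (desballoc(pkt->buf, pkt->buf_size, BPRI_MED, &pkt->free_rtn));
}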