Example #1
/*
 * Allocate an icl_pdu with an empty BHS for the caller to fill in.
 */
struct icl_pdu *
icl_soft_conn_new_pdu(struct icl_conn *ic, int flags)
{
	struct icl_pdu *ip;

#ifdef DIAGNOSTIC
	refcount_acquire(&ic->ic_outstanding_pdus);
#endif
	ip = uma_zalloc(icl_pdu_zone, flags | M_ZERO);
	if (ip == NULL) {
		ICL_WARN("failed to allocate %zd bytes", sizeof(*ip));
#ifdef DIAGNOSTIC
		refcount_release(&ic->ic_outstanding_pdus);
#endif
		return (NULL);
	}
	ip->ip_conn = ic;

	CTASSERT(sizeof(struct iscsi_bhs) <= MHLEN);
	ip->ip_bhs_mbuf = m_gethdr(flags, MT_DATA);
	if (ip->ip_bhs_mbuf == NULL) {
		ICL_WARN("failed to allocate BHS mbuf");
		icl_soft_conn_pdu_free(ic, ip);
		return (NULL);
	}
	ip->ip_bhs = mtod(ip->ip_bhs_mbuf, struct iscsi_bhs *);
	memset(ip->ip_bhs, 0, sizeof(struct iscsi_bhs));
	ip->ip_bhs_mbuf->m_len = sizeof(struct iscsi_bhs);

	return (ip);
}
Example #2
static inline void ttm_object_file_unref(struct ttm_object_file **p_tfile)
{
	struct ttm_object_file *tfile = *p_tfile;

	*p_tfile = NULL;
	if (refcount_release(&tfile->refcount))
		ttm_object_file_destroy(tfile);
}
Example #3
void
drm_gem_object_unreference(struct drm_gem_object *obj)
{

	if (obj == NULL)
		return;
	if (refcount_release(&obj->refcount))
		drm_gem_object_free(obj);
}
Example #4
void ttm_base_object_unref(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;

	*p_base = NULL;

	if (refcount_release(&base->refcount))
		ttm_release_base(base);
}
Example #5
/*
 * Free a cred structure.  Throws away space when ref count gets to 0.
 */
void
crfree(struct ucred *cr)
{

	KASSERT(cr->cr_ref > 0, ("bad ucred refcount: %d", cr->cr_ref));
	KASSERT(cr->cr_ref != 0xdeadc0de, ("dangling reference to ucred"));
	if (refcount_release(&cr->cr_ref)) {
		free(cr, M_CRED);
	}
}
Example #6
/**
 * radeon_fence_unref - remove a ref on a fence
 *
 * @fence: radeon fence object
 *
 * Remove a reference on a fence (all asics).
 */
void radeon_fence_unref(struct radeon_fence **fence)
{
    struct radeon_fence *tmp = *fence;

    *fence = NULL;
    if (tmp) {
        if (refcount_release(&tmp->kref)) {
            radeon_fence_destroy(tmp);
        }
    }
}
Example #7
/**
 * Release a reference to the given data instance.
 *
 * If this is the last reference, the data instance and its associated
 * resources will be freed.
 * 
 * @param nv The reference to be released.
 */
void
bhnd_nvram_data_release(struct bhnd_nvram_data *nv)
{
	if (!refcount_release(&nv->refs))
		return;

	/* Free any internal resources */
	nv->cls->op_free(nv);

	/* Free the instance allocation */
	bhnd_nv_free(nv);
}
Example #8
void
vtfont_unref(struct vt_font *vf)
{
    unsigned int i;

    if (refcount_release(&vf->vf_refcount)) {
        for (i = 0; i < VFNT_MAPS; i++)
            free(vf->vf_map[i], M_VTFONT);
        free(vf->vf_bytes, M_VTFONT);
        free(vf, M_VTFONT);
    }
}
Example #9
static void
shm_drop(struct shmfd *shmfd)
{

	if (refcount_release(&shmfd->shm_refs)) {
#ifdef MAC
		mac_posixshm_destroy(shmfd);
#endif
		vm_object_deallocate(shmfd->shm_object);
		free(shmfd, M_SHMFD);
	}
}
Example #10
int
fuse_ticket_drop(struct fuse_ticket *ftick)
{
	int die;

	die = refcount_release(&ftick->tk_refcount);
	debug_printf("ftick=%p refcount=%d\n", ftick, ftick->tk_refcount);
	if (die)
		fticket_destroy(ftick);

	return die;
}
Example #11
static void
icl_soft_conn_pdu_free(struct icl_conn *ic, struct icl_pdu *ip)
{

	m_freem(ip->ip_bhs_mbuf);
	m_freem(ip->ip_ahs_mbuf);
	m_freem(ip->ip_data_mbuf);
	uma_zfree(icl_pdu_zone, ip);
#ifdef DIAGNOSTIC
	refcount_release(&ic->ic_outstanding_pdus);
#endif
}
Example #12
void
icl_pdu_free(struct icl_pdu *ip)
{
	struct icl_conn *ic;

	ic = ip->ip_conn;

	m_freem(ip->ip_bhs_mbuf);
	m_freem(ip->ip_ahs_mbuf);
	m_freem(ip->ip_data_mbuf);
	uma_zfree(icl_pdu_zone, ip);
	refcount_release(&ic->ic_outstanding_pdus);
}
Example #13
/*
 * Deferred release must be used in contexts where it is not safe to
 * allocate or free.  This places any unreferenced sets on the list 'head'.
 */
static void
cpuset_rel_defer(struct setlist *head, struct cpuset *set)
{

	if (refcount_release(&set->cs_ref) == 0)
		return;
	mtx_lock_spin(&cpuset_lock);
	LIST_REMOVE(set, cs_siblings);
	if (set->cs_id != CPUSET_INVALID)
		LIST_REMOVE(set, cs_link);
	LIST_INSERT_HEAD(head, set, cs_link);
	mtx_unlock_spin(&cpuset_lock);
}
Example #14
void
free_toepcb(struct toepcb *toep)
{

	if (refcount_release(&toep->refcount) == 0)
		return;

	KASSERT(!(toep->flags & TPF_ATTACHED),
	    ("%s: attached to an inpcb", __func__));
	KASSERT(!(toep->flags & TPF_CPL_PENDING),
	    ("%s: CPL pending", __func__));

	ddp_uninit_toep(toep);
	free(toep, M_CXGBE);
}
Example #15
static void
ksem_drop(struct ksem *ks)
{

	if (refcount_release(&ks->ks_ref)) {
#ifdef MAC
		mac_posixsem_destroy(ks);
#endif
		cv_destroy(&ks->ks_cv);
		free(ks, M_KSEM);
		mtx_lock(&ksem_count_lock);
		nsems--;
		mtx_unlock(&ksem_count_lock);
	}
}
Example #16
void ttm_base_object_unref(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct ttm_object_device *tdev = base->tfile->tdev;

	*p_base = NULL;

	/*
	 * Need to take the lock here to avoid racing with
	 * users trying to look up the object.
	 */

	rw_wlock(&tdev->object_lock);
	if (refcount_release(&base->refcount))
		ttm_release_base(base);
	rw_wunlock(&tdev->object_lock);
}
Example #17
static struct icl_pdu *
icl_pdu_new(struct icl_conn *ic, int flags)
{
	struct icl_pdu *ip;

	refcount_acquire(&ic->ic_outstanding_pdus);
	ip = uma_zalloc(icl_pdu_zone, flags | M_ZERO);
	if (ip == NULL) {
		ICL_WARN("failed to allocate %zd bytes", sizeof(*ip));
		refcount_release(&ic->ic_outstanding_pdus);
		return (NULL);
	}

	ip->ip_conn = ic;

	return (ip);
}
Example #18
/*
 * Release a reference and free on the last one.
 */
static void
filemon_release(struct filemon *filemon)
{

	if (refcount_release(&filemon->refcnt) == 0)
		return;
	/*
	 * There are valid cases of releasing while locked, such as in
	 * filemon_untrack_processes, but none of them are reached unless
	 * at least 1 reference remains.
	 */
	sx_assert(&filemon->lock, SA_UNLOCKED);

	if (filemon->cred != NULL)
		crfree(filemon->cred);
	sx_destroy(&filemon->lock);
	free(filemon, M_FILEMON);
}
Example #19
/*
 * Release a reference in a context where it is safe to allocate.
 */
void
cpuset_rel(struct cpuset *set)
{
	cpusetid_t id;

	if (refcount_release(&set->cs_ref) == 0)
		return;
	mtx_lock_spin(&cpuset_lock);
	LIST_REMOVE(set, cs_siblings);
	id = set->cs_id;
	if (id != CPUSET_INVALID)
		LIST_REMOVE(set, cs_link);
	mtx_unlock_spin(&cpuset_lock);
	cpuset_rel(set->cs_parent);
	uma_zfree(cpuset_zone, set);
	if (id != CPUSET_INVALID)
		free_unr(cpuset_unr, id);
}
Example #20
void
loginclass_free(struct loginclass *lc)
{

	if (refcount_release_if_not_last(&lc->lc_refcount))
		return;

	rw_wlock(&loginclasses_lock);
	if (!refcount_release(&lc->lc_refcount)) {
		rw_wunlock(&loginclasses_lock);
		return;
	}

	racct_destroy(&lc->lc_racct);
	LIST_REMOVE(lc, lc_next);
	rw_wunlock(&loginclasses_lock);

	free(lc, M_LOGINCLASS);
}
Example #21
void
icl_cxgbei_conn_pdu_free(struct icl_conn *ic, struct icl_pdu *ip)
{
#ifdef INVARIANTS
	struct icl_cxgbei_pdu *icp = ip_to_icp(ip);
#endif

	MPASS(icp->icp_signature == CXGBEI_PDU_SIGNATURE);
	MPASS(ic == ip->ip_conn);
	MPASS(ip->ip_bhs_mbuf != NULL);

	m_freem(ip->ip_ahs_mbuf);
	m_freem(ip->ip_data_mbuf);
	m_freem(ip->ip_bhs_mbuf);	/* storage for icl_cxgbei_pdu itself */

#ifdef DIAGNOSTIC
	if (__predict_true(ic != NULL))
		refcount_release(&ic->ic_outstanding_pdus);
#endif
}
Example #22
void
loginclass_free(struct loginclass *lc)
{
	int old;

	old = lc->lc_refcount;
	if (old > 1 && atomic_cmpset_int(&lc->lc_refcount, old, old - 1))
		return;

	rw_wlock(&loginclasses_lock);
	if (!refcount_release(&lc->lc_refcount)) {
		rw_wunlock(&loginclasses_lock);
		return;
	}

	racct_destroy(&lc->lc_racct);
	LIST_REMOVE(lc, lc_next);
	rw_wunlock(&loginclasses_lock);

	free(lc, M_LOGINCLASS);
}
Example #23
static void multiplex_bio_done(struct bio *b)
{
	struct bio *bio = b->bio_caller1;
	bool error = b->bio_flags & BIO_ERROR;
	destroy_bio(b);

	// If there is an error, we store it in the original bio flags.
	// This path gets slower because we then end up taking the
	// bio_mutex twice, but that should be fine.
	if (error) {
		pthread_mutex_lock(&bio->bio_mutex);
		bio->bio_flags |= BIO_ERROR;
		pthread_mutex_unlock(&bio->bio_mutex);
	}

	// Last one releases it. We set the biodone to always be "ok", because
	// if an error exists, we have already set that in the previous operation
	if (refcount_release(&bio->bio_refcnt))
		biodone(bio, true);
}