Example #1
int vn_fromfd(int fd, char *path, int flags, struct vnode **vpp, boolean_t fromfd)
{
	vnode_t *vp;

	*vpp = vp = kmem_cache_alloc(vnode_cache, KM_SLEEP);
	memset(vp, 0, sizeof(vnode_t));

	if (fstat64(fd, &vp->v_stat) == -1) {
		close(fd);
		return (errno);
	}

	(void) fcntl(fd, F_SETFD, FD_CLOEXEC);

	vp->v_fd = fd;
	if (S_ISBLK(vp->v_stat.st_mode)) {
		/* LINUX */
		if (ioctl(fd, BLKGETSIZE64, &vp->v_size) != 0)
			return (errno);
	} else
		vp->v_size = vp->v_stat.st_size;
	vp->v_path = strdup(path);

	vp->v_type = VNON;

	if (fromfd)
		vn_setops(vp, fd_fvnodeops);
	else
		vn_setops(vp, root_fvnodeops);

	if (S_ISREG(vp->v_stat.st_mode)) {
		vp->v_type = VREG;
		if (flags & FREAD)
			atomic_add_32(&((*vpp)->v_rdcnt), 1);
		if (flags & FWRITE)
			atomic_add_32(&((*vpp)->v_wrcnt), 1);
	} else if (S_ISDIR(vp->v_stat.st_mode))
		vp->v_type = VDIR;
	else if (S_ISCHR(vp->v_stat.st_mode))
		vp->v_type = VCHR;
	else if (S_ISBLK(vp->v_stat.st_mode))
		vp->v_type = VBLK;
	else if (S_ISFIFO(vp->v_stat.st_mode))
		vp->v_type = VFIFO;
	else if (S_ISLNK(vp->v_stat.st_mode))
		vp->v_type = VLNK;
	else if (S_ISSOCK(vp->v_stat.st_mode))
		vp->v_type = VSOCK;

	VERIFY(vp->v_type != VNON);

	zmutex_init(&vp->v_lock);
	rwst_init(&vp->v_vfsmhlock.ve_lock, NULL, RW_DEFAULT, NULL);

	vp->v_count = 1;
	vp->v_vfsp = rootvfs;

	/*fprintf(stderr, "VNode %p created at vn_open (%s)\n", *vpp, path);*/
	return (0);
}
Example #2
int
fop_close(
    vnode_t *vp,
    int flag,
    int count,
    offset_t offset,
    cred_t *cr,
    caller_context_t *ct)
{
    int err;

    err = (*(vp)->v_op->vop_close)(vp, flag, count, offset, cr, ct);
    VOPSTATS_UPDATE(vp, close);
    /*
     * Check passed in count to handle possible dups. Vnode counts are only
     * kept on regular files
     */
    if ((vp->v_type == VREG) && (count == 1))  {
        if (flag & FREAD) {
            ASSERT(vp->v_rdcnt > 0);
            atomic_add_32(&(vp->v_rdcnt), -1);
        }
        if (flag & FWRITE) {
            ASSERT(vp->v_wrcnt > 0);
            atomic_add_32(&(vp->v_wrcnt), -1);
        }
    }
    return (err);
}
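Examples #1 and #2 are two halves of the same accounting: vn_fromfd raises v_rdcnt/v_wrcnt for a regular file opened with FREAD/FWRITE, and fop_close drops them again on the last close. A minimal standalone sketch of that paired counting, assuming the Solaris/illumos <atomic.h> interface; the struct and flag values below are illustrative, not taken from the examples:

#include <atomic.h>		/* atomic_add_32() on Solaris/illumos */
#include <sys/types.h>

#define FREAD	0x01		/* illustrative open flags */
#define FWRITE	0x02

typedef struct {
	volatile uint32_t rdcnt;	/* open readers */
	volatile uint32_t wrcnt;	/* open writers */
} open_counts_t;

static void
count_open(open_counts_t *oc, int flags)
{
	if (flags & FREAD)
		atomic_add_32(&oc->rdcnt, 1);
	if (flags & FWRITE)
		atomic_add_32(&oc->wrcnt, 1);
}

static void
count_close(open_counts_t *oc, int flags)
{
	/* The delta argument is signed, so -1 decrements. */
	if (flags & FREAD)
		atomic_add_32(&oc->rdcnt, -1);
	if (flags & FWRITE)
		atomic_add_32(&oc->wrcnt, -1);
}

As in fop_close, the decrement side must run only once per open (hence the count == 1 check there), or the counters drift negative.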
Example #3
static void
profile_create(hrtime_t interval, const char *name, int kind)
{
	profile_probe_t *prof;

	if (interval < profile_interval_min)
		return;

	if (dtrace_probe_lookup(profile_id, NULL, NULL, name) != 0)
		return;

	atomic_add_32(&profile_total, 1);
	if (profile_total > profile_max) {
		atomic_add_32(&profile_total, -1);
		return;
	}

	if (PROF_TICK == kind)
		prof = kmem_zalloc(sizeof (profile_probe_t), KM_SLEEP);
	else
		prof = kmem_zalloc(sizeof (profile_probe_t) +
		    NCPU * sizeof (profile_probe_percpu_t), KM_SLEEP);

	(void) strlcpy(prof->prof_name, name, sizeof(prof->prof_name));
	prof->prof_interval = interval;
	prof->prof_cyclic = CYCLIC_NONE;
	prof->prof_kind = kind;
	prof->prof_id = dtrace_probe_create(profile_id,
	    NULL, NULL, name,
	    profile_aframes ? profile_aframes : PROF_ARTIFICIAL_FRAMES, prof);
}
Example #4
static void
profile_create(hrtime_t interval, const char *name, int kind)
{
	profile_probe_t *prof;
	int nr_frames = PROF_ARTIFICIAL_FRAMES + dtrace_mach_aframes();

	if (profile_aframes)
		nr_frames = profile_aframes;

	if (interval < profile_interval_min)
		return;

	if (dtrace_probe_lookup(profile_id, NULL, NULL, name) != 0)
		return;

	atomic_add_32(&profile_total, 1);
	if (profile_total > profile_max) {
		atomic_add_32(&profile_total, -1);
		return;
	}

	prof = kmem_zalloc(sizeof (profile_probe_t), KM_SLEEP);
	(void) strcpy(prof->prof_name, name);
	prof->prof_interval = interval;
	prof->prof_cyclic = CYCLIC_NONE;
	prof->prof_kind = kind;
	prof->prof_id = dtrace_probe_create(profile_id,
	    NULL, NULL, name, nr_frames, prof);
}
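Examples #3 and #4 both guard profile_total with the same lock-free bounded-counter idiom: increment optimistically, and if the new total is over the cap, undo the increment and give up. A hedged sketch of the idiom in isolation (the names and cap are illustrative):

#include <atomic.h>
#include <sys/types.h>

static volatile uint32_t total;		/* live object count */
static uint32_t total_max = 1000;	/* illustrative cap */

/* Returns 1 if a slot was reserved, 0 if the cap was reached. */
static int
reserve_slot(void)
{
	atomic_add_32(&total, 1);
	if (total > total_max) {
		atomic_add_32(&total, -1);	/* back out our increment */
		return (0);
	}
	return (1);
}

Note the cap is soft: concurrent callers can overshoot it briefly before backing out, which is acceptable here because profile_max only bounds resource use, not correctness.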
Example #5
static int
pmu_intr(void *arg)
{
#ifdef HWPMC_HOOKS
	struct trapframe *tf;
#endif
	uint32_t r;
#if defined(__arm__) && (__ARM_ARCH > 6)
	u_int cpu;

	cpu = PCPU_GET(cpuid);

	r = cp15_pmovsr_get();
	if (r & PMU_OVSR_C) {
		atomic_add_32(&ccnt_hi[cpu], 1);
		/* Clear the event. */
		r &= ~PMU_OVSR_C;
		cp15_pmovsr_set(PMU_OVSR_C);
	}
#else
	r = 1;
#endif

#ifdef HWPMC_HOOKS
	/* Only call into the HWPMC framework if we know there is work. */
	if (r != 0 && pmc_intr) {
		tf = arg;
		(*pmc_intr)(PCPU_GET(cpuid), tf);
	}
#endif

	return (FILTER_HANDLED);
}
Example #6
// private
void mcs_rwlock::_add_when_writer_leaves(int delta) 
{
    // we always have the parent lock to do this
    _spin_on_writer();
    atomic_add_32(&_holders, delta);
    // callers do membar_enter
}
Example #7
void mcs_rwlock::release_read() 
{
    w_assert2(has_reader());
    membar_exit(); // flush protected modified data before releasing lock;
    // update and complete any loads by others before I do this write 
    atomic_add_32(&_holders, -READER);
}
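The membar_exit() in release_read() is half of an acquire/release pair: the matching acquire side must issue membar_enter() after adding itself to _holders. A minimal sketch of that pairing, assuming Solaris <atomic.h> provides the membar routines; the real mcs_rwlock acquire path additionally spins until any writer leaves, which is omitted here:

#include <atomic.h>	/* atomic_add_32(), membar_enter(), membar_exit() */
#include <sys/types.h>

#define READER	4	/* illustrative holder encoding */

static volatile uint32_t holders;

static void
acquire_read(void)
{
	atomic_add_32(&holders, READER);
	membar_enter();	/* protected accesses stay after the add */
}

static void
release_read(void)
{
	membar_exit();	/* protected stores drain before the release */
	atomic_add_32(&holders, -READER);
}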
Example #8
void
rdsv3_ib_send_add_credits(struct rdsv3_connection *conn, unsigned int credits)
{
	struct rdsv3_ib_connection *ic = conn->c_transport_data;

	if (credits == 0)
		return;

	RDSV3_DPRINTF5("rdsv3_ib_send_add_credits",
	    "credits (%u): current=%u%s\n",
	    credits,
	    IB_GET_SEND_CREDITS(atomic_get(&ic->i_credits)),
	    test_bit(RDSV3_LL_SEND_FULL, &conn->c_flags) ?
	    ", ll_send_full" : "");

	atomic_add_32(&ic->i_credits, IB_SET_SEND_CREDITS(credits));
	if (test_and_clear_bit(RDSV3_LL_SEND_FULL, &conn->c_flags))
		rdsv3_queue_delayed_work(rdsv3_wq, &conn->c_send_w, 0);

	ASSERT(!(IB_GET_SEND_CREDITS(credits) >= 16384));

	rdsv3_ib_stats_inc(s_ib_rx_credit_updates);

	RDSV3_DPRINTF4("rdsv3_ib_send_add_credits",
	    "Return: conn: %p, credits: %d",
	    conn, credits);
}
Example #9
/*
 * function to stop the RX
 *
 * rq - pointer to RQ structure
 *
 * return none
 */
void
oce_clean_rq(struct oce_rq *rq)
{
	uint16_t num_cqe = 0;
	struct oce_cq  *cq;
	struct oce_dev *dev;
	struct oce_nic_rx_cqe *cqe;
	int32_t ti = 0;

	dev = rq->parent;
	cq = rq->cq;
	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
	/* dequeue till you reach an invalid cqe */
	for (ti = 0; ti < DEFAULT_DRAIN_TIME; ti++) {

		while (RQ_CQE_VALID(cqe)) {
			DW_SWAP(u32ptr(cqe), sizeof (struct oce_nic_rx_cqe));
			oce_rx_drop_pkt(rq, cqe);
			atomic_add_32(&rq->buf_avail,
			    -(cqe->u0.s.num_fragments & 0x7));
			oce_arm_cq(dev, cq->cq_id, 1, B_TRUE);
			RQ_CQE_INVALIDATE(cqe);
			RING_GET(cq->ring, 1);
			cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring,
			    struct oce_nic_rx_cqe);
			num_cqe++;
		}
		OCE_MSDELAY(1);
	}
} /* oce_clean_rq */
Example #10
void
rdsv3_ib_advertise_credits(struct rdsv3_connection *conn, unsigned int posted)
{
	struct rdsv3_ib_connection *ic = conn->c_transport_data;

	RDSV3_DPRINTF4("rdsv3_ib_advertise_credits", "conn: %p, posted: %d",
	    conn, posted);

	if (posted == 0)
		return;

	atomic_add_32(&ic->i_credits, IB_SET_POST_CREDITS(posted));

	/*
	 * Decide whether to send an update to the peer now.
	 * If we would send a credit update for every single buffer we
	 * post, we would end up with an ACK storm (ACK arrives,
	 * consumes buffer, we refill the ring, send ACK to remote
	 * advertising the newly posted buffer... ad inf)
	 *
	 * Performance pretty much depends on how often we send
	 * credit updates - too frequent updates mean lots of ACKs.
	 * Too infrequent updates, and the peer will run out of
	 * credits and has to throttle.
	 * For the time being, 16 seems to be a good compromise.
	 */
	if (IB_GET_POST_CREDITS(atomic_get(&ic->i_credits)) >= 16)
		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
}
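Examples #8 and #10 (and #19 below) treat ic->i_credits as two 16-bit counters packed into one word, so send credits and posted-buffer credits can be adjusted together with a single atomic_add_32. A sketch of that packing, with illustrative shift macros standing in for the rdsv3 IB_SET_*/IB_GET_* macros:

#include <atomic.h>
#include <sys/types.h>

#define SET_SEND_CREDITS(v)	((v) & 0xffff)	/* low 16 bits */
#define SET_POST_CREDITS(v)	((v) << 16)	/* high 16 bits */
#define GET_SEND_CREDITS(v)	((v) & 0xffff)
#define GET_POST_CREDITS(v)	((v) >> 16)

static volatile uint32_t credits;

/* Add to both halves in one atomic operation. */
static void
add_credits(uint32_t send, uint32_t post)
{
	atomic_add_32(&credits,
	    SET_SEND_CREDITS(send) | SET_POST_CREDITS(post));
}

The subtraction in Example #19 (-IB_SET_POST_CREDITS(credits)) works the same way: a negative delta on the packed word, which leaves the low half untouched when only high-half credits are consumed.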
Example #11
int
eph_gid_alloc(zone_t *zone, int flags, gid_t *start, int count)
{
	ephemeral_zsd_t *eph_zsd = get_ephemeral_zsd(zone);

	ASSERT(eph_zsd != NULL);

	mutex_enter(&eph_zsd->eph_lock);

	/* Test for unsigned integer wrap around */
	if (eph_zsd->last_gid + count < eph_zsd->last_gid) {
		mutex_exit(&eph_zsd->eph_lock);
		return (-1);
	}

	/* first call or idmap crashed and state corrupted */
	if (flags != 0)
		eph_zsd->min_gid = eph_zsd->last_gid;

	hasephids = B_TRUE;
	*start = eph_zsd->last_gid + 1;
	atomic_add_32(&eph_zsd->last_gid, count);
	mutex_exit(&eph_zsd->eph_lock);
	return (0);
}
Example #12
/*
 * Clear the fault handler with the given identifier, or return ENOENT if none
 * exists.
 */
int
zio_clear_fault(int id)
{
	inject_handler_t *handler;

	rw_enter(&inject_lock, RW_WRITER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler))
		if (handler->zi_id == id)
			break;

	if (handler == NULL) {
		rw_exit(&inject_lock);
		return (SET_ERROR(ENOENT));
	}

	list_remove(&inject_handlers, handler);
	rw_exit(&inject_lock);

	spa_inject_delref(handler->zi_spa);
	kmem_free(handler, sizeof (inject_handler_t));
	atomic_add_32(&zio_injection_enabled, -1);

	return (0);
}
Example #13
/*
 * function to free mblk databuffer to the RQ pool
 *
 * arg - pointer to the receive buffer descriptor
 *
 * return none
 */
void
oce_rx_pool_free(char *arg)
{
	oce_rq_bdesc_t *rqbd;
	struct oce_rq  *rq;

	/* During destroy, arg will be NULL */
	if (arg == NULL) {
		return;
	}

	/* retrieve the pointers from arg */
	rqbd = (oce_rq_bdesc_t *)(void *)arg;
	rq = rqbd->rq;
	rqbd->mp = desballoc((uchar_t *)rqbd->rqb->base,
	    rqbd->rqb->size, 0, &rqbd->fr_rtn);

	if (rqbd->mp) {
		rqbd->mp->b_rptr =
		    (uchar_t *)rqbd->rqb->base + OCE_RQE_BUF_HEADROOM;
	}

	oce_rqb_free(rq, rqbd);
	(void) atomic_add_32(&rq->pending, -1);
} /* rx_pool_free */
Example #14
/*
 * function to process a single packet
 *
 * dev - software handle to the device
 * rq - pointer to the RQ to charge
 * cqe - Pointer to Completion Q entry
 *
 * return mblk pointer =>  success, NULL  => error
 */
static inline mblk_t *
oce_rx(struct oce_dev *dev, struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
{
	mblk_t *mp;
	int pkt_len;
	int32_t frag_cnt = 0;
	mblk_t **mblk_tail;
	mblk_t	*mblk_head;
	int frag_size;
	oce_rq_bdesc_t *rqbd;
	uint16_t cur_index;
	oce_ring_buffer_t *ring;
	int i;

	frag_cnt  = cqe->u0.s.num_fragments & 0x7;
	mblk_head = NULL;
	mblk_tail = &mblk_head;

	ring = rq->ring;
	cur_index = ring->cidx;

	/* Get the relevant Queue pointers */
	pkt_len = cqe->u0.s.pkt_size;
	for (i = 0; i < frag_cnt; i++) {
		rqbd = rq->shadow_ring[cur_index];
		if (rqbd->mp == NULL) {
			rqbd->mp = desballoc((uchar_t *)rqbd->rqb->base,
			    rqbd->rqb->size, 0, &rqbd->fr_rtn);
			if (rqbd->mp == NULL) {
				return (NULL);
			}

			rqbd->mp->b_rptr =
			    (uchar_t *)rqbd->rqb->base + OCE_RQE_BUF_HEADROOM;
		}

		mp = rqbd->mp;
		frag_size  = (pkt_len > rq->cfg.frag_size) ?
		    rq->cfg.frag_size : pkt_len;
		mp->b_wptr = mp->b_rptr + frag_size;
		pkt_len   -= frag_size;
		mp->b_next = mp->b_cont = NULL;
		/* Chain the message mblks */
		*mblk_tail = mp;
		mblk_tail = &mp->b_cont;
		(void) DBUF_SYNC(rqbd->rqb, DDI_DMA_SYNC_FORCPU);
		cur_index = GET_Q_NEXT(cur_index, 1, ring->num_items);
	}

	if (mblk_head == NULL) {
		oce_log(dev, CE_WARN, MOD_RX, "%s", "oce_rx:no frags?");
		return (NULL);
	}

	/* replace the buffer with new ones */
	(void) oce_rq_charge(rq, frag_cnt, B_FALSE);
	atomic_add_32(&rq->pending, frag_cnt);
	return (mblk_head);
} /* oce_rx */
Example #15
/*
 * Return a looped back vnode for the given vnode.
 * If no lnode exists for this vnode create one and put it
 * in a table hashed by vnode.  If the lnode for
 * this vnode is already in the table return it (ref count is
 * incremented by lfind).  The lnode will be flushed from the
 * table when lo_inactive calls freelonode.  The creation of
 * a new lnode can be forced via the LOF_FORCE flag even if
 * the vnode exists in the table.  This is used in the creation
 * of a terminating lnode when looping is detected.  A unique
 * lnode is required for the correct evaluation of the current
 * working directory.
 * NOTE: vp is assumed to be a held vnode.
 */
struct vnode *
makelonode(struct vnode *vp, struct loinfo *li, int flag)
{
	lnode_t *lp, *tlp;
	struct vfs *vfsp;
	vnode_t *nvp;

	lp = NULL;
	TABLE_LOCK_ENTER(vp, li);
	if (flag != LOF_FORCE)
		lp = lfind(vp, li);
	if ((flag == LOF_FORCE) || (lp == NULL)) {
		/*
		 * Optimistically assume that we won't need to sleep.
		 */
		lp = kmem_cache_alloc(lnode_cache, KM_NOSLEEP);
		nvp = vn_alloc(KM_NOSLEEP);
		if (lp == NULL || nvp == NULL) {
			TABLE_LOCK_EXIT(vp, li);
			/* The lnode allocation may have succeeded, save it */
			tlp = lp;
			if (tlp == NULL) {
				tlp = kmem_cache_alloc(lnode_cache, KM_SLEEP);
			}
			if (nvp == NULL) {
				nvp = vn_alloc(KM_SLEEP);
			}
			lp = NULL;
			TABLE_LOCK_ENTER(vp, li);
			if (flag != LOF_FORCE)
				lp = lfind(vp, li);
			if (lp != NULL) {
				kmem_cache_free(lnode_cache, tlp);
				vn_free(nvp);
				VN_RELE(vp);
				goto found_lnode;
			}
			lp = tlp;
		}
		atomic_add_32(&li->li_refct, 1);
		vfsp = makelfsnode(vp->v_vfsp, li);
		lp->lo_vnode = nvp;
		VN_SET_VFS_TYPE_DEV(nvp, vfsp, vp->v_type, vp->v_rdev);
		nvp->v_flag |= (vp->v_flag & (VNOMOUNT|VNOMAP|VDIROPEN));
		vn_setops(nvp, lo_vnodeops);
		nvp->v_data = (caddr_t)lp;
		lp->lo_vp = vp;
		lp->lo_looping = 0;
		lsave(lp, li);
		vn_exists(vp);
	} else {
		VN_RELE(vp);
	}

found_lnode:
	TABLE_LOCK_EXIT(vp, li);
	return (ltov(lp));
}
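Example #15's allocation dance is worth isolating: it first tries KM_NOSLEEP allocations while holding the table lock, and only on failure drops the lock, allocates with KM_SLEEP, retakes the lock, and repeats the lookup, because another thread may have inserted the node in the meantime. A userland sketch of that drop-and-retry shape (find()/insert() are hypothetical table operations, error handling is elided, and the opportunistic no-sleep attempt is omitted):

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

extern void *find(int key);		/* hypothetical hash lookup */
extern void insert(int key, void *n);	/* hypothetical hash insert */

static void *
lookup_or_create(int key)
{
	void *node, *spare = NULL;

	pthread_mutex_lock(&table_lock);
	node = find(key);
	if (node == NULL) {
		pthread_mutex_unlock(&table_lock);
		spare = malloc(64);	/* may block; lock is dropped */
		pthread_mutex_lock(&table_lock);
		node = find(key);	/* re-check after reacquiring */
		if (node == NULL) {
			insert(key, spare);
			node = spare;
			spare = NULL;
		}
	}
	pthread_mutex_unlock(&table_lock);
	free(spare);	/* lost the race: discard the spare */
	return (node);
}

Example #16 below shows the simpler alternative: fail the lookup outright when the KM_NOSLEEP allocation fails.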
Example #16
/*
 * Atomically looks for a non-default DCE, and if not found tries to create one.
 * If there is no memory it returns NULL.
 * When an entry is created we increase the generation number on
 * the default DCE so that conn_ip_output will detect there is a new DCE.
 * ifindex should only be used with link-local addresses.
 */
dce_t *
dce_lookup_and_add_v6(const in6_addr_t *dst, uint_t ifindex, ip_stack_t *ipst)
{
	uint_t		hash;
	dcb_t		*dcb;
	dce_t		*dce;

	/* We should not create entries for link-locals w/o an ifindex */
	ASSERT(!(IN6_IS_ADDR_LINKSCOPE(dst)) || ifindex != 0);

	hash = IRE_ADDR_HASH_V6(*dst, ipst->ips_dce_hashsize);
	dcb = &ipst->ips_dce_hash_v6[hash];
	rw_enter(&dcb->dcb_lock, RW_WRITER);
	for (dce = dcb->dcb_dce; dce != NULL; dce = dce->dce_next) {
		if (IN6_ARE_ADDR_EQUAL(&dce->dce_v6addr, dst) &&
		    dce->dce_ifindex == ifindex) {
			mutex_enter(&dce->dce_lock);
			if (!DCE_IS_CONDEMNED(dce)) {
				dce_refhold(dce);
				mutex_exit(&dce->dce_lock);
				rw_exit(&dcb->dcb_lock);
				return (dce);
			}
			mutex_exit(&dce->dce_lock);
		}
	}

	dce = kmem_cache_alloc(dce_cache, KM_NOSLEEP);
	if (dce == NULL) {
		rw_exit(&dcb->dcb_lock);
		return (NULL);
	}
	bzero(dce, sizeof (dce_t));
	dce->dce_ipst = ipst;	/* No netstack_hold */
	dce->dce_v6addr = *dst;
	dce->dce_ifindex = ifindex;
	dce->dce_generation = DCE_GENERATION_INITIAL;
	dce->dce_ipversion = IPV6_VERSION;
	dce->dce_last_change_time = TICK_TO_SEC(ddi_get_lbolt64());
	dce_refhold(dce);	/* For the hash list */

	/* Link into list */
	if (dcb->dcb_dce != NULL)
		dcb->dcb_dce->dce_ptpn = &dce->dce_next;
	dce->dce_next = dcb->dcb_dce;
	dce->dce_ptpn = &dcb->dcb_dce;
	dcb->dcb_dce = dce;
	dce->dce_bucket = dcb;
	atomic_add_32(&dcb->dcb_cnt, 1);
	dce_refhold(dce);	/* For the caller */
	rw_exit(&dcb->dcb_lock);

	/* Initialize dce_ident to be different than for the last packet */
	dce->dce_ident = ipst->ips_dce_default->dce_ident + 1;
	dce_increment_generation(ipst->ips_dce_default);
	return (dce);
}
Example #17
static void
zfs_freevfs(vfs_t *vfsp)
{
	zfsvfs_t *zfsvfs = vfsp->vfs_data;

	kmem_free(zfsvfs, sizeof (zfsvfs_t));

	atomic_add_32(&zfs_active_fs_count, -1);
}
Example #18
static int
clock_intr(void *arg)
{
	struct trapframe *fp = arg;

	atomic_add_32(&s3c24x0_base, timer4_reload_value);

	hardclock(TRAPF_USERMODE(fp), TRAPF_PC(fp));
	return (FILTER_HANDLED);
}
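Example #18 (and Examples #23 and #30 below) uses a common timecounter idiom: the clock interrupt atomically advances a software base (s3c24x0_base, ixpclk_base, becc_base) by the number of hardware counts per tick, so a lock-free read path can report the base plus the current hardware count. A sketch of the idiom, with hw_counter_read() as a hypothetical stand-in for the platform register access:

#include <atomic.h>
#include <sys/types.h>

static volatile uint32_t timer_base;		/* counts at last tick */
static uint32_t counts_per_tick = 50000;	/* illustrative reload value */

extern uint32_t hw_counter_read(void);	/* hypothetical hardware access */

/* Clock interrupt: account for one full tick of hardware counts. */
static void
clock_tick(void)
{
	atomic_add_32(&timer_base, counts_per_tick);
}

/* Lock-free time read: ticks so far plus progress into this tick. */
static uint32_t
timecount(void)
{
	return (timer_base + hw_counter_read());
}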
Example #19
static void
rdsv3_ib_cm_fill_conn_param(struct rdsv3_connection *conn,
    struct rdma_conn_param *conn_param,
    struct rdsv3_ib_connect_private *dp,
    uint32_t protocol_version,
    uint32_t max_responder_resources,
    uint32_t max_initiator_depth)
{
	struct rdsv3_ib_connection *ic = conn->c_transport_data;
	struct rdsv3_ib_device *rds_ibdev;

	RDSV3_DPRINTF2("rdsv3_ib_cm_fill_conn_param",
	    "Enter conn: %p conn_param: %p private: %p version: %d",
	    conn, conn_param, dp, protocol_version);

	(void) memset(conn_param, 0, sizeof (struct rdma_conn_param));

	rds_ibdev = ib_get_client_data(ic->i_cm_id->device, &rdsv3_ib_client);

	conn_param->responder_resources =
	    MIN(rds_ibdev->max_responder_resources, max_responder_resources);
	conn_param->initiator_depth =
	    MIN(rds_ibdev->max_initiator_depth, max_initiator_depth);
	conn_param->retry_count = min(rdsv3_ib_retry_count, 7);
	conn_param->rnr_retry_count = 7;

	if (dp) {
		(void) memset(dp, 0, sizeof (*dp));
		dp->dp_saddr = conn->c_laddr;
		dp->dp_daddr = conn->c_faddr;
		dp->dp_protocol_major = RDS_PROTOCOL_MAJOR(protocol_version);
		dp->dp_protocol_minor = RDS_PROTOCOL_MINOR(protocol_version);
		dp->dp_protocol_minor_mask =
		    htons(RDSV3_IB_SUPPORTED_PROTOCOLS);
		dp->dp_ack_seq = rdsv3_ib_piggyb_ack(ic);

		/* Advertise flow control */
		if (ic->i_flowctl) {
			unsigned int credits;

			credits = IB_GET_POST_CREDITS(
			    atomic_get(&ic->i_credits));
			dp->dp_credit = htonl(credits);
			atomic_add_32(&ic->i_credits,
			    -IB_SET_POST_CREDITS(credits));
		}

		conn_param->private_data = dp;
		conn_param->private_data_len = sizeof (*dp);
	}

	RDSV3_DPRINTF2("rdsv3_ib_cm_fill_conn_param",
	    "Return conn: %p conn_param: %p private: %p version: %d",
	    conn, conn_param, dp, protocol_version);
}
Example #20
static bool_t
svc_vc_reply(SVCXPRT *xprt, struct rpc_msg *msg,
    struct sockaddr *addr, struct mbuf *m, uint32_t *seq)
{
	XDR xdrs;
	struct mbuf *mrep;
	bool_t stat = TRUE;
	int error, len;

	/*
	 * Leave space for record mark.
	 */
	mrep = m_gethdr(M_WAITOK, MT_DATA);
	mrep->m_data += sizeof(uint32_t);

	xdrmbuf_create(&xdrs, mrep, XDR_ENCODE);

	if (msg->rm_reply.rp_stat == MSG_ACCEPTED &&
	    msg->rm_reply.rp_acpt.ar_stat == SUCCESS) {
		if (!xdr_replymsg(&xdrs, msg))
			stat = FALSE;
		else
			xdrmbuf_append(&xdrs, m);
	} else {
		stat = xdr_replymsg(&xdrs, msg);
	}

	if (stat) {
		m_fixhdr(mrep);

		/*
		 * Prepend a record marker containing the reply length.
		 */
		M_PREPEND(mrep, sizeof(uint32_t), M_WAITOK);
		len = mrep->m_pkthdr.len;
		*mtod(mrep, uint32_t *) =
			htonl(0x80000000 | (len - sizeof(uint32_t)));
		atomic_add_32(&xprt->xp_snd_cnt, len);
		error = sosend(xprt->xp_socket, NULL, NULL, mrep, NULL,
		    0, curthread);
		if (!error) {
			atomic_add_rel_32(&xprt->xp_snt_cnt, len);
			if (seq)
				*seq = xprt->xp_snd_cnt;
			stat = TRUE;
		} else
			atomic_subtract_32(&xprt->xp_snd_cnt, len);
	} else {
		m_freem(mrep);
	}

	XDR_DESTROY(&xdrs);

	return (stat);
}
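The record mark Example #20 prepends is the standard RPC-over-TCP framing (RFC 5531 record marking): a 4-byte big-endian word whose high bit flags the last fragment and whose low 31 bits carry the fragment length. A small helper showing just that computation:

#include <stdint.h>
#include <arpa/inet.h>	/* htonl() */

/* Record mark for a final RPC fragment of frag_len bytes. */
static uint32_t
rpc_record_mark(uint32_t frag_len)
{
	return (htonl(0x80000000u | frag_len));
}

In the example, the fragment length is len - sizeof(uint32_t): the reply length excluding the mark itself.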
Example #21
/*ARGSUSED*/
static void
profile_destroy(void *arg, dtrace_id_t id, void *parg)
{
	profile_probe_t *prof = parg;

	ASSERT(prof->prof_cyclic == CYCLIC_NONE);
	kmem_free(prof, sizeof (profile_probe_t));

	ASSERT(profile_total >= 1);
	atomic_add_32(&profile_total, -1);
}
Example #22
/*
 * function to free the RQ buffer
 *
 * rq - pointer to RQ structure
 * rqbd - pointer to receive buffer descriptor
 *
 * return none
 */
static inline void
oce_rqb_free(struct oce_rq *rq, oce_rq_bdesc_t *rqbd)
{
	uint32_t free_index;
	mutex_enter(&rq->rc_lock);
	free_index = rq->rqb_rc_head;
	rq->rqb_freelist[free_index] = rqbd;
	rq->rqb_rc_head = GET_Q_NEXT(free_index, 1, rq->cfg.nbufs);
	mutex_exit(&rq->rc_lock);
	atomic_add_32(&rq->rqb_free, 1);
} /* oce_rqb_free */
Example #23
/*
 * ixpclk_intr:
 *
 *	Handle the hardclock interrupt.
 */
static int
ixpclk_intr(void *arg)
{

	bus_space_write_4(ixpclk_sc->sc_iot, ixpclk_sc->sc_ioh,
			  IXPCLK_CLEAR, 1);

	atomic_add_32(&ixpclk_base, ixpclk_sc->sc_coreclock_freq);

	hardclock((struct clockframe*) arg);
	return (1);
}
Example #24
int
dls_open(const char *name, dls_channel_t *dcp)
{
	dls_impl_t	*dip;
	dls_vlan_t	*dvp;
	dls_link_t	*dlp;
	int		err;

	/*
	 * Get a reference to the named dls_vlan_t.
	 * Tagged vlans get created automatically.
	 */
	if ((err = dls_vlan_hold(name, &dvp, B_TRUE)) != 0)
		return (err);

	/*
	 * Allocate a new dls_impl_t.
	 */
	dip = kmem_cache_alloc(i_dls_impl_cachep, KM_SLEEP);
	dip->di_dvp = dvp;

	/*
	 * Cache a copy of the MAC interface handle, a pointer to the
	 * immutable MAC info and a copy of the current MAC address.
	 */
	dlp = dvp->dv_dlp;
	dip->di_mh = dlp->dl_mh;
	dip->di_mip = dlp->dl_mip;

	mac_unicst_get(dip->di_mh, dip->di_unicst_addr);

	/*
	 * Set the MAC transmit information.
	 */
	dip->di_txinfo = mac_tx_get(dip->di_mh);

	/*
	 * Add a notification function so that we get updates from the MAC.
	 */
	dip->di_mnh = mac_notify_add(dip->di_mh, i_dls_notify, (void *)dip);

	/*
	 * Bump the kmem_cache count to make sure it is not prematurely
	 * destroyed.
	 */
	atomic_add_32(&i_dls_impl_count, 1);

	/*
	 * Hand back a reference to the dls_impl_t.
	 */
	*dcp = (dls_channel_t)dip;
	return (0);
}
Example #25
static void
fcoe_worker_frame(void *arg)
{
	fcoe_worker_t	*w = (fcoe_worker_t *)arg;
	fcoe_i_frame_t	*fmi;
	int		ret;

	atomic_add_32(&fcoe_nworkers_running, 1);
	mutex_enter(&w->worker_lock);
	w->worker_flags |= FCOE_WORKER_STARTED | FCOE_WORKER_ACTIVE;
	while ((w->worker_flags & FCOE_WORKER_TERMINATE) == 0) {
		/*
		 * loop through the frames
		 */
		while ((fmi = list_head(&w->worker_frm_list)) != NULL) {
			list_remove(&w->worker_frm_list, fmi);
			mutex_exit(&w->worker_lock);
			/*
			 * do the checksum
			 */
			ret = fcoe_crc_verify(fmi->fmi_frame);
			if (ret == FCOE_SUCCESS) {
				fmi->fmi_mac->fm_client.ect_rx_frame(
				    fmi->fmi_frame);
			} else {
				fcoe_release_frame(fmi->fmi_frame);
			}
			mutex_enter(&w->worker_lock);
			w->worker_ntasks--;
		}
		w->worker_flags &= ~FCOE_WORKER_ACTIVE;
		cv_wait(&w->worker_cv, &w->worker_lock);
		w->worker_flags |= FCOE_WORKER_ACTIVE;
	}
	w->worker_flags &= ~(FCOE_WORKER_STARTED | FCOE_WORKER_ACTIVE);
	mutex_exit(&w->worker_lock);
	atomic_add_32(&fcoe_nworkers_running, -1);
	list_destroy(&w->worker_frm_list);
}
Example #26
static void
zfs_freevfs(vfs_t *vfsp)
{
	zfsvfs_t *zfsvfs = vfsp->vfs_data;
	int i;

	for (i = 0; i != ZFS_OBJ_MTX_SZ; i++)
		mutex_destroy(&zfsvfs->z_hold_mtx[i]);

	zfs_fuid_destroy(zfsvfs);
	zfs_freezfsvfs(zfsvfs);

	atomic_add_32(&zfs_active_fs_count, -1);
}
Example #27
/*
 * Create a new handler for the given record.  We add it to the list, adding
 * a reference to the spa_t in the process.  We increment zio_injection_enabled,
 * which is the switch to trigger all fault injection.
 */
int
zio_inject_fault(char *name, int flags, int *id, zinject_record_t *record)
{
	inject_handler_t *handler;
	int error;
	spa_t *spa;

	/*
	 * If this is pool-wide metadata, make sure we unload the corresponding
	 * spa_t, so that the next attempt to load it will trigger the fault.
	 * We call spa_reset() to unload the pool appropriately.
	 */
	if (flags & ZINJECT_UNLOAD_SPA)
		if ((error = spa_reset(name)) != 0)
			return (error);

	if (!(flags & ZINJECT_NULL)) {
		/*
		 * spa_inject_ref() will add an injection reference, which will
		 * prevent the pool from being removed from the namespace while
		 * still allowing it to be unloaded.
		 */
		if ((spa = spa_inject_addref(name)) == NULL)
			return (SET_ERROR(ENOENT));

		handler = kmem_alloc(sizeof (inject_handler_t), KM_SLEEP);

		rw_enter(&inject_lock, RW_WRITER);

		*id = handler->zi_id = inject_next_id++;
		handler->zi_spa = spa;
		handler->zi_record = *record;
		list_insert_tail(&inject_handlers, handler);
		atomic_add_32(&zio_injection_enabled, 1);

		rw_exit(&inject_lock);
	}

	/*
	 * Flush the ARC, so that any attempts to read this data will end up
	 * going to the ZIO layer.  Note that this is a little overkill, but
	 * we don't have the necessary ARC interfaces to do anything else, and
	 * fault injection isn't a performance critical path.
	 */
	if (flags & ZINJECT_FLUSH_ARC)
		arc_flush(NULL);

	return (0);
}
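Examples #12 and #27 bracket a handler's lifetime: zio_inject_fault bumps zio_injection_enabled when a handler is registered and zio_clear_fault drops it, so the I/O hot path can test one word and skip inject_lock entirely when no faults are active. The gate pattern in isolation (names illustrative):

#include <atomic.h>
#include <sys/types.h>

static volatile uint32_t handlers_enabled;	/* registered handler count */

static void
handler_add(void)
{
	/* ... insert into the handler list under a write lock ... */
	atomic_add_32(&handlers_enabled, 1);
}

static void
handler_remove(void)
{
	/* ... unlink from the handler list under a write lock ... */
	atomic_add_32(&handlers_enabled, -1);
}

static int
hot_path_check(void)
{
	if (handlers_enabled == 0)
		return (0);	/* common case: no lock taken */
	/* slow path: acquire the lock and walk the handler list */
	return (1);
}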
Example #28
/*
 * function to start  the RX
 *
 * rq - pointer to RQ structure
 *
 * return the number of RQEs charged.
 */
int
oce_start_rq(struct oce_rq *rq)
{
	int ret = 0;
	int to_charge = 0;
	struct oce_dev *dev = rq->parent;
	to_charge = rq->cfg.q_len - rq->buf_avail;
	to_charge = min(to_charge, rq->rqb_free);
	atomic_add_32(&rq->rqb_free, -to_charge);
	(void) oce_rq_charge(rq, to_charge, B_FALSE);
	/* ok to do it here since Rx has not even started */
	oce_rq_post_buffer(rq, to_charge);
	oce_arm_cq(dev, rq->cq->cq_id, 0, B_TRUE);
	return (ret);
} /* oce_start_rq */
Example #29
/*ARGSUSED*/
static void
profile_destroy(void *arg, dtrace_id_t id, void *parg)
{
#pragma unused(arg,id) /* __APPLE__ */
	profile_probe_t *prof = parg;

	ASSERT(prof->prof_cyclic == CYCLIC_NONE);

	if (prof->prof_kind == PROF_TICK)
		kmem_free(prof, sizeof (profile_probe_t));
	else
		kmem_free(prof, sizeof (profile_probe_t) +
		    NCPU * sizeof (profile_probe_percpu_t));

	ASSERT(profile_total >= 1);
	atomic_add_32(&profile_total, -1);
}
Example #30
/*
 * clockhandler:
 *
 *	Handle the hardclock interrupt.
 */
int
clockhandler(void *arg)
{
	struct clockframe *frame = arg;

	/* ACK the interrupt. */
	BECC_CSR_WRITE(BECC_TSCRA, TSCRx_TE | TSCRx_CM | TSCRx_TIF);

	hardclock(frame);

	atomic_add_32(&becc_base, counts_per_hz);

	if (becc_hardclock_hook != NULL)
		(*becc_hardclock_hook)();

	return (1);
}