Example #1
0
/*
 * igb_free_rcb_lists - Free the receive control blocks of one ring.
 */
static void
igb_free_rcb_lists(igb_rx_data_t *rx_data)
{
	igb_t *igb;
	rx_control_block_t *rcb;
	uint32_t rcb_count;
	uint32_t ref_cnt;
	int i;

	igb = rx_data->rx_ring->igb;

	mutex_enter(&igb->rx_pending_lock);

	rcb = rx_data->rcb_area;
	rcb_count = rx_data->ring_size + rx_data->free_list_size;

	for (i = 0; i < rcb_count; i++, rcb++) {
		ASSERT(rcb != NULL);

		ref_cnt = atomic_dec_32_nv(&rcb->ref_cnt);
		if (ref_cnt == 0) {
			if (rcb->mp != NULL) {
				freemsg(rcb->mp);
				rcb->mp = NULL;
			}
			igb_free_dma_buffer(&rcb->rx_buf);
		} else {
			atomic_inc_32(&rx_data->rcb_pending);
			atomic_inc_32(&igb->rcb_pending);
		}
	}

	mutex_exit(&igb->rx_pending_lock);
}
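The else branch above handles buffers that are still loaned upstream as mblks: the reference count did not reach zero, so teardown is deferred and the pending counters record the debt. Below is a minimal sketch of the matching return path, run from the mblk free callback once the stack gives the buffer back (the function name and body are assumptions for illustration, not the driver's actual code):

static void
igb_rcb_return_sketch(igb_rx_data_t *rx_data, rx_control_block_t *rcb)
{
	igb_t *igb = rx_data->rx_ring->igb;

	mutex_enter(&igb->rx_pending_lock);

	/* Drop the reference taken when the buffer was loaned upstream. */
	if (atomic_dec_32_nv(&rcb->ref_cnt) == 0) {
		/* Teardown already ran; finish the deferred free now. */
		igb_free_dma_buffer(&rcb->rx_buf);
		atomic_dec_32(&rx_data->rcb_pending);
		atomic_dec_32(&igb->rcb_pending);
	}

	mutex_exit(&igb->rx_pending_lock);
}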
Example #2
0
void
rw_downgrade(krwlock_t *rwlp)
{
    rwlp->rw_owner = NULL;
    lck_rw_lock_exclusive_to_shared((lck_rw_t *)&rwlp->rw_lock[0]);
    atomic_inc_32((volatile uint32_t *)&rwlp->rw_readers);
}
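Typical usage of the downgrade: take the lock exclusively to publish an update, then convert to shared mode so other readers can proceed while the caller keeps read access. A sketch against the wrapper above (the helper and the int cache are hypothetical; rw_exit is assumed to release either mode, as in the standard kernel rwlock API):

static int
cache_update_then_read(krwlock_t *lock, int *cachep, int v)
{
	int observed;

	rw_enter(lock, RW_WRITER);	/* exclusive while writing */
	*cachep = v;
	rw_downgrade(lock);		/* writer to reader, never blocks */
	observed = *cachep;		/* still protected; readers admitted */
	rw_exit(lock);
	return (observed);
}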
Example #3
0
/*
 * srpt_ch_post_send
 */
ibt_status_t
srpt_ch_post_send(srpt_channel_t *ch, srpt_iu_t *iu, uint32_t len,
	uint_t fence)
{
	ibt_status_t		status;
	ibt_send_wr_t		wr;
	ibt_wr_ds_t		ds;
	uint_t			posted;

	ASSERT(ch != NULL);
	ASSERT(iu != NULL);
	ASSERT(mutex_owned(&iu->iu_lock));

	rw_enter(&ch->ch_rwlock, RW_READER);
	if (ch->ch_state == SRPT_CHANNEL_DISCONNECTING) {
		rw_exit(&ch->ch_rwlock);
		SRPT_DPRINTF_L2("ch_post_send, bad ch state (%d)",
		    ch->ch_state);
		return (IBT_FAILURE);
	}
	rw_exit(&ch->ch_rwlock);

	wr.wr_id = srpt_ch_alloc_swqe_wrid(ch, SRPT_SWQE_TYPE_RESP,
	    (void *)iu);
	if (wr.wr_id == 0) {
		SRPT_DPRINTF_L2("ch_post_send, queue full");
		return (IBT_FAILURE);
	}

	atomic_inc_32(&iu->iu_sq_posted_cnt);

	wr.wr_flags = IBT_WR_SEND_SIGNAL;
	if (fence == SRPT_FENCE_SEND) {
		wr.wr_flags |= IBT_WR_SEND_FENCE;
	}
	wr.wr_opcode = IBT_WRC_SEND;
	wr.wr_trans  = IBT_RC_SRV;
	wr.wr_nds = 1;
	wr.wr_sgl = &ds;

	ds.ds_va = iu->iu_sge.ds_va;
	ds.ds_key = iu->iu_sge.ds_key;
	ds.ds_len = len;

	SRPT_DPRINTF_L4("ch_post_send, posting SRP response to channel"
	    " ds.ds_va (0x%16llx), ds.ds_key (0x%08x), "
	    " ds.ds_len (%d)",
	    (u_longlong_t)ds.ds_va, ds.ds_key, ds.ds_len);

	status = ibt_post_send(ch->ch_chan_hdl, &wr, 1, &posted);
	if (status != IBT_SUCCESS) {
		SRPT_DPRINTF_L2("ch_post_send, post_send failed (%d)",
		    status);
		atomic_dec_32(&iu->iu_sq_posted_cnt);
		srpt_ch_free_swqe_wrid(ch, wr.wr_id);
		return (status);
	}

	return (IBT_SUCCESS);
}
Example #4
0
static void
profile_create(hrtime_t interval, const char *name, int kind)
{
	profile_probe_t *prof;
	int nr_frames = PROF_ARTIFICIAL_FRAMES + dtrace_mach_aframes();

	if (profile_aframes)
		nr_frames = profile_aframes;

	if (interval < profile_interval_min)
		return;

	if (dtrace_probe_lookup(profile_id, NULL, NULL, name) != 0)
		return;

	atomic_inc_32(&profile_total);
	if (profile_total > profile_max) {
		atomic_dec_32(&profile_total);
		return;
	}

	prof = kmem_zalloc(sizeof (profile_probe_t), KM_SLEEP);
	(void) strcpy(prof->prof_name, name);
	prof->prof_interval = interval;
	prof->prof_cyclic = CYCLIC_NONE;
	prof->prof_kind = kind;
	prof->prof_id = dtrace_probe_create(profile_id,
	    NULL, NULL, name, nr_frames, prof);
}
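The increment-check-rollback sequence above is simple, but it lets profile_total briefly overshoot profile_max before the losing caller backs out, and two racing callers near the cap can both retreat. Where even a transient overshoot matters, a compare-and-swap loop reserves a slot atomically. A sketch using atomic_cas_32 from <sys/atomic.h> (an alternative pattern, not what this provider does):

static int
bounded_inc(volatile uint32_t *cnt, uint32_t max)
{
	uint32_t old;

	do {
		old = *cnt;
		if (old >= max)
			return (-1);	/* at the cap; counter untouched */
	} while (atomic_cas_32(cnt, old, old + 1) != old);

	return (0);
}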
Example #5
0
/*
 * Return a looped-back vnode for the given vnode.
 * If no lnode exists for this vnode, create one and put it
 * in a table hashed by vnode.  If the lnode for
 * this vnode is already in the table, return it (the ref count
 * is incremented by lfind).  The lnode will be flushed from the
 * table when lo_inactive calls freelonode.  The creation of
 * a new lnode can be forced via the LOF_FORCE flag even if
 * the vnode exists in the table.  This is used in the creation
 * of a terminating lnode when looping is detected.  A unique
 * lnode is required for the correct evaluation of the current
 * working directory.
 * NOTE: vp is assumed to be a held vnode.
 */
struct vnode *
makelonode(struct vnode *vp, struct loinfo *li, int flag)
{
    lnode_t *lp, *tlp;
    struct vfs *vfsp;
    vnode_t *nvp;

    lp = NULL;
    TABLE_LOCK_ENTER(vp, li);
    if (flag != LOF_FORCE)
        lp = lfind(vp, li);
    if ((flag == LOF_FORCE) || (lp == NULL)) {
        /*
         * Optimistically assume that we won't need to sleep.
         */
        lp = kmem_cache_alloc(lnode_cache, KM_NOSLEEP);
        nvp = vn_alloc(KM_NOSLEEP);
        if (lp == NULL || nvp == NULL) {
            TABLE_LOCK_EXIT(vp, li);
            /* The lnode allocation may have succeeded, save it */
            tlp = lp;
            if (tlp == NULL) {
                tlp = kmem_cache_alloc(lnode_cache, KM_SLEEP);
            }
            if (nvp == NULL) {
                nvp = vn_alloc(KM_SLEEP);
            }
            lp = NULL;
            TABLE_LOCK_ENTER(vp, li);
            if (flag != LOF_FORCE)
                lp = lfind(vp, li);
            if (lp != NULL) {
                kmem_cache_free(lnode_cache, tlp);
                vn_free(nvp);
                VN_RELE(vp);
                goto found_lnode;
            }
            lp = tlp;
        }
        atomic_inc_32(&li->li_refct);
        vfsp = makelfsnode(vp->v_vfsp, li);
        lp->lo_vnode = nvp;
        VN_SET_VFS_TYPE_DEV(nvp, vfsp, vp->v_type, vp->v_rdev);
        nvp->v_flag |= (vp->v_flag & (VNOMOUNT|VNOMAP|VDIROPEN));
        vn_setops(nvp, lo_vnodeops);
        nvp->v_data = (caddr_t)lp;
        lp->lo_vp = vp;
        lp->lo_looping = 0;
        lsave(lp, li);
        vn_exists(vp);
    } else {
        VN_RELE(vp);
    }

found_lnode:
    TABLE_LOCK_EXIT(vp, li);
    return (ltov(lp));
}
Example #6
0
int
ddi_intr_dup_handler(ddi_intr_handle_t org, int dup_inum,
    ddi_intr_handle_t *dup)
{
	ddi_intr_handle_impl_t	*hdlp = (ddi_intr_handle_impl_t *)org;
	ddi_intr_handle_impl_t	*dup_hdlp;
	int			ret;

	DDI_INTR_APIDBG((CE_CONT, "ddi_intr_dup_handler: hdlp = 0x%p\n",
	    (void *)hdlp));

	/* Do some input argument checking ("dup" handle is not allocated) */
	if ((hdlp == NULL) || (*dup != NULL) || (dup_inum < 0)) {
		DDI_INTR_APIDBG((CE_CONT, "ddi_intr_dup_handler: Invalid "
		    "input args\n"));
		return (DDI_EINVAL);
	}

	rw_enter(&hdlp->ih_rwlock, RW_READER);

	/* Do some input argument checking */
	if ((hdlp->ih_state == DDI_IHDL_STATE_ALLOC) ||	/* intr handle alloc? */
	    (hdlp->ih_type != DDI_INTR_TYPE_MSIX) ||	/* only MSI-X allowed */
	    (hdlp->ih_flags & DDI_INTR_MSIX_DUP)) {	/* only dup original */
		rw_exit(&hdlp->ih_rwlock);
		return (DDI_EINVAL);
	}

	hdlp->ih_scratch1 = dup_inum;
	ret = i_ddi_intr_ops(hdlp->ih_dip, hdlp->ih_dip,
	    DDI_INTROP_DUPVEC, hdlp, NULL);

	if (ret == DDI_SUCCESS) {
		dup_hdlp = (ddi_intr_handle_impl_t *)
		    kmem_alloc(sizeof (ddi_intr_handle_impl_t), KM_SLEEP);

		atomic_inc_32(&hdlp->ih_dup_cnt);

		*dup = (ddi_intr_handle_t)dup_hdlp;
		bcopy(hdlp, dup_hdlp, sizeof (ddi_intr_handle_impl_t));

		/* These fields are unique to each dupped msi-x vector */
		rw_init(&dup_hdlp->ih_rwlock, NULL, RW_DRIVER, NULL);
		dup_hdlp->ih_state = DDI_IHDL_STATE_ADDED;
		dup_hdlp->ih_inum = dup_inum;
		dup_hdlp->ih_flags |= DDI_INTR_MSIX_DUP;
		dup_hdlp->ih_dup_cnt = 0;

		/* Point back to original vector */
		dup_hdlp->ih_main = hdlp;
	}

	rw_exit(&hdlp->ih_rwlock);
	return (ret);
}
Example #7
0
/*
 * task_hold_by_id(), task_hold_by_id_zone()
 *
 * Overview
 *   task_hold_by_id() is used to take a reference on a task by its task id,
 *   supporting the various system call interfaces for obtaining resource data,
 *   delivering signals, and so forth.
 *
 * Return values
 *   Returns a pointer to the task_t with taskid_t id.  The task is returned
 *   with its hold count incremented by one.  Returns NULL if there
 *   is no task with the requested id.
 *
 * Caller's context
 *   Caller must not be holding task_hash_lock.  No restrictions on context.
 */
task_t *
task_hold_by_id_zone(taskid_t id, zoneid_t zoneid)
{
	task_t *tk;

	mutex_enter(&task_hash_lock);
	if ((tk = task_find(id, zoneid)) != NULL)
		atomic_inc_32(&tk->tk_hold_count);
	mutex_exit(&task_hash_lock);

	return (tk);
}
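Every hold taken here must eventually be dropped by the matching release. A sketch of the release side under the same hash lock (illustrative only; the real task_rele also unhashes and destroys the task on the final reference):

void
task_rele_sketch(task_t *tk)
{
	mutex_enter(&task_hash_lock);
	ASSERT(tk->tk_hold_count > 0);
	if (atomic_dec_32_nv(&tk->tk_hold_count) == 0) {
		/* Final reference: teardown elided in this sketch. */
	}
	mutex_exit(&task_hash_lock);
}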
Example #8
0
int
oce_create_nw_interface(struct oce_dev *dev)
{
	int ret;
	uint32_t capab_flags = OCE_CAPAB_FLAGS;
	uint32_t capab_en_flags = OCE_CAPAB_ENABLE;

	if (dev->rss_enable) {
		capab_flags |= MBX_RX_IFACE_FLAGS_RSS;
		capab_en_flags |= MBX_RX_IFACE_FLAGS_RSS;
	}

	/* create an interface for the device with our MAC address */
	ret = oce_if_create(dev, capab_flags, capab_en_flags,
	    0, &dev->mac_addr[0], (uint32_t *)&dev->if_id);
	if (ret != 0) {
		oce_log(dev, CE_WARN, MOD_CONFIG,
		    "Interface creation failed: 0x%x", ret);
		return (ret);
	}
	atomic_inc_32(&dev->nifs);

	dev->if_cap_flags = capab_en_flags;

	/* Enable VLAN Promisc on HW */
	ret = oce_config_vlan(dev, (uint8_t)dev->if_id, NULL, 0,
	    B_TRUE, B_TRUE);
	if (ret != 0) {
		oce_log(dev, CE_WARN, MOD_CONFIG,
		    "Config vlan failed: %d", ret);
		oce_delete_nw_interface(dev);
		return (ret);

	}

	/* set default flow control */
	ret = oce_set_flow_control(dev, dev->flow_control);
	if (ret != 0) {
		oce_log(dev, CE_NOTE, MOD_CONFIG,
		    "Set flow control failed: %d", ret);
	}
	ret = oce_set_promiscuous(dev, dev->promisc);

	if (ret != 0) {
		oce_log(dev, CE_NOTE, MOD_CONFIG,
		    "Set Promisc failed: %d", ret);
	}

	return (0);
}
Example #9
0
/*
 * prop_object_release_emergency
 *	A direct free with prop_object_release failed.
 *	Walk down the tree until a leaf is found and
 *	free that. Do not recurse to avoid stack overflows.
 *
 *	This is a slow edge condition, but necessary to
 *	guarantee that an object can always be freed.
 */
static void
prop_object_release_emergency(prop_object_t obj)
{
	struct _prop_object *po;
	void (*unlock)(void);
	prop_object_t parent = NULL;
	uint32_t ocnt;

	for (;;) {
		po = obj;
		_PROP_ASSERT(obj);

		if (po->po_type->pot_lock != NULL)
			po->po_type->pot_lock();

		/* Save pointer to the unlock function */
		unlock = po->po_type->pot_unlock;
		
		/* Dance a bit to make sure we always get the non-racy ocnt */
		ocnt = atomic_dec_32_nv(&po->po_refcnt);
		ocnt++;
		_PROP_ASSERT(ocnt != 0);

		if (ocnt != 1) {
			if (unlock != NULL)
				unlock();
			break;
		}
		
		_PROP_ASSERT(po->po_type);		
		if ((po->po_type->pot_free)(NULL, &obj) ==
		    _PROP_OBJECT_FREE_DONE) {
			if (unlock != NULL)
				unlock();
			break;
		}

		if (unlock != NULL)
			unlock();
		
		parent = po;
		atomic_inc_32(&po->po_refcnt);
	}
	_PROP_ASSERT(parent);
	/* One object was just freed. */
	po = parent;
	(*po->po_type->pot_emergency_free)(parent);
}
Example #10
0
void
rw_enter(krwlock_t *rwlp, krw_t rw)
{
    if (rw == RW_READER) {
        lck_rw_lock_shared((lck_rw_t *)&rwlp->rw_lock[0]);
        atomic_inc_32((volatile uint32_t *)&rwlp->rw_readers);
        ASSERT(rwlp->rw_owner == 0);
    } else {
        if (rwlp->rw_owner == current_thread())
            panic("rw_enter: locking against myself!");
        lck_rw_lock_exclusive((lck_rw_t *)&rwlp->rw_lock[0]);
        ASSERT(rwlp->rw_owner == 0);
        ASSERT(rwlp->rw_readers == 0);
        rwlp->rw_owner = current_thread();
    }
}
Example #11
0
/*
 * prop_object_release --
 *	Decrement the reference count on an object.
 *
 *	Free the object if we are releasing the final
 *	reference.
 */
void
prop_object_release(prop_object_t obj)
{
	struct _prop_object *po;
	struct _prop_stack stack;
	void (*unlock)(void); 
	int ret;
	uint32_t ocnt;

	_prop_stack_init(&stack);

	do {
		do {
			po = obj;
			_PROP_ASSERT(obj);

			if (po->po_type->pot_lock != NULL)
				po->po_type->pot_lock();

			/* Save pointer to object unlock function */
			unlock = po->po_type->pot_unlock;
			
			ocnt = atomic_dec_32_nv(&po->po_refcnt);
			ocnt++;
			_PROP_ASSERT(ocnt != 0);

			if (ocnt != 1) {
				ret = 0;
				if (unlock != NULL)
					unlock();
				break;
			}
			
			ret = (po->po_type->pot_free)(&stack, &obj);

			if (unlock != NULL)
				unlock();

			if (ret == _PROP_OBJECT_FREE_DONE)
				break;
			
			atomic_inc_32(&po->po_refcnt);
		} while (ret == _PROP_OBJECT_FREE_RECURSE);
		if (ret == _PROP_OBJECT_FREE_FAILED)
			prop_object_release_emergency(obj);
	} while (_prop_stack_pop(&stack, &obj, NULL, NULL, NULL));
}
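The retain side of this protocol is the simple half: bump the count and assert that it did not wrap. A sketch in the same conventions as the release above (proplib's actual retain is essentially this):

void
prop_object_retain(prop_object_t obj)
{
	struct _prop_object *po = obj;
	uint32_t ncnt;

	ncnt = atomic_inc_32_nv(&po->po_refcnt);
	_PROP_ASSERT(ncnt != 0);	/* catch refcount overflow */
}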
Example #12
0
/*
 * Function to copy the packet to a preallocated Tx buffer.
 *
 * wq - pointer to WQ
 * wqed - pointer to WQE descriptor
 * mp - pointer to packet chain
 * pkt_len - size of the packet
 *
 * return: 0 on success, error code otherwise
 */
static int
oce_bcopy_wqe(struct oce_wq *wq, oce_wqe_desc_t *wqed, mblk_t *mp,
    uint32_t pkt_len)
{
	oce_wq_bdesc_t *wqbd;
	caddr_t buf_va;
	struct oce_dev *dev = wq->parent;
	int len = 0;

	wqbd = oce_wqb_alloc(wq);
	if (wqbd == NULL) {
		atomic_inc_32(&dev->tx_noxmtbuf);
		oce_log(dev, CE_WARN, MOD_TX, "%s",
		    "wqb pool empty");
		return (ENOMEM);
	}

	/* create a fragment wqe for the packet */
	wqed->frag[wqed->frag_idx].u0.s.frag_pa_hi = wqbd->frag_addr.dw.addr_hi;
	wqed->frag[wqed->frag_idx].u0.s.frag_pa_lo = wqbd->frag_addr.dw.addr_lo;
	buf_va = DBUF_VA(wqbd->wqb);

	/* copy pkt into buffer */
	for (len = 0; mp != NULL && len < pkt_len; mp = mp->b_cont) {
		bcopy(mp->b_rptr, buf_va, MBLKL(mp));
		buf_va += MBLKL(mp);
		len += MBLKL(mp);
	}

	(void) ddi_dma_sync(DBUF_DHDL(wqbd->wqb), 0, pkt_len,
	    DDI_DMA_SYNC_FORDEV);

	if (oce_fm_check_dma_handle(dev, DBUF_DHDL(wqbd->wqb))) {
		ddi_fm_service_impact(dev->dip, DDI_SERVICE_DEGRADED);
		/* Free the buffer */
		oce_wqb_free(wq, wqbd);
		return (EIO);
	}
	wqed->frag[wqed->frag_idx].u0.s.frag_len   =  pkt_len;
	wqed->hdesc[wqed->nhdl].hdl = (void *)(wqbd);
	wqed->hdesc[wqed->nhdl].type = COPY_WQE;
	wqed->frag_cnt++;
	wqed->frag_idx++;
	wqed->nhdl++;
	return (0);
} /* oce_bcopy_wqe */
Example #13
0
static inline int
oce_process_tx_compl(struct oce_wq *wq, boolean_t rearm)
{
	struct oce_nic_tx_cqe *cqe;
	uint16_t num_cqe = 0;
	struct oce_cq *cq;
	oce_wqe_desc_t *wqed;
	int wqe_freed = 0;
	struct oce_dev *dev;

	cq  = wq->cq;
	dev = wq->parent;
	(void) ddi_dma_sync(cq->ring->dbuf->dma_handle, 0, 0,
	    DDI_DMA_SYNC_FORKERNEL);

	mutex_enter(&wq->txc_lock);
	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
	while (WQ_CQE_VALID(cqe)) {

		DW_SWAP(u32ptr(cqe), sizeof (struct oce_nic_tx_cqe));

		/* update stats */
		if (cqe->u0.s.status != 0) {
			atomic_inc_32(&dev->tx_errors);
		}

		/* complete the WQEs */
		wqed = OCE_LIST_REM_HEAD(&wq->wqe_desc_list);

		wqe_freed = wqed->wqe_cnt;
		oce_free_wqed(wq, wqed);
		RING_GET(wq->ring, wqe_freed);
		atomic_add_32(&wq->wq_free, wqe_freed);
		/* clear the valid bit and progress cqe */
		WQ_CQE_INVALIDATE(cqe);
		RING_GET(cq->ring, 1);
		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring,
		    struct oce_nic_tx_cqe);
		num_cqe++;
	} /* for all valid CQE */
	mutex_exit(&wq->txc_lock);
	if (num_cqe)
		oce_arm_cq(wq->parent, cq->cq_id, num_cqe, rearm);
	return (num_cqe);
} /* oce_process_tx_completion */
Example #14
0
int
rw_tryenter(krwlock_t *rwlp, krw_t rw)
{
    int held = 0;

    if (rw == RW_READER) {
        held = lck_rw_try_lock((lck_rw_t *)&rwlp->rw_lock[0],
                               LCK_RW_TYPE_SHARED);
        if (held)
            atomic_inc_32((volatile uint32_t *)&rwlp->rw_readers);
    } else {
        if (rwlp->rw_owner == current_thread())
            panic("rw_tryenter: locking against myself!");
        held = lck_rw_try_lock((lck_rw_t *)&rwlp->rw_lock[0],
                               LCK_RW_TYPE_EXCLUSIVE);
        if (held)
            rwlp->rw_owner = current_thread();
    }

    return (held);
}
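Putting the wrappers together: an opportunistic reader that tries the lock first and blocks only on contention. A sketch (the helper is illustrative; rw_exit is assumed to release either mode):

static uint32_t
counter_read(krwlock_t *rwlp, volatile uint32_t *counter)
{
	uint32_t v;

	if (!rw_tryenter(rwlp, RW_READER))	/* fast path: no blocking */
		rw_enter(rwlp, RW_READER);	/* contended: block */

	v = *counter;
	rw_exit(rwlp);
	return (v);
}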
Example #15
0
/*
 * returns ENOENT, EIO, or 0.
 */
int
dmu_bonus_hold(objset_t *os, uint64_t object, void *tag, dmu_buf_t **dbp)
{
	dnode_t *dn;
	dmu_buf_impl_t *db;
	int error;

	error = dnode_hold(os, object, FTAG, &dn);
	if (error)
		return (error);

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	if (dn->dn_bonus == NULL) {
		rw_exit(&dn->dn_struct_rwlock);
		rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
		if (dn->dn_bonus == NULL)
			dbuf_create_bonus(dn);
	}
	db = dn->dn_bonus;

	/* as long as the bonus buf is held, the dnode will be held */
	if (refcount_add(&db->db_holds, tag) == 1) {
		VERIFY(dnode_add_ref(dn, db));
		atomic_inc_32(&dn->dn_dbufs_count);
	}

	/*
	 * Wait to drop dn_struct_rwlock until after adding the bonus dbuf's
	 * hold and incrementing the dbuf count to ensure that dnode_move() sees
	 * a dnode hold for every dbuf.
	 */
	rw_exit(&dn->dn_struct_rwlock);

	dnode_rele(dn, FTAG);

	VERIFY(0 == dbuf_read(db, NULL, DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH));

	*dbp = &db->db;
	return (0);
}
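A caller's view of the contract above: every successful hold is paired with a dmu_buf_rele on the same tag once the bonus contents have been consumed. A minimal sketch (the helper and its arguments are illustrative):

static int
read_bonus_sketch(objset_t *os, uint64_t object, void *buf, size_t len)
{
	dmu_buf_t *db;
	int err;

	err = dmu_bonus_hold(os, object, FTAG, &db);
	if (err != 0)
		return (err);		/* ENOENT or EIO, per the contract */

	if (len > db->db_size)
		len = db->db_size;	/* don't read past the bonus buffer */
	bcopy(db->db_data, buf, len);

	dmu_buf_rele(db, FTAG);		/* drop the hold taken above */
	return (0);
}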
Example #16
0
/*ARGSUSED*/
static int
foo_ioctl(dev_t dev, int cmd, intptr_t arg, int flags,	/* model.h */
	cred_t *cr, int *rvalp)
{
	int err;

	err = 0;
	switch (cmd) {
	case FOO_IOC_GETCNT:
		atomic_inc_32(&foo_count);
		if (ddi_copyout(&foo_count, (void *)arg,
		    sizeof (foo_count), flags))
			err = EFAULT;
		break;

	default:
		err = ENOTTY;
		break;
	}

	return (err);
}
Example #17
0
/*
 * Allocate a new rxbuf from memory. All its fields are set except
 * for its associated mblk, which has to be allocated later.
 */
static vmxnet3s_rxbuf_t *
vmxnet3s_alloc_rxbuf(vmxnet3s_softc_t *dp, boolean_t cansleep)
{
	vmxnet3s_rxbuf_t *rxbuf;
	int		flag = cansleep ? KM_SLEEP : KM_NOSLEEP;

	if ((rxbuf = kmem_zalloc(sizeof (vmxnet3s_rxbuf_t), flag)) == NULL)
		return (NULL);

	if (vmxnet3s_alloc1(dp, &rxbuf->dma, (dp->cur_mtu + 18),
	    cansleep) != DDI_SUCCESS) {
		kmem_free(rxbuf, sizeof (vmxnet3s_rxbuf_t));
		return (NULL);
	}

	rxbuf->freecb.free_func = vmxnet3s_put_rxbuf;
	rxbuf->freecb.free_arg = (caddr_t)rxbuf;
	rxbuf->dp = dp;

	atomic_inc_32(&dp->rxnumbufs);

	return (rxbuf);
}
Example #18
0
void
dphold(devplcy_t *dp)
{
    ASSERT(dp->dp_ref != 0xdeadbeef && dp->dp_ref != 0);
    atomic_inc_32(&dp->dp_ref);
}
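The 0xdeadbeef check guards against use-after-free: a released policy can be poisoned so that a stale hold trips the ASSERT. A sketch of the matching release (illustrative; the poison store is an assumption, not necessarily what the real dprele does):

void
dprele_sketch(devplcy_t *dp)
{
	ASSERT(dp->dp_ref != 0xdeadbeef && dp->dp_ref != 0);
	if (atomic_dec_32_nv(&dp->dp_ref) == 0) {
		dp->dp_ref = 0xdeadbeef;	/* poison for stale holds */
		kmem_free(dp, sizeof (devplcy_t));
	}
}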
Example #19
0
/*
 * smb_ofile_open
 */
smb_ofile_t *
smb_ofile_open(
    smb_tree_t		*tree,
    smb_node_t		*node,
    uint16_t		pid,
    struct open_param	*op,
    uint16_t		ftype,
    uint32_t		uniqid,
    smb_error_t		*err)
{
	smb_ofile_t	*of;
	uint16_t	fid;
	smb_attr_t	attr;
	int		rc;

	if (smb_idpool_alloc(&tree->t_fid_pool, &fid)) {
		err->status = NT_STATUS_TOO_MANY_OPENED_FILES;
		err->errcls = ERRDOS;
		err->errcode = ERROR_TOO_MANY_OPEN_FILES;
		return (NULL);
	}

	of = kmem_cache_alloc(tree->t_server->si_cache_ofile, KM_SLEEP);
	bzero(of, sizeof (smb_ofile_t));
	of->f_magic = SMB_OFILE_MAGIC;
	of->f_refcnt = 1;
	of->f_fid = fid;
	of->f_uniqid = uniqid;
	of->f_opened_by_pid = pid;
	of->f_granted_access = op->desired_access;
	of->f_share_access = op->share_access;
	of->f_create_options = op->create_options;
	of->f_cr = (op->create_options & FILE_OPEN_FOR_BACKUP_INTENT) ?
	    smb_user_getprivcred(tree->t_user) : tree->t_user->u_cred;
	crhold(of->f_cr);
	of->f_ftype = ftype;
	of->f_server = tree->t_server;
	of->f_session = tree->t_user->u_session;
	of->f_user = tree->t_user;
	of->f_tree = tree;
	of->f_node = node;

	mutex_init(&of->f_mutex, NULL, MUTEX_DEFAULT, NULL);
	of->f_state = SMB_OFILE_STATE_OPEN;

	if (ftype == SMB_FTYPE_MESG_PIPE) {
		of->f_pipe = smb_opipe_alloc(tree->t_server);
		smb_server_inc_pipes(of->f_server);
	} else {
		ASSERT(ftype == SMB_FTYPE_DISK); /* Regular file, not a pipe */
		ASSERT(node);

		if (of->f_granted_access == FILE_EXECUTE)
			of->f_flags |= SMB_OFLAGS_EXECONLY;

		bzero(&attr, sizeof (smb_attr_t));
		attr.sa_mask = SMB_AT_UID | SMB_AT_DOSATTR;
		rc = smb_node_getattr(NULL, node, of->f_cr, NULL, &attr);
		if (rc != 0) {
			of->f_magic = 0;
			mutex_destroy(&of->f_mutex);
			crfree(of->f_cr);
			smb_idpool_free(&tree->t_fid_pool, of->f_fid);
			kmem_cache_free(tree->t_server->si_cache_ofile, of);
			err->status = NT_STATUS_INTERNAL_ERROR;
			err->errcls = ERRDOS;
			err->errcode = ERROR_INTERNAL_ERROR;
			return (NULL);
		}
		if (crgetuid(of->f_cr) == attr.sa_vattr.va_uid) {
			/*
			 * Add this bit for the file's owner even if it's not
			 * specified in the request (Windows behavior).
			 */
			of->f_granted_access |= FILE_READ_ATTRIBUTES;
		}

		if (smb_node_is_file(node)) {
			of->f_mode =
			    smb_fsop_amask_to_omode(of->f_granted_access);
			if (smb_fsop_open(node, of->f_mode, of->f_cr) != 0) {
				of->f_magic = 0;
				mutex_destroy(&of->f_mutex);
				crfree(of->f_cr);
				smb_idpool_free(&tree->t_fid_pool, of->f_fid);
				kmem_cache_free(tree->t_server->si_cache_ofile,
				    of);
				err->status = NT_STATUS_ACCESS_DENIED;
				err->errcls = ERRDOS;
				err->errcode = ERROR_ACCESS_DENIED;
				return (NULL);
			}
		}

		if (tree->t_flags & SMB_TREE_READONLY)
			of->f_flags |= SMB_OFLAGS_READONLY;

		/*
		 * Note that if we created_readonly, that
		 * will _not_ yet show in attr.sa_dosattr
		 * so creating a readonly file gives the
		 * caller a writable handle as it should.
		 */
		if (attr.sa_dosattr & FILE_ATTRIBUTE_READONLY)
			of->f_flags |= SMB_OFLAGS_READONLY;

		smb_node_inc_open_ofiles(node);
		smb_node_add_ofile(node, of);
		smb_node_ref(node);
		smb_server_inc_files(of->f_server);
	}
	smb_llist_enter(&tree->t_ofile_list, RW_WRITER);
	smb_llist_insert_tail(&tree->t_ofile_list, of);
	smb_llist_exit(&tree->t_ofile_list);
	atomic_inc_32(&tree->t_open_files);
	atomic_inc_32(&of->f_session->s_file_cnt);
	return (of);
}
Example #20
0
template<typename T> static void increase(T *ptr) { atomic_inc_32(ptr); }
Example #21
0
/*
 * Windows XP and 2000 use this mechanism to write spool files.
 * Create a spool file fd to be used by spoolss_s_WritePrinter
 * and add it to the tail of the spool list.
 */
static int
spoolss_s_StartDocPrinter(void *arg, ndr_xa_t *mxa)
{
	struct spoolss_StartDocPrinter *param = arg;
	ndr_hdid_t *id = (ndr_hdid_t *)&param->handle;
	smb_spooldoc_t *spfile;
	spoolss_DocInfo_t *docinfo;
	char g_path[MAXPATHLEN];
	smb_share_t si;
	int rc;
	int fd;

	if (ndr_hdlookup(mxa, id) == NULL) {
		smb_tracef("spoolss_s_StartDocPrinter: invalid handle");
		param->status = ERROR_INVALID_HANDLE;
		return (NDR_DRC_OK);
	}

	if ((docinfo = param->dinfo.DocInfoContainer) == NULL) {
		param->status = ERROR_INVALID_PARAMETER;
		return (NDR_DRC_OK);
	}

	if ((rc = smb_shr_get(SMB_SHARE_PRINT, &si)) != NERR_Success) {
		smb_tracef("spoolss_s_StartDocPrinter: %s error=%d",
		    SMB_SHARE_PRINT, rc);
		param->status = rc;
		return (NDR_DRC_OK);
	}

	if ((spfile = calloc(1, sizeof (smb_spooldoc_t))) == NULL) {
		param->status = ERROR_NOT_ENOUGH_MEMORY;
		return (NDR_DRC_OK);
	}

	if (docinfo->doc_name != NULL)
		(void) strlcpy(spfile->sd_doc_name,
		    (char *)docinfo->doc_name, MAXNAMELEN);
	else
		(void) strlcpy(spfile->sd_doc_name, "document", MAXNAMELEN);

	if (docinfo->printer_name != NULL)
		(void) strlcpy(spfile->sd_printer_name,
		    (char *)docinfo->printer_name, MAXPATHLEN);
	else
		(void) strlcpy(spfile->sd_printer_name, "printer", MAXPATHLEN);

	spfile->sd_ipaddr = mxa->pipe->np_user->ui_ipaddr;
	(void) strlcpy((char *)spfile->sd_username,
	    mxa->pipe->np_user->ui_account, MAXNAMELEN);
	(void) memcpy(&spfile->sd_handle, &param->handle, sizeof (ndr_hdid_t));

	/*
	 * Write the temporary spool file to print$.
	 */
	(void) snprintf(g_path, MAXPATHLEN, "%s/%s%d", si.shr_path,
	    spfile->sd_username, spoolss_cnt);
	atomic_inc_32(&spoolss_cnt);

	fd = open(g_path, O_CREAT | O_RDWR, 0600);
	if (fd == -1) {
		smb_tracef("spoolss_s_StartDocPrinter: %s: %s",
		    g_path, strerror(errno));
		param->status = ERROR_OPEN_FAILED;
		free(spfile);
	} else {
		(void) strlcpy((char *)spfile->sd_path, g_path, MAXPATHLEN);
		spfile->sd_fd = (uint16_t)fd;

		/*
		 * Add the document to the spool list.
		 */
		(void) rw_wrlock(&spoolss_splist.sp_rwl);
		list_insert_tail(&spoolss_splist.sp_list, spfile);
		spoolss_splist.sp_cnt++;
		(void) rw_unlock(&spoolss_splist.sp_rwl);

		/*
		 * JobId isn't used now, but if printQ management is added
		 * this will have to be incremented per job submitted.
		 */
		param->JobId = 46;
		param->status = ERROR_SUCCESS;
	}
	return (NDR_DRC_OK);
}
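Note the window above: g_path is built from spoolss_cnt before the counter is bumped, so two concurrent calls could construct the same spool file name. Reserving the value with the value-returning increment closes that race; a sketch of those two lines rewritten (illustrative only):

	uint32_t jobnum;

	jobnum = atomic_inc_32_nv(&spoolss_cnt);	/* unique per caller */
	(void) snprintf(g_path, MAXPATHLEN, "%s/%s%u", si.shr_path,
	    spfile->sd_username, jobnum);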
Example #22
0
int
klpd_call(const cred_t *cr, const priv_set_t *req, va_list ap)
{
	klpd_reg_t *p;
	int rv = -1;
	credklpd_t *ckp;
	zone_t *ckzone;

	/*
	 * These locks must not be held when this code is called;
	 * callbacks to userland with these locks held will result
	 * in issues.  That said, the code at the call sites was
	 * restructured not to call with any of the locks held and
	 * no policies operate by default on most processes.
	 */
	if (mutex_owned(&pidlock) || mutex_owned(&curproc->p_lock) ||
	    mutex_owned(&curproc->p_crlock)) {
		atomic_inc_32(&klpd_bad_locks);
		return (-1);
	}

	/*
	 * Enforce the limit set for the calling process (still).
	 */
	if (!priv_issubset(req, &CR_LPRIV(cr)))
		return (-1);

	/* Try 1: get the credential specific klpd */
	if ((ckp = crgetcrklpd(cr)) != NULL) {
		mutex_enter(&ckp->crkl_lock);
		if ((p = ckp->crkl_reg) != NULL &&
		    p->klpd_indel == 0 &&
		    priv_issubset(req, &p->klpd_pset)) {
			klpd_hold(p);
			mutex_exit(&ckp->crkl_lock);
			rv = klpd_do_call(p, req, ap);
			mutex_enter(&ckp->crkl_lock);
			klpd_rele(p);
			mutex_exit(&ckp->crkl_lock);
			if (rv != -1)
				return (rv == 0 ? 0 : -1);
		} else {
			mutex_exit(&ckp->crkl_lock);
		}
	}

	/* Try 2: get the project specific klpd */
	mutex_enter(&klpd_mutex);

	if ((p = curproj->kpj_klpd) != NULL) {
		klpd_hold(p);
		mutex_exit(&klpd_mutex);
		if (p->klpd_indel == 0 &&
		    priv_issubset(req, &p->klpd_pset)) {
			rv = klpd_do_call(p, req, ap);
		}
		mutex_enter(&klpd_mutex);
		klpd_rele(p);
		mutex_exit(&klpd_mutex);

		if (rv != -1)
			return (rv == 0 ? 0 : -1);
	} else {
		mutex_exit(&klpd_mutex);
	}

	/* Try 3: get the global klpd list */
	ckzone = crgetzone(cr);
	mutex_enter(&klpd_mutex);

	for (p = klpd_list; p != NULL; ) {
		zone_t *kkzone = crgetzone(p->klpd_cred);
		if ((kkzone == &zone0 || kkzone == ckzone) &&
		    p->klpd_indel == 0 &&
		    priv_issubset(req, &p->klpd_pset)) {
			klpd_hold(p);
			mutex_exit(&klpd_mutex);
			rv = klpd_do_call(p, req, ap);
			mutex_enter(&klpd_mutex);

			p = klpd_rele_next(p);

			if (rv != -1)
				break;
		} else {
			p = p->klpd_next;
		}
	}
	mutex_exit(&klpd_mutex);
	return (rv == 0 ? 0 : -1);
}
Example #23
0
static void
klpd_hold(klpd_reg_t *p)
{
	atomic_inc_32(&p->klpd_ref);
}
Example #24
0
void add_ref_copy()
{
    atomic_inc_32( &use_count_ );
}
Example #25
0
/*
 * smb_ofile_open
 */
smb_ofile_t *
smb_ofile_open(
    smb_request_t	*sr,
    smb_node_t		*node,
    struct open_param	*op,
    uint16_t		ftype,
    uint32_t		uniqid,
    smb_error_t		*err)
{
	smb_tree_t	*tree = sr->tid_tree;
	smb_ofile_t	*of;
	uint16_t	fid;
	smb_attr_t	attr;
	int		rc;
	enum errstates { EMPTY, FIDALLOC, CRHELD, MUTEXINIT };
	enum errstates	state = EMPTY;

	if (smb_idpool_alloc(&tree->t_fid_pool, &fid)) {
		err->status = NT_STATUS_TOO_MANY_OPENED_FILES;
		err->errcls = ERRDOS;
		err->errcode = ERROR_TOO_MANY_OPEN_FILES;
		return (NULL);
	}
	state = FIDALLOC;

	of = kmem_cache_alloc(smb_cache_ofile, KM_SLEEP);
	bzero(of, sizeof (smb_ofile_t));
	of->f_magic = SMB_OFILE_MAGIC;
	of->f_refcnt = 1;
	of->f_fid = fid;
	of->f_uniqid = uniqid;
	of->f_opened_by_pid = sr->smb_pid;
	of->f_granted_access = op->desired_access;
	of->f_share_access = op->share_access;
	of->f_create_options = op->create_options;
	of->f_cr = (op->create_options & FILE_OPEN_FOR_BACKUP_INTENT) ?
	    smb_user_getprivcred(sr->uid_user) : sr->uid_user->u_cred;
	crhold(of->f_cr);
	state = CRHELD;
	of->f_ftype = ftype;
	of->f_server = tree->t_server;
	of->f_session = tree->t_session;
	/*
	 * grab a ref for of->f_user
	 * released in smb_ofile_delete()
	 */
	smb_user_hold_internal(sr->uid_user);
	of->f_user = sr->uid_user;
	of->f_tree = tree;
	of->f_node = node;

	mutex_init(&of->f_mutex, NULL, MUTEX_DEFAULT, NULL);
	state = MUTEXINIT;
	of->f_state = SMB_OFILE_STATE_OPEN;

	if (ftype == SMB_FTYPE_MESG_PIPE) {
		/* See smb_opipe_open. */
		of->f_pipe = op->pipe;
		smb_server_inc_pipes(of->f_server);
	} else {
		ASSERT(ftype == SMB_FTYPE_DISK); /* Regular file, not a pipe */
		ASSERT(node);

		/*
		 * Note that the common open path often adds bits like
		 * READ_CONTROL, so the logic "is this open exec-only"
		 * needs to look at only the FILE_DATA_ALL bits.
		 */
		if ((of->f_granted_access & FILE_DATA_ALL) == FILE_EXECUTE)
			of->f_flags |= SMB_OFLAGS_EXECONLY;

		bzero(&attr, sizeof (smb_attr_t));
		attr.sa_mask = SMB_AT_UID | SMB_AT_DOSATTR;
		rc = smb_node_getattr(NULL, node, of->f_cr, NULL, &attr);
		if (rc != 0) {
			err->status = NT_STATUS_INTERNAL_ERROR;
			err->errcls = ERRDOS;
			err->errcode = ERROR_INTERNAL_ERROR;
			goto errout;
		}
		if (crgetuid(of->f_cr) == attr.sa_vattr.va_uid) {
			/*
			 * Add this bit for the file's owner even if it's not
			 * specified in the request (Windows behavior).
			 */
			of->f_granted_access |= FILE_READ_ATTRIBUTES;
		}

		if (smb_node_is_file(node)) {
			of->f_mode =
			    smb_fsop_amask_to_omode(of->f_granted_access);
			if (smb_fsop_open(node, of->f_mode, of->f_cr) != 0) {
				err->status = NT_STATUS_ACCESS_DENIED;
				err->errcls = ERRDOS;
				err->errcode = ERROR_ACCESS_DENIED;
				goto errout;
			}
		}

		if (tree->t_flags & SMB_TREE_READONLY)
			of->f_flags |= SMB_OFLAGS_READONLY;

		/*
		 * Note that if we created_readonly, that
		 * will _not_ yet show in attr.sa_dosattr
		 * so creating a readonly file gives the
		 * caller a writable handle as it should.
		 */
		if (attr.sa_dosattr & FILE_ATTRIBUTE_READONLY)
			of->f_flags |= SMB_OFLAGS_READONLY;

		smb_node_inc_open_ofiles(node);
		smb_node_add_ofile(node, of);
		smb_node_ref(node);
		smb_server_inc_files(of->f_server);
	}
	smb_llist_enter(&tree->t_ofile_list, RW_WRITER);
	smb_llist_insert_tail(&tree->t_ofile_list, of);
	smb_llist_exit(&tree->t_ofile_list);
	atomic_inc_32(&tree->t_open_files);
	atomic_inc_32(&of->f_session->s_file_cnt);
	return (of);

errout:
	switch (state) {
	case MUTEXINIT:
		mutex_destroy(&of->f_mutex);
		smb_user_release(of->f_user);
		/*FALLTHROUGH*/
	case CRHELD:
		crfree(of->f_cr);
		of->f_magic = 0;
		kmem_cache_free(smb_cache_ofile, of);
		/*FALLTHROUGH*/
	case FIDALLOC:
		smb_idpool_free(&tree->t_fid_pool, fid);
		/*FALLTHROUGH*/
	case EMPTY:
		break;
	}
	return (NULL);
}
Example #26
0
/*
 * All versions of Windows use this function to spool files to a printer
 * via the CUPS interface.
 */
static void
smbd_spool_copyfile(smb_inaddr_t *ipaddr, char *username, char *path,
                    char *doc_name)
{
    smb_cups_ops_t	*cups;
    http_t		*http = NULL;		/* HTTP connection to server */
    ipp_t		*request = NULL;	/* IPP Request */
    ipp_t		*response = NULL;	/* IPP Response */
    cups_lang_t	*language = NULL;	/* Default language */
    char		uri[HTTP_MAX_URI];	/* printer-uri attribute */
    char		new_jobname[SMBD_PJOBLEN];
    smbd_printjob_t	pjob;
    char		clientname[INET6_ADDRSTRLEN];
    struct stat 	sbuf;
    int		rc = 1;

    if (stat(path, &sbuf)) {
        syslog(LOG_INFO, "smbd_spool_copyfile: %s: %s",
               path, strerror(errno));
        return;
    }

    /*
     * Remove zero-size files and return; these were inadvertently
     * created by XP or 2000.
     */
    if (sbuf.st_size == 0) {
        if (remove(path) != 0)
            syslog(LOG_INFO,
                   "smbd_spool_copyfile: cannot remove %s: %s",
                   path, strerror(errno));
        return;
    }

    if ((cups = smbd_cups_ops()) == NULL)
        return;

    if ((http = cups->httpConnect("localhost", 631)) == NULL) {
        syslog(LOG_INFO,
               "smbd_spool_copyfile: cupsd not running");
        return;
    }

    if ((request = cups->ippNew()) == NULL) {
        syslog(LOG_INFO,
               "smbd_spool_copyfile: ipp not running");
        return;
    }

    request->request.op.operation_id = IPP_PRINT_JOB;
    request->request.op.request_id = 1;
    language = cups->cupsLangDefault();

    cups->ippAddString(request, IPP_TAG_OPERATION, IPP_TAG_CHARSET,
                       "attributes-charset", NULL, cups->cupsLangEncoding(language));

    cups->ippAddString(request, IPP_TAG_OPERATION, IPP_TAG_LANGUAGE,
                       "attributes-natural-language", NULL, language->language);

    (void) snprintf(uri, sizeof (uri), "ipp://localhost/printers/%s",
                    SMBD_PRINTER);
    pjob.pj_pid = pthread_self();
    pjob.pj_sysjob = 10;
    (void) strlcpy(pjob.pj_filename, path, SMBD_PJOBLEN);
    pjob.pj_start_time = time(NULL);
    pjob.pj_status = 2;
    pjob.pj_size = sbuf.st_blocks * 512;
    pjob.pj_page_count = 1;
    pjob.pj_isspooled = B_TRUE;
    pjob.pj_jobnum = smbd_cups_jobnum;

    (void) strlcpy(pjob.pj_jobname, doc_name, SMBD_PJOBLEN);
    (void) strlcpy(pjob.pj_username, username, SMBD_PJOBLEN);
    (void) strlcpy(pjob.pj_queuename, SMBD_CUPS_SPOOL_DIR, SMBD_PJOBLEN);

    cups->ippAddString(request, IPP_TAG_OPERATION, IPP_TAG_URI,
                       "printer-uri", NULL, uri);

    cups->ippAddString(request, IPP_TAG_OPERATION, IPP_TAG_NAME,
                       "requesting-user-name", NULL, pjob.pj_username);

    if (smb_inet_ntop(ipaddr, clientname,
                      SMB_IPSTRLEN(ipaddr->a_family)) == NULL) {
        syslog(LOG_INFO,
               "smbd_spool_copyfile: %s: unknown client", clientname);
        goto out;
    }

    cups->ippAddString(request, IPP_TAG_OPERATION, IPP_TAG_NAME,
                       "job-originating-host-name", NULL, clientname);

    (void) snprintf(new_jobname, SMBD_PJOBLEN, "%s%d",
                    SMBD_FN_PREFIX, pjob.pj_jobnum);
    cups->ippAddString(request, IPP_TAG_OPERATION, IPP_TAG_NAME,
                       "job-name", NULL, new_jobname);

    (void) snprintf(uri, sizeof (uri) - 1, "/printers/%s", SMBD_PRINTER);

    response = cups->cupsDoFileRequest(http, request, uri,
                                       pjob.pj_filename);
    if (response != NULL) {
        if (response->request.status.status_code >= IPP_OK_CONFLICT) {
            syslog(LOG_ERR,
                   "smbd_spool_copyfile: printer %s: %s",
                   SMBD_PRINTER,
                   cups->ippErrorString(cups->cupsLastError()));
        } else {
            atomic_inc_32(&smbd_cups_jobnum);
            rc = 0;
        }
    } else {
        syslog(LOG_ERR,
               "smbd_spool_copyfile: unable to print to %s",
               cups->ippErrorString(cups->cupsLastError()));
    }

    if (rc == 0)
        (void) unlink(pjob.pj_filename);

out:
    if (response)
        cups->ippDelete(response);

    if (language)
        cups->cupsLangFree(language);

    if (http)
        cups->httpClose(http);
}
Example #27
0
/*
 * e1000g_receive - main receive routine
 *
 * This routine processes packets received in interrupt context.
 */
mblk_t *
e1000g_receive(e1000g_rx_ring_t *rx_ring, mblk_t **tail, uint_t sz)
{
	struct e1000_hw *hw;
	mblk_t *nmp;
	mblk_t *ret_mp;
	mblk_t *ret_nmp;
	struct e1000_rx_desc *current_desc;
	struct e1000_rx_desc *last_desc;
	p_rx_sw_packet_t packet;
	p_rx_sw_packet_t newpkt;
	uint16_t length;
	uint32_t pkt_count;
	uint32_t desc_count;
	boolean_t accept_frame;
	boolean_t end_of_packet;
	boolean_t need_copy;
	struct e1000g *Adapter;
	dma_buffer_t *rx_buf;
	uint16_t cksumflags;
	uint_t chain_sz = 0;
	e1000g_rx_data_t *rx_data;
	uint32_t max_size;
	uint32_t min_size;

	ret_mp = NULL;
	ret_nmp = NULL;
	pkt_count = 0;
	desc_count = 0;
	cksumflags = 0;

	Adapter = rx_ring->adapter;
	rx_data = rx_ring->rx_data;
	hw = &Adapter->shared;

	/* Sync the Rx descriptor DMA buffers */
	(void) ddi_dma_sync(rx_data->rbd_dma_handle,
	    0, 0, DDI_DMA_SYNC_FORKERNEL);

	if (e1000g_check_dma_handle(rx_data->rbd_dma_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
		Adapter->e1000g_state |= E1000G_ERROR;
		return (NULL);
	}

	current_desc = rx_data->rbd_next;
	if (!(current_desc->status & E1000_RXD_STAT_DD)) {
		/*
		 * Don't send anything up; just clear the RFD.
		 */
		E1000G_DEBUG_STAT(rx_ring->stat_none);
		return (NULL);
	}

	max_size = Adapter->max_frame_size - ETHERFCSL - VLAN_TAGSZ;
	min_size = ETHERMIN;

	/*
	 * Loop through the receive descriptors starting at the last known
	 * descriptor owned by the hardware that begins a packet.
	 */
	while ((current_desc->status & E1000_RXD_STAT_DD) &&
	    (pkt_count < Adapter->rx_limit_onintr) &&
	    ((sz == E1000G_CHAIN_NO_LIMIT) || (chain_sz <= sz))) {

		desc_count++;
		/*
		 * A packet can span multiple descriptors in a jumbo
		 * frame situation.
		 */
		if (current_desc->status & E1000_RXD_STAT_EOP) {
			/* packet has EOP set */
			end_of_packet = B_TRUE;
		} else {
			/*
			 * If this received buffer does not have the
			 * End-Of-Packet bit set, the received packet
			 * will consume multiple buffers. We won't send this
			 * packet upstack till we get all the related buffers.
			 */
			end_of_packet = B_FALSE;
		}

		/*
		 * Get a pointer to the actual receive buffer.
		 * The mp->b_rptr is mapped to the current
		 * descriptor's buffer address.
		 */
		packet =
		    (p_rx_sw_packet_t)QUEUE_GET_HEAD(&rx_data->recv_list);
		ASSERT(packet != NULL);

		rx_buf = packet->rx_buf;

		length = current_desc->length;

#ifdef __sparc
		if (packet->dma_type == USE_DVMA)
			dvma_sync(rx_buf->dma_handle, 0,
			    DDI_DMA_SYNC_FORKERNEL);
		else
			(void) ddi_dma_sync(rx_buf->dma_handle,
			    E1000G_IPALIGNROOM, length,
			    DDI_DMA_SYNC_FORKERNEL);
#else
		(void) ddi_dma_sync(rx_buf->dma_handle,
		    E1000G_IPALIGNROOM, length,
		    DDI_DMA_SYNC_FORKERNEL);
#endif

		if (e1000g_check_dma_handle(
		    rx_buf->dma_handle) != DDI_FM_OK) {
			ddi_fm_service_impact(Adapter->dip,
			    DDI_SERVICE_DEGRADED);
			Adapter->e1000g_state |= E1000G_ERROR;

			goto rx_drop;
		}

		accept_frame = (current_desc->errors == 0) ||
		    ((current_desc->errors &
		    (E1000_RXD_ERR_TCPE | E1000_RXD_ERR_IPE)) != 0);

		if (hw->mac.type == e1000_82543) {
			unsigned char last_byte;

			last_byte =
			    *((unsigned char *)rx_buf->address + length - 1);

			if (TBI_ACCEPT(hw,
			    current_desc->status, current_desc->errors,
			    current_desc->length, last_byte,
			    Adapter->min_frame_size, Adapter->max_frame_size)) {

				e1000_tbi_adjust_stats(Adapter,
				    length, hw->mac.addr);

				length--;
				accept_frame = B_TRUE;
			} else if (e1000_tbi_sbp_enabled_82543(hw) &&
			    (current_desc->errors == E1000_RXD_ERR_CE)) {
				accept_frame = B_TRUE;
			}
		}

		/*
		 * Indicate the packet to the NOS if it was good.
		 * Normally, hardware will discard bad packets for us.
		 * Check for the packet to be a valid Ethernet packet
		 */
		if (!accept_frame) {
			/*
			 * Error in the incoming packet: either it is not an
			 * Ethernet-sized packet, or it carries an error. In
			 * either case, the packet will simply be discarded.
			 */
			E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
			    "Process Receive Interrupts: Error in Packet\n");

			E1000G_STAT(rx_ring->stat_error);
			/*
			 * Return here, since we are done with this packet.
			 * There is no point in letting the while loop run
			 * to completion. More efficient and less
			 * error-prone.
			 */
			goto rx_drop;
		}

		/*
		 * If the Ethernet CRC is not stripped by the hardware,
		 * we need to strip it before sending it up to the stack.
		 */
		if (end_of_packet && !Adapter->strip_crc) {
			if (length > ETHERFCSL) {
				length -= ETHERFCSL;
			} else {
				/*
				 * If the fragment is smaller than the CRC,
				 * drop this fragment and do the
				 * end-of-packet processing.
				 */
				ASSERT(rx_data->rx_mblk_tail != NULL);
				rx_data->rx_mblk_tail->b_wptr -=
				    ETHERFCSL - length;
				rx_data->rx_mblk_len -=
				    ETHERFCSL - length;

				QUEUE_POP_HEAD(&rx_data->recv_list);

				goto rx_end_of_packet;
			}
		}

		need_copy = B_TRUE;

		if (length <= Adapter->rx_bcopy_thresh)
			goto rx_copy;

		/*
		 * Get the pre-constructed mblk that was associated
		 * with the receive data buffer.
		 */
		if (packet->mp == NULL) {
			packet->mp = desballoc((unsigned char *)
			    rx_buf->address, length,
			    BPRI_MED, &packet->free_rtn);
		}

		if (packet->mp != NULL) {
			/*
			 * We have two buffer pools: one associated with
			 * the Rx descriptors, and a separate freelist pool.
			 * Each time we get a good packet, try to get a
			 * buffer from the freelist pool using
			 * e1000g_get_buf. If we get a free buffer, replace
			 * the descriptor buffer address with the free
			 * buffer we just got, and pass the pre-constructed
			 * mblk upstack. (note: no copying)
			 *
			 * If we failed to get a free buffer, try to
			 * allocate a new buffer (mp) and copy the receive
			 * buffer's content into it. Don't disturb the
			 * descriptor buffer address. (note: copying)
			 */
			newpkt = e1000g_get_buf(rx_data);

			if (newpkt != NULL) {
				/*
				 * Get the mblk associated with the data,
				 * and strip it off the sw packet.
				 */
				nmp = packet->mp;
				packet->mp = NULL;
				atomic_inc_32(&packet->ref_cnt);

				/*
				 * Now replace the old buffer with the new
				 * one we got from the free list.
				 * Both the RxSwPacket as well as the
				 * Receive Buffer Descriptor will now
				 * point to this new packet.
				 */
				packet = newpkt;

				current_desc->buffer_addr =
				    newpkt->rx_buf->dma_address;

				need_copy = B_FALSE;
			} else {
				/* EMPTY */
				E1000G_DEBUG_STAT(rx_ring->stat_no_freepkt);
			}
		}

rx_copy:
		if (need_copy) {
			/*
			 * No buffers available on the free list;
			 * bcopy the data from the receive buffer and
			 * keep the original buffer. Not ideal, but
			 * there is no other way.
			 */
			if ((nmp = allocb(length + E1000G_IPALIGNROOM,
			    BPRI_MED)) == NULL) {
				/*
				 * The system has no buffers available
				 * to send up the incoming packet, so
				 * the packet will have to be processed
				 * when more buffers are available.
				 */
				E1000G_STAT(rx_ring->stat_allocb_fail);
				goto rx_drop;
			}
			nmp->b_rptr += E1000G_IPALIGNROOM;
			nmp->b_wptr += E1000G_IPALIGNROOM;
			/*
			 * The free list did not have any buffers
			 * available, so the received packet will
			 * have to be copied into an mp and the original
			 * buffer will have to be retained for future
			 * packet reception.
			 */
			bcopy(rx_buf->address, nmp->b_wptr, length);
		}

		/*
		 * The rx_sw_packet MUST be popped off the
		 * RxSwPacketList before either a putnext or freemsg
		 * is done on the mp that has now been created by the
		 * desballoc. If not, it is possible that the free
		 * routine will get called from interrupt context
		 * and try to put this packet on the free list.
		 */
		(p_rx_sw_packet_t)QUEUE_POP_HEAD(&rx_data->recv_list);

		ASSERT(nmp != NULL);
		nmp->b_wptr += length;

		if (rx_data->rx_mblk == NULL) {
			/*
			 *  TCP/UDP checksum offload and
			 *  IP checksum offload
			 */
			if (!(current_desc->status & E1000_RXD_STAT_IXSM)) {
				/*
				 * Check TCP/UDP checksum
				 */
				if ((current_desc->status &
				    E1000_RXD_STAT_TCPCS) &&
				    !(current_desc->errors &
				    E1000_RXD_ERR_TCPE))
					cksumflags |= HCK_FULLCKSUM |
					    HCK_FULLCKSUM_OK;
				/*
				 * Check IP Checksum
				 */
				if ((current_desc->status &
				    E1000_RXD_STAT_IPCS) &&
				    !(current_desc->errors &
				    E1000_RXD_ERR_IPE))
					cksumflags |= HCK_IPV4_HDRCKSUM;
			}
		}

		/*
		 * We need to maintain our packet chain in the global
		 * Adapter structure, because Rx processing can end
		 * with a fragment that has no EOP set.
		 */
		if (rx_data->rx_mblk == NULL) {
			/* Get the head of the message chain */
			rx_data->rx_mblk = nmp;
			rx_data->rx_mblk_tail = nmp;
			rx_data->rx_mblk_len = length;
		} else {	/* Not the first packet */
			/* Continue adding buffers */
			rx_data->rx_mblk_tail->b_cont = nmp;
			rx_data->rx_mblk_tail = nmp;
			rx_data->rx_mblk_len += length;
		}
		ASSERT(rx_data->rx_mblk != NULL);
		ASSERT(rx_data->rx_mblk_tail != NULL);
		ASSERT(rx_data->rx_mblk_tail->b_cont == NULL);

		/*
		 * This mp is now ready to travel upstream, but more
		 * fragments are coming.
		 * We will send the packet upstream as soon as we see
		 * EOP set on it.
		 */
		if (!end_of_packet) {
			/*
			 * Continue on to get the next descriptor;
			 * the tail is advanced at the end.
			 */
			goto rx_next_desc;
		}

rx_end_of_packet:
		if (E1000G_IS_VLAN_PACKET(rx_data->rx_mblk->b_rptr))
			max_size = Adapter->max_frame_size - ETHERFCSL;

		if ((rx_data->rx_mblk_len > max_size) ||
		    (rx_data->rx_mblk_len < min_size)) {
			E1000G_STAT(rx_ring->stat_size_error);
			goto rx_drop;
		}

		/*
		 * Found a packet with EOP;
		 * process the last fragment.
		 */
		if (cksumflags != 0) {
			(void) hcksum_assoc(rx_data->rx_mblk,
			    NULL, NULL, 0, 0, 0, 0, cksumflags, 0);
			cksumflags = 0;
		}

		/*
		 * Count packets that span multiple descriptors
		 */
		E1000G_DEBUG_STAT_COND(rx_ring->stat_multi_desc,
		    (rx_data->rx_mblk->b_cont != NULL));

		/*
		 * Append to list to send upstream
		 */
		if (ret_mp == NULL) {
			ret_mp = ret_nmp = rx_data->rx_mblk;
		} else {
			ret_nmp->b_next = rx_data->rx_mblk;
			ret_nmp = rx_data->rx_mblk;
		}
		ret_nmp->b_next = NULL;
		*tail = ret_nmp;
		chain_sz += length;

		rx_data->rx_mblk = NULL;
		rx_data->rx_mblk_tail = NULL;
		rx_data->rx_mblk_len = 0;

		pkt_count++;

rx_next_desc:
		/*
		 * Zero out the receive descriptor's status.
		 */
		current_desc->status = 0;

		if (current_desc == rx_data->rbd_last)
			rx_data->rbd_next = rx_data->rbd_first;
		else
			rx_data->rbd_next++;

		last_desc = current_desc;
		current_desc = rx_data->rbd_next;

		/*
		 * Put the buffer that we just indicated back
		 * at the end of our list
		 */
		QUEUE_PUSH_TAIL(&rx_data->recv_list,
		    &packet->Link);
	}	/* while loop */

	/* Sync the Rx descriptor DMA buffers */
	(void) ddi_dma_sync(rx_data->rbd_dma_handle,
	    0, 0, DDI_DMA_SYNC_FORDEV);

	/*
	 * Advance the E1000's Receive Queue #0 "Tail Pointer".
	 */
	E1000_WRITE_REG(hw, E1000_RDT(0),
	    (uint32_t)(last_desc - rx_data->rbd_first));

	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
		Adapter->e1000g_state |= E1000G_ERROR;
	}

	Adapter->rx_pkt_cnt = pkt_count;

	return (ret_mp);

rx_drop:
	/*
	 * Zero out the receive descriptor's status.
	 */
	current_desc->status = 0;

	/* Sync the Rx descriptor DMA buffers */
	(void) ddi_dma_sync(rx_data->rbd_dma_handle,
	    0, 0, DDI_DMA_SYNC_FORDEV);

	if (current_desc == rx_data->rbd_last)
		rx_data->rbd_next = rx_data->rbd_first;
	else
		rx_data->rbd_next++;

	last_desc = current_desc;

	(p_rx_sw_packet_t)QUEUE_POP_HEAD(&rx_data->recv_list);

	QUEUE_PUSH_TAIL(&rx_data->recv_list, &packet->Link);
	/*
	 * Reclaim all old buffers already allocated during a
	 * jumbo receive, for the incomplete reception.
	 */
	if (rx_data->rx_mblk != NULL) {
		freemsg(rx_data->rx_mblk);
		rx_data->rx_mblk = NULL;
		rx_data->rx_mblk_tail = NULL;
		rx_data->rx_mblk_len = 0;
	}
	/*
	 * Advance the E1000's Receive Queue #0 "Tail Pointer".
	 */
	E1000_WRITE_REG(hw, E1000_RDT(0),
	    (uint32_t)(last_desc - rx_data->rbd_first));

	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
		Adapter->e1000g_state |= E1000G_ERROR;
	}

	return (ret_mp);
}
Example #28
0
void
crklpd_hold(credklpd_t *crkpd)
{
	atomic_inc_32(&crkpd->crkl_ref);
}
Example #29
0
void weak_add_ref() // nothrow
{
    atomic_inc_32( &weak_count_ );
}
Example #30
0
/*
 * void task_hold(task_t *)
 *
 * Overview
 *   task_hold() is used to take an additional reference to the given task.
 *
 * Return values
 *   None.
 *
 * Caller's context
 *   No restriction on context.
 */
void
task_hold(task_t *tk)
{
	atomic_inc_32(&tk->tk_hold_count);
}