Example #1
int
oce_start_group(oce_group_t *grp, boolean_t alloc_buffer)
{
	struct oce_dev *dev = grp->parent;
	int qidx;
	int max_frame_sz;

	max_frame_sz = dev->mtu + sizeof (struct ether_vlan_header) + VTAG_SIZE;
	/* allocate Rx buffers */
	if (alloc_buffer && !(grp->state & GROUP_INIT)) {
		for (qidx = 0; qidx < grp->num_rings; qidx++) {
			if (oce_rq_init(dev, grp->ring[qidx].rx,
			    dev->rx_ring_size, dev->rq_frag_size,
			    max_frame_sz) != DDI_SUCCESS) {
				goto group_fail;
			}
		}
		grp->state |= GROUP_INIT;
	}

	if (grp->state & GROUP_MAC_STARTED) {
		if (oce_create_group(dev, grp, MBX_ASYNC_MQ) != DDI_SUCCESS) {
			goto group_fail;
		}
		oce_log(dev, CE_NOTE, MOD_CONFIG,
		    "group %d started", grp->grp_num);
	}
	return (DDI_SUCCESS);

group_fail:
	oce_log(dev, CE_WARN, MOD_CONFIG,
	    "Failed to setup group %x", grp->grp_num);
	return (DDI_FAILURE);
}
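
The group lifecycle above is tracked with bit flags in grp->state. The flag values are not shown in these examples; a minimal sketch, assuming one bit per lifecycle stage (the numeric values here are hypothetical):

/* hypothetical values -- the driver's header defines the real bits */
#define	GROUP_INIT		0x01	/* Rx buffers allocated via oce_rq_init() */
#define	GROUP_MAC_STARTED	0x02	/* group started by the GLDv3 MAC layer */
#define	GROUP_SUSPEND		0x04	/* group suspended, e.g. during IRM */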
Example #2
/*
 * function to read the PCI Bus, Device, and function numbers for the
 * device instance.
 *
 * dev - handle to device private data
 */
int
oce_get_bdf(struct oce_dev *dev)
{
	pci_regspec_t *pci_rp;
	uint32_t length;
	int rc;

	/* Get "reg" property */
	rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dev->dip,
	    0, "reg", (int **)&pci_rp, (uint_t *)&length);

	if ((rc != DDI_SUCCESS) ||
	    (length < (sizeof (pci_regspec_t) / sizeof (int)))) {
		oce_log(dev, CE_WARN, MOD_CONFIG,
		    "Failed to read \"reg\" property, Status = 0x%x", rc);
		if (rc == DDI_SUCCESS)
			ddi_prop_free(pci_rp);
		return (DDI_FAILURE);
	}

	dev->pci_bus = PCI_REG_BUS_G(pci_rp->pci_phys_hi);
	dev->pci_device = PCI_REG_DEV_G(pci_rp->pci_phys_hi);
	dev->pci_function = PCI_REG_FUNC_G(pci_rp->pci_phys_hi);

	oce_log(dev, CE_NOTE, MOD_CONFIG,
	    "\"reg\" property num=%d, Bus=%d, Device=%d, Function=%d",
	    length, dev->pci_bus, dev->pci_device, dev->pci_function);

	/* Free the memory allocated by ddi_prop_lookup_int_array() */
	ddi_prop_free(pci_rp);
	return (DDI_SUCCESS);
}
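
PCI_REG_BUS_G, PCI_REG_DEV_G, and PCI_REG_FUNC_G extract the bus, device, and function fields from the first word (pci_phys_hi) of a "reg" property entry. For reference, a sketch that follows the standard PCI binding layout of phys.hi (the authoritative definitions live in <sys/pci.h>):

/* phys.hi: npt000ss bbbbbbbb dddddfff rrrrrrrr */
#define	PCI_REG_BUS_G(x)	(((x) >> 16) & 0xff)	/* bus number */
#define	PCI_REG_DEV_G(x)	(((x) >> 11) & 0x1f)	/* device number */
#define	PCI_REG_FUNC_G(x)	(((x) >> 8) & 0x7)	/* function number */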
Example #3
/* Internally resume the rings on group basis (Eg IRM) */
int
oce_resume_group_rings(oce_group_t *grp)
{
	struct oce_dev *dev = grp->parent;
	int qidx, pmac_idx, ret = DDI_SUCCESS;

	if (grp->state & GROUP_MAC_STARTED) {
		if (grp->grp_num == 0) {
			if (dev->num_mca > OCE_MAX_MCA) {
				ret = oce_set_multicast_table(dev, dev->if_id,
				    &dev->multi_cast[0], OCE_MAX_MCA, B_TRUE,
				    MBX_BOOTSTRAP);
			} else {
				ret = oce_set_multicast_table(dev, dev->if_id,
				    &dev->multi_cast[0], dev->num_mca, B_FALSE,
				    MBX_BOOTSTRAP);
			}
			if (ret != 0) {
				oce_log(dev, CE_WARN, MOD_CONFIG,
				    "set mcast failed 0x%x", ret);
				return (ret);
			}
		}

		/* Add the group based MACs */
		for (pmac_idx = 0; pmac_idx < grp->num_pmac; pmac_idx++) {
			if (grp->pmac_ids[pmac_idx] != INVALID_PMAC_ID) {
				ret = oce_add_mac(dev, grp->if_id,
				    (uint8_t *)&grp->mac_addr[pmac_idx],
				    &grp->pmac_ids[pmac_idx], MBX_BOOTSTRAP);
				if (ret != DDI_SUCCESS) {
					oce_log(dev, CE_WARN, MOD_CONFIG,
					    "MAC addition failed grp = %p, "
					    "idx = %d, ret = %x",
					    (void *)grp, pmac_idx, ret);
					return (ret);
				}
			}
		}

		for (qidx = 0; qidx < grp->num_rings; qidx++) {
			mac_ring_intr_set(grp->ring[qidx].rx->handle,
			    dev->htable[grp->ring[qidx].rx->cq->eq->idx]);
			(void) oce_start_rq(grp->ring[qidx].rx);
		}
		grp->state &= ~GROUP_SUSPEND;
	}
	return (ret);
}
Example #4
int
oce_create_nw_interface(struct oce_dev *dev)
{
	int ret;
	uint32_t capab_flags = OCE_CAPAB_FLAGS;
	uint32_t capab_en_flags = OCE_CAPAB_ENABLE;

	if (dev->rss_enable) {
		capab_flags |= MBX_RX_IFACE_FLAGS_RSS;
		capab_en_flags |= MBX_RX_IFACE_FLAGS_RSS;
	}

	/* create an interface for the device without mac */
	ret = oce_if_create(dev, capab_flags, capab_en_flags,
	    0, &dev->mac_addr[0], (uint32_t *)&dev->if_id);
	if (ret != 0) {
		oce_log(dev, CE_WARN, MOD_CONFIG,
		    "Interface creation failed: 0x%x", ret);
		return (ret);
	}
	atomic_inc_32(&dev->nifs);

	dev->if_cap_flags = capab_en_flags;

	/* Enable VLAN Promisc on HW */
	ret = oce_config_vlan(dev, (uint8_t)dev->if_id, NULL, 0,
	    B_TRUE, B_TRUE);
	if (ret != 0) {
		oce_log(dev, CE_WARN, MOD_CONFIG,
		    "Config vlan failed: %d", ret);
		oce_delete_nw_interface(dev);
		return (ret);
	}

	/* set default flow control */
	ret = oce_set_flow_control(dev, dev->flow_control);
	if (ret != 0) {
		oce_log(dev, CE_NOTE, MOD_CONFIG,
		    "Set flow control failed: %d", ret);
	}
	ret = oce_set_promiscuous(dev, dev->promisc);

	if (ret != 0) {
		oce_log(dev, CE_NOTE, MOD_CONFIG,
		    "Set Promisc failed: %d", ret);
	}

	return (0);
}
Example #5
/*
 * function to copy the packet or dma map on the fly depending on size
 *
 * wq - pointer to WQ
 * wqed - Pointer to WQE descriptor
 * mp - Pointer to packet chain
 *
 * return 0=>success, error code otherwise
 */
static int
oce_map_wqe(struct oce_wq *wq, oce_wqe_desc_t *wqed, mblk_t *mp,
    uint32_t pkt_len)
{
	ddi_dma_cookie_t cookie;
	oce_wq_mdesc_t *wqmd;
	uint32_t ncookies;
	int ret;
	struct oce_dev *dev = wq->parent;

	wqmd = oce_wqm_alloc(wq);
	if (wqmd == NULL) {
		oce_log(dev, CE_WARN, MOD_TX, "%s",
		    "wqm pool empty");
		return (ENOMEM);
	}

	ret = ddi_dma_addr_bind_handle(wqmd->dma_handle,
	    (struct as *)0, (caddr_t)mp->b_rptr,
	    pkt_len, DDI_DMA_WRITE | DDI_DMA_STREAMING,
	    DDI_DMA_DONTWAIT, NULL, &cookie, &ncookies);
	if (ret != DDI_DMA_MAPPED) {
		oce_log(dev, CE_WARN, MOD_TX, "MAP FAILED %d",
		    ret);
		/* free the last one */
		oce_wqm_free(wq, wqmd);
		return (ENOMEM);
	}
	do {
		wqed->frag[wqed->frag_idx].u0.s.frag_pa_hi =
		    ADDR_HI(cookie.dmac_laddress);
		wqed->frag[wqed->frag_idx].u0.s.frag_pa_lo =
		    ADDR_LO(cookie.dmac_laddress);
		wqed->frag[wqed->frag_idx].u0.s.frag_len =
		    (uint32_t)cookie.dmac_size;
		wqed->frag_cnt++;
		wqed->frag_idx++;
		if (--ncookies > 0)
			ddi_dma_nextcookie(wqmd->dma_handle,
			    &cookie);
		else
			break;
	} while (ncookies > 0);

	wqed->hdesc[wqed->nhdl].hdl = (void *)wqmd;
	wqed->hdesc[wqed->nhdl].type = MAPPED_WQE;
	wqed->nhdl++;
	return (0);
} /* oce_map_wqe */
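
ADDR_HI and ADDR_LO split the 64-bit cookie.dmac_laddress into the two 32-bit halves the fragment descriptor carries. A minimal sketch of what these macros presumably expand to:

/* assumed helpers: upper and lower 32 bits of a 64-bit DMA address */
#define	ADDR_HI(addr)	((uint32_t)(((uint64_t)(addr)) >> 32))
#define	ADDR_LO(addr)	((uint32_t)((uint64_t)(addr) & 0xffffffffULL))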
Example #6
/*
 * function to create a ring buffer
 *
 * dev - software handle to the device
 * num_items - number of items in the ring
 * item_size - size of an individual item in the ring
 * flags - DDI_DMA_CONSISTENT/DDI_DMA_STREAMING for ring memory
 *
 * return pointer to a ring_buffer structure, NULL on failure
 */
oce_ring_buffer_t *
create_ring_buffer(struct oce_dev *dev,
    uint32_t num_items, uint32_t item_size, uint32_t flags)
{
	oce_ring_buffer_t *ring;
	uint32_t size;

	/* allocate the ring buffer */
	ring = kmem_zalloc(sizeof (oce_ring_buffer_t), KM_NOSLEEP);
	if (ring == NULL) {
		return (NULL);
	}

	/* get the dbuf defining the ring */
	size = num_items * item_size;
	ring->dbuf = oce_alloc_dma_buffer(dev, size, NULL, flags);
	if (ring->dbuf  == NULL) {
		oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
		    "Ring buffer allocation failed");
		goto dbuf_fail;
	}

	/* fill the rest of the ring */
	ring->num_items = num_items;
	ring->item_size = item_size;
	ring->num_used  = 0;
	return (ring);

dbuf_fail:
	kmem_free(ring, sizeof (oce_ring_buffer_t));
	return (NULL);
} /* create_ring_buffer */
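
A hedged usage sketch for the allocator above: creating a ring of receive completion entries and releasing it again. The item count and the destroy_ring_buffer() counterpart are illustrative assumptions; the counterpart would free ring->dbuf and then the structure itself, mirroring the dbuf_fail path.

	oce_ring_buffer_t *ring;

	/* 1024 entries is an illustrative size, not a driver constant */
	ring = create_ring_buffer(dev, 1024,
	    sizeof (struct oce_nic_rx_cqe), DDI_DMA_CONSISTENT);
	if (ring == NULL)
		return (DDI_FAILURE);
	/* ... use ring->dbuf as the queue backing store ... */
	destroy_ring_buffer(dev, ring);		/* assumed counterpart */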
Example #7
/*
 * function to process a single packet
 *
 * dev - software handle to the device
 * rq - pointer to the RQ to charge
 * cqe - Pointer to Completion Q entry
 *
 * return mblk pointer =>  success, NULL  => error
 */
static inline mblk_t *
oce_rx(struct oce_dev *dev, struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
{
	mblk_t *mp;
	int pkt_len;
	int32_t frag_cnt = 0;
	mblk_t **mblk_tail;
	mblk_t	*mblk_head;
	int frag_size;
	oce_rq_bdesc_t *rqbd;
	uint16_t cur_index;
	oce_ring_buffer_t *ring;
	int i;

	frag_cnt  = cqe->u0.s.num_fragments & 0x7;
	mblk_head = NULL;
	mblk_tail = &mblk_head;

	ring = rq->ring;
	cur_index = ring->cidx;

	/* Get the relevant Queue pointers */
	pkt_len = cqe->u0.s.pkt_size;
	for (i = 0; i < frag_cnt; i++) {
		rqbd = rq->shadow_ring[cur_index];
		if (rqbd->mp == NULL) {
			rqbd->mp = desballoc((uchar_t *)rqbd->rqb->base,
			    rqbd->rqb->size, 0, &rqbd->fr_rtn);
			if (rqbd->mp == NULL) {
				return (NULL);
			}

			rqbd->mp->b_rptr =
			    (uchar_t *)rqbd->rqb->base + OCE_RQE_BUF_HEADROOM;
		}

		mp = rqbd->mp;
		frag_size  = (pkt_len > rq->cfg.frag_size) ?
		    rq->cfg.frag_size : pkt_len;
		mp->b_wptr = mp->b_rptr + frag_size;
		pkt_len   -= frag_size;
		mp->b_next = mp->b_cont = NULL;
		/* Chain the message mblks */
		*mblk_tail = mp;
		mblk_tail = &mp->b_cont;
		(void) DBUF_SYNC(rqbd->rqb, DDI_DMA_SYNC_FORCPU);
		cur_index = GET_Q_NEXT(cur_index, 1, ring->num_items);
	}

	if (mblk_head == NULL) {
		oce_log(dev, CE_WARN, MOD_RX, "%s", "oce_rx:no frags?");
		return (NULL);
	}

	/* replace the buffer with new ones */
	(void) oce_rq_charge(rq, frag_cnt, B_FALSE);
	atomic_add_32(&rq->pending, frag_cnt);
	return (mblk_head);
} /* oce_rx */
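
GET_Q_NEXT advances a ring index with wrap-around; a sketch consistent with its use above, assuming a plain modulo step:

/* assumed ring-index helper: advance idx by step, wrapping at size */
#define	GET_Q_NEXT(idx, step, size)	(((idx) + (step)) % (size))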
Example #8
int
oce_group_addmac(void *group_handle, const uint8_t *mac)
{
	oce_group_t *grp = group_handle;
	struct oce_dev *dev;
	int pmac_index = 0;
	int ret;

	dev = grp->parent;

	oce_log(dev, CE_NOTE, MOD_CONFIG,
	    "oce_group_addmac , grp_type = %d, grp_num = %d, "
	    "mac = %x:%x:%x:%x:%x:%x",
	    grp->grp_type, grp->grp_num, mac[0], mac[1], mac[2],
	    mac[3], mac[4], mac[5]);

	while ((pmac_index < OCE_MAX_PMAC_PER_GRP) &&
	    (grp->pmac_ids[pmac_index] != INVALID_PMAC_ID)) {
		pmac_index++;
	}
	if ((pmac_index >= OCE_MAX_PMAC_PER_GRP) ||
	    (grp->num_pmac >= OCE_MAX_PMAC_PER_GRP) ||
	    (dev->num_pmac >= OCE_MAX_SMAC_PER_DEV)) {
		oce_log(dev, CE_NOTE, MOD_CONFIG,
		    "PMAC exceeding limits, num_pmac=%d, num_pmac=%d, index=%d",
		    grp->num_pmac, dev->num_pmac, pmac_index);
		return (ENOSPC);
	}

	/* Add the New MAC */
	ret = oce_add_mac(dev, grp->if_id, mac, &grp->pmac_ids[pmac_index],
	    MBX_BOOTSTRAP);
	if (ret != DDI_SUCCESS) {
		oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
		    "MAC addition failed ");
		return (EIO);
	}

	grp->num_pmac++;
	dev->num_pmac++;
	bcopy(mac, &grp->mac_addr[pmac_index], ETHERADDRL);
	return (0);
}
Example #9
int
oce_setup_adapter(struct oce_dev *dev)
{
	int ret;
	char itbl[OCE_ITBL_SIZE];
	char hkey[OCE_HKEY_SIZE];

	/* disable the interrupts here and enable in start */
	oce_chip_di(dev);

	ret = oce_create_nw_interface(dev);
	if (ret != DDI_SUCCESS) {
		return (DDI_FAILURE);
	}
	ret = oce_create_queues(dev);
	if (ret != DDI_SUCCESS) {
		oce_delete_nw_interface(dev);
		return (DDI_FAILURE);
	}
	if (dev->rss_enable) {
		(void) oce_create_itbl(dev, itbl);
		(void) oce_gen_hkey(hkey, OCE_HKEY_SIZE);
		ret = oce_config_rss(dev, dev->if_id, hkey, itbl, OCE_ITBL_SIZE,
		    OCE_DEFAULT_RSS_TYPE, B_TRUE);
		if (ret != DDI_SUCCESS) {
			oce_log(dev, CE_NOTE, MOD_CONFIG, "%s",
			    "Failed to Configure RSS");
			oce_delete_queues(dev);
			oce_delete_nw_interface(dev);
			return (ret);
		}
	}
	ret = oce_setup_handlers(dev);
	if (ret != DDI_SUCCESS) {
		oce_log(dev, CE_NOTE, MOD_CONFIG, "%s",
		    "Failed to Setup handlers");
		oce_delete_queues(dev);
		oce_delete_nw_interface(dev);
		return (ret);
	}
	return (DDI_SUCCESS);
}
Example #10
int
oce_group_remmac(void *group_handle, const uint8_t *mac)
{
	oce_group_t *grp = group_handle;
	struct oce_dev *dev;
	int ret;
	int pmac_index = 0;

	dev = grp->parent;

	while (pmac_index < OCE_MAX_PMAC_PER_GRP) {
		if (bcmp(mac, &grp->mac_addr[pmac_index], ETHERADDRL) == 0) {
			break;
		}
		pmac_index++;
	}

	if (pmac_index >= OCE_MAX_PMAC_PER_GRP) {
		oce_log(dev, CE_WARN, MOD_CONFIG,
		    "Could not find the MAC: %x:%x:%x:%x:%x:%x",
		    mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
		return (EINVAL);
	}

	/* Delete the matched MAC */
	ret = oce_del_mac(dev, grp->if_id, &grp->pmac_ids[pmac_index],
	    MBX_BOOTSTRAP);
	if (ret != DDI_SUCCESS) {
		oce_log(dev, CE_WARN, MOD_CONFIG,
		    "Failed to delete MAC: %x:%x:%x:%x:%x:%x, ret=0x%x",
		    mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], ret);
		return (EIO);
	}

	grp->num_pmac--;
	dev->num_pmac--;
	grp->pmac_ids[pmac_index] = INVALID_PMAC_ID;
	bzero(&grp->mac_addr[pmac_index], ETHERADDRL);
	return (0);
}
Example #11
void
oce_stop_group(oce_group_t *grp, boolean_t free_buffer)
{
	struct oce_dev *dev = grp->parent;
	struct oce_rq *rq;
	int qidx;
	int pending = 0;

	if (grp->state & GROUP_MAC_STARTED) {
		oce_delete_group(dev, grp);
		/* wait for receive buffers to be freed by stack */
		while (oce_check_pending(grp) != 0) {
			oce_log(dev, CE_NOTE, MOD_CONFIG, "%s",
			    "Wait if buffers are pending with stack\n");
			if (pending++ >= 2) {
				break;
			}
		}
	}

	/* free Rx buffers */
	if (free_buffer && (grp->state & GROUP_INIT)) {
		for (qidx = 0; qidx < grp->num_rings; qidx++) {
			rq = grp->ring[qidx].rx;
			mutex_enter(&rq->rq_fini_lock);
			if (rq->pending == 0) {
				if (rq->qstate == QDELETED) {
					oce_rq_fini(dev, rq);
				}
			} else {
				rq->qstate = QFINI_PENDING;
			}
			mutex_exit(&rq->rq_fini_lock);
		}
		grp->state &= ~GROUP_INIT;
	}
	oce_log(dev, CE_NOTE, MOD_CONFIG, "group %d stopped", grp->grp_num);
}
Example #12
/*
 * function to trigger a POST on the device
 *
 * dev - software handle to the device
 *
 * return DDI_SUCCESS => success, DDI_FAILURE => failure
 */
int
oce_POST(struct oce_dev *dev)
{
	mpu_ep_semaphore_t post_status;
	clock_t tmo;
	clock_t earlier = ddi_get_lbolt();

	/* read semaphore CSR */
	post_status.dw0 = OCE_CSR_READ32(dev, MPU_EP_SEMAPHORE);
	if (oce_fm_check_acc_handle(dev, dev->csr_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(dev->dip, DDI_SERVICE_DEGRADED);
		return (DDI_FAILURE);
	}
	/* if POST has not progressed past host-ready, trigger it via chip reset */
	if (post_status.bits.stage <= POST_STAGE_AWAITING_HOST_RDY) {
		post_status.bits.stage = POST_STAGE_CHIP_RESET;
		OCE_CSR_WRITE32(dev, MPU_EP_SEMAPHORE, post_status.dw0);
		if (oce_fm_check_acc_handle(dev, dev->csr_handle) !=
		    DDI_FM_OK) {
			ddi_fm_service_impact(dev->dip, DDI_SERVICE_DEGRADED);
			return (DDI_FAILURE);
		}
	}

	/* wait for FW ready */
	tmo = drv_usectohz(60000000); /* 1.0min */
	for (;;) {
		if ((ddi_get_lbolt() - earlier) > tmo) {
			tmo = 0;
			break;
		}

		post_status.dw0 = OCE_CSR_READ32(dev, MPU_EP_SEMAPHORE);
		if (oce_fm_check_acc_handle(dev, dev->csr_handle) !=
		    DDI_FM_OK) {
			ddi_fm_service_impact(dev->dip, DDI_SERVICE_DEGRADED);
			return (DDI_FAILURE);
		}
		if (post_status.bits.error) {
			oce_log(dev, CE_WARN, MOD_CONFIG,
			    "0x%x POST ERROR!!", post_status.dw0);
			return (DDI_FAILURE);
		}
		if (post_status.bits.stage == POST_STAGE_ARMFW_READY)
			return (DDI_SUCCESS);

		drv_usecwait(100);
	}
	return (DDI_FAILURE);
} /* oce_POST */
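
The loop above is the driver's standard pattern for a bounded hardware wait: record the starting lbolt, convert the timeout to ticks with drv_usectohz(), poll, and spin briefly with drv_usecwait() between reads. Reduced to a skeleton (hw_ready() is an illustrative predicate, not a driver function):

	clock_t tmo = drv_usectohz(60000000);	/* 60 s in ticks */
	clock_t earlier = ddi_get_lbolt();

	for (;;) {
		if ((ddi_get_lbolt() - earlier) > tmo)
			return (DDI_FAILURE);	/* timed out */
		if (hw_ready())			/* illustrative predicate */
			return (DDI_SUCCESS);
		drv_usecwait(100);		/* back off 100 us */
	}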
Example #13
/*
 * function to copy the packet to preallocated Tx buffer
 *
 * wq - pointer to WQ
 * wqed - Pointer to WQE descriptor
 * mp - Pointer to packet chain
 * pkt_len - Size of the packet
 *
 * return 0=>success, error code otherwise
 */
static int
oce_bcopy_wqe(struct oce_wq *wq, oce_wqe_desc_t *wqed, mblk_t *mp,
    uint32_t pkt_len)
{
	oce_wq_bdesc_t *wqbd;
	caddr_t buf_va;
	struct oce_dev *dev = wq->parent;
	int len = 0;

	wqbd = oce_wqb_alloc(wq);
	if (wqbd == NULL) {
		atomic_inc_32(&dev->tx_noxmtbuf);
		oce_log(dev, CE_WARN, MOD_TX, "%s",
		    "wqb pool empty");
		return (ENOMEM);
	}

	/* create a fragment wqe for the packet */
	wqed->frag[wqed->frag_idx].u0.s.frag_pa_hi = wqbd->frag_addr.dw.addr_hi;
	wqed->frag[wqed->frag_idx].u0.s.frag_pa_lo = wqbd->frag_addr.dw.addr_lo;
	buf_va = DBUF_VA(wqbd->wqb);

	/* copy pkt into buffer */
	for (len = 0; mp != NULL && len < pkt_len; mp = mp->b_cont) {
		bcopy(mp->b_rptr, buf_va, MBLKL(mp));
		buf_va += MBLKL(mp);
		len += MBLKL(mp);
	}

	(void) ddi_dma_sync(DBUF_DHDL(wqbd->wqb), 0, pkt_len,
	    DDI_DMA_SYNC_FORDEV);

	if (oce_fm_check_dma_handle(dev, DBUF_DHDL(wqbd->wqb))) {
		ddi_fm_service_impact(dev->dip, DDI_SERVICE_DEGRADED);
		/* Free the buffer */
		oce_wqb_free(wq, wqbd);
		return (EIO);
	}
	wqed->frag[wqed->frag_idx].u0.s.frag_len   =  pkt_len;
	wqed->hdesc[wqed->nhdl].hdl = (void *)(wqbd);
	wqed->hdesc[wqed->nhdl].type = COPY_WQE;
	wqed->frag_cnt++;
	wqed->frag_idx++;
	wqed->nhdl++;
	return (0);
} /* oce_bcopy_wqe */
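
Examples #5 and #13 are the two halves of the transmit path that the comment in Example #5 alludes to: small packets are copied into a preallocated buffer, larger ones are DMA-mapped in place. A hedged sketch of the dispatch a caller might perform (the tx_bcopy_limit threshold field is an assumption):

	/* assumed dispatch in the send path */
	if (pkt_len < dev->tx_bcopy_limit)
		ret = oce_bcopy_wqe(wq, wqed, mp, pkt_len);
	else
		ret = oce_map_wqe(wq, wqed, mp, pkt_len);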
Example #14
void
oce_unsetup_adapter(struct oce_dev *dev)
{
	oce_remove_handler(dev);
	if (dev->rss_enable) {
		char itbl[OCE_ITBL_SIZE] = {0};
		char hkey[OCE_HKEY_SIZE] = {0};
		int ret = 0;

		ret = oce_config_rss(dev, dev->if_id, hkey, itbl, OCE_ITBL_SIZE,
		    RSS_ENABLE_NONE, B_TRUE);

		if (ret != DDI_SUCCESS) {
			oce_log(dev, CE_NOTE, MOD_CONFIG, "%s",
			    "Failed to Disable RSS");
		}
	}
	oce_delete_queues(dev);
	oce_delete_nw_interface(dev);
}
Example #15
/*
 * function to do a soft reset on the device
 *
 * dev - software handle to the device
 *
 * return DDI_SUCCESS => success, DDI_FAILURE => failure
 */
int
oce_pci_soft_reset(struct oce_dev *dev)
{
	pcicfg_soft_reset_t soft_rst;
	clock_t tmo;
	clock_t earlier = ddi_get_lbolt();

	ASSERT(dev != NULL);

	/* issue soft reset */
	soft_rst.dw0 = OCE_CFG_READ32(dev, PCICFG_SOFT_RESET);
	soft_rst.bits.soft_reset = 0x01;
	OCE_CFG_WRITE32(dev, PCICFG_SOFT_RESET, soft_rst.dw0);

	/* wait till soft reset bit deasserts */
	tmo = drv_usectohz(60000000); /* 1.0min */
	do {
		if ((ddi_get_lbolt() - earlier) > tmo) {
			tmo = 0;
			break;
		}

		soft_rst.dw0 = OCE_CFG_READ32(dev, PCICFG_SOFT_RESET);
		if (soft_rst.bits.soft_reset)
			drv_usecwait(100);
	} while (soft_rst.bits.soft_reset);

	if (soft_rst.bits.soft_reset) {
		oce_log(dev, CE_WARN, MOD_CONFIG,
		    "0x%x soft_reset bit still asserted. Reset failed",
		    soft_rst.dw0);
		return (DDI_FAILURE);
	}

	return (oce_POST(dev));
} /* oce_pci_soft_reset */
Example #16
/*
 * function to setup the kstat_t structure for the device and install it
 *
 * dev - software handle to the device
 *
 * return DDI_SUCCESS => success, failure otherwise
 */
int
oce_stat_init(struct oce_dev *dev)
{
	struct oce_stat *stats;
	uint32_t num_stats = sizeof (struct oce_stat) /
	    sizeof (kstat_named_t);

	/* allocate the kstat */
	dev->oce_kstats = kstat_create(OCE_MOD_NAME, dev->dev_id, "stats",
	    "net", KSTAT_TYPE_NAMED,
	    num_stats, 0);
	if (dev->oce_kstats == NULL) {
		oce_log(dev, CE_NOTE, MOD_CONFIG, "%s",
		    "kstat creation failed");
		return (DDI_FAILURE);
	}

	/* allocate the device copy of the stats */
	dev->stats_dbuf = oce_alloc_dma_buffer(dev,
	    sizeof (struct mbx_get_nic_stats),
	    NULL, DDI_DMA_CONSISTENT);
	if (dev->stats_dbuf == NULL) {
		oce_log(dev, CE_NOTE, MOD_CONFIG, "%s",
		    "Could not allocate stats_dbuf");
		kstat_delete(dev->oce_kstats);
		return (DDI_FAILURE);
	}
	dev->hw_stats = (struct mbx_get_nic_stats *)DBUF_VA(dev->stats_dbuf);

	/* initialize the counters */
	stats = (struct oce_stat *)dev->oce_kstats->ks_data;
	kstat_named_init(&stats->rx_bytes_hi, "rx bytes msd", KSTAT_DATA_ULONG);
	kstat_named_init(&stats->rx_bytes_lo, "rx bytes lsd", KSTAT_DATA_ULONG);

	kstat_named_init(&stats->rx_frames, "rx frames", KSTAT_DATA_ULONG);
	kstat_named_init(&stats->rx_errors, "rx errors", KSTAT_DATA_ULONG);
	kstat_named_init(&stats->rx_drops, "rx drops", KSTAT_DATA_ULONG);

	kstat_named_init(&stats->tx_bytes_hi, "tx bytes msd", KSTAT_DATA_ULONG);
	kstat_named_init(&stats->tx_bytes_lo, "tx bytes lsd", KSTAT_DATA_ULONG);

	kstat_named_init(&stats->tx_frames, "tx frames", KSTAT_DATA_ULONG);
	kstat_named_init(&stats->tx_errors, "tx errors", KSTAT_DATA_ULONG);

	kstat_named_init(&stats->rx_unicast_frames,
	    "rx unicast frames", KSTAT_DATA_ULONG);
	kstat_named_init(&stats->rx_multicast_frames,
	    "rx multicast frames", KSTAT_DATA_ULONG);
	kstat_named_init(&stats->rx_broadcast_frames,
	    "rx broadcast frames", KSTAT_DATA_ULONG);
	kstat_named_init(&stats->rx_crc_errors,
	    "rx crc errors", KSTAT_DATA_ULONG);

	kstat_named_init(&stats->rx_alignment_symbol_errors,
	    "rx alignment symbol errors", KSTAT_DATA_ULONG);
	kstat_named_init(&stats->rx_in_range_errors,
	    "rx in range errors", KSTAT_DATA_ULONG);
	kstat_named_init(&stats->rx_out_range_errors,
	    "rx out range errors", KSTAT_DATA_ULONG);
	kstat_named_init(&stats->rx_frame_too_long,
	    "rx frame too long", KSTAT_DATA_ULONG);
	kstat_named_init(&stats->rx_address_match_errors,
	    "rx address match errors", KSTAT_DATA_ULONG);

	kstat_named_init(&stats->rx_pause_frames,
	    "rx pause frames", KSTAT_DATA_ULONG);
	kstat_named_init(&stats->rx_control_frames,
	    "rx control frames", KSTAT_DATA_ULONG);
	kstat_named_init(&stats->rx_ip_checksum_errs,
	    "rx ip checksum errors", KSTAT_DATA_ULONG);
	kstat_named_init(&stats->rx_tcp_checksum_errs,
	    "rx tcp checksum errors", KSTAT_DATA_ULONG);
	kstat_named_init(&stats->rx_udp_checksum_errs,
	    "rx udp checksum errors", KSTAT_DATA_ULONG);
	kstat_named_init(&stats->rx_fifo_overflow,
	    "rx fifo overflow", KSTAT_DATA_ULONG);
	kstat_named_init(&stats->rx_input_fifo_overflow,
	    "rx input fifo overflow", KSTAT_DATA_ULONG);

	kstat_named_init(&stats->tx_unicast_frames,
	    "tx unicast frames", KSTAT_DATA_ULONG);
	kstat_named_init(&stats->tx_multicast_frames,
	    "tx multicast frames", KSTAT_DATA_ULONG);
	kstat_named_init(&stats->tx_broadcast_frames,
	    "tx broadcast frames", KSTAT_DATA_ULONG);
	kstat_named_init(&stats->tx_pause_frames,
	    "tx pause frames", KSTAT_DATA_ULONG);
	kstat_named_init(&stats->tx_control_frames,
	    "tx control frames", KSTAT_DATA_ULONG);


	kstat_named_init(&stats->rx_drops_no_pbuf,
	    "rx_drops_no_pbuf", KSTAT_DATA_ULONG);
	kstat_named_init(&stats->rx_drops_no_txpb,
	    "rx_drops_no_txpb", KSTAT_DATA_ULONG);
	kstat_named_init(&stats->rx_drops_no_erx_descr,
	    "rx_drops_no_erx_descr", KSTAT_DATA_ULONG);
	kstat_named_init(&stats->rx_drops_no_tpre_descr,
	    "rx_drops_no_tpre_descr", KSTAT_DATA_ULONG);
	kstat_named_init(&stats->rx_drops_too_many_frags,
	    "rx_drops_too_many_frags", KSTAT_DATA_ULONG);
	kstat_named_init(&stats->rx_drops_invalid_ring,
	    "rx_drops_invalid_ring", KSTAT_DATA_ULONG);
	kstat_named_init(&stats->rx_drops_mtu,
	    "rx_drops_mtu", KSTAT_DATA_ULONG);

	kstat_named_init(&stats->rx_dropped_too_small,
	    "rx_dropped_too_small", KSTAT_DATA_ULONG);
	kstat_named_init(&stats->rx_dropped_too_short,
	    "rx_dropped_too_short", KSTAT_DATA_ULONG);
	kstat_named_init(&stats->rx_dropped_header_too_small,
	    "rx_dropped_header_too_small", KSTAT_DATA_ULONG);
	kstat_named_init(&stats->rx_dropped_tcp_length,
	    "rx_dropped_tcp_length", KSTAT_DATA_ULONG);
	kstat_named_init(&stats->rx_dropped_runt,
	    "rx_dropped_runt", KSTAT_DATA_ULONG);

	kstat_named_init(&stats->rx_drops_no_fragments,
	    "rx_drop_no_frag", KSTAT_DATA_ULONG);


	dev->oce_kstats->ks_update = oce_update_stats;
	dev->oce_kstats->ks_private = (void *)dev;
	kstat_install(dev->oce_kstats);

	return (DDI_SUCCESS);
} /* oce_stat_init */
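
A matching teardown sketch, undoing oce_stat_init() in reverse order; the names oce_stat_fini and oce_free_dma_buffer are assumptions here:

void
oce_stat_fini(struct oce_dev *dev)
{
	oce_free_dma_buffer(dev, dev->stats_dbuf);	/* assumed counterpart */
	dev->hw_stats = NULL;
	dev->stats_dbuf = NULL;
	kstat_delete(dev->oce_kstats);
	dev->oce_kstats = NULL;
}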
Example #17
/*
 * function to allocate a dma buffer for mapping memory va-pa
 *
 * dev - software handle to device
 * size - size of the memory to map
 * flags - DDI_DMA_CONSISTENT/DDI_DMA_STREAMING
 *
 * return pointer to a oce_dma_buf_t structure handling the map
 *      NULL => failure
 */
oce_dma_buf_t *
oce_alloc_dma_buffer(struct oce_dev *dev,
    uint32_t size, ddi_dma_attr_t *dma_attr, uint32_t flags)
{
	oce_dma_buf_t  *dbuf;
	ddi_dma_cookie_t cookie;
	uint32_t count;
	size_t actual_len;
	int ret = 0;

	ASSERT(size > 0);
	/* if NULL use default */
	if (dma_attr == NULL) {
		dma_attr = &oce_dma_buf_attr;
	}

	dbuf = kmem_zalloc(sizeof (oce_dma_buf_t), KM_NOSLEEP);
	if (dbuf == NULL) {
		return (NULL);
	}

	/* allocate dma handle */
	ret = ddi_dma_alloc_handle(dev->dip, dma_attr,
	    DDI_DMA_DONTWAIT, NULL, &dbuf->dma_handle);
	if (ret != DDI_SUCCESS) {
		oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
		    "Failed to allocate DMA handle");
		goto handle_fail;
	}
	/* allocate the DMA-able memory */
	ret = ddi_dma_mem_alloc(dbuf->dma_handle, size, &oce_dma_buf_accattr,
	    flags, DDI_DMA_DONTWAIT, NULL, &dbuf->base,
	    &actual_len, &dbuf->acc_handle);
	if (ret != DDI_SUCCESS) {
		oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
		    "Failed to allocate DMA memory");
		goto alloc_fail;
	}

	/* bind handle */
	ret = ddi_dma_addr_bind_handle(dbuf->dma_handle,
	    (struct as *)0, dbuf->base, actual_len,
	    DDI_DMA_RDWR | flags,
	    DDI_DMA_DONTWAIT, NULL, &cookie, &count);
	if (ret != DDI_DMA_MAPPED) {
		oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
		    "Failed to bind dma handle");
		goto bind_fail;
	}
	bzero(dbuf->base, actual_len);
	dbuf->addr = cookie.dmac_laddress;
	dbuf->size = actual_len;
	/* usable length */
	dbuf->len  = size;
	dbuf->num_pages = OCE_NUM_PAGES(size);
	return (dbuf);

bind_fail:
	ddi_dma_mem_free(&dbuf->acc_handle);
alloc_fail:
	ddi_dma_free_handle(&dbuf->dma_handle);
handle_fail:
	kmem_free(dbuf, sizeof (oce_dma_buf_t));
	return (NULL);
} /* oce_alloc_dma_buffer */
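
The release path simply replays the error-unwind labels above in order. A minimal sketch, assuming the name oce_free_dma_buffer:

void
oce_free_dma_buffer(struct oce_dev *dev, oce_dma_buf_t *dbuf)
{
	/* dev is unused here; kept only for symmetry with the allocator */
	(void) ddi_dma_unbind_handle(dbuf->dma_handle);
	ddi_dma_mem_free(&dbuf->acc_handle);
	ddi_dma_free_handle(&dbuf->dma_handle);
	kmem_free(dbuf, sizeof (oce_dma_buf_t));
}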
Example #18
int
oce_hw_init(struct oce_dev *dev)
{
	int  ret;
	struct mac_address_format mac_addr;

	ret = oce_POST(dev);
	if (ret != DDI_SUCCESS) {
		oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
		    "!!!HW POST1 FAILED");
		/* ADD FM FAULT */
		return (DDI_FAILURE);
	}
	/* create bootstrap mailbox */
	dev->bmbx = oce_alloc_dma_buffer(dev,
	    sizeof (struct oce_bmbx), NULL, DDI_DMA_CONSISTENT);
	if (dev->bmbx == NULL) {
		oce_log(dev, CE_WARN, MOD_CONFIG,
		    "Failed to allocate bmbx: size = %u",
		    (uint32_t)sizeof (struct oce_bmbx));
		return (DDI_FAILURE);
	}

	ret = oce_reset_fun(dev);
	if (ret != 0) {
		oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
		    "!!!FUNCTION RESET FAILED");
		goto init_fail;
	}

	/* reset the Endianness of BMBX */
	ret = oce_mbox_init(dev);
	if (ret != 0) {
		oce_log(dev, CE_WARN, MOD_CONFIG,
		    "Mailbox initialization2 Failed with %d", ret);
		goto init_fail;
	}

	/* read the firmware version */
	ret = oce_get_fw_version(dev);
	if (ret != 0) {
		oce_log(dev, CE_WARN, MOD_CONFIG,
		    "Firmaware version read failed with %d", ret);
		goto init_fail;
	}

	/* read the fw config */
	ret = oce_get_fw_config(dev);
	if (ret != 0) {
		oce_log(dev, CE_WARN, MOD_CONFIG,
		    "Firmware configuration read failed with %d", ret);
		goto init_fail;
	}

	/* read the Factory MAC address */
	ret = oce_read_mac_addr(dev, 0, 1,
	    MAC_ADDRESS_TYPE_NETWORK, &mac_addr);
	if (ret != 0) {
		oce_log(dev, CE_WARN, MOD_CONFIG,
		    "MAC address read failed with %d", ret);
		goto init_fail;
	}
	bcopy(&mac_addr.mac_addr[0], &dev->mac_addr[0], ETHERADDRL);
	return (DDI_SUCCESS);
init_fail:
	oce_hw_fini(dev);
	return (DDI_FAILURE);
}
Example #19
static int
oce_map_regs(struct oce_dev *dev)
{
	int ret = 0;
	off_t bar_size = 0;

	ASSERT(NULL != dev);
	ASSERT(NULL != dev->dip);

	/* get number of supported bars */
	ret = ddi_dev_nregs(dev->dip, &dev->num_bars);
	if (ret != DDI_SUCCESS) {
		oce_log(dev, CE_WARN, MOD_CONFIG,
		    "%d: could not retrieve num_bars", MOD_CONFIG);
		return (DDI_FAILURE);
	}

	/* verify each bar and map it accordingly */
	/* PCI CFG */
	ret = ddi_dev_regsize(dev->dip, OCE_DEV_CFG_BAR, &bar_size);
	if (ret != DDI_SUCCESS) {
		oce_log(dev, CE_WARN, MOD_CONFIG,
		    "Could not get sizeof BAR %d",
		    OCE_DEV_CFG_BAR);
		return (DDI_FAILURE);
	}

	ret = ddi_regs_map_setup(dev->dip, OCE_DEV_CFG_BAR, &dev->dev_cfg_addr,
	    0, bar_size, &reg_accattr, &dev->dev_cfg_handle);

	if (ret != DDI_SUCCESS) {
		oce_log(dev, CE_WARN, MOD_CONFIG,
		    "Could not map bar %d",
		    OCE_DEV_CFG_BAR);
		return (DDI_FAILURE);
	}

	/* CSR */
	ret = ddi_dev_regsize(dev->dip, OCE_PCI_CSR_BAR, &bar_size);

	if (ret != DDI_SUCCESS) {
		oce_log(dev, CE_WARN, MOD_CONFIG,
		    "Could not get sizeof BAR %d",
		    OCE_PCI_CSR_BAR);
		return (DDI_FAILURE);
	}

	ret = ddi_regs_map_setup(dev->dip, OCE_PCI_CSR_BAR, &dev->csr_addr,
	    0, bar_size, &reg_accattr, &dev->csr_handle);
	if (ret != DDI_SUCCESS) {
		oce_log(dev, CE_WARN, MOD_CONFIG,
		    "Could not map bar %d",
		    OCE_PCI_CSR_BAR);
		ddi_regs_map_free(&dev->dev_cfg_handle);
		return (DDI_FAILURE);
	}

	/* Doorbells */
	ret = ddi_dev_regsize(dev->dip, OCE_PCI_DB_BAR, &bar_size);
	if (ret != DDI_SUCCESS) {
		oce_log(dev, CE_WARN, MOD_CONFIG,
		    "%d Could not get sizeof BAR %d",
		    ret, OCE_PCI_DB_BAR);
		ddi_regs_map_free(&dev->csr_handle);
		ddi_regs_map_free(&dev->dev_cfg_handle);
		return (DDI_FAILURE);
	}

	ret = ddi_regs_map_setup(dev->dip, OCE_PCI_DB_BAR, &dev->db_addr,
	    0, bar_size, &reg_accattr, &dev->db_handle);
	if (ret != DDI_SUCCESS) {
		oce_log(dev, CE_WARN, MOD_CONFIG,
		    "Could not map bar %d", OCE_PCI_DB_BAR);
		ddi_regs_map_free(&dev->csr_handle);
		ddi_regs_map_free(&dev->dev_cfg_handle);
		return (DDI_FAILURE);
	}
	return (DDI_SUCCESS);
}
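
Teardown unmaps the three BARs in the reverse order of the setup above; a minimal sketch (the function name is an assumption):

static void
oce_unmap_regs(struct oce_dev *dev)
{
	ddi_regs_map_free(&dev->db_handle);
	ddi_regs_map_free(&dev->csr_handle);
	ddi_regs_map_free(&dev->dev_cfg_handle);
}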
Example #20
int
oce_m_multicast(void *arg, boolean_t add, const uint8_t *mca)
{
	struct oce_dev *dev = (struct oce_dev *)arg;
	struct ether_addr  *mca_drv_list;
	struct ether_addr  mca_hw_list[OCE_MAX_MCA];
	uint16_t new_mcnt = dev->num_mca;
	int ret;
	int i;

	/* check the address */
	if ((mca[0] & 0x1) == 0) {
		return (EINVAL);
	}
	/* Allocate the local array for holding the addresses temporarily */
	bzero(mca_hw_list, sizeof (mca_hw_list));
	mca_drv_list = &dev->multi_cast[0];

	DEV_LOCK(dev);
	if (add) {
		/* check if we exceeded hw max  supported */
		if (new_mcnt < OCE_MAX_MCA) {
			/* copy entire dev mca to the mbx */
			bcopy((void*)mca_drv_list,
			    (void*)mca_hw_list,
			    (dev->num_mca * sizeof (struct ether_addr)));
			/* Append the new one to local list */
			bcopy(mca, &mca_hw_list[dev->num_mca],
			    sizeof (struct ether_addr));
		}
		new_mcnt++;
	} else {
		struct ether_addr *hwlistp = &mca_hw_list[0];
		for (i = 0; i < dev->num_mca; i++) {
			/* copy only if it does not match */
			if (bcmp((mca_drv_list + i), mca, ETHERADDRL)) {
				bcopy(mca_drv_list + i, hwlistp,
				    ETHERADDRL);
				hwlistp++;
			} else {
				new_mcnt--;
			}
		}
	}

	if (dev->suspended) {
		goto finish;
	}
	if (new_mcnt > OCE_MAX_MCA) {
		ret = oce_set_multicast_table(dev, dev->if_id, &mca_hw_list[0],
		    OCE_MAX_MCA, B_TRUE);
	} else {
		ret = oce_set_multicast_table(dev, dev->if_id,
		    &mca_hw_list[0], new_mcnt, B_FALSE);
	}
	if (ret != 0) {
		oce_log(dev, CE_WARN, MOD_CONFIG,
		    "mcast %s fails", add ? "ADD" : "DEL");
		DEV_UNLOCK(dev);
		return (EIO);
	}
	/*
	 *  Copy the local structure to dev structure
	 */
finish:
	if (new_mcnt && new_mcnt <= OCE_MAX_MCA) {
		bcopy(mca_hw_list, mca_drv_list,
		    new_mcnt * sizeof (struct ether_addr));

		dev->num_mca = (uint16_t)new_mcnt;
	}
	DEV_UNLOCK(dev);
	oce_log(dev, CE_NOTE, MOD_CONFIG,
	    "mcast %s, addr=%02x:%02x:%02x:%02x:%02x:%02x, num_mca=%d",
	    add ? "ADD" : "DEL",
	    mca[0], mca[1], mca[2], mca[3], mca[4], mca[5],
	    dev->num_mca);
	return (0);
} /* oce_m_multicast */
Example #21
/*
 * function called by kstat to update the stats counters
 *
 * ksp - pointer to the kstats structure
 * rw - flags defining read/write
 *
 * return DDI_SUCCESS => success, error code otherwise
 */
static int
oce_update_stats(kstat_t *ksp, int rw)
{
	struct oce_dev *dev;
	struct oce_stat *stats;
	struct rx_port_stats *port_stats;
	int ret;

	if (rw == KSTAT_WRITE) {
		return (EACCES);
	}

	dev = ksp->ks_private;
	stats = (struct oce_stat *)ksp->ks_data;
	port_stats = &dev->hw_stats->params.rsp.rx.port[dev->port_id];

	mutex_enter(&dev->dev_lock);
	if (dev->suspended) {
		mutex_exit(&dev->dev_lock);
		return (EIO);
	}
	ret = oce_get_hw_stats(dev);
	if (ret != DDI_SUCCESS) {
		oce_log(dev, CE_WARN, MOD_CONFIG,
		    "Failed to get stats:%d", ret);
		mutex_exit(&dev->dev_lock);
		return (EIO);
	}

	/* update the stats */
	stats->rx_bytes_lo.value.ul = port_stats->rx_bytes_lsd;
	stats->rx_bytes_hi.value.ul = port_stats->rx_bytes_msd;

	stats->rx_frames.value.ul = port_stats->rx_total_frames;
	stats->rx_errors.value.ul = port_stats->rx_crc_errors +
	    port_stats->rx_alignment_symbol_errors +
	    port_stats->rx_in_range_errors +
	    port_stats->rx_out_range_errors +
	    port_stats->rx_frame_too_long +
	    port_stats->rx_ip_checksum_errs +
	    port_stats->rx_tcp_checksum_errs +
	    port_stats->rx_udp_checksum_errs;

	stats->rx_drops.value.ul = port_stats->rx_dropped_too_small +
	    port_stats->rx_dropped_too_short +
	    port_stats->rx_dropped_header_too_small +
	    port_stats->rx_dropped_tcp_length +
	    port_stats->rx_dropped_runt;

	stats->tx_bytes_lo.value.ul = port_stats->tx_bytes_lsd;
	stats->tx_bytes_hi.value.ul = port_stats->tx_bytes_msd;

	stats->tx_frames.value.ul = port_stats->tx_unicast_frames +
	    port_stats->tx_multicast_frames +
	    port_stats->tx_broadcast_frames +
	    port_stats->tx_pause_frames +
	    port_stats->tx_control_frames;
	stats->tx_errors.value.ul = dev->tx_errors;

	stats->rx_unicast_frames.value.ul =
	    port_stats->rx_unicast_frames;
	stats->rx_multicast_frames.value.ul =
	    port_stats->rx_multicast_frames;
	stats->rx_broadcast_frames.value.ul =
	    port_stats->rx_broadcast_frames;
	stats->rx_crc_errors.value.ul =
	    port_stats->rx_crc_errors;

	stats->rx_alignment_symbol_errors.value.ul =
	    port_stats->rx_alignment_symbol_errors;
	stats->rx_in_range_errors.value.ul =
	    port_stats->rx_in_range_errors;
	stats->rx_out_range_errors.value.ul =
	    port_stats->rx_out_range_errors;
	stats->rx_frame_too_long.value.ul =
	    port_stats->rx_frame_too_long;
	stats->rx_address_match_errors.value.ul =
	    port_stats->rx_address_match_errors;

	stats->rx_pause_frames.value.ul =
	    port_stats->rx_pause_frames;
	stats->rx_control_frames.value.ul =
	    port_stats->rx_control_frames;
	stats->rx_ip_checksum_errs.value.ul =
	    port_stats->rx_ip_checksum_errs;
	stats->rx_tcp_checksum_errs.value.ul =
	    port_stats->rx_tcp_checksum_errs;
	stats->rx_udp_checksum_errs.value.ul =
	    port_stats->rx_udp_checksum_errs;
	stats->rx_fifo_overflow.value.ul = port_stats->rx_fifo_overflow;
	stats->rx_input_fifo_overflow.value.ul =
	    port_stats->rx_input_fifo_overflow;

	stats->tx_unicast_frames.value.ul =
	    port_stats->tx_unicast_frames;
	stats->tx_multicast_frames.value.ul =
	    port_stats->tx_multicast_frames;
	stats->tx_broadcast_frames.value.ul =
	    port_stats->tx_broadcast_frames;
	stats->tx_pause_frames.value.ul =
	    port_stats->tx_pause_frames;
	stats->tx_control_frames.value.ul =
	    port_stats->tx_control_frames;


	stats->rx_drops_no_pbuf.value.ul =
	    dev->hw_stats->params.rsp.rx.rx_drops_no_pbuf;
	stats->rx_drops_no_txpb.value.ul =
	    dev->hw_stats->params.rsp.rx.rx_drops_no_txpb;
	stats->rx_drops_no_erx_descr.value.ul =
	    dev->hw_stats->params.rsp.rx.rx_drops_no_erx_descr;
	stats->rx_drops_no_tpre_descr.value.ul =
	    dev->hw_stats->params.rsp.rx.rx_drops_no_tpre_descr;
	stats->rx_drops_too_many_frags.value.ul =
	    dev->hw_stats->params.rsp.rx.rx_drops_too_many_frags;
	stats->rx_drops_invalid_ring.value.ul =
	    dev->hw_stats->params.rsp.rx.rx_drops_invalid_ring;
	stats->rx_drops_mtu.value.ul =
	    dev->hw_stats->params.rsp.rx.rx_drops_mtu;

	stats->rx_dropped_too_small.value.ul =
	    port_stats->rx_dropped_too_small;
	stats->rx_dropped_too_short.value.ul =
	    port_stats->rx_dropped_too_short;
	stats->rx_dropped_header_too_small.value.ul =
	    port_stats->rx_dropped_header_too_small;
	stats->rx_dropped_tcp_length.value.ul =
	    port_stats->rx_dropped_tcp_length;
	stats->rx_dropped_runt.value.ul =
	    port_stats->rx_dropped_runt;

	stats->rx_drops_no_fragments.value.ul =
	    dev->hw_stats->params.rsp.err_rx.rx_drops_no_fragments[0];

	mutex_exit(&dev->dev_lock);
	return (DDI_SUCCESS);
} /* oce_update_stats */
Example #22
/*
 * function to handle dlpi streams message from GLDv3 mac layer
 */
void
oce_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
{
	struct oce_dev *dev = arg;
	struct  iocblk *iocp;
	int cmd;
	uint32_t payload_length;
	int ret;

	iocp = (struct iocblk *)voidptr(mp->b_rptr);
	iocp->ioc_error = 0;
	cmd = iocp->ioc_cmd;

	DEV_LOCK(dev);
	if (dev->suspended) {
		miocnak(wq, mp, 0, EINVAL);
		DEV_UNLOCK(dev);
		return;
	}
	DEV_UNLOCK(dev);

	switch (cmd) {

	case OCE_ISSUE_MBOX: {
		ret = oce_issue_mbox(dev, wq, mp, &payload_length);
		miocack(wq, mp, payload_length, ret);
		break;
	}
	case OCE_QUERY_DRIVER_DATA: {
		struct oce_driver_query *drv_query =
		    (struct oce_driver_query *)(void *)mp->b_cont->b_rptr;

		/* if the driver version does not match bail */
		if (drv_query->version != OCN_VERSION_SUPPORTED) {
			oce_log(dev, CE_NOTE, MOD_CONFIG, "%s",
			    "One Connect version mismatch");
			miocnak(wq, mp, 0, ENOTSUP);
			break;
		}

		/* fill the return values */
		bcopy(OCE_MOD_NAME, drv_query->driver_name,
		    (sizeof (OCE_MOD_NAME) > 32) ?
		    31 : sizeof (OCE_MOD_NAME));
		drv_query->driver_name[31] = '\0';

		bcopy(OCE_VERSION, drv_query->driver_version,
		    (sizeof (OCE_VERSION) > 32) ? 31 :
		    sizeof (OCE_VERSION));
		drv_query->driver_version[31] = '\0';

		if (dev->num_smac == 0) {
			drv_query->num_smac = 1;
			bcopy(dev->mac_addr, drv_query->smac_addr[0],
			    ETHERADDRL);
		} else {
			drv_query->num_smac = dev->num_smac;
			bcopy(dev->unicast_addr, drv_query->smac_addr[0],
			    ETHERADDRL);
		}

		bcopy(dev->mac_addr, drv_query->pmac_addr, ETHERADDRL);

		payload_length = sizeof (struct oce_driver_query);
		miocack(wq, mp, payload_length, 0);
		break;
	}

	default:
		miocnak(wq, mp, 0, ENOTSUP);
		break;
	}
} /* oce_m_ioctl */