/*
 * igb_alloc_rcb_lists - Memory allocation for the receive control blocks
 * of one ring.
 */
static int
igb_alloc_rcb_lists(igb_rx_data_t *rx_data)
{
	int i;
	int ret;
	rx_control_block_t *rcb;
	igb_t *igb = rx_data->rx_ring->igb;
	dma_buffer_t *rx_buf;
	uint32_t rcb_count;

	/*
	 * Allocate memory for the rx control blocks for work list and
	 * free list.
	 */
	rcb_count = rx_data->ring_size + rx_data->free_list_size;
	rcb = rx_data->rcb_area;

	for (i = 0; i < rcb_count; i++, rcb++) {
		ASSERT(rcb != NULL);

		if (i < rx_data->ring_size) {
			/* Attach the rx control block to the work list */
			rx_data->work_list[i] = rcb;
		} else {
			/* Attach the rx control block to the free list */
			rx_data->free_list[i - rx_data->ring_size] = rcb;
		}

		rx_buf = &rcb->rx_buf;
		ret = igb_alloc_dma_buffer(igb, rx_buf, igb->rx_buf_size);

		if (ret != IGB_SUCCESS) {
			igb_log(igb, IGB_LOG_ERROR,
			    "Allocate rx dma buffer failed");
			goto alloc_rcb_lists_fail;
		}

		/*
		 * Reserve IPHDR_ALIGN_ROOM bytes at the head of the buffer
		 * so that the IP header of a received frame lands on a
		 * well-aligned boundary after the 14-byte Ethernet header.
		 */
		rx_buf->size -= IPHDR_ALIGN_ROOM;
		rx_buf->address += IPHDR_ALIGN_ROOM;
		rx_buf->dma_address += IPHDR_ALIGN_ROOM;

		rcb->ref_cnt = 1;
		rcb->rx_data = (igb_rx_data_t *)rx_data;
		rcb->free_rtn.free_func = igb_rx_recycle;
		rcb->free_rtn.free_arg = (char *)rcb;

		rcb->mp = desballoc((unsigned char *)
		    rx_buf->address, rx_buf->size,
		    0, &rcb->free_rtn);
	}

	return (IGB_SUCCESS);

alloc_rcb_lists_fail:
	igb_free_rcb_lists(rx_data);

	return (IGB_FAILURE);
}
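/*
 * A minimal sketch of the recycle contract established above (the
 * driver's real igb_rx_recycle is defined elsewhere): when the mblk
 * created by desballoc(9F) is freed by the upper layer, STREAMS calls
 * free_func(free_arg), letting the driver re-wrap the same DMA buffer
 * in a fresh mblk instead of releasing it.
 */
#if 0	/* illustrative only */
static void
example_rx_recycle(caddr_t arg)
{
	rx_control_block_t *rcb = (rx_control_block_t *)arg;
	dma_buffer_t *rx_buf = &rcb->rx_buf;

	/* Re-arm the control block with a new mblk over the same buffer */
	rcb->mp = desballoc((unsigned char *)rx_buf->address,
	    rx_buf->size, 0, &rcb->free_rtn);
}
#endif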
/*
 * Create and initialize the driver private statistics.
 */
int
igb_init_stats(igb_t *igb)
{
	kstat_t *ks;
	igb_stat_t *igb_ks;

	/*
	 * Create and init kstat
	 */
	ks = kstat_create(MODULE_NAME, ddi_get_instance(igb->dip),
	    "statistics", "net", KSTAT_TYPE_NAMED,
	    sizeof (igb_stat_t) / sizeof (kstat_named_t), 0);

	if (ks == NULL) {
		igb_log(igb, IGB_LOG_ERROR,
		    "Could not create kernel statistics");
		return (IGB_FAILURE);
	}

	igb->igb_ks = ks;

	igb_ks = (igb_stat_t *)ks->ks_data;

	/*
	 * Initialize all the statistics.
	 */
	kstat_named_init(&igb_ks->reset_count, "reset_count",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&igb_ks->dout_sync, "DMA_out_sync",
	    KSTAT_DATA_UINT64);

#ifdef IGB_DEBUG
	kstat_named_init(&igb_ks->rx_frame_error, "rx_frame_error",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&igb_ks->rx_cksum_error, "rx_cksum_error",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&igb_ks->rx_exceed_pkt, "rx_exceed_pkt",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&igb_ks->tx_overload, "tx_overload",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&igb_ks->tx_fail_no_tbd, "tx_fail_no_tbd",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&igb_ks->tx_fail_no_tcb, "tx_fail_no_tcb",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&igb_ks->tx_fail_dma_bind, "tx_fail_dma_bind",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&igb_ks->tx_reschedule, "tx_reschedule",
	    KSTAT_DATA_UINT64);

	kstat_named_init(&igb_ks->gprc, "good_pkts_recvd",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&igb_ks->gptc, "good_pkts_xmitd",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&igb_ks->gor, "good_octets_recvd",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&igb_ks->got, "good_octets_xmitd",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&igb_ks->prc64, "pkts_recvd_( 64b)",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&igb_ks->prc127, "pkts_recvd_( 65- 127b)",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&igb_ks->prc255, "pkts_recvd_( 128- 255b)",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&igb_ks->prc511, "pkts_recvd_( 256- 511b)",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&igb_ks->prc1023, "pkts_recvd_( 512-1023b)",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&igb_ks->prc1522, "pkts_recvd_(1024-1522b)",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&igb_ks->ptc64, "pkts_xmitd_( 64b)",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&igb_ks->ptc127, "pkts_xmitd_( 65- 127b)",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&igb_ks->ptc255, "pkts_xmitd_( 128- 255b)",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&igb_ks->ptc511, "pkts_xmitd_( 256- 511b)",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&igb_ks->ptc1023, "pkts_xmitd_( 512-1023b)",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&igb_ks->ptc1522, "pkts_xmitd_(1024-1522b)",
	    KSTAT_DATA_UINT64);
#endif

	kstat_named_init(&igb_ks->symerrs, "recv_symbol_errors",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&igb_ks->mpc, "recv_missed_packets",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&igb_ks->rlec, "recv_length_errors",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&igb_ks->fcruc, "recv_unsupport_FC_pkts",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&igb_ks->rfc, "recv_frag",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&igb_ks->tncrs, "xmit_with_no_CRS",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&igb_ks->tsctc, "xmit_TCP_seg_contexts",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&igb_ks->tsctfc, "xmit_TCP_seg_contexts_fail",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&igb_ks->xonrxc, "XONs_recvd",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&igb_ks->xontxc, "XONs_xmitd",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&igb_ks->xoffrxc, "XOFFs_recvd",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&igb_ks->xofftxc, "XOFFs_xmitd",
	    KSTAT_DATA_UINT64);

	/*
	 * Function to provide kernel stat update on demand
	 */
	ks->ks_update = igb_update_stats;

	ks->ks_private = (void *)igb;

	/*
	 * Add kstat to the system's kstat chain
	 */
	kstat_install(ks);

	return (IGB_SUCCESS);
}
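/*
 * Once installed, these named kstats can be read from userland with
 * kstat(1M), e.g. "kstat -m igb -i 0 -n statistics" (assuming
 * MODULE_NAME expands to "igb"). The ks_update hook set above is
 * invoked with KSTAT_READ before each snapshot, giving
 * igb_update_stats() a chance to refresh the counters from hardware.
 */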
void
pci_dump(void *arg)
{
	igb_t *igb = (igb_t *)arg;
	ddi_acc_handle_t handle;
	uint8_t cap_ptr;
	uint8_t next_ptr;
	uint32_t msix_bar;
	uint32_t msix_ctrl;
	uint32_t msix_tbl_sz;
	uint32_t tbl_offset;
	uint32_t tbl_bir;
	uint32_t pba_offset;
	uint32_t pba_bir;
	off_t offset;
	off_t mem_size;
	uintptr_t base;
	ddi_acc_handle_t acc_hdl;
	int i;

	handle = igb->osdep.cfg_handle;

	igb_log(igb, IGB_LOG_INFO, "Begin dump PCI config space");

	igb_log(igb, IGB_LOG_INFO, "PCI_CONF_VENID:\t0x%x\n",
	    pci_config_get16(handle, PCI_CONF_VENID));
	igb_log(igb, IGB_LOG_INFO, "PCI_CONF_DEVID:\t0x%x\n",
	    pci_config_get16(handle, PCI_CONF_DEVID));
	igb_log(igb, IGB_LOG_INFO, "PCI_CONF_COMMAND:\t0x%x\n",
	    pci_config_get16(handle, PCI_CONF_COMM));
	igb_log(igb, IGB_LOG_INFO, "PCI_CONF_STATUS:\t0x%x\n",
	    pci_config_get16(handle, PCI_CONF_STAT));
	igb_log(igb, IGB_LOG_INFO, "PCI_CONF_REVID:\t0x%x\n",
	    pci_config_get8(handle, PCI_CONF_REVID));
	igb_log(igb, IGB_LOG_INFO, "PCI_CONF_PROG_CLASS:\t0x%x\n",
	    pci_config_get8(handle, PCI_CONF_PROGCLASS));
	igb_log(igb, IGB_LOG_INFO, "PCI_CONF_SUB_CLASS:\t0x%x\n",
	    pci_config_get8(handle, PCI_CONF_SUBCLASS));
	igb_log(igb, IGB_LOG_INFO, "PCI_CONF_BAS_CLASS:\t0x%x\n",
	    pci_config_get8(handle, PCI_CONF_BASCLASS));
	igb_log(igb, IGB_LOG_INFO, "PCI_CONF_CACHE_LINESZ:\t0x%x\n",
	    pci_config_get8(handle, PCI_CONF_CACHE_LINESZ));
	igb_log(igb, IGB_LOG_INFO, "PCI_CONF_LATENCY_TIMER:\t0x%x\n",
	    pci_config_get8(handle, PCI_CONF_LATENCY_TIMER));
	igb_log(igb, IGB_LOG_INFO, "PCI_CONF_HEADER_TYPE:\t0x%x\n",
	    pci_config_get8(handle, PCI_CONF_HEADER));
	igb_log(igb, IGB_LOG_INFO, "PCI_CONF_BIST:\t0x%x\n",
	    pci_config_get8(handle, PCI_CONF_BIST));
	igb_log(igb, IGB_LOG_INFO, "PCI_CONF_BASE0:\t0x%x\n",
	    pci_config_get32(handle, PCI_CONF_BASE0));
	igb_log(igb, IGB_LOG_INFO, "PCI_CONF_BASE1:\t0x%x\n",
	    pci_config_get32(handle, PCI_CONF_BASE1));
	igb_log(igb, IGB_LOG_INFO, "PCI_CONF_BASE2:\t0x%x\n",
	    pci_config_get32(handle, PCI_CONF_BASE2));

	/* MSI-X BAR */
	msix_bar = pci_config_get32(handle, PCI_CONF_BASE3);
	igb_log(igb, IGB_LOG_INFO, "PCI_CONF_BASE3:\t0x%x\n", msix_bar);

	igb_log(igb, IGB_LOG_INFO, "PCI_CONF_BASE4:\t0x%x\n",
	    pci_config_get32(handle, PCI_CONF_BASE4));
	igb_log(igb, IGB_LOG_INFO, "PCI_CONF_BASE5:\t0x%x\n",
	    pci_config_get32(handle, PCI_CONF_BASE5));
	igb_log(igb, IGB_LOG_INFO, "PCI_CONF_CIS:\t0x%x\n",
	    pci_config_get32(handle, PCI_CONF_CIS));
	igb_log(igb, IGB_LOG_INFO, "PCI_CONF_SUBVENID:\t0x%x\n",
	    pci_config_get16(handle, PCI_CONF_SUBVENID));
	igb_log(igb, IGB_LOG_INFO, "PCI_CONF_SUBSYSID:\t0x%x\n",
	    pci_config_get16(handle, PCI_CONF_SUBSYSID));
	igb_log(igb, IGB_LOG_INFO, "PCI_CONF_ROM:\t0x%x\n",
	    pci_config_get32(handle, PCI_CONF_ROM));

	cap_ptr = pci_config_get8(handle, PCI_CONF_CAP_PTR);

	igb_log(igb, IGB_LOG_INFO, "PCI_CONF_CAP_PTR:\t0x%x\n", cap_ptr);
	igb_log(igb, IGB_LOG_INFO, "PCI_CONF_ILINE:\t0x%x\n",
	    pci_config_get8(handle, PCI_CONF_ILINE));
	igb_log(igb, IGB_LOG_INFO, "PCI_CONF_IPIN:\t0x%x\n",
	    pci_config_get8(handle, PCI_CONF_IPIN));
	igb_log(igb, IGB_LOG_INFO, "PCI_CONF_MIN_G:\t0x%x\n",
	    pci_config_get8(handle, PCI_CONF_MIN_G));
	igb_log(igb, IGB_LOG_INFO, "PCI_CONF_MAX_L:\t0x%x\n",
	    pci_config_get8(handle, PCI_CONF_MAX_L));

	/* Power Management */
	offset = cap_ptr;

	igb_log(igb, IGB_LOG_INFO, "PCI_PM_CAP_ID:\t0x%x\n",
	    pci_config_get8(handle, offset));

	next_ptr = pci_config_get8(handle, offset + 1);
	igb_log(igb, IGB_LOG_INFO, "PCI_PM_NEXT_PTR:\t0x%x\n", next_ptr);

	igb_log(igb, IGB_LOG_INFO, "PCI_PM_CAP:\t0x%x\n",
	    pci_config_get16(handle, offset + PCI_PMCAP));
	igb_log(igb, IGB_LOG_INFO, "PCI_PM_CSR:\t0x%x\n",
	    pci_config_get16(handle, offset + PCI_PMCSR));
	igb_log(igb, IGB_LOG_INFO, "PCI_PM_CSR_BSE:\t0x%x\n",
	    pci_config_get8(handle, offset + PCI_PMCSR_BSE));
	igb_log(igb, IGB_LOG_INFO, "PCI_PM_DATA:\t0x%x\n",
	    pci_config_get8(handle, offset + PCI_PMDATA));

	/* MSI Configuration */
	offset = next_ptr;

	igb_log(igb, IGB_LOG_INFO, "PCI_MSI_CAP_ID:\t0x%x\n",
	    pci_config_get8(handle, offset));

	next_ptr = pci_config_get8(handle, offset + 1);
	igb_log(igb, IGB_LOG_INFO, "PCI_MSI_NEXT_PTR:\t0x%x\n", next_ptr);

	igb_log(igb, IGB_LOG_INFO, "PCI_MSI_CTRL:\t0x%x\n",
	    pci_config_get16(handle, offset + PCI_MSI_CTRL));
	igb_log(igb, IGB_LOG_INFO, "PCI_MSI_ADDR:\t0x%x\n",
	    pci_config_get32(handle, offset + PCI_MSI_ADDR_OFFSET));
	igb_log(igb, IGB_LOG_INFO, "PCI_MSI_ADDR_HI:\t0x%x\n",
	    pci_config_get32(handle, offset + 0x8));
	igb_log(igb, IGB_LOG_INFO, "PCI_MSI_DATA:\t0x%x\n",
	    pci_config_get16(handle, offset + 0xC));

	/* MSI-X Configuration */
	offset = next_ptr;

	igb_log(igb, IGB_LOG_INFO, "PCI_MSIX_CAP_ID:\t0x%x\n",
	    pci_config_get8(handle, offset));

	next_ptr = pci_config_get8(handle, offset + 1);
	igb_log(igb, IGB_LOG_INFO, "PCI_MSIX_NEXT_PTR:\t0x%x\n", next_ptr);

	msix_ctrl = pci_config_get16(handle, offset + PCI_MSIX_CTRL);
	/* The table size field encodes the entry count minus one */
	msix_tbl_sz = msix_ctrl & 0x7ff;
	igb_log(igb, IGB_LOG_INFO, "PCI_MSIX_CTRL:\t0x%x\n", msix_ctrl);

	tbl_offset = pci_config_get32(handle, offset + PCI_MSIX_TBL_OFFSET);
	tbl_bir = tbl_offset & PCI_MSIX_TBL_BIR_MASK;
	tbl_offset = tbl_offset & ~PCI_MSIX_TBL_BIR_MASK;
	igb_log(igb, IGB_LOG_INFO, "PCI_MSIX_TBL_OFFSET:\t0x%x\n", tbl_offset);
	igb_log(igb, IGB_LOG_INFO, "PCI_MSIX_TBL_BIR:\t0x%x\n", tbl_bir);

	pba_offset = pci_config_get32(handle, offset + PCI_MSIX_PBA_OFFSET);
	pba_bir = pba_offset & PCI_MSIX_PBA_BIR_MASK;
	pba_offset = pba_offset & ~PCI_MSIX_PBA_BIR_MASK;
	igb_log(igb, IGB_LOG_INFO, "PCI_MSIX_PBA_OFFSET:\t0x%x\n", pba_offset);
	igb_log(igb, IGB_LOG_INFO, "PCI_MSIX_PBA_BIR:\t0x%x\n", pba_bir);

	/* PCI Express Configuration */
	offset = next_ptr;

	igb_log(igb, IGB_LOG_INFO, "PCIE_CAP_ID:\t0x%x\n",
	    pci_config_get8(handle, offset + PCIE_CAP_ID));

	next_ptr = pci_config_get8(handle, offset + PCIE_CAP_NEXT_PTR);
	igb_log(igb, IGB_LOG_INFO, "PCIE_CAP_NEXT_PTR:\t0x%x\n", next_ptr);

	igb_log(igb, IGB_LOG_INFO, "PCIE_PCIECAP:\t0x%x\n",
	    pci_config_get16(handle, offset + PCIE_PCIECAP));
	igb_log(igb, IGB_LOG_INFO, "PCIE_DEVCAP:\t0x%x\n",
	    pci_config_get32(handle, offset + PCIE_DEVCAP));
	igb_log(igb, IGB_LOG_INFO, "PCIE_DEVCTL:\t0x%x\n",
	    pci_config_get16(handle, offset + PCIE_DEVCTL));
	igb_log(igb, IGB_LOG_INFO, "PCIE_DEVSTS:\t0x%x\n",
	    pci_config_get16(handle, offset + PCIE_DEVSTS));
	igb_log(igb, IGB_LOG_INFO, "PCIE_LINKCAP:\t0x%x\n",
	    pci_config_get32(handle, offset + PCIE_LINKCAP));
	igb_log(igb, IGB_LOG_INFO, "PCIE_LINKCTL:\t0x%x\n",
	    pci_config_get16(handle, offset + PCIE_LINKCTL));
	igb_log(igb, IGB_LOG_INFO, "PCIE_LINKSTS:\t0x%x\n",
	    pci_config_get16(handle, offset + PCIE_LINKSTS));

	/* MSI-X Memory Space */
	if (ddi_dev_regsize(igb->dip, IGB_ADAPTER_MSIXTAB, &mem_size) !=
	    DDI_SUCCESS) {
		igb_log(igb, IGB_LOG_ERROR, "ddi_dev_regsize() failed");
		return;
	}

	if ((ddi_regs_map_setup(igb->dip, IGB_ADAPTER_MSIXTAB,
	    (caddr_t *)&base, 0, mem_size, &igb_regs_acc_attr, &acc_hdl)) !=
	    DDI_SUCCESS) {
		igb_log(igb, IGB_LOG_ERROR, "ddi_regs_map_setup() failed");
		return;
	}

	igb_log(igb, IGB_LOG_INFO, "MSI-X Memory Space: (mem_size = %lu, "
	    "base = %p)", (ulong_t)mem_size, (void *)base);

	/* The table size is encoded as N - 1, so the loop bound is inclusive */
	for (i = 0; i <= msix_tbl_sz; i++) {
		igb_log(igb, IGB_LOG_INFO, "MSI-X Table Entry(%d):", i);
		igb_log(igb, IGB_LOG_INFO, "lo_addr:\t%x",
		    ddi_get32(acc_hdl,
		    (uint32_t *)(base + tbl_offset + (i * 16))));
		igb_log(igb, IGB_LOG_INFO, "up_addr:\t%x",
		    ddi_get32(acc_hdl,
		    (uint32_t *)(base + tbl_offset + (i * 16) + 4)));
		igb_log(igb, IGB_LOG_INFO, "msg_data:\t%x",
		    ddi_get32(acc_hdl,
		    (uint32_t *)(base + tbl_offset + (i * 16) + 8)));
		igb_log(igb, IGB_LOG_INFO, "vct_ctrl:\t%x",
		    ddi_get32(acc_hdl,
		    (uint32_t *)(base + tbl_offset + (i * 16) + 12)));
	}

	igb_log(igb, IGB_LOG_INFO, "MSI-X Pending Bits:\t%x",
	    ddi_get32(acc_hdl, (uint32_t *)(base + pba_offset)));

	ddi_regs_map_free(&acc_hdl);
}
/*
 * igb_alloc_tcb_lists - Memory allocation for the transmit control blocks
 * of one ring.
 */
static int
igb_alloc_tcb_lists(igb_tx_ring_t *tx_ring)
{
	int i;
	int ret;
	tx_control_block_t *tcb;
	dma_buffer_t *tx_buf;
	igb_t *igb = tx_ring->igb;
	dev_info_t *devinfo = igb->dip;

	/*
	 * Allocate memory for the work list.
	 */
	tx_ring->work_list = kmem_zalloc(sizeof (tx_control_block_t *) *
	    tx_ring->ring_size, KM_NOSLEEP);

	if (tx_ring->work_list == NULL) {
		igb_log(igb, IGB_LOG_ERROR,
		    "Could not allocate memory for tx work list");
		return (IGB_FAILURE);
	}

	/*
	 * Allocate memory for the free list.
	 */
	tx_ring->free_list = kmem_zalloc(sizeof (tx_control_block_t *) *
	    tx_ring->free_list_size, KM_NOSLEEP);

	if (tx_ring->free_list == NULL) {
		kmem_free(tx_ring->work_list,
		    sizeof (tx_control_block_t *) * tx_ring->ring_size);
		tx_ring->work_list = NULL;

		igb_log(igb, IGB_LOG_ERROR,
		    "Could not allocate memory for tx free list");
		return (IGB_FAILURE);
	}

	/*
	 * Allocate memory for the tx control blocks of free list.
	 */
	tx_ring->tcb_area = kmem_zalloc(sizeof (tx_control_block_t) *
	    tx_ring->free_list_size, KM_NOSLEEP);

	if (tx_ring->tcb_area == NULL) {
		kmem_free(tx_ring->work_list,
		    sizeof (tx_control_block_t *) * tx_ring->ring_size);
		tx_ring->work_list = NULL;

		kmem_free(tx_ring->free_list,
		    sizeof (tx_control_block_t *) * tx_ring->free_list_size);
		tx_ring->free_list = NULL;

		igb_log(igb, IGB_LOG_ERROR,
		    "Could not allocate memory for tx control blocks");
		return (IGB_FAILURE);
	}

	/*
	 * Allocate dma memory for the tx control block of free list.
	 */
	tcb = tx_ring->tcb_area;
	for (i = 0; i < tx_ring->free_list_size; i++, tcb++) {
		ASSERT(tcb != NULL);

		tx_ring->free_list[i] = tcb;

		/*
		 * Pre-allocate dma handles for transmit. These dma handles
		 * will be dynamically bound to the data buffers passed down
		 * from the upper layers at the time of transmitting.
		 */
		ret = ddi_dma_alloc_handle(devinfo,
		    &igb_tx_dma_attr,
		    DDI_DMA_DONTWAIT, NULL,
		    &tcb->tx_dma_handle);
		if (ret != DDI_SUCCESS) {
			tcb->tx_dma_handle = NULL;
			igb_log(igb, IGB_LOG_ERROR,
			    "Could not allocate tx dma handle: %x", ret);
			goto alloc_tcb_lists_fail;
		}

		/*
		 * Pre-allocate transmit buffers for packets whose size is
		 * less than bcopy_thresh.
		 */
		tx_buf = &tcb->tx_buf;

		ret = igb_alloc_dma_buffer(igb, tx_buf, igb->tx_buf_size);

		if (ret != IGB_SUCCESS) {
			ASSERT(tcb->tx_dma_handle != NULL);
			ddi_dma_free_handle(&tcb->tx_dma_handle);
			tcb->tx_dma_handle = NULL;
			igb_log(igb, IGB_LOG_ERROR,
			    "Allocate tx dma buffer failed");
			goto alloc_tcb_lists_fail;
		}

		tcb->last_index = MAX_TX_RING_SIZE;
	}

	return (IGB_SUCCESS);

alloc_tcb_lists_fail:
	igb_free_tcb_lists(tx_ring);

	return (IGB_FAILURE);
}
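/*
 * A minimal sketch of how one of the pre-allocated tx dma handles might
 * be bound to an mblk at transmit time (example_tx_bind is a
 * hypothetical helper; the driver's real transmit path lives elsewhere).
 * The handle is bound for a single transmit and unbound once the
 * hardware has consumed the buffer, which is what allows the handles to
 * be pre-allocated and reused.
 */
#if 0	/* illustrative only */
static int
example_tx_bind(tx_control_block_t *tcb, mblk_t *mp)
{
	ddi_dma_cookie_t cookie;
	uint_t cookie_num;
	int ret;

	ret = ddi_dma_addr_bind_handle(tcb->tx_dma_handle, NULL,
	    (caddr_t)mp->b_rptr, MBLKL(mp),
	    DDI_DMA_WRITE | DDI_DMA_STREAMING, DDI_DMA_DONTWAIT, NULL,
	    &cookie, &cookie_num);

	return (ret == DDI_DMA_MAPPED ? IGB_SUCCESS : IGB_FAILURE);
}
#endif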
/*
 * igb_alloc_dma_buffer - Allocate DMA resources for a DMA buffer
 */
static int
igb_alloc_dma_buffer(igb_t *igb, dma_buffer_t *buf, size_t size)
{
	int ret;
	dev_info_t *devinfo = igb->dip;
	ddi_dma_cookie_t cookie;
	size_t len;
	uint_t cookie_num;

	ret = ddi_dma_alloc_handle(devinfo,
	    &igb_buf_dma_attr, DDI_DMA_DONTWAIT,
	    NULL, &buf->dma_handle);

	if (ret != DDI_SUCCESS) {
		buf->dma_handle = NULL;
		igb_log(igb, IGB_LOG_ERROR,
		    "Could not allocate dma buffer handle: %x", ret);
		return (IGB_FAILURE);
	}

	ret = ddi_dma_mem_alloc(buf->dma_handle, size,
	    &igb_buf_acc_attr, DDI_DMA_STREAMING,
	    DDI_DMA_DONTWAIT, NULL, &buf->address,
	    &len, &buf->acc_handle);

	if (ret != DDI_SUCCESS) {
		buf->acc_handle = NULL;
		buf->address = NULL;
		if (buf->dma_handle != NULL) {
			ddi_dma_free_handle(&buf->dma_handle);
			buf->dma_handle = NULL;
		}
		igb_log(igb, IGB_LOG_ERROR,
		    "Could not allocate dma buffer memory: %x", ret);
		return (IGB_FAILURE);
	}

	ret = ddi_dma_addr_bind_handle(buf->dma_handle, NULL,
	    buf->address, len, DDI_DMA_RDWR | DDI_DMA_STREAMING,
	    DDI_DMA_DONTWAIT, NULL, &cookie, &cookie_num);

	if (ret != DDI_DMA_MAPPED) {
		buf->dma_address = NULL;
		if (buf->acc_handle != NULL) {
			ddi_dma_mem_free(&buf->acc_handle);
			buf->acc_handle = NULL;
			buf->address = NULL;
		}
		if (buf->dma_handle != NULL) {
			ddi_dma_free_handle(&buf->dma_handle);
			buf->dma_handle = NULL;
		}
		igb_log(igb, IGB_LOG_ERROR,
		    "Could not bind dma buffer handle: %x", ret);
		return (IGB_FAILURE);
	}

	ASSERT(cookie_num == 1);

	buf->dma_address = cookie.dmac_laddress;
	buf->size = len;
	buf->len = 0;

	return (IGB_SUCCESS);
}
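/*
 * A minimal sketch of the matching teardown, under the assumption that
 * the driver pairs this routine with a counterpart such as
 * igb_free_dma_buffer (name assumed here). The three allocation steps
 * must be undone in reverse order: unbind the handle, free the DMA
 * memory, then free the handle itself.
 */
#if 0	/* illustrative only */
static void
example_free_dma_buffer(dma_buffer_t *buf)
{
	if (buf->dma_handle != NULL)
		(void) ddi_dma_unbind_handle(buf->dma_handle);

	if (buf->acc_handle != NULL) {
		ddi_dma_mem_free(&buf->acc_handle);
		buf->acc_handle = NULL;
	}

	if (buf->dma_handle != NULL) {
		ddi_dma_free_handle(&buf->dma_handle);
		buf->dma_handle = NULL;
	}

	buf->address = NULL;
	buf->dma_address = NULL;
	buf->size = 0;
	buf->len = 0;
}
#endif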
/*
 * igb_alloc_rbd_ring - Memory allocation for the rx descriptors of one ring.
 */
static int
igb_alloc_rbd_ring(igb_rx_data_t *rx_data)
{
	int ret;
	size_t size;
	size_t len;
	uint_t cookie_num;
	dev_info_t *devinfo;
	ddi_dma_cookie_t cookie;
	igb_t *igb = rx_data->rx_ring->igb;

	devinfo = igb->dip;
	size = sizeof (union e1000_adv_rx_desc) * rx_data->ring_size;

	/*
	 * Allocate a new DMA handle for the receive descriptor
	 * memory area.
	 */
	ret = ddi_dma_alloc_handle(devinfo, &igb_desc_dma_attr,
	    DDI_DMA_DONTWAIT, NULL,
	    &rx_data->rbd_area.dma_handle);

	if (ret != DDI_SUCCESS) {
		igb_log(igb, IGB_LOG_ERROR,
		    "Could not allocate rbd dma handle: %x", ret);
		rx_data->rbd_area.dma_handle = NULL;
		return (IGB_FAILURE);
	}

	/*
	 * Allocate memory to DMA data to and from the receive
	 * descriptors.
	 */
	ret = ddi_dma_mem_alloc(rx_data->rbd_area.dma_handle,
	    size, &igb_desc_acc_attr, DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, NULL,
	    (caddr_t *)&rx_data->rbd_area.address,
	    &len, &rx_data->rbd_area.acc_handle);

	if (ret != DDI_SUCCESS) {
		igb_log(igb, IGB_LOG_ERROR,
		    "Could not allocate rbd dma memory: %x", ret);
		rx_data->rbd_area.acc_handle = NULL;
		rx_data->rbd_area.address = NULL;
		if (rx_data->rbd_area.dma_handle != NULL) {
			ddi_dma_free_handle(&rx_data->rbd_area.dma_handle);
			rx_data->rbd_area.dma_handle = NULL;
		}
		return (IGB_FAILURE);
	}

	/*
	 * Initialize the entire receive buffer descriptor area to zero
	 */
	bzero(rx_data->rbd_area.address, len);

	/*
	 * Allocates DMA resources for the memory that was allocated by
	 * the ddi_dma_mem_alloc call.
	 */
	ret = ddi_dma_addr_bind_handle(rx_data->rbd_area.dma_handle,
	    NULL, (caddr_t)rx_data->rbd_area.address,
	    len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, NULL, &cookie, &cookie_num);

	if (ret != DDI_DMA_MAPPED) {
		igb_log(igb, IGB_LOG_ERROR,
		    "Could not bind rbd dma resource: %x", ret);
		rx_data->rbd_area.dma_address = NULL;
		if (rx_data->rbd_area.acc_handle != NULL) {
			ddi_dma_mem_free(&rx_data->rbd_area.acc_handle);
			rx_data->rbd_area.acc_handle = NULL;
			rx_data->rbd_area.address = NULL;
		}
		if (rx_data->rbd_area.dma_handle != NULL) {
			ddi_dma_free_handle(&rx_data->rbd_area.dma_handle);
			rx_data->rbd_area.dma_handle = NULL;
		}
		return (IGB_FAILURE);
	}

	ASSERT(cookie_num == 1);

	rx_data->rbd_area.dma_address = cookie.dmac_laddress;
	rx_data->rbd_area.size = len;

	rx_data->rbd_ring = (union e1000_adv_rx_desc *)(uintptr_t)
	    rx_data->rbd_area.address;

	return (IGB_SUCCESS);
}
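/*
 * Note: the descriptor rings are mapped with DDI_DMA_CONSISTENT because
 * the driver and the hardware both access them continuously in small
 * reads and writes, whereas the packet buffers allocated by
 * igb_alloc_dma_buffer() use DDI_DMA_STREAMING, which is intended for
 * sequential, unidirectional bulk transfers.
 */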
int
igb_alloc_rx_ring_data(igb_rx_ring_t *rx_ring)
{
	igb_rx_data_t *rx_data;
	igb_t *igb = rx_ring->igb;
	uint32_t rcb_count;

	/*
	 * Allocate memory for software receive rings
	 */
	rx_data = kmem_zalloc(sizeof (igb_rx_data_t), KM_NOSLEEP);

	if (rx_data == NULL) {
		igb_log(igb, IGB_LOG_ERROR,
		    "Allocate software receive rings failed");
		return (IGB_FAILURE);
	}

	rx_data->rx_ring = rx_ring;
	mutex_init(&rx_data->recycle_lock, NULL,
	    MUTEX_DRIVER, DDI_INTR_PRI(igb->intr_pri));

	rx_data->ring_size = igb->rx_ring_size;
	rx_data->free_list_size = igb->rx_ring_size;

	rx_data->rcb_head = 0;
	rx_data->rcb_tail = 0;
	rx_data->rcb_free = rx_data->free_list_size;

	/*
	 * Allocate memory for the work list.
	 */
	rx_data->work_list = kmem_zalloc(sizeof (rx_control_block_t *) *
	    rx_data->ring_size, KM_NOSLEEP);

	if (rx_data->work_list == NULL) {
		igb_log(igb, IGB_LOG_ERROR,
		    "Could not allocate memory for rx work list");
		goto alloc_rx_data_failure;
	}

	/*
	 * Allocate memory for the free list.
	 */
	rx_data->free_list = kmem_zalloc(sizeof (rx_control_block_t *) *
	    rx_data->free_list_size, KM_NOSLEEP);

	if (rx_data->free_list == NULL) {
		igb_log(igb, IGB_LOG_ERROR,
		    "Could not allocate memory for rx free list");
		goto alloc_rx_data_failure;
	}

	/*
	 * Allocate memory for the rx control blocks for work list and
	 * free list.
	 */
	rcb_count = rx_data->ring_size + rx_data->free_list_size;
	rx_data->rcb_area = kmem_zalloc(sizeof (rx_control_block_t) *
	    rcb_count, KM_NOSLEEP);

	if (rx_data->rcb_area == NULL) {
		igb_log(igb, IGB_LOG_ERROR,
		    "Could not allocate memory for rx control blocks");
		goto alloc_rx_data_failure;
	}

	rx_ring->rx_data = rx_data;

	return (IGB_SUCCESS);

alloc_rx_data_failure:
	igb_free_rx_ring_data(rx_data);

	return (IGB_FAILURE);
}
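/*
 * Worked example: with a hypothetical rx_ring_size of 512, both
 * ring_size and free_list_size are 512, so rcb_count is 1024 and
 * rcb_area holds 1024 rx_control_block_t structures. These are later
 * divided between the work list and the free list by
 * igb_alloc_rcb_lists().
 */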
/*
 * igb_alloc_tbd_ring - Memory allocation for the tx descriptors of one ring.
 */
static int
igb_alloc_tbd_ring(igb_tx_ring_t *tx_ring)
{
	int ret;
	size_t size;
	size_t len;
	uint_t cookie_num;
	dev_info_t *devinfo;
	ddi_dma_cookie_t cookie;
	igb_t *igb = tx_ring->igb;

	devinfo = igb->dip;
	size = sizeof (union e1000_adv_tx_desc) * tx_ring->ring_size;

	/*
	 * If tx head write-back is enabled, an extra tbd is allocated
	 * to save the head write-back value
	 */
	if (igb->tx_head_wb_enable) {
		size += sizeof (union e1000_adv_tx_desc);
	}

	/*
	 * Allocate a DMA handle for the transmit descriptor
	 * memory area.
	 */
	ret = ddi_dma_alloc_handle(devinfo, &igb_desc_dma_attr,
	    DDI_DMA_DONTWAIT, NULL,
	    &tx_ring->tbd_area.dma_handle);

	if (ret != DDI_SUCCESS) {
		igb_log(igb, IGB_LOG_ERROR,
		    "Could not allocate tbd dma handle: %x", ret);
		tx_ring->tbd_area.dma_handle = NULL;
		return (IGB_FAILURE);
	}

	/*
	 * Allocate memory to DMA data to and from the transmit
	 * descriptors.
	 */
	ret = ddi_dma_mem_alloc(tx_ring->tbd_area.dma_handle,
	    size, &igb_desc_acc_attr, DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, NULL,
	    (caddr_t *)&tx_ring->tbd_area.address,
	    &len, &tx_ring->tbd_area.acc_handle);

	if (ret != DDI_SUCCESS) {
		igb_log(igb, IGB_LOG_ERROR,
		    "Could not allocate tbd dma memory: %x", ret);
		tx_ring->tbd_area.acc_handle = NULL;
		tx_ring->tbd_area.address = NULL;
		if (tx_ring->tbd_area.dma_handle != NULL) {
			ddi_dma_free_handle(&tx_ring->tbd_area.dma_handle);
			tx_ring->tbd_area.dma_handle = NULL;
		}
		return (IGB_FAILURE);
	}

	/*
	 * Initialize the entire transmit buffer descriptor area to zero
	 */
	bzero(tx_ring->tbd_area.address, len);

	/*
	 * Allocates DMA resources for the memory that was allocated by
	 * the ddi_dma_mem_alloc call. The DMA resources then get bound
	 * to the memory address.
	 */
	ret = ddi_dma_addr_bind_handle(tx_ring->tbd_area.dma_handle,
	    NULL, (caddr_t)tx_ring->tbd_area.address,
	    len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, NULL, &cookie, &cookie_num);

	if (ret != DDI_DMA_MAPPED) {
		igb_log(igb, IGB_LOG_ERROR,
		    "Could not bind tbd dma resource: %x", ret);
		tx_ring->tbd_area.dma_address = NULL;
		if (tx_ring->tbd_area.acc_handle != NULL) {
			ddi_dma_mem_free(&tx_ring->tbd_area.acc_handle);
			tx_ring->tbd_area.acc_handle = NULL;
			tx_ring->tbd_area.address = NULL;
		}
		if (tx_ring->tbd_area.dma_handle != NULL) {
			ddi_dma_free_handle(&tx_ring->tbd_area.dma_handle);
			tx_ring->tbd_area.dma_handle = NULL;
		}
		return (IGB_FAILURE);
	}

	ASSERT(cookie_num == 1);

	tx_ring->tbd_area.dma_address = cookie.dmac_laddress;
	tx_ring->tbd_area.size = len;

	tx_ring->tbd_ring = (union e1000_adv_tx_desc *)(uintptr_t)
	    tx_ring->tbd_area.address;

	return (IGB_SUCCESS);
}
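/*
 * Example of the size computation above: each union e1000_adv_tx_desc
 * is 16 bytes, so a hypothetical ring of 512 descriptors needs
 * 512 * 16 = 8192 bytes, plus one extra 16-byte descriptor when tx
 * head write-back is enabled.
 */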