/*
 * Name: ql_drvr_state_size
 * Function: Computes the number of bytes needed to capture the driver
 *	state: a header, per-ring driver bookkeeping (Tx/Rx/SDS), and the
 *	raw contents of every Tx command, receive descriptor and status
 *	descriptor ring.  Each section is rounded up to a 64-byte boundary;
 *	ql_capture_drvr_state() lays out the buffer with the same sections
 *	in the same order.
 */
static uint32_t
ql_drvr_state_size(qla_host_t *ha)
{
	uint32_t total;
	uint32_t comp;

	/* Fixed-size header. */
	comp = sizeof (qla_drvr_state_hdr_t);
	total = QL_ALIGN(comp, 64);

	/* Per-ring driver bookkeeping state. */
	comp = ha->hw.num_tx_rings * (sizeof (qla_drvr_state_tx_t));
	total += QL_ALIGN(comp, 64);

	comp = ha->hw.num_rds_rings * (sizeof (qla_drvr_state_rx_t));
	total += QL_ALIGN(comp, 64);

	comp = ha->hw.num_sds_rings * (sizeof (qla_drvr_state_sds_t));
	total += QL_ALIGN(comp, 64);

	/* Raw descriptor ring contents. */
	comp = sizeof(q80_tx_cmd_t) * NUM_TX_DESCRIPTORS * ha->hw.num_tx_rings;
	total += QL_ALIGN(comp, 64);

	comp = sizeof(q80_recv_desc_t) * NUM_RX_DESCRIPTORS *
		ha->hw.num_rds_rings;
	total += QL_ALIGN(comp, 64);

	comp = sizeof(q80_stat_desc_t) * NUM_STATUS_DESCRIPTORS *
		ha->hw.num_sds_rings;
	total += QL_ALIGN(comp, 64);

	return (total);
}
/*
 * Name: ql_capture_drvr_state
 * Function: Snapshots the driver state into the pre-allocated
 *	ha->hw.drvr_state buffer: a header, per-ring driver bookkeeping
 *	(Tx/Rx/SDS) and the raw contents of all descriptor rings.  The
 *	section layout (64-byte aligned, in this order) must stay in
 *	lockstep with ql_drvr_state_size(), which sizes the buffer.
 *	If the buffer already holds a capture, it is preserved.
 */
void ql_capture_drvr_state(qla_host_t *ha)
{
	uint8_t *state_buffer;
	uint8_t *ptr;
	qla_drvr_state_hdr_t *hdr;
	uint32_t size;
	int i;

	state_buffer = ha->hw.drvr_state;

	/* Nothing to do if the capture buffer was never allocated. */
	if (state_buffer == NULL)
		return;

	hdr = (qla_drvr_state_hdr_t *)state_buffer;

	hdr->saved = 0;

	/*
	 * A non-zero major version means a previous capture already sits in
	 * the buffer; keep it and just flag that it was preserved.
	 */
	if (hdr->drvr_version_major) {
		hdr->saved = 1;
		return;
	}

	hdr->usec_ts = qla_get_usec_timestamp();

	hdr->drvr_version_major = QLA_VERSION_MAJOR;
	hdr->drvr_version_minor = QLA_VERSION_MINOR;
	hdr->drvr_version_build = QLA_VERSION_BUILD;

	bcopy(ha->hw.mac_addr, hdr->mac_addr, ETHER_ADDR_LEN);

	/* Link/PHY status at the time of capture. */
	hdr->link_speed = ha->hw.link_speed;
	hdr->cable_length = ha->hw.cable_length;
	hdr->cable_oui = ha->hw.cable_oui;
	hdr->link_up = ha->hw.link_up;
	hdr->module_type = ha->hw.module_type;
	hdr->link_faults = ha->hw.link_faults;
	hdr->rcv_intr_coalesce = ha->hw.rcv_intr_coalesce;
	hdr->xmt_intr_coalesce = ha->hw.xmt_intr_coalesce;

	/* Driver Tx state follows the (64-byte aligned) header. */
	size = sizeof (qla_drvr_state_hdr_t);
	hdr->tx_state_offset = QL_ALIGN(size, 64);
	ptr = state_buffer + hdr->tx_state_offset;

	ql_get_tx_state(ha, (qla_drvr_state_tx_t *)ptr);

	/* Driver Rx state. */
	size = ha->hw.num_tx_rings * (sizeof (qla_drvr_state_tx_t));
	hdr->rx_state_offset = hdr->tx_state_offset + QL_ALIGN(size, 64);
	ptr = state_buffer + hdr->rx_state_offset;

	ql_get_rx_state(ha, (qla_drvr_state_rx_t *)ptr);

	/* Driver SDS state. */
	size = ha->hw.num_rds_rings * (sizeof (qla_drvr_state_rx_t));
	hdr->sds_state_offset = hdr->rx_state_offset + QL_ALIGN(size, 64);
	ptr = state_buffer + hdr->sds_state_offset;

	ql_get_sds_state(ha, (qla_drvr_state_sds_t *)ptr);

	/*
	 * Raw Tx command rings; all Tx rings live in the single contiguous
	 * tx_ring DMA buffer, so one bcopy covers them all.
	 */
	size = ha->hw.num_sds_rings * (sizeof (qla_drvr_state_sds_t));
	hdr->txr_offset = hdr->sds_state_offset + QL_ALIGN(size, 64);
	ptr = state_buffer + hdr->txr_offset;

	hdr->num_tx_rings = ha->hw.num_tx_rings;
	hdr->txr_size = sizeof(q80_tx_cmd_t) * NUM_TX_DESCRIPTORS;
	hdr->txr_entries = NUM_TX_DESCRIPTORS;

	size = hdr->num_tx_rings * hdr->txr_size;
	bcopy(ha->hw.dma_buf.tx_ring.dma_b, ptr, size);

	/* Raw receive descriptor rings, one DMA buffer per ring. */
	hdr->rxr_offset = hdr->txr_offset + QL_ALIGN(size, 64);
	ptr = state_buffer + hdr->rxr_offset;

	hdr->rxr_size = sizeof(q80_recv_desc_t) * NUM_RX_DESCRIPTORS;
	hdr->rxr_entries = NUM_RX_DESCRIPTORS;
	hdr->num_rx_rings = ha->hw.num_rds_rings;

	for (i = 0; i < ha->hw.num_rds_rings; i++) {
		bcopy(ha->hw.dma_buf.rds_ring[i].dma_b, ptr, hdr->rxr_size);
		ptr += hdr->rxr_size;
	}

	/* Raw status descriptor rings, one DMA buffer per ring. */
	size = hdr->rxr_size * hdr->num_rx_rings;
	hdr->sds_offset = hdr->rxr_offset + QL_ALIGN(size, 64);

	hdr->sds_ring_size = sizeof(q80_stat_desc_t) * NUM_STATUS_DESCRIPTORS;
	hdr->sds_entries = NUM_STATUS_DESCRIPTORS;
	hdr->num_sds_rings = ha->hw.num_sds_rings;

	ptr = state_buffer + hdr->sds_offset;

	for (i = 0; i < ha->hw.num_sds_rings; i++) {
		bcopy(ha->hw.dma_buf.sds_ring[i].dma_b, ptr,
			hdr->sds_ring_size);
		ptr += hdr->sds_ring_size;
	}
	return;
}
/*
 * Name: qla_init_cntxt_regions
 * Function: Initializes Tx/Rx Contexts.
 *	Carves the pre-allocated "context" DMA area into five consecutive
 *	QL_BUFFER_ALIGN-aligned regions (Tx context request, Tx context
 *	response, Rx context request, Rx context response, Tx consumer
 *	index) and pre-fills the Tx and Rx context request structures so
 *	context creation does not have to rebuild them each time.
 *	Called from qla_alloc_dma() after all DMA buffers are allocated.
 */
static void
qla_init_cntxt_regions(qla_host_t *ha)
{
	qla_hw_t *hw;
	q80_tx_cntxt_req_t *tx_cntxt_req;
	q80_rcv_cntxt_req_t *rx_cntxt_req;
	bus_addr_t phys_addr;
	uint32_t i;
	device_t dev;
	uint32_t size;

	dev = ha->pci_dev;

	hw = &ha->hw;

	/* Cache kernel virtual addresses of the descriptor rings. */
	hw->tx_ring_base = hw->dma_buf.tx_ring.dma_b;

	for (i = 0; i < ha->hw.num_sds_rings; i++)
		hw->sds[i].sds_ring_base =
			(q80_stat_desc_t *)hw->dma_buf.sds_ring[i].dma_b;

	/*
	 * Walk the context area; the virtual pointer and the physical
	 * address advance in lockstep so each region's paddr matches its
	 * vaddr.  The sizes here must match those summed in
	 * qla_alloc_dma() when the context buffer was sized.
	 */
	phys_addr = hw->dma_buf.context.dma_addr;

	memset((void *)hw->dma_buf.context.dma_b, 0,
		ha->hw.dma_buf.context.size);

	hw->tx_cntxt_req =
		(q80_tx_cntxt_req_t *)hw->dma_buf.context.dma_b;
	hw->tx_cntxt_req_paddr = phys_addr;

	size = QL_ALIGN((sizeof (q80_tx_cntxt_req_t)), QL_BUFFER_ALIGN);

	hw->tx_cntxt_rsp =
		(q80_tx_cntxt_rsp_t *)((uint8_t *)hw->tx_cntxt_req + size);
	hw->tx_cntxt_rsp_paddr = hw->tx_cntxt_req_paddr + size;

	size = QL_ALIGN((sizeof (q80_tx_cntxt_rsp_t)), QL_BUFFER_ALIGN);

	hw->rx_cntxt_req =
		(q80_rcv_cntxt_req_t *)((uint8_t *)hw->tx_cntxt_rsp + size);
	hw->rx_cntxt_req_paddr = hw->tx_cntxt_rsp_paddr + size;

	size = QL_ALIGN((sizeof (q80_rcv_cntxt_req_t)), QL_BUFFER_ALIGN);

	hw->rx_cntxt_rsp =
		(q80_rcv_cntxt_rsp_t *)((uint8_t *)hw->rx_cntxt_req + size);
	hw->rx_cntxt_rsp_paddr = hw->rx_cntxt_req_paddr + size;

	size = QL_ALIGN((sizeof (q80_rcv_cntxt_rsp_t)), QL_BUFFER_ALIGN);

	/* Final region: the firmware-updated Tx consumer index. */
	hw->tx_cons = (uint32_t *)((uint8_t *)hw->rx_cntxt_rsp + size);
	hw->tx_cons_paddr = hw->rx_cntxt_rsp_paddr + size;

	/*
	 * Initialize the Transmit Context Request so that we don't need to
	 * do it everytime we need to create a context
	 */
	tx_cntxt_req = hw->tx_cntxt_req;

	tx_cntxt_req->rsp_dma_addr = qla_host_to_le64(hw->tx_cntxt_rsp_paddr);

	tx_cntxt_req->cmd_cons_dma_addr = qla_host_to_le64(hw->tx_cons_paddr);

	tx_cntxt_req->caps[0] = qla_host_to_le32((CNTXT_CAP0_BASEFW |
				CNTXT_CAP0_LEGACY_MN | CNTXT_CAP0_LSO));

	tx_cntxt_req->intr_mode = qla_host_to_le32(CNTXT_INTR_MODE_SHARED);

	tx_cntxt_req->phys_addr =
		qla_host_to_le64(hw->dma_buf.tx_ring.dma_addr);

	tx_cntxt_req->num_entries = qla_host_to_le32(NUM_TX_DESCRIPTORS);

	/*
	 * Initialize the Receive Context Request
	 */
	rx_cntxt_req = hw->rx_cntxt_req;

	rx_cntxt_req->rx_req.rsp_dma_addr =
		qla_host_to_le64(hw->rx_cntxt_rsp_paddr);

	rx_cntxt_req->rx_req.caps[0] = qla_host_to_le32(CNTXT_CAP0_BASEFW |
						CNTXT_CAP0_LEGACY_MN |
						CNTXT_CAP0_JUMBO |
						CNTXT_CAP0_LRO|
						CNTXT_CAP0_HW_LRO);

	rx_cntxt_req->rx_req.intr_mode =
		qla_host_to_le32(CNTXT_INTR_MODE_SHARED);

	rx_cntxt_req->rx_req.rds_intr_mode =
		qla_host_to_le32(CNTXT_INTR_MODE_UNIQUE);

	/* SDS ring descriptors immediately follow the RDS descriptors. */
	rx_cntxt_req->rx_req.rds_ring_offset = 0;
	rx_cntxt_req->rx_req.sds_ring_offset = qla_host_to_le32(
		(hw->num_rds_rings * sizeof(q80_rq_rds_ring_t)));
	rx_cntxt_req->rx_req.num_rds_rings =
		qla_host_to_le16(hw->num_rds_rings);
	rx_cntxt_req->rx_req.num_sds_rings =
		qla_host_to_le16(hw->num_sds_rings);

	for (i = 0; i < hw->num_rds_rings; i++) {
		rx_cntxt_req->rds_req[i].phys_addr =
			qla_host_to_le64(hw->dma_buf.rds_ring[i].dma_addr);

		if (i == RDS_RING_INDEX_NORMAL) {
			/* Normal ring is filled with standard clusters. */
			rx_cntxt_req->rds_req[i].buf_size =
				qla_host_to_le64(MCLBYTES);
			rx_cntxt_req->rds_req[i].size =
				qla_host_to_le32(NUM_RX_DESCRIPTORS);
		} else {
			/* Jumbo ring is filled with 9K clusters. */
			rx_cntxt_req->rds_req[i].buf_size =
				qla_host_to_le64(MJUM9BYTES);
			rx_cntxt_req->rds_req[i].size =
				qla_host_to_le32(NUM_RX_JUMBO_DESCRIPTORS);
		}
	}

	for (i = 0; i < hw->num_sds_rings; i++) {
		rx_cntxt_req->sds_req[i].phys_addr =
			qla_host_to_le64(hw->dma_buf.sds_ring[i].dma_addr);
		rx_cntxt_req->sds_req[i].size =
			qla_host_to_le32(NUM_STATUS_DESCRIPTORS);
		/* One interrupt vector per status ring. */
		rx_cntxt_req->sds_req[i].msi_index = qla_host_to_le16(i);
	}

	QL_DPRINT2((ha->pci_dev, "%s: tx_cntxt_req = %p paddr %p\n",
		__func__, hw->tx_cntxt_req, (void *)hw->tx_cntxt_req_paddr));
	QL_DPRINT2((ha->pci_dev, "%s: tx_cntxt_rsp = %p paddr %p\n",
		__func__, hw->tx_cntxt_rsp, (void *)hw->tx_cntxt_rsp_paddr));
	QL_DPRINT2((ha->pci_dev, "%s: rx_cntxt_req = %p paddr %p\n",
		__func__, hw->rx_cntxt_req, (void *)hw->rx_cntxt_req_paddr));
	QL_DPRINT2((ha->pci_dev, "%s: rx_cntxt_rsp = %p paddr %p\n",
		__func__, hw->rx_cntxt_rsp, (void *)hw->rx_cntxt_rsp_paddr));
	QL_DPRINT2((ha->pci_dev, "%s: tx_cons = %p paddr %p\n", __func__,
		hw->tx_cons, (void *)hw->tx_cons_paddr));
}
/*
 * Name: qla_alloc_dma
 * Function: Allocates DMA'able memory for Tx/Rx Rings, Tx/Rx Contexts.
 * Returns: 0 on success; -1 on failure, after releasing everything
 *	already allocated via qla_free_dma().
 */
int
qla_alloc_dma(qla_host_t *ha)
{
	device_t dev;
	uint32_t i, j, size;

	dev = ha->pci_dev;

	QL_DPRINT2((dev, "%s: enter\n", __func__));

	ha->hw.num_rds_rings = (uint16_t)sysctl_num_rds_rings;
	ha->hw.num_sds_rings = (uint16_t)sysctl_num_sds_rings;

	/*
	 * Allocate Transmit Ring
	 */
	ha->hw.dma_buf.tx_ring.alignment = 8;
	ha->hw.dma_buf.tx_ring.size =
		(sizeof(q80_tx_cmd_t)) * NUM_TX_DESCRIPTORS;

	if (qla_alloc_dmabuf(ha, &ha->hw.dma_buf.tx_ring)) {
		device_printf(dev, "%s: tx ring alloc failed\n", __func__);
		goto qla_alloc_dma_exit;
	}
	ha->hw.dma_buf.flags.tx_ring = 1;

	QL_DPRINT2((dev, "%s: tx_ring phys %p virt %p\n",
		__func__, (void *)(ha->hw.dma_buf.tx_ring.dma_addr),
		ha->hw.dma_buf.tx_ring.dma_b));

	/*
	 * Allocate Receive Descriptor Rings
	 */
	for (i = 0; i < ha->hw.num_rds_rings; i++) {
		ha->hw.dma_buf.rds_ring[i].alignment = 8;

		if (i == RDS_RING_INDEX_NORMAL) {
			ha->hw.dma_buf.rds_ring[i].size =
				(sizeof(q80_recv_desc_t)) * NUM_RX_DESCRIPTORS;
		} else if (i == RDS_RING_INDEX_JUMBO) {
			ha->hw.dma_buf.rds_ring[i].size =
				(sizeof(q80_recv_desc_t)) *
					NUM_RX_JUMBO_DESCRIPTORS;
		} else
			break;	/* only normal and jumbo rings exist */

		if (qla_alloc_dmabuf(ha, &ha->hw.dma_buf.rds_ring[i])) {
			/*
			 * Report unconditionally (device_printf), matching
			 * the tx/sds/context failure paths; the old
			 * QL_DPRINT4 was compiled out in production builds,
			 * making this failure silent.
			 */
			device_printf(dev, "%s: rds ring alloc failed\n",
				__func__);

			/*
			 * flags.rds_ring is still 0, so qla_free_dma()
			 * (via the exit label) would skip these; free the
			 * rings allocated so far explicitly.
			 */
			for (j = 0; j < i; j++)
				qla_free_dmabuf(ha,
					&ha->hw.dma_buf.rds_ring[j]);

			goto qla_alloc_dma_exit;
		}
		QL_DPRINT4((dev, "%s: rx_ring[%d] phys %p virt %p\n",
			__func__, i,
			(void *)(ha->hw.dma_buf.rds_ring[i].dma_addr),
			ha->hw.dma_buf.rds_ring[i].dma_b));
	}
	ha->hw.dma_buf.flags.rds_ring = 1;

	/*
	 * Allocate Status Descriptor Rings
	 */
	for (i = 0; i < ha->hw.num_sds_rings; i++) {
		ha->hw.dma_buf.sds_ring[i].alignment = 8;
		ha->hw.dma_buf.sds_ring[i].size =
			(sizeof(q80_stat_desc_t)) * NUM_STATUS_DESCRIPTORS;

		if (qla_alloc_dmabuf(ha, &ha->hw.dma_buf.sds_ring[i])) {
			device_printf(dev, "%s: sds ring alloc failed\n",
				__func__);

			/* Same reasoning as the rds failure path above. */
			for (j = 0; j < i; j++)
				qla_free_dmabuf(ha,
					&ha->hw.dma_buf.sds_ring[j]);

			goto qla_alloc_dma_exit;
		}
		QL_DPRINT4((dev, "%s: sds_ring[%d] phys %p virt %p\n",
			__func__, i,
			(void *)(ha->hw.dma_buf.sds_ring[i].dma_addr),
			ha->hw.dma_buf.sds_ring[i].dma_b));
	}
	ha->hw.dma_buf.flags.sds_ring = 1;

	/*
	 * Allocate Context Area; the summed region sizes must match the
	 * carving done in qla_init_cntxt_regions().
	 */
	size = QL_ALIGN((sizeof (q80_tx_cntxt_req_t)), QL_BUFFER_ALIGN);

	size += QL_ALIGN((sizeof (q80_tx_cntxt_rsp_t)), QL_BUFFER_ALIGN);

	size += QL_ALIGN((sizeof (q80_rcv_cntxt_req_t)), QL_BUFFER_ALIGN);

	size += QL_ALIGN((sizeof (q80_rcv_cntxt_rsp_t)), QL_BUFFER_ALIGN);

	size += sizeof (uint32_t); /* for tx consumer index */

	size = QL_ALIGN(size, PAGE_SIZE);

	ha->hw.dma_buf.context.alignment = 8;
	ha->hw.dma_buf.context.size = size;

	if (qla_alloc_dmabuf(ha, &ha->hw.dma_buf.context)) {
		device_printf(dev, "%s: context alloc failed\n", __func__);
		goto qla_alloc_dma_exit;
	}
	ha->hw.dma_buf.flags.context = 1;
	QL_DPRINT2((dev, "%s: context phys %p virt %p\n",
		__func__, (void *)(ha->hw.dma_buf.context.dma_addr),
		ha->hw.dma_buf.context.dma_b));

	qla_init_cntxt_regions(ha);

	return 0;

qla_alloc_dma_exit:
	qla_free_dma(ha);
	return -1;
}