static void cvm_oct_free_hw_memory(int pool, int size, int elements) { if (USE_32BIT_SHARED) { pr_warning("Warning: 32 shared memory is not freeable\n"); } else { char *memory; do { memory = cvmx_fpa_alloc(pool); if (memory) { elements--; kfree(phys_to_virt(cvmx_ptr_to_phys(memory))); } } while (memory); if (elements < 0) pr_warning("Freeing of pool %u had too many " "buffers (%d)\n", pool, elements); else if (elements > 0) pr_warning("Warning: Freeing of pool %u is " "missing %d buffers\n", pool, elements); } }
cvmx_npei_dmax_ibuff_saddr_t dmax_ibuff_saddr; dmax_ibuff_saddr.u64 = 0; dmax_ibuff_saddr.s.saddr = cvmx_ptr_to_phys(cvmx_cmd_queue_buffer(CVMX_CMD_QUEUE_DMA(engine))) >> 7; cvmx_write_csr(CVMX_PEXP_NPEI_DMAX_IBUFF_SADDR(engine), dmax_ibuff_saddr.u64); } else if (octeon_has_feature(OCTEON_FEATURE_PCIE)) { cvmx_dpi_dmax_ibuff_saddr_t dpi_dmax_ibuff_saddr; dpi_dmax_ibuff_saddr.u64 = 0; dpi_dmax_ibuff_saddr.s.csize = CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE/8; dpi_dmax_ibuff_saddr.s.saddr = cvmx_ptr_to_phys(cvmx_cmd_queue_buffer(CVMX_CMD_QUEUE_DMA(engine))) >> 7; cvmx_write_csr(CVMX_DPI_DMAX_IBUFF_SADDR(engine), dpi_dmax_ibuff_saddr.u64); } else { uint64_t address = cvmx_ptr_to_phys(cvmx_cmd_queue_buffer(CVMX_CMD_QUEUE_DMA(engine))); if (engine) cvmx_write_csr(CVMX_NPI_HIGHP_IBUFF_SADDR, address); else cvmx_write_csr(CVMX_NPI_LOWP_IBUFF_SADDR, address); } } if (octeon_has_feature(OCTEON_FEATURE_NPEI)) { cvmx_npei_dma_control_t dma_control; dma_control.u64 = 0; if (cvmx_dma_engine_get_num() >= 5) dma_control.s.dma4_enb = 1; dma_control.s.dma3_enb = 1; dma_control.s.dma2_enb = 1;
/**
 * Configure a output port and the associated queues for use.
 *
 * @port:       Port to configure.
 * @base_queue: First queue number to associate with this port.
 * @num_queues: Number of queues to associate with this port
 * @priority:   Array of priority levels for each queue. Values are
 *              allowed to be 0-8. A value of 8 get 8 times the traffic
 *              of a value of 1. A value of 0 indicates that no rounds
 *              will be participated in. These priorities can be changed
 *              on the fly while the pko is enabled. A priority of 9
 *              indicates that static priority should be used. If static
 *              priority is used all queues with static priority must be
 *              contiguous starting at the base_queue, and lower numbered
 *              queues have higher priority than higher numbered queues.
 *              There must be num_queues elements in the array.
 *
 * Returns CVMX_PKO_SUCCESS, or a CVMX_PKO_* error code on invalid
 * port/queue/priority arguments or command-queue setup failure.
 */
cvmx_pko_status_t cvmx_pko_config_port(uint64_t port, uint64_t base_queue,
				       uint64_t num_queues,
				       const uint64_t priority[])
{
	cvmx_pko_status_t result_code;
	uint64_t queue;
	union cvmx_pko_mem_queue_ptrs config;
	union cvmx_pko_reg_queue_ptrs1 config1;
	/* -1 in both means "no static-priority queues configured". */
	int static_priority_base = -1;
	int static_priority_end = -1;

	if ((port >= CVMX_PKO_NUM_OUTPUT_PORTS)
	    && (port != CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID)) {
		cvmx_dprintf("ERROR: cvmx_pko_config_port: Invalid port %llu\n",
			     (unsigned long long)port);
		return CVMX_PKO_INVALID_PORT;
	}

	if (base_queue + num_queues > CVMX_PKO_MAX_OUTPUT_QUEUES) {
		cvmx_dprintf
		    ("ERROR: cvmx_pko_config_port: Invalid queue range %llu\n",
		     (unsigned long long)(base_queue + num_queues));
		return CVMX_PKO_INVALID_QUEUE;
	}

	if (port != CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID) {
		/*
		 * Validate the static queue priority setup and set
		 * static_priority_base and static_priority_end
		 * accordingly.
		 */
		for (queue = 0; queue < num_queues; queue++) {
			/* Find first queue of static priority */
			if (static_priority_base == -1
			    && priority[queue] ==
			    CVMX_PKO_QUEUE_STATIC_PRIORITY)
				static_priority_base = queue;
			/* Find last queue of static priority */
			if (static_priority_base != -1
			    && static_priority_end == -1
			    && priority[queue] !=
			    CVMX_PKO_QUEUE_STATIC_PRIORITY && queue)
				static_priority_end = queue - 1;
			else if (static_priority_base != -1
				 && static_priority_end == -1
				 && queue == num_queues - 1)
				/* all queues are static priority */
				static_priority_end = queue;
			/*
			 * Check to make sure all static priority
			 * queues are contiguous.  Also catches some
			 * cases of static priorites not starting at
			 * queue 0.
			 */
			if (static_priority_end != -1
			    && (int)queue > static_priority_end
			    && priority[queue] ==
			    CVMX_PKO_QUEUE_STATIC_PRIORITY) {
				cvmx_dprintf("ERROR: cvmx_pko_config_port: "
					     "Static priority queues aren't "
					     "contiguous or don't start at "
					     "base queue. q: %d, eq: %d\n",
					     (int)queue, static_priority_end);
				return CVMX_PKO_INVALID_PRIORITY;
			}
		}
		if (static_priority_base > 0) {
			cvmx_dprintf("ERROR: cvmx_pko_config_port: Static "
				     "priority queues don't start at base "
				     "queue. sq: %d\n", static_priority_base);
			return CVMX_PKO_INVALID_PRIORITY;
		}
	}
	/*
	 * At this point, static_priority_base and static_priority_end
	 * are either both -1, or are valid start/end queue
	 * numbers.
	 */

	result_code = CVMX_PKO_SUCCESS;
#ifdef PKO_DEBUG
	cvmx_dprintf("num queues: %d (%lld,%lld)\n", num_queues,
		     CVMX_PKO_QUEUES_PER_PORT_INTERFACE0,
		     CVMX_PKO_QUEUES_PER_PORT_INTERFACE1);
#endif
	for (queue = 0; queue < num_queues; queue++) {
		uint64_t *buf_ptr = NULL;
		config1.u64 = 0;
		config1.s.idx3 = queue >> 3;
		config1.s.qid7 = (base_queue + queue) >> 7;
		config.u64 = 0;
		config.s.tail = queue == (num_queues - 1);
		config.s.index = queue;
		config.s.port = port;
		config.s.queue = base_queue + queue;

		if (!cvmx_octeon_is_pass1()) {
			config.s.static_p = static_priority_base >= 0;
			config.s.static_q = (int)queue <= static_priority_end;
			config.s.s_tail = (int)queue == static_priority_end;
		}
		/*
		 * Convert the priority into an enable bit field. Try
		 * to space the bits out evenly so the packet don't
		 * get grouped up
		 */
		switch ((int)priority[queue]) {
		case 0:
			config.s.qos_mask = 0x00;
			break;
		case 1:
			config.s.qos_mask = 0x01;
			break;
		case 2:
			config.s.qos_mask = 0x11;
			break;
		case 3:
			config.s.qos_mask = 0x49;
			break;
		case 4:
			config.s.qos_mask = 0x55;
			break;
		case 5:
			config.s.qos_mask = 0x57;
			break;
		case 6:
			config.s.qos_mask = 0x77;
			break;
		case 7:
			config.s.qos_mask = 0x7f;
			break;
		case 8:
			config.s.qos_mask = 0xff;
			break;
		case CVMX_PKO_QUEUE_STATIC_PRIORITY:
			/* Pass 1 will fall through to the error case */
			if (!cvmx_octeon_is_pass1()) {
				config.s.qos_mask = 0xff;
				break;
			}
			/* fallthrough */
		default:
			cvmx_dprintf("ERROR: cvmx_pko_config_port: Invalid "
				     "priority %llu\n",
				     (unsigned long long)priority[queue]);
			config.s.qos_mask = 0xff;
			result_code = CVMX_PKO_INVALID_PRIORITY;
			break;
		}

		if (port != CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID) {
			cvmx_cmd_queue_result_t cmd_res =
			    cvmx_cmd_queue_initialize(CVMX_CMD_QUEUE_PKO
						      (base_queue + queue),
						      CVMX_PKO_MAX_QUEUE_DEPTH,
						      CVMX_FPA_OUTPUT_BUFFER_POOL,
						      CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE
						      -
						      CVMX_PKO_COMMAND_BUFFER_SIZE_ADJUST
						      * 8);
			if (cmd_res != CVMX_CMD_QUEUE_SUCCESS) {
				switch (cmd_res) {
				case CVMX_CMD_QUEUE_NO_MEMORY:
					cvmx_dprintf("ERROR: "
						     "cvmx_pko_config_port: "
						     "Unable to allocate "
						     "output buffer.\n");
					return CVMX_PKO_NO_MEMORY;
				case CVMX_CMD_QUEUE_ALREADY_SETUP:
					cvmx_dprintf
					    ("ERROR: cvmx_pko_config_port: Port already setup.\n");
					return CVMX_PKO_PORT_ALREADY_SETUP;
				case CVMX_CMD_QUEUE_INVALID_PARAM:
				default:
					cvmx_dprintf
					    ("ERROR: cvmx_pko_config_port: Command queue initialization failed.\n");
					return CVMX_PKO_CMD_QUEUE_INIT_ERROR;
				}
			}

			buf_ptr = (uint64_t *)
			    cvmx_cmd_queue_buffer(CVMX_CMD_QUEUE_PKO
						  (base_queue + queue));
			config.s.buf_ptr = cvmx_ptr_to_phys(buf_ptr);
		} else
			config.s.buf_ptr = 0;

		/* Ensure the command-queue writes are visible before the
		 * CSR writes below make the queue live. */
		CVMX_SYNCWS;

		if (!OCTEON_IS_MODEL(OCTEON_CN3XXX))
			cvmx_write_csr(CVMX_PKO_REG_QUEUE_PTRS1, config1.u64);
		cvmx_write_csr(CVMX_PKO_MEM_QUEUE_PTRS, config.u64);
	}

	return result_code;
}
/**
 * Release a bootmem block previously handed out to the HFA code.
 *
 * @ptr:  Virtual address of the block to free.
 * @size: Size of the block in bytes.
 *
 * Return: status from __cvmx_bootmem_phy_free().
 */
int hfa_bootmem_free(void *ptr, uint64_t size)
{
	return __cvmx_bootmem_phy_free(cvmx_ptr_to_phys(ptr), size, 0);
}
/**
 * Configure an output port and the queues associated with it.
 *
 * @port:       Port to configure (or CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID
 *              to configure unused queues).
 * @base_queue: First queue number to associate with this port.
 * @num_queues: Number of queues to associate with this port.
 * @priority:   Per-queue priority array (num_queues entries). Values
 *              0-8 select a round-robin weight (0 = do not participate);
 *              CVMX_PKO_QUEUE_STATIC_PRIORITY selects static priority,
 *              in which case all static-priority queues must be
 *              contiguous and start at base_queue.
 *
 * Returns CVMX_PKO_SUCCESS, or a CVMX_PKO_* error code on invalid
 * port/queue/priority arguments or command-queue setup failure.
 */
cvmx_pko_status_t cvmx_pko_config_port(uint64_t port, uint64_t base_queue,
				       uint64_t num_queues,
				       const uint64_t priority[])
{
	cvmx_pko_status_t result_code;
	uint64_t queue;
	union cvmx_pko_mem_queue_ptrs config;
	union cvmx_pko_reg_queue_ptrs1 config1;
	/* -1 in both means "no static-priority queues configured". */
	int static_priority_base = -1;
	int static_priority_end = -1;

	if ((port >= CVMX_PKO_NUM_OUTPUT_PORTS)
	    && (port != CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID)) {
		cvmx_dprintf("ERROR: cvmx_pko_config_port: Invalid port %llu\n",
			     (unsigned long long)port);
		return CVMX_PKO_INVALID_PORT;
	}

	if (base_queue + num_queues > CVMX_PKO_MAX_OUTPUT_QUEUES) {
		cvmx_dprintf
		    ("ERROR: cvmx_pko_config_port: Invalid queue range %llu\n",
		     (unsigned long long)(base_queue + num_queues));
		return CVMX_PKO_INVALID_QUEUE;
	}

	if (port != CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID) {
		/*
		 * Validate the static-priority setup and compute
		 * static_priority_base / static_priority_end.
		 */
		for (queue = 0; queue < num_queues; queue++) {
			/* First queue with static priority. */
			if (static_priority_base == -1
			    && priority[queue] ==
			    CVMX_PKO_QUEUE_STATIC_PRIORITY)
				static_priority_base = queue;
			/* Last queue with static priority. */
			if (static_priority_base != -1
			    && static_priority_end == -1
			    && priority[queue] !=
			    CVMX_PKO_QUEUE_STATIC_PRIORITY && queue)
				static_priority_end = queue - 1;
			else if (static_priority_base != -1
				 && static_priority_end == -1
				 && queue == num_queues - 1)
				/* All queues are static priority. */
				static_priority_end = queue;
			/*
			 * Reject static-priority queues that are not
			 * contiguous (also catches runs that do not
			 * start at queue 0).
			 */
			if (static_priority_end != -1
			    && (int)queue > static_priority_end
			    && priority[queue] ==
			    CVMX_PKO_QUEUE_STATIC_PRIORITY) {
				cvmx_dprintf("ERROR: cvmx_pko_config_port: "
					     "Static priority queues aren't "
					     "contiguous or don't start at "
					     "base queue. q: %d, eq: %d\n",
					     (int)queue, static_priority_end);
				return CVMX_PKO_INVALID_PRIORITY;
			}
		}
		if (static_priority_base > 0) {
			cvmx_dprintf("ERROR: cvmx_pko_config_port: Static "
				     "priority queues don't start at base "
				     "queue. sq: %d\n", static_priority_base);
			return CVMX_PKO_INVALID_PRIORITY;
		}
#if 0
		cvmx_dprintf("Port %d: Static priority queue base: %d, "
			     "end: %d\n", port,
			     static_priority_base, static_priority_end);
#endif
	}
	/*
	 * At this point static_priority_base / static_priority_end are
	 * either both -1 or valid start/end queue numbers.
	 */

	result_code = CVMX_PKO_SUCCESS;
#ifdef PKO_DEBUG
	cvmx_dprintf("num queues: %d (%lld,%lld)\n", num_queues,
		     CVMX_PKO_QUEUES_PER_PORT_INTERFACE0,
		     CVMX_PKO_QUEUES_PER_PORT_INTERFACE1);
#endif
	for (queue = 0; queue < num_queues; queue++) {
		uint64_t *buf_ptr = NULL;
		config1.u64 = 0;
		config1.s.idx3 = queue >> 3;
		config1.s.qid7 = (base_queue + queue) >> 7;
		config.u64 = 0;
		config.s.tail = queue == (num_queues - 1);
		config.s.index = queue;
		config.s.port = port;
		config.s.queue = base_queue + queue;

		if (!cvmx_octeon_is_pass1()) {
			config.s.static_p = static_priority_base >= 0;
			config.s.static_q = (int)queue <= static_priority_end;
			config.s.s_tail = (int)queue == static_priority_end;
		}
		/*
		 * Convert the priority into a qos_mask enable bit field,
		 * spacing the bits out so packets do not get grouped up.
		 */
		switch ((int)priority[queue]) {
		case 0:
			config.s.qos_mask = 0x00;
			break;
		case 1:
			config.s.qos_mask = 0x01;
			break;
		case 2:
			config.s.qos_mask = 0x11;
			break;
		case 3:
			config.s.qos_mask = 0x49;
			break;
		case 4:
			config.s.qos_mask = 0x55;
			break;
		case 5:
			config.s.qos_mask = 0x57;
			break;
		case 6:
			config.s.qos_mask = 0x77;
			break;
		case 7:
			config.s.qos_mask = 0x7f;
			break;
		case 8:
			config.s.qos_mask = 0xff;
			break;
		case CVMX_PKO_QUEUE_STATIC_PRIORITY:
			/* Pass 1 falls through to the error case. */
			if (!cvmx_octeon_is_pass1()) {
				config.s.qos_mask = 0xff;
				break;
			}
			/* fallthrough */
		default:
			cvmx_dprintf("ERROR: cvmx_pko_config_port: Invalid "
				     "priority %llu\n",
				     (unsigned long long)priority[queue]);
			config.s.qos_mask = 0xff;
			result_code = CVMX_PKO_INVALID_PRIORITY;
			break;
		}

		if (port != CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID) {
			cvmx_cmd_queue_result_t cmd_res =
			    cvmx_cmd_queue_initialize(CVMX_CMD_QUEUE_PKO
						      (base_queue + queue),
						      CVMX_PKO_MAX_QUEUE_DEPTH,
						      CVMX_FPA_OUTPUT_BUFFER_POOL,
						      CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE
						      -
						      CVMX_PKO_COMMAND_BUFFER_SIZE_ADJUST
						      * 8);
			if (cmd_res != CVMX_CMD_QUEUE_SUCCESS) {
				switch (cmd_res) {
				case CVMX_CMD_QUEUE_NO_MEMORY:
					cvmx_dprintf("ERROR: "
						     "cvmx_pko_config_port: "
						     "Unable to allocate "
						     "output buffer.\n");
					return CVMX_PKO_NO_MEMORY;
				case CVMX_CMD_QUEUE_ALREADY_SETUP:
					cvmx_dprintf
					    ("ERROR: cvmx_pko_config_port: Port already setup.\n");
					return CVMX_PKO_PORT_ALREADY_SETUP;
				case CVMX_CMD_QUEUE_INVALID_PARAM:
				default:
					cvmx_dprintf
					    ("ERROR: cvmx_pko_config_port: Command queue initialization failed.\n");
					return CVMX_PKO_CMD_QUEUE_INIT_ERROR;
				}
			}

			buf_ptr = (uint64_t *)
			    cvmx_cmd_queue_buffer(CVMX_CMD_QUEUE_PKO
						  (base_queue + queue));
			config.s.buf_ptr = cvmx_ptr_to_phys(buf_ptr);
		} else
			config.s.buf_ptr = 0;

		/* Ensure command-queue memory is visible before the CSR
		 * writes below make the queue live. */
		CVMX_SYNCWS;

		if (!OCTEON_IS_MODEL(OCTEON_CN3XXX))
			cvmx_write_csr(CVMX_PKO_REG_QUEUE_PTRS1, config1.u64);
		cvmx_write_csr(CVMX_PKO_MEM_QUEUE_PTRS, config.u64);
	}

	return result_code;
}
/** * NAND specific update routine. Handles erasing the previous * image if it exists. * * @param image_addr Address of image in DRAM. Always * has an image header. * * @return 0 on success * 1 on failure */ int do_bootloader_update_nand(uint32_t image_addr) { const bootloader_header_t *new_header; const bootloader_header_t *header; int chip = oct_nand_get_cur_chip(); int page_size = cvmx_nand_get_page_size(chip); int oob_size = octeon_nand_get_oob_size(chip); int pages_per_block = cvmx_nand_get_pages_per_block(chip); int bytes; uint64_t block_size = page_size * pages_per_block; uint64_t nand_addr = block_size; uint64_t buf_storage[2200 / 8] = { 0 }; unsigned char *buf = (unsigned char *)buf_storage; int read_size = CVMX_NAND_BOOT_ECC_BLOCK_SIZE + 8; uint64_t old_image_nand_addr = 0; int old_image_size = 0; int required_len; int required_blocks; int conseq_blank_blocks; uint64_t erase_base; uint64_t erase_end; header = (void *)buf; new_header = (void *)image_addr; if (!cvmx_nand_get_active_chips()) { puts("ERROR: No NAND Flash detected on board, can't burn " "NAND bootloader image\n"); return 1; } /* Find matching type (failsafe/normal, stage2/stage3) of image that * is currently in NAND, if present. Save location for later erasing */ while ((nand_addr = oct_nand_image_search(nand_addr, MAX_NAND_SEARCH_ADDR, new_header->image_type))) { /* Read new header */ bytes = cvmx_nand_page_read(chip, nand_addr, cvmx_ptr_to_phys(buf), read_size); if (bytes != read_size) { printf("Error reading NAND addr 0x%llx (bytes_read: %d, expected: %d)\n", nand_addr, bytes, read_size); return 1; } /* Check a few more fields from the headers */ if (cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_GENERIC && header->board_type != CVMX_BOARD_TYPE_GENERIC) { /* If the board type of the running image is generic, * don't do any board matching. When looking for images * in NAND to overwrite, treat generic board type images * as matching all board types. 
*/ if (new_header->board_type != header->board_type) { puts("WARNING: A bootloader for a different " "board type was found and skipped (not erased.)\n"); /* Different board type, so skip (this is * strange to find..... */ nand_addr += ((header->hlen + header->dlen + page_size - 1) & ~(page_size - 1)); continue; } } if ((new_header->flags & BL_HEADER_FLAG_FAILSAFE) != (new_header->flags & BL_HEADER_FLAG_FAILSAFE)) { /* Not a match, so skip */ nand_addr += ((header->hlen + header->dlen + page_size - 1) & ~(page_size - 1)); continue; } /* A match, so break out */ old_image_nand_addr = nand_addr; old_image_size = header->hlen + header->dlen; printf("Found existing bootloader image of same type at NAND addr: 0x%llx\n", old_image_nand_addr); break; } /* nand_addr is either 0 (no image found), or has the address of the * image we will delete after the write of the new image. */ if (!nand_addr) puts("No existing matching bootloader found in flash\n"); /* Find a blank set of _blocks_ to put the new image in. We want * to make sure that we don't put any part of it in a block with * something else, as we want to be able to erase it later. */ required_len = new_header->hlen + new_header->dlen; required_blocks = (required_len + block_size - 1) / block_size; conseq_blank_blocks = 0; read_size = page_size + oob_size; for (nand_addr = block_size; nand_addr < MAX_NAND_SEARCH_ADDR; nand_addr += block_size) { if (oct_nand_block_is_blank(nand_addr)) { conseq_blank_blocks++; if (conseq_blank_blocks == required_blocks) { /* We have a large enough blank spot */ nand_addr -= (conseq_blank_blocks - 1) * block_size; break; } } else conseq_blank_blocks = 0; } if (nand_addr >= MAX_NAND_SEARCH_ADDR) { puts("ERROR: unable to find blank space for new bootloader\n"); return 1; } printf("New bootloader image will be written at blank address 0x%llx, length 0x%x\n", nand_addr, required_len); /* Write the new bootloader to blank location. 
*/ if (0 > oct_nand_boot_write(nand_addr, (void *)image_addr, required_len, 0)) { puts("ERROR: error while writing new image to flash.\n"); return 1; } /* Now erase the old bootloader of the same type. * We know these are not bad NAND blocks since they have valid data * in them. */ erase_base = old_image_nand_addr & ~(block_size - 1); erase_end = ((old_image_nand_addr + old_image_size + block_size - 1) & ~(block_size - 1)); for (nand_addr = erase_base; nand_addr < erase_end; nand_addr += block_size) { if (cvmx_nand_block_erase(chip, nand_addr)) { printf("cvmx_nand_block_erase() failed, addr 0x%08llx\n", nand_addr); return 1; } } puts("Bootloader update in NAND complete.\n"); return 0; }
/*
 * Software transmit path: queue an mbuf on the PKO output port.
 *
 * @mbuf:    Packet buffer to transmit.
 * @outport: PKO output port number.
 *
 * Registers the mbuf in the per-CPU tx-done table (so it is freed on
 * completion), builds the three PKO command words and issues the send.
 * On any failure the packet is destroyed and a stat counter bumped.
 */
void oct_tx_process_sw(mbuf_t *mbuf, uint8_t outport)
{
	uint8_t *cookie = NULL;
	cvmx_pko_return_value_t rv;
	uint64_t queue = cvmx_pko_get_base_queue(outport);

	cvmx_pko_send_packet_prepare(outport, queue, CVMX_PKO_LOCK_CMD_QUEUE);

	tx_done_t *tx_done = &(oct_stx[LOCAL_CPU_ID]->tx_done[outport]);

	/* No room left in the tx-done descriptor ring: drop the packet. */
	if (tx_done->tx_entries >= (OCT_PKO_TX_DESC_NUM - 1)) {
		PACKET_DESTROY_ALL(mbuf);
		STAT_TX_SW_DESC_ERR;
		return;
	}
	cookie = oct_pend_tx_done_add(tx_done, (void *)mbuf);

	/* Command word 0: single segment, hardware must not free. */
	cvmx_pko_command_word0_t pko_command;
	pko_command.u64 = 0;
	pko_command.s.segs = 1;
	pko_command.s.total_bytes = mbuf->pkt_totallen;
	pko_command.s.rsp = 1;
	pko_command.s.dontfree = 1;

	/* Command word 1: pointer/size of the packet data. */
	cvmx_buf_ptr_t packet;
	packet.u64 = 0;
	packet.s.size = mbuf->pkt_totallen;
	packet.s.addr = (uint64_t)mbuf->pkt_ptr;

	/* Command word 2: physical address of the tx-done cookie. */
	cvmx_pko_command_word2_t tx_ptr_word;
	tx_ptr_word.u64 = 0;
	tx_ptr_word.s.ptr = (uint64_t)cvmx_ptr_to_phys(cookie);

	/* Send the packet */
	rv = cvmx_pko_send_packet_finish3(outport, queue, pko_command,
					  packet, tx_ptr_word.u64,
					  CVMX_PKO_LOCK_CMD_QUEUE);
	if (rv != CVMX_PKO_SUCCESS) {
		/* Undo the tx-done registration and drop the packet. */
		if (cookie) {
			oct_pend_tx_done_remove(tx_done);
		}
		PACKET_DESTROY_ALL(mbuf);
		STAT_TX_SW_SEND_ERR;
		return;
	} else {
		STAT_TX_SEND_OVER;
	}
}
/* Look for an image of a given type in the NAND address range given. If an
 * image type of 0 is passed, matches all valid images.
 *
 * @param start_addr  NAND byte address to start scanning at (rounded up to
 *                    the next page boundary internally).
 * @param max_addr    Scanning stops when this address is reached.
 * @param image_type  Header image type to match; 0 matches any valid image.
 *
 * @return NAND address of the first matching image, 0 if none found
 *         (or no/unconfigured chip), or (u64)-1 on a NAND read error.
 */
u64 oct_nand_image_search(u64 start_addr, u64 max_addr, u16 image_type)
{
	unsigned char *buf = (unsigned char *)buf_storage;
	int page_size;
	int chip = oct_nand_get_cur_chip();
	u64 nand_addr;
	bootloader_header_t header_copy;

	if (chip < 0) {
		special_debug_printf("No nand chips found\n");
		return 0;
	}
	page_size = cvmx_nand_get_page_size(chip);

	/* Round start address up to a page boundary */
	start_addr = (start_addr + page_size - 1) & ~(page_size - 1);

	special_debug_printf("Using NAND chip %d, page size: %d\n", chip,
			     page_size);
	int errors = 0;
	int read_size = CVMX_NAND_BOOT_ECC_BLOCK_SIZE +
	    CVMX_NAND_BOOT_ECC_ECC_SIZE;

	if (!page_size) {
		special_debug_printf("ERROR: No NAND configured (run probe)\n");
		return 0;
	}

	/* BUGFIX: message had a doubled word ("for for"). */
	special_debug_printf("Scanning NAND for image type: %d\n", image_type);
	for (nand_addr = start_addr; nand_addr < max_addr;
	     nand_addr += page_size) {
		/* Adjust the read address based on location in page */
		int offset = ((nand_addr & (page_size - 1)) /
			      CVMX_NAND_BOOT_ECC_BLOCK_SIZE) *
		    CVMX_NAND_BOOT_ECC_ECC_SIZE;
		int bytes = cvmx_nand_page_read(chip, nand_addr + offset,
						cvmx_ptr_to_phys(buf),
						read_size);
		if (bytes != read_size) {
			/*
			 * BUGFIX: nand_addr is a u64 but was printed with
			 * %d -- undefined behavior that also corrupts the
			 * following varargs on 32-bit targets.
			 */
			special_debug_printf("Error reading NAND addr 0x%llx "
					     "(bytes_read: %d, expected: %d)\n",
					     (unsigned long long)nand_addr,
					     bytes, read_size);
			return -1;
		}
		errors = cvmx_nand_correct_boot_ecc(buf);
		if (errors <= 1) {
			if (errors == 1)
				special_debug_printf("Correctable ECC error at "
						     "NAND address: 0x%llx\n",
						     nand_addr + offset);
			const bootloader_header_t *header =
			    (const bootloader_header_t *)buf;
			/* Block is good, see if it contains a bootloader image
			 * header */
			if ((header->magic == BOOTLOADER_HEADER_MAGIC)) {
				special_debug_printf("\nFound a header at addr 0x%llx\n",
						     nand_addr);
				/* Check the CRC of the header */
				u32 crc;
				crc = crc32(0, (void *)header, 12);
				crc = crc32(crc, (void *)header + 16,
					    header->hlen - 16);
				if (crc == header->hcrc) {
					/* Copy header, as validate_image()
					 * call will overwrite global buffer */
					header_copy = *header;
					if (oct_nand_validate_image(nand_addr) >= 0) {
						if (!image_type ||
						    header_copy.image_type ==
						    image_type) {
							/* We found a valid
							 * image of the type we
							 * were looking for, so
							 * return the address
							 * of it. */
							special_debug_printf("Image type match at addr 0x%llx\n",
									     nand_addr);
							return nand_addr;
						}
						/* Skip the image, as we have
						 * validated it */
						special_debug_printf("\nSkipping image type: %d\n",
								     header_copy.image_type);
						nand_addr += ((header_copy.hlen +
							       header_copy.dlen +
							       page_size - 1) &
							      ~(page_size - 1))
						    - page_size;
					} else {
						special_debug_printf("Image CRC failed\n");
					}
				} else {
					special_debug_printf("Header CRC failed\n");
				}
			}
		} else {
			special_debug_printf("Uncorrectable ECC error at NAND "
					     "address: 0x%llx\n",
					     nand_addr + offset);
		}
	}
	special_debug_printf("Done looking for image type: %d, none found.\n",
			     image_type);
	return 0;
}
/*
 * U-Boot "oct_nand" command handler.  Dispatches on argv[1]:
 *   probe           - detect NAND on the boot bus
 *   dump addr [len] - hex-dump full pages (data + OOB)
 *   erase addr [len [force]] - erase whole blocks, skipping bad blocks
 *                     unless "force" is given
 *   write addr [dram len] - write DRAM data to NAND with Octeon boot ECC
 *                     (falls back to $fileaddr/$filesize)
 *   scan            - scan for recognized bootloader images
 *
 * NOTE(review): uses a file-scope 'chip' (current chip select) and
 * 'buf_storage' declared elsewhere in this file.
 *
 * Returns 0 on success, 1 on error or usage problems.
 */
int do_oct_nand(cmd_tbl_t * cmdtp, int flag, int argc, char * const argv[])
{
	u64 buf_storage[CVMX_NAND_MAX_PAGE_AND_OOB_SIZE / 8];
	unsigned char *buf = (unsigned char *)buf_storage;
	int rc;
	const char *cmd;

	if (argc < 2)
		goto usage;

	cmd = argv[1];
	if (!strcmp("probe", cmd)) {
		rc = oct_nand_probe();
		if (rc < 0) {
			printf("NAND flash not found\n");
		} else {
			printf("probe found NAND flash on boot bus %d.\n", rc);
		}
		return 0;
	} else if (!strcmp("dump", cmd)) {
		/* Only dump full pages.... */
		u64 page, end;
		u64 start_addr, len;
		int page_size = cvmx_nand_get_page_size(chip);
		int oob_size = cvmx_nand_get_oob_size(chip);

		if (argc < 3)
			goto usage;
		start_addr = simple_strtoull(argv[2], NULL, 16);
		if (argc == 4)
			len = simple_strtoull(argv[3], NULL, 16);
		else
			len = 0;

		if (!page_size) {
			printf("ERROR: No NAND configured (run probe)\n");
			return 1;
		}
		/* Convert from addresses to pages */
		page = start_addr / page_size;
		if (!len)
			end = page;	/* no length: dump one page */
		else
			end = (start_addr + len) / page_size;

		while (page <= end) {
			/* Read the next block */
			int i;
			int read_size = page_size + oob_size;
			/* Read more than required in cases when total read
			 * size is not a multiple of 8.
			 */
			read_size = (read_size + 7) & ~0x7;
			int bytes = cvmx_nand_page_read(chip,
							page_size * page,
							cvmx_ptr_to_phys(buf),
							read_size);
			int pages_per_block =
			    cvmx_nand_get_pages_per_block(chip);
			if (bytes != read_size) {
				printf("Error reading page %llu, "
				       "bytes read: %d\n", page, bytes);
				return 1;
			}
			/* Dump page data */
			printf("Address 0x%llx, (Block 0x%llx, page 0x%llx) "
			       "data:\n", page * page_size,
			       page / pages_per_block, page % pages_per_block);
			for (i = 0; i < page_size; i++) {
				if (i % 16 == 0)
					printf("0x%04llx:",
					       page * page_size + i);
				printf(" %02x", buf[i]);
				if (i % 16 == 15)
					printf("\n");
			}
			/* Dump OOB data */
			printf("Address 0x%llx, (Block 0x%llx, page 0x%llx) OOB:\n",
			       page * page_size, page / pages_per_block,
			       page % pages_per_block);
			for (i = 0; i < oob_size; i++) {
				if (i % 16 == 0)
					printf("0x%04x:", i);
				printf(" %02x", buf[page_size + i]);
				if (i % 16 == 15)
					printf("\n");
			}
			printf("\n");
			page++;
		}
		return 0;
	} else if (!strcmp("erase", cmd)) {
		u64 start_addr, length;
		int force = 0;
		u64 i;
		int page_size = cvmx_nand_get_page_size(chip);
		int pages_per_block = cvmx_nand_get_pages_per_block(chip);

		if (argc < 3 || argc > 5)
			goto usage;
		start_addr = simple_strtoull(argv[2], NULL, 16);
		if (argc >= 4)
			length = simple_strtoull(argv[3], NULL, 16);
		else
			length = page_size * pages_per_block;
		if (argc == 5) {
			if (strcmp(argv[4], "force"))
				goto usage;
			printf("WARNING: Forced erase: erasing bad blocks.\n");
			force = 1;
		}
		if (!page_size) {
			printf("ERROR: No NAND configured (run probe)\n");
			return 1;
		}
		/* Erases operate on whole blocks: both start and length
		 * must be block aligned. */
		if (start_addr & ((u64) page_size * pages_per_block - 1)) {
			printf("ERROR: erase start not at block boundary "
			       "(maybe want: 0x%llx or 0x%llx)\n",
			       start_addr &
			       ~((u64) page_size * pages_per_block - 1),
			       (start_addr + page_size * pages_per_block) &
			       ~((u64) page_size * pages_per_block - 1));
			return 1;
		}
		if ((length & (page_size * pages_per_block - 1))) {
			printf
			    ("ERROR: erase length not multiple of block size "
			     "(maybe want: 0x%llx)\n",
			     (length + (page_size * pages_per_block - 1)) &
			     ~(page_size * pages_per_block - 1));
			return 1;
		}
		for (i = start_addr; i < start_addr + length;
		     i += page_size * pages_per_block) {
			if (oct_nand_block_is_bad(chip, i, 1)) {
				if (force) {
					printf("WARNING: Erasing bad bock at "
					       "address 0x%llx\n", i);
				} else {
					printf("Not erasing bad block at "
					       "address 0x%llx\n", i);
					continue;
				}
			}
			if (cvmx_nand_block_erase(chip, i)) {
				printf("cvmx_nand_block_erase() failed, "
				       "addr 0x%08llx\n", i);
				return 1;
			}
		}
		return 0;
	} else if (!strcmp("write", cmd)) {
		u64 nand_addr;
		u32 dram_addr, length;
		int page_size = cvmx_nand_get_page_size(chip);

		if (!page_size) {
			printf("ERROR: No NAND configured (run probe)\n");
			return 1;
		}
		if (argc != 3 && argc != 5)
			goto usage;
		nand_addr = simple_strtoull(argv[2], NULL, 16);
		if (argc == 5) {
			dram_addr = simple_strtoul(argv[3], NULL, 16);
			length = simple_strtoul(argv[4], NULL, 16);
		} else {
			/* We look up the length/address from environment
			 * variables....
			 * filesize=3410
			 * fileaddr=20000000
			 */
			if (getenv("filesize") && getenv("fileaddr")) {
				dram_addr = simple_strtoul(getenv("fileaddr"),
							   NULL, 16);
				length = simple_strtoul(getenv("filesize"),
							NULL, 16);
				/* Round length up to pagesize */
				length = (length + page_size - 1) &
				    ~(page_size - 1);
			} else {
				printf("ERROR: filesize and fileaddr "
				       "environment variables\n"
				       "must be set if address and size "
				       "are not given\n");
				goto usage;
			}
		}
		unsigned char *data_ptr = (void *)dram_addr;
		if (nand_addr & (page_size - 1)) {
			printf("ERROR: write address not at page boundary "
			       "(maybe want: 0x%08llx)\n",
			       nand_addr & ~(page_size - 1));
			return 1;
		}
		return oct_nand_boot_write(nand_addr, data_ptr, length, 0);
	} else if (!strcmp("scan", cmd)) {
		u64 nand_addr;
		bootloader_header_t header_copy;
		int page_size = cvmx_nand_get_page_size(chip);
		int pages_per_block = cvmx_nand_get_pages_per_block(chip);
		int block_size = page_size * pages_per_block;
		int errors = 0;
		int read_size = CVMX_NAND_BOOT_ECC_BLOCK_SIZE +
		    CVMX_NAND_BOOT_ECC_ECC_SIZE;

		if (!page_size) {
			printf("ERROR: No NAND configured (run probe)\n");
			return 1;
		}
		printf("Scanning NAND for recognized images\n");
		for (nand_addr = 0; nand_addr < MAX_NAND_SEARCH_ADDR;
		     nand_addr += page_size) {
			/* Adjust the read address based on location in page */
			int offset = ((nand_addr & (page_size - 1)) /
				      CVMX_NAND_BOOT_ECC_BLOCK_SIZE) *
			    CVMX_NAND_BOOT_ECC_ECC_SIZE;
			int bytes = cvmx_nand_page_read(chip,
							nand_addr + offset,
							cvmx_ptr_to_phys(buf),
							read_size);
			if (bytes != read_size) {
				printf("Error reading NAND addr 0x%08llx "
				       "(bytes_read: %d, expected: %d)\n",
				       nand_addr, bytes, read_size);
				return 1;
			}
			errors = cvmx_nand_correct_boot_ecc(buf);
			if (errors <= 1) {
				const bootloader_header_t *header =
				    (const bootloader_header_t *)buf;
				/* Block is good, see if it contains a
				 * bootloader image header */
				if ((header->magic == BOOTLOADER_HEADER_MAGIC)) {
					int rval;
					/* Check the CRC of the header */
					u32 crc;
					crc = crc32(0, (void *)header, 12);
					crc = crc32(crc, (void *)header + 16,
						    header->hlen - 16);
					if (crc != header->hcrc) {
						printf("ERROR: 0x%llx Header "
						       "CRC mismatch, expected:"
						       " 0x%08x, found:"
						       " 0x%08x\n",
						       nand_addr,
						       header->hcrc, crc);
					} else {
						/* validate_image() reuses the
						 * global buffer, so keep a
						 * copy of the header. */
						header_copy = *header;
						printf("\nImage header at addr:"
						       "0x%llx, size: 0x%x, "
						       "crc: 0x%x ",
						       nand_addr,
						       header->dlen,
						       header->dcrc);
						if ((rval = oct_nand_validate_image(nand_addr)) < 0) {
							printf("Invalid image(%d).\n",
							       rval);
						} else {
							printf
							    ("Valid image.\n");
							if (header_copy.flags &
							    BL_HEADER_FLAG_FAILSAFE)
								printf("Failsafe");
							else
								printf("Normal");
							if (header_copy.image_type ==
							    BL_HEADER_IMAGE_STAGE2)
								printf("NAND stage2 ");
							else if (header_copy.image_type ==
								 BL_HEADER_IMAGE_STAGE3)
								printf("NAND stage3 ");
							else if (header_copy.image_type ==
								 BL_HEADER_IMAGE_UBOOT_ENV)
								printf("U-boot environment (version: %d)\n",
								       (int)header_copy.address);
							if (header_copy.board_type)
								printf("image for %s board\n",
								       cvmx_board_type_to_string(header_copy.board_type));
							else
								printf("\n");
							/* Skip the image, as
							 * we have validated
							 * it */
							nand_addr +=
							    ((header->dlen +
							      page_size - 1) &
							     ~(page_size - 1))
							    - page_size;
						}
					}
				}
			} else {
				/* Uncorrectable errors detected. */
				/* Check to see if we have a bad block */
				if (oct_nand_block_is_bad(chip, nand_addr, 1)) {
					printf
					    ("Bad NAND block at addr: 0x%llx\n",
					     nand_addr & ~(block_size - 1));
					/* Skip rest of block */
					nand_addr += block_size -
					    (nand_addr & (block_size - 1)) -
					    page_size;
				}
			}
		}
		printf("\n");
		return 0;
	}

usage:
	printf("Usage:\n%s\n", cmdtp->usage);
	return 1;
}
/* Validates the header that is at nand_addr, and returns the
** size in bytes. If invalid:
** -1 : wrong magic
** -2 : header CRC incorrect
** -3 : data CRC incorrect
** -4 : uncorrectable ECC error
** -5 : NAND read error
*/
int oct_nand_validate_image(u64 nand_addr)
{
	int errors = 0;
	int read_size = CVMX_NAND_BOOT_ECC_BLOCK_SIZE +
	    CVMX_NAND_BOOT_ECC_ECC_SIZE;
	int bytes;
	unsigned char *buf = (unsigned char *)buf_storage;
	const bootloader_header_t *header = (const bootloader_header_t *)buf;
	u32 dcrc;
	u32 crc;
	int page_size = cvmx_nand_get_page_size(chip);
	int data_length;
	int crc_bytes_remaining;
	int crc_skip;

	/* Read first block, which should contain header */
	bytes = cvmx_nand_page_read(chip, nand_addr, cvmx_ptr_to_phys(buf),
				    read_size);
	/*
	 * BUGFIX: the header read's return value was ignored; a short read
	 * would leave stale data in buf and be validated as if it came from
	 * flash.  Report it as a read error (matching the data loop below
	 * and the -5 contract above).
	 */
	if (bytes != read_size)
		return OCT_NAND_READ_ERROR;

	if (cvmx_nand_correct_boot_ecc(buf) > 1)
		return OCT_NAND_ECC_UNCOR;

	if ((header->magic != BOOTLOADER_HEADER_MAGIC))
		return OCT_NAND_BAD_MAGIC;

	/* Header CRC covers everything except the hcrc field itself. */
	crc = crc32(0, (void *)header, offsetof(bootloader_header_t, hcrc));
	crc = crc32(crc, (void *)header + offsetof(bootloader_header_t, hlen),
		    header->hlen - offsetof(bootloader_header_t, hlen));
	if (crc != header->hcrc)
		return OCT_NAND_BAD_HCRC;

	/* Save data crc for later use */
	dcrc = header->dcrc;

	/* Data starts at nand_addr + header->hlen */
	nand_addr += header->hlen;

	/*
	 * We need to always read CVMX_NAND_BOOT_ECC_BLOCK_SIZE size blocks
	 * so that we can do ECC.  The data may start at an offset into this
	 * first block.
	 */
	data_length = crc_bytes_remaining = header->dlen;
	crc_skip = nand_addr & (CVMX_NAND_BOOT_ECC_BLOCK_SIZE - 1);
	/* Start reading at an aligned address */
	nand_addr &= ~(CVMX_NAND_BOOT_ECC_BLOCK_SIZE - 1);
	crc = 0;
	for (; crc_bytes_remaining > 0;
	     nand_addr += CVMX_NAND_BOOT_ECC_BLOCK_SIZE) {
		/* Adjust the read address based on location in page */
		int offset = ((nand_addr & (page_size - 1)) /
			      CVMX_NAND_BOOT_ECC_BLOCK_SIZE) *
		    CVMX_NAND_BOOT_ECC_ECC_SIZE;
		bytes = cvmx_nand_page_read(chip, nand_addr + offset,
					    cvmx_ptr_to_phys(buf), read_size);
		if (bytes != read_size) {
			return OCT_NAND_READ_ERROR;
		}
		errors = cvmx_nand_correct_boot_ecc(buf);
		if (errors <= 1) {
			if (errors == 1)
				debug_printf("Correctable ECC error at NAND "
					     "addr: 0x%llx\n",
					     nand_addr + offset);
			/* Fold this block into the running data CRC,
			 * skipping any pre-data bytes in the first block. */
			crc = crc32(crc, (void *)buf + crc_skip,
				    MIN(CVMX_NAND_BOOT_ECC_BLOCK_SIZE -
					crc_skip, crc_bytes_remaining));
			crc_bytes_remaining -=
			    CVMX_NAND_BOOT_ECC_BLOCK_SIZE - crc_skip;
			if (crc_skip)
				crc_skip = 0;
		} else {
			debug_printf("Uncorrectable ECC error at NAND "
				     "addr: 0x%llx\n", nand_addr + offset);
			return OCT_NAND_ECC_UNCOR;
		}
	}
	if (dcrc != crc)
		return OCT_NAND_BAD_DCRC;

	return data_length;
}
/* Writes to NAND using Octeon NAND-boot ECC.
 *
 * @param nand_addr  Page-aligned NAND address to start writing at.
 * @param data       Source data in DRAM.
 * @param len        Length in bytes (rounded up to a whole page).
 * @param flags      Currently unused.
 *
 * Verifies that the target pages are blank and not in bad blocks,
 * interleaves the boot ECC bytes into each page, writes it, and reads
 * it back to verify.
 *
 * @return OCT_NAND_SUCCESS, or an OCT_NAND_* error code.
 */
oct_nand_status_t oct_nand_boot_write(u64 nand_addr, void *data, int len,
				      u32 flags)
{
	u64 tmp_addr;
	int input_remaining;
	u64 buf_storage[CVMX_NAND_MAX_PAGE_AND_OOB_SIZE / 8];
	unsigned char *buf = (unsigned char *)buf_storage;
	int page_size = cvmx_nand_get_page_size(chip);
	int oob_size = octeon_nand_get_oob_size(chip);

	if (nand_addr & (page_size - 1)) {
		printf("ERROR: write start must be page aligned.\n");
		return OCT_NAND_BAD_PARAM;
	}
	/* Round length up to page size */
	len = (len + page_size - 1) & ~(page_size - 1);

	/* First check to make sure all pages are erased. We don't count the
	 * OOB data against the length supplied, as that is where the ECC
	 * data will go. BAD blocks are returned as !blank, so we will also
	 * skip bad blocks
	 */
	for (tmp_addr = nand_addr; tmp_addr < nand_addr + len;
	     tmp_addr += page_size) {
		if (oct_nand_block_is_bad(chip, tmp_addr, 1)) {
			printf("ERROR: Block containing addr 0x%llx is bad, "
			       "unable to write to page\n", tmp_addr);
			return OCT_NAND_ERROR;
		}
		if (!oct_nand_page_is_blank(tmp_addr)) {
			printf("ERROR: page at addr 0x%llx is not blank, "
			       "unable to write to page\n", tmp_addr);
			return OCT_NAND_ERROR;
		}
	}

	/* Now we have verified that the required flash is erased (and not
	 * bad), now generate the ECC and write the data to the flash.
	 */
	int offset;		/* offset within NAND page while computing ECC
				 * (may limit size of final write) */
	input_remaining = len;
	for (tmp_addr = nand_addr; tmp_addr < nand_addr + len;
	     tmp_addr += page_size) {
		u64 buf1_storage[CVMX_NAND_MAX_PAGE_AND_OOB_SIZE / 8];
		unsigned char *buf1 = (unsigned char *)buf1_storage;
		/*
		 * BUGFIX: the original did memset(buf, 0, sizeof(buf)) --
		 * but buf is a pointer, so only 4/8 bytes were cleared and
		 * stale stack data could be written to flash whenever the
		 * input did not fill the whole page.  Zero the entire
		 * backing buffer instead.
		 */
		memset(buf, 0, sizeof(buf_storage));
		offset = 0;
		/* Pack (data block + ECC) pairs into the page until it is
		 * full or the input is exhausted. */
		while (input_remaining > 0
		       && offset <= (page_size + oob_size -
				     CVMX_NAND_BOOT_ECC_BLOCK_SIZE -
				     CVMX_NAND_BOOT_ECC_ECC_SIZE)) {
			memcpy(buf + offset, data,
			       MIN(CVMX_NAND_BOOT_ECC_BLOCK_SIZE,
				   input_remaining));
			cvmx_nand_compute_boot_ecc(buf + offset,
						   buf + offset +
						   CVMX_NAND_BOOT_ECC_BLOCK_SIZE);
			offset += CVMX_NAND_BOOT_ECC_BLOCK_SIZE +
			    CVMX_NAND_BOOT_ECC_ECC_SIZE;
			data += CVMX_NAND_BOOT_ECC_BLOCK_SIZE;
			input_remaining -= CVMX_NAND_BOOT_ECC_BLOCK_SIZE;
		}

		/* We have filled up a page, so write it */
		if (cvmx_nand_page_write(chip, tmp_addr,
					 cvmx_ptr_to_phys(buf))) {
			printf("cvmx_nand_page_write() failed, address "
			       "0x%08llx\n", tmp_addr);
			return OCT_NAND_WRITE_ERROR;
		}

		/* Now read the block to make sure it matches */
		if (cvmx_nand_page_read(chip, tmp_addr,
					cvmx_ptr_to_phys(buf1),
					offset) != offset) {
			printf("ERROR reading back NAND to verify write.\n");
			return OCT_NAND_READ_ERROR;
		}
		if (memcmp(buf, buf1, offset)) {
			printf("ERROR: write verify failed.\n");
			return OCT_NAND_WRITE_ERROR;
		}
	}
	return OCT_NAND_SUCCESS;
}
/**
 * Checks to see if address is part of a bad block.
 * This does the 'standard' bad block checks, but also takes
 * into account the Octeon NAND boot ECC format. Most blocks
 * with this format will appear to be bad, so we do extra checks
 * for blocks that fail the standard bad block test. If a 'bad' block
 * passes Octeon ECC, then it is treated as good.
 * One complication is that some bad blocks are all 0x00 bytes, which
 * also happens to be a valid Octeon ECC encoding. This special case is
 * checked for, and treated as a bad block.
 *
 * @param chip_select
 *               Chip select for operation
 * @param nand_addr Address of block to check. (Any address within block is
 *               OK.)
 * @param check_octeon_ecc
 *               Do extra checks for Octeon ECC. Without these checks, many
 *               Octeon NAND boot blocks will be reported as bad.
 *
 * @return 1 if bad
 *         0 if OK
 */
int oct_nand_block_is_bad(int chip_select, uint64_t nand_addr,
			  int check_octeon_ecc)
{
	/* Cache the results of the most recent check,
	 * so that we can answer quickly about all pages in
	 * a block.  NOTE: single cache entry, shared across all callers
	 * and chip selects. */
	static u64 cached_block_addr = ~0ull;
	static int cached_block_is_bad = 0;
	/* NOTE(review): buf_storage is not declared in this function —
	 * presumably a file-scope scratch buffer; confirm it is at least
	 * page_size + OOB bytes for this chip. */
	unsigned char *buf = (unsigned char *)buf_storage;
	int page_size = cvmx_nand_get_page_size(chip_select);
	int pages_per_block = cvmx_nand_get_pages_per_block(chip_select);
	int block_size = page_size * pages_per_block;
	int read_size = page_size + octeon_nand_get_oob_size(chip_select);
	int block_is_bad = 0;
	int bytes;
	int i;
	int page_to_check;
	u64 nand_addr_read;
	/* Set the bad block position: the bad-block marker lives in the OOB
	 * area, at an offset that depends on small vs. large page devices. */
	int bad_block_pos = page_size + (page_size > 512 ?
					 NAND_LARGE_BADBLOCK_POS :
					 NAND_SMALL_BADBLOCK_POS);

	/* Since NAND is marked bad in terms of blocks, we only
	 * care about what block the supplied address is in */
	nand_addr &= ~(block_size - 1);

	/* Return cached value if we have a match */
	if (nand_addr == cached_block_addr)
		return cached_block_is_bad;

	nand_addr_read = nand_addr;
	/* Check first and last pages of the block (the two standard
	 * locations for factory bad-block markers). */
	for (page_to_check = 0; page_to_check < 2; page_to_check++) {
		/* Check first and last pages of block */
		bytes = cvmx_nand_page_read(chip_select, nand_addr_read,
					    cvmx_ptr_to_phys(buf), read_size);
		if (bytes != read_size) {
			/* Read failure: report bad, but do NOT cache the
			 * result so a transient error can be retried. */
			printf("ERROR: %s: error reading NAND.\n",
			       __FUNCTION__);
			return 1;
		}
		/* Standard check: a non-0xFF marker byte means bad. */
		if (buf[bad_block_pos] != 0xFF)
			block_is_bad = 1;
		if (block_is_bad) {
			if (check_octeon_ecc) {
				/* Check to see if there is a valid Octeon ECC
				 * encoded block at the beginning of the page.
				 * If so, the block is good, otherwise it is
				 * bad */
				if (!cvmx_nand_correct_boot_ecc(buf)) {
					/* No ECC errors detected */
					/* Some bad blocks are set to all 0x00,
					 * including OOB.  This meets all bad
					 * block marking requirements, but is
					 * also a valid Octeon BOOT bus
					 * encoding.  Treat this as a bad
					 * block. */
					for (i = 0; i < read_size; i++)
						if (buf[i])
							break;
					if (i == read_size) {
						/* All-zero page: bad block. */
						block_is_bad = 1;
						goto page_is_bad_exit;
					}
					/* Valid, non-zero Octeon ECC page:
					 * override the marker — block is
					 * good (so far); still check the
					 * other page. */
					block_is_bad = 0;
				}
			} else
				/* No ECC override requested: the marker
				 * verdict is final. */
				goto page_is_bad_exit;
		}
		/* Advance from the first page to the last page of block. */
		nand_addr_read += block_size - page_size;
	}
page_is_bad_exit:
	/* Cache most recent lookup so looking up pages in same block is
	 * fast */
	cached_block_addr = nand_addr;
	cached_block_is_bad = block_is_bad;
	return block_is_bad;
}
/** * Packet transmit * * @param m Packet to send * @param dev Device info structure * @return Always returns zero */ int cvm_oct_xmit(struct mbuf *m, struct ifnet *ifp) { cvmx_pko_command_word0_t pko_command; cvmx_buf_ptr_t hw_buffer; int dropped; int qos; cvm_oct_private_t *priv = (cvm_oct_private_t *)ifp->if_softc; int32_t in_use; int32_t buffers_to_free; cvmx_wqe_t *work; /* Prefetch the private data structure. It is larger that one cache line */ CVMX_PREFETCH(priv, 0); /* Start off assuming no drop */ dropped = 0; /* The check on CVMX_PKO_QUEUES_PER_PORT_* is designed to completely remove "qos" in the event neither interface supports multiple queues per port */ if ((CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 > 1) || (CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 > 1)) { qos = GET_MBUF_QOS(m); if (qos <= 0) qos = 0; else if (qos >= cvmx_pko_get_num_queues(priv->port)) qos = 0; } else qos = 0; /* The CN3XXX series of parts has an errata (GMX-401) which causes the GMX block to hang if a collision occurs towards the end of a <68 byte packet. As a workaround for this, we pad packets to be 68 bytes whenever we are in half duplex mode. We don't handle the case of having a small packet but no room to add the padding. The kernel should always give us at least a cache line */ if (__predict_false(m->m_pkthdr.len < 64) && OCTEON_IS_MODEL(OCTEON_CN3XXX)) { cvmx_gmxx_prtx_cfg_t gmx_prt_cfg; int interface = INTERFACE(priv->port); int index = INDEX(priv->port); if (interface < 2) { /* We only need to pad packet in half duplex mode */ gmx_prt_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface)); if (gmx_prt_cfg.s.duplex == 0) { static uint8_t pad[64]; if (!m_append(m, sizeof pad - m->m_pkthdr.len, pad)) printf("%s: unable to padd small packet.", __func__); } } } #ifdef OCTEON_VENDOR_RADISYS /* * The RSYS4GBE will hang if asked to transmit a packet less than 60 bytes. 
*/ if (__predict_false(m->m_pkthdr.len < 60) && cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_CUST_RADISYS_RSYS4GBE) { static uint8_t pad[60]; if (!m_append(m, sizeof pad - m->m_pkthdr.len, pad)) printf("%s: unable to pad small packet.", __func__); } #endif /* * If the packet is not fragmented. */ if (m->m_pkthdr.len == m->m_len) { /* Build the PKO buffer pointer */ hw_buffer.u64 = 0; hw_buffer.s.addr = cvmx_ptr_to_phys(m->m_data); hw_buffer.s.pool = 0; hw_buffer.s.size = m->m_len; /* Build the PKO command */ pko_command.u64 = 0; pko_command.s.segs = 1; pko_command.s.dontfree = 1; /* Do not put this buffer into the FPA. */ work = NULL; } else { struct mbuf *n; unsigned segs; uint64_t *gp; /* * The packet is fragmented, we need to send a list of segments * in memory we borrow from the WQE pool. */ work = cvmx_fpa_alloc(CVMX_FPA_WQE_POOL); if (work == NULL) { m_freem(m); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); return 1; } segs = 0; gp = (uint64_t *)work; for (n = m; n != NULL; n = n->m_next) { if (segs == CVMX_FPA_WQE_POOL_SIZE / sizeof (uint64_t)) panic("%s: too many segments in packet; call m_collapse().", __func__); /* Build the PKO buffer pointer */ hw_buffer.u64 = 0; hw_buffer.s.i = 1; /* Do not put this buffer into the FPA. */ hw_buffer.s.addr = cvmx_ptr_to_phys(n->m_data); hw_buffer.s.pool = 0; hw_buffer.s.size = n->m_len; *gp++ = hw_buffer.u64; segs++; } /* Build the PKO buffer gather list pointer */ hw_buffer.u64 = 0; hw_buffer.s.addr = cvmx_ptr_to_phys(work); hw_buffer.s.pool = CVMX_FPA_WQE_POOL; hw_buffer.s.size = segs; /* Build the PKO command */ pko_command.u64 = 0; pko_command.s.segs = segs; pko_command.s.gather = 1; pko_command.s.dontfree = 0; /* Put the WQE above back into the FPA. 
*/ } /* Finish building the PKO command */ pko_command.s.n2 = 1; /* Don't pollute L2 with the outgoing packet */ pko_command.s.reg0 = priv->fau+qos*4; pko_command.s.total_bytes = m->m_pkthdr.len; pko_command.s.size0 = CVMX_FAU_OP_SIZE_32; pko_command.s.subone0 = 1; /* Check if we can use the hardware checksumming */ if ((m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) != 0) { /* Use hardware checksum calc */ pko_command.s.ipoffp1 = ETHER_HDR_LEN + 1; } /* * XXX * Could use a different free queue (and different FAU address) per * core instead of per QoS, to reduce contention here. */ IF_LOCK(&priv->tx_free_queue[qos]); /* Get the number of mbufs in use by the hardware */ in_use = cvmx_fau_fetch_and_add32(priv->fau+qos*4, 1); buffers_to_free = cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0); cvmx_pko_send_packet_prepare(priv->port, priv->queue + qos, CVMX_PKO_LOCK_CMD_QUEUE); /* Drop this packet if we have too many already queued to the HW */ if (_IF_QFULL(&priv->tx_free_queue[qos])) { dropped = 1; } /* Send the packet to the output queue */ else if (__predict_false(cvmx_pko_send_packet_finish(priv->port, priv->queue + qos, pko_command, hw_buffer, CVMX_PKO_LOCK_CMD_QUEUE))) { DEBUGPRINT("%s: Failed to send the packet\n", if_name(ifp)); dropped = 1; } if (__predict_false(dropped)) { m_freem(m); cvmx_fau_atomic_add32(priv->fau+qos*4, -1); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); } else { /* Put this packet on the queue to be freed later */ _IF_ENQUEUE(&priv->tx_free_queue[qos], m); /* Pass it to any BPF listeners. */ ETHER_BPF_MTAP(ifp, m); if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); if_inc_counter(ifp, IFCOUNTER_OBYTES, m->m_pkthdr.len); } /* Free mbufs not in use by the hardware */ if (_IF_QLEN(&priv->tx_free_queue[qos]) > in_use) { while (_IF_QLEN(&priv->tx_free_queue[qos]) > in_use) { _IF_DEQUEUE(&priv->tx_free_queue[qos], m); m_freem(m); } } IF_UNLOCK(&priv->tx_free_queue[qos]); return dropped; }
/**
 * Transmit one mbuf on the given physical port via the PKO.
 *
 * Hardware-owned packets (PKTBUF_IS_HW) are handed to the PKO directly
 * and the mbuf wrapper is freed immediately; software-owned packets
 * (PKTBUF_IS_SW) are sent with dontfree + rsp set and tracked in the
 * per-core tx_done list so they can be released when the hardware
 * reports completion.
 *
 * @param mbuf  Packet to send; ownership is consumed on all paths.
 * @param port  Physical output port (0..OCT_PHY_PORT_MAX).
 */
void oct_tx_process_mbuf(mbuf_t *mbuf, uint8_t port)
{
	uint64_t queue;
	cvmx_pko_return_value_t send_status;

	/* NOTE(review): '>' permits port == OCT_PHY_PORT_MAX — confirm
	 * whether the max is inclusive or this should be '>='. */
	if(port > OCT_PHY_PORT_MAX) {
		printf("Send port is invalid");
		PACKET_DESTROY_ALL(mbuf);
		STAT_TX_SEND_PORT_ERR;
		return;
	}

	queue = cvmx_pko_get_base_queue(port);
	/* Must be called before building the command words; reserves the
	 * PKO command-queue slot for the matching send_packet_finish. */
	cvmx_pko_send_packet_prepare(port, queue, CVMX_PKO_LOCK_CMD_QUEUE);

	if(PKTBUF_IS_HW(mbuf)) {
		/* Build a PKO pointer to this packet */
		cvmx_pko_command_word0_t pko_command;
		pko_command.u64 = 0;
		pko_command.s.segs = 1;
		pko_command.s.total_bytes = mbuf->pkt_totallen;
		/* dontfree is left 0: hardware returns the packet buffer
		 * to its FPA pool after transmit. */

		/* Send the packet */
		send_status = cvmx_pko_send_packet_finish(port, queue,
				pko_command, mbuf->packet_ptr,
				CVMX_PKO_LOCK_CMD_QUEUE);
		if (send_status != CVMX_PKO_SUCCESS) {
			/* NOTE(review): message names send_packet2 but the
			 * call is cvmx_pko_send_packet_finish. */
			printf("Failed to send packet using cvmx_pko_send_packet2\n");
			STAT_TX_HW_SEND_ERR;
			PACKET_DESTROY_DATA(mbuf);
		}
		/* The mbuf wrapper itself is software-owned; free it
		 * whether or not the send succeeded. */
		MBUF_FREE(mbuf);
	}
	else if(PKTBUF_IS_SW(mbuf)) {
		uint8_t *dont_free_cookie = NULL;
		tx_done_t *tx_done = &(oct_stx[local_cpu_id]->tx_done[port]);

		/* Reserve a tx-done slot so the buffer can be freed when
		 * the PKO response arrives; drop the packet if the
		 * descriptor ring is full. */
		if(tx_done->tx_entries < (OCT_PKO_TX_DESC_NUM - 1)) {
			dont_free_cookie = oct_pend_tx_done_add(tx_done,
					(void *)mbuf);
		}
		else {
			PACKET_DESTROY_ALL(mbuf);
			STAT_TX_SW_DESC_ERR;
			return;
		}

		/*command word0*/
		cvmx_pko_command_word0_t pko_command;
		pko_command.u64 = 0;
		pko_command.s.segs = 1;
		pko_command.s.total_bytes = mbuf->pkt_totallen;
		/* rsp + dontfree: hardware must not free the buffer and
		 * must post a completion response instead. */
		pko_command.s.rsp = 1;
		pko_command.s.dontfree = 1;

		/*command word1*/
		cvmx_buf_ptr_t packet;
		packet.u64 = 0;
		packet.s.size = mbuf->pkt_totallen;
		packet.s.addr = (uint64_t)mbuf->pkt_ptr;

		/*command word2: physical address the PKO response is
		 * written to (our tx-done cookie). */
		cvmx_pko_command_word2_t tx_ptr_word;
		tx_ptr_word.u64 = 0;
		tx_ptr_word.s.ptr = (uint64_t)cvmx_ptr_to_phys(dont_free_cookie);

		/* Send the packet */
		send_status = cvmx_pko_send_packet_finish3(port, queue,
				pko_command, packet, tx_ptr_word.u64,
				CVMX_PKO_LOCK_CMD_QUEUE);
		if(send_status != CVMX_PKO_SUCCESS) {
			/* Undo the tx-done reservation made above. */
			if(dont_free_cookie) {
				oct_pend_tx_done_remove(tx_done);
			}
			printf("Failed to send packet using cvmx_pko_send_packet3\n");
			PACKET_DESTROY_ALL(mbuf);
			STAT_TX_SW_SEND_ERR;
			return;
		}
	}
	else {
		/* Neither HW nor SW buffer space: corrupt/unknown mbuf.
		 * NOTE(review): the reserved PKO command-queue slot from
		 * send_packet_prepare is never finished on this path. */
		printf("pkt space %d is wrong, please check it\n",
		       PKTBUF_SPACE_GET(mbuf));
	}
	STAT_TX_SEND_OVER;
}