static void set_SPI_registers(void)
{
    volatile uint16_t spi_API = 0;

    AJ_AlwaysPrintf(("\n\n**************\nTEST: %s\n\n", __FUNCTION__));

    // reset the target
    // spi_API = (1 << 15);
    // AJ_WSL_SPI_RegisterWrite(AJ_WSL_SPI_REG_SPI_CONFIG, spi_API);
    // AJ_Sleep(1 << 22);

    // one extra write to force the device out of the reset state.
    spi_API = 0x80;
    AJ_WSL_SPI_RegisterWrite(AJ_WSL_SPI_REG_SPI_CONFIG, spi_API);
    AJ_Sleep(100);

    // write
    spi_API = 0x80; // same as capture
    AJ_WSL_SPI_RegisterWrite(AJ_WSL_SPI_REG_SPI_CONFIG, spi_API);
    // AJ_InfoPrintf(("AJ_WSL_SPI_REG_SPI_CONFIG was %04x\n", spi_API));

    AJ_WSL_SPI_RegisterRead(AJ_WSL_SPI_REG_WRBUF_SPC_AVA, (uint8_t*)&spi_API);
    spi_API = LE16_TO_CPU(spi_API);
    AJ_InfoPrintf(("AJ_WSL_SPI_REG_WRBUF_SPC_AVA was %04x\n", spi_API));

    spi_API = 0x40; // same as capture
    AJ_WSL_SPI_RegisterWrite(AJ_WSL_SPI_REG_WRBUF_WATERMARK, spi_API);
    // AJ_InfoPrintf(("AJ_WSL_SPI_REG_WRBUF_WATERMARK was %04x\n", spi_API));

    spi_API = 0x400; // same as capture
    AJ_WSL_SPI_RegisterWrite(AJ_WSL_SPI_REG_INTR_CAUSE, spi_API);
    AJ_InfoPrintf(("AJ_WSL_SPI_REG_INTR_CAUSE was %04x\n", spi_API));

    spi_API = 0x3ff;
    AJ_WSL_SPI_RegisterWrite(AJ_WSL_SPI_REG_INTR_ENABLE, spi_API);
    AJ_InfoPrintf(("AJ_WSL_SPI_REG_INTR_ENABLE was %04x\n", spi_API));

    spi_API = 0x0e;
    AJ_WSL_SPI_RegisterWrite(AJ_WSL_SPI_REG_INTR_ENABLE, spi_API);
    AJ_InfoPrintf(("AJ_WSL_SPI_REG_INTR_ENABLE was %04x\n", spi_API));

    spi_API = 0x1e;
    AJ_WSL_SPI_RegisterWrite(AJ_WSL_SPI_REG_INTR_ENABLE, spi_API);
    AJ_InfoPrintf(("AJ_WSL_SPI_REG_INTR_ENABLE was %04x\n", spi_API));

    spi_API = 0x0;
    AJ_WSL_SPI_RegisterWrite(AJ_WSL_SPI_REG_INTR_ENABLE, spi_API);
    AJ_InfoPrintf(("AJ_WSL_SPI_REG_INTR_ENABLE was %04x\n", spi_API));

    spi_API = 0x1e;
    AJ_WSL_SPI_RegisterWrite(AJ_WSL_SPI_REG_INTR_ENABLE, spi_API);
    AJ_InfoPrintf(("AJ_WSL_SPI_REG_INTR_ENABLE was %04x\n", spi_API));

    AJ_Sleep(100);
}
void AJ_WSL_HTC_ProcessInterruptCause(void)
{
    uint16_t cause = 0;
    AJ_Status status = AJ_ERR_SPI_READ;

    status = AJ_WSL_SPI_RegisterRead(AJ_WSL_SPI_REG_INTR_CAUSE, (uint8_t*)&cause);
    AJ_ASSERT(status == AJ_OK);
    cause = LE16_TO_CPU(cause);

    if (cause & AJ_WSL_SPI_REG_INTR_CAUSE_DATA_AVAILABLE) {
        AJ_WSL_HTC_ProcessIncoming();
        cause = cause ^ AJ_WSL_SPI_REG_INTR_CAUSE_DATA_AVAILABLE; // clear the bit in the local copy
    }
    if (cause & AJ_WSL_SPI_REG_INTR_CAUSE_READ_DONE) {
        uint16_t clearCause = CPU_TO_LE16(AJ_WSL_SPI_REG_INTR_CAUSE_READ_DONE);
        status = AJ_WSL_SPI_RegisterWrite(AJ_WSL_SPI_REG_INTR_CAUSE, clearCause);
        AJ_ASSERT(status == AJ_OK);
        cause = cause ^ AJ_WSL_SPI_REG_INTR_CAUSE_READ_DONE;
    }
    if (cause & AJ_WSL_SPI_REG_INTR_CAUSE_WRITE_DONE) {
        uint16_t clearCause = CPU_TO_LE16(AJ_WSL_SPI_REG_INTR_CAUSE_WRITE_DONE);
        status = AJ_WSL_SPI_RegisterWrite(AJ_WSL_SPI_REG_INTR_CAUSE, clearCause);
        AJ_ASSERT(status == AJ_OK);
        cause = cause ^ AJ_WSL_SPI_REG_INTR_CAUSE_WRITE_DONE;
    }
    if (cause & AJ_WSL_SPI_REG_INTR_CAUSE_CPU_AWAKE) {
        uint16_t clearCause = CPU_TO_LE16(AJ_WSL_SPI_REG_INTR_CAUSE_CPU_AWAKE);
        status = AJ_WSL_SPI_RegisterWrite(AJ_WSL_SPI_REG_INTR_CAUSE, clearCause);
        AJ_ASSERT(status == AJ_OK);
        cause = cause ^ AJ_WSL_SPI_REG_INTR_CAUSE_CPU_AWAKE;
    }
    if (cause & AJ_WSL_SPI_REG_INTR_CAUSE_COUNTER) {
        uint16_t clearCause = CPU_TO_LE16(AJ_WSL_SPI_REG_INTR_CAUSE_COUNTER);
        status = AJ_WSL_SPI_RegisterWrite(AJ_WSL_SPI_REG_INTR_CAUSE, clearCause);
        AJ_ASSERT(status == AJ_OK);
        cause = cause ^ AJ_WSL_SPI_REG_INTR_CAUSE_COUNTER;
    }
    if (cause & ~AJ_WSL_SPI_REG_INTR_CAUSE_DATA_AVAILABLE) {
        //AJ_InfoPrintf(("Some other interrupt cause as well %x\n", cause));
    }
}
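/*
 * The handler above repeats one pattern per cause bit: if the bit is set,
 * acknowledge it by writing it back to AJ_WSL_SPI_REG_INTR_CAUSE and clear
 * it from the local copy. A minimal table-driven sketch of the acknowledge
 * half follows; it reuses the AJ_WSL register API from above, but the
 * helper name and the idea of dropping the per-cause handlers are
 * illustrative assumptions, not part of the original source.
 */
static const uint16_t AJ_WSL_ackBits[] = {
    AJ_WSL_SPI_REG_INTR_CAUSE_READ_DONE,
    AJ_WSL_SPI_REG_INTR_CAUSE_WRITE_DONE,
    AJ_WSL_SPI_REG_INTR_CAUSE_CPU_AWAKE,
    AJ_WSL_SPI_REG_INTR_CAUSE_COUNTER,
};

static void AJ_WSL_AckInterruptCauses(uint16_t cause) /* hypothetical helper */
{
    uint8_t i;
    for (i = 0; i < sizeof(AJ_WSL_ackBits) / sizeof(AJ_WSL_ackBits[0]); ++i) {
        if (cause & AJ_WSL_ackBits[i]) {
            // write-one-to-clear acknowledge; the register is little-endian on the wire
            AJ_WSL_SPI_RegisterWrite(AJ_WSL_SPI_REG_INTR_CAUSE,
                                     CPU_TO_LE16(AJ_WSL_ackBits[i]));
        }
    }
}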
/**
 * i40e_read_word - read HMC context word into struct
 * @hmc_bits: pointer to the HMC memory
 * @ce_info: a description of the struct to be filled
 * @dest: the struct to be filled
 **/
static void i40e_read_word(u8 *hmc_bits,
                           struct i40e_context_ele *ce_info,
                           u8 *dest)
{
    u16 dest_word, mask;
    u8 *src, *target;
    u16 shift_width;
    __le16 src_word;

    /* prepare the bits and mask */
    shift_width = ce_info->lsb % 8;
    mask = ((u16)1 << ce_info->width) - 1;

    /* shift to correct alignment */
    mask <<= shift_width;

    /* get the current bits from the src bit string */
    src = hmc_bits + (ce_info->lsb / 8);

    i40e_memcpy(&src_word, src, sizeof(src_word), I40E_DMA_TO_NONDMA);

    /* the data in the memory is stored as little endian so mask it
     * correctly; keep only the field bits (the inverted mask belongs
     * in the write path, which clears the field before merging)
     */
    src_word &= CPU_TO_LE16(mask);

    /* get the data back into host order before shifting */
    dest_word = LE16_TO_CPU(src_word);

    dest_word >>= shift_width;

    /* get the address from the struct field */
    target = dest + ce_info->offset;

    /* put it back in the struct */
    i40e_memcpy(target, &dest_word, sizeof(dest_word), I40E_NONDMA_TO_DMA);
}
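/*
 * The routine above is the usual shift-and-mask idiom: locate the byte
 * holding the field's least significant bit, mask the field off in its
 * little-endian form, then shift into host order. A self-contained sketch
 * of the same idiom follows; the function name and sample layout are
 * illustrative and not part of the driver. Note the simple memcpy here
 * assumes a little-endian host; the driver's CPU_TO_LE16/LE16_TO_CPU pair
 * is what makes the real code endian-safe.
 */
#include <stdint.h>
#include <string.h>

/* Extract a 'width'-bit field whose LSB sits at absolute bit offset 'lsb'
 * in a little-endian byte buffer (width <= 9 so the field fits in the
 * two bytes loaded below). */
static uint16_t extract_le_field(const uint8_t *buf, unsigned lsb, unsigned width)
{
    unsigned shift = lsb % 8;
    uint16_t mask = (uint16_t)((((uint16_t)1 << width) - 1) << shift);
    uint16_t word;

    memcpy(&word, buf + lsb / 8, sizeof(word)); /* little-endian host assumed */
    return (uint16_t)((word & mask) >> shift);
}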
/**
 * i40e_asq_send_command - send command to Admin Queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptor describing the command (non DMA mem)
 * @buff: buffer to use for indirect commands
 * @buff_size: size of buffer for indirect commands
 * @cmd_details: pointer to command details structure
 *
 * This is the main send command driver routine for the Admin Queue send
 * queue. It runs the queue, cleans the queue, etc
 **/
enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
                                            struct i40e_aq_desc *desc,
                                            void *buff, /* can be NULL */
                                            u16 buff_size,
                                            struct i40e_asq_cmd_details *cmd_details)
{
    enum i40e_status_code status = I40E_SUCCESS;
    struct i40e_dma_mem *dma_buff = NULL;
    struct i40e_asq_cmd_details *details;
    struct i40e_aq_desc *desc_on_ring;
    bool cmd_completed = false;
    u16 retval = 0;
    u32 val = 0;

    i40e_acquire_spinlock(&hw->aq.asq_spinlock);

    hw->aq.asq_last_status = I40E_AQ_RC_OK;

    if (hw->aq.asq.count == 0) {
        i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                   "AQTX: Admin queue not initialized.\n");
        status = I40E_ERR_QUEUE_EMPTY;
        goto asq_send_command_error;
    }

    val = rd32(hw, hw->aq.asq.head);
    if (val >= hw->aq.num_asq_entries) {
        i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                   "AQTX: head overrun at %d\n", val);
        status = I40E_ERR_QUEUE_EMPTY;
        goto asq_send_command_error;
    }

    details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
    if (cmd_details) {
        i40e_memcpy(details,
                    cmd_details,
                    sizeof(struct i40e_asq_cmd_details),
                    I40E_NONDMA_TO_NONDMA);

        /* If the cmd_details are defined copy the cookie. The
         * CPU_TO_LE32 is not needed here because the data is ignored
         * by the FW, only used by the driver
         */
        if (details->cookie) {
            desc->cookie_high =
                CPU_TO_LE32(I40E_HI_DWORD(details->cookie));
            desc->cookie_low =
                CPU_TO_LE32(I40E_LO_DWORD(details->cookie));
        }
    } else {
        i40e_memset(details, 0,
                    sizeof(struct i40e_asq_cmd_details),
                    I40E_NONDMA_MEM);
    }

    /* clear requested flags and then set additional flags if defined */
    desc->flags &= ~CPU_TO_LE16(details->flags_dis);
    desc->flags |= CPU_TO_LE16(details->flags_ena);

    if (buff_size > hw->aq.asq_buf_size) {
        i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                   "AQTX: Invalid buffer size: %d.\n",
                   buff_size);
        status = I40E_ERR_INVALID_SIZE;
        goto asq_send_command_error;
    }

    if (details->postpone && !details->async) {
        i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                   "AQTX: Async flag not set along with postpone flag");
        status = I40E_ERR_PARAM;
        goto asq_send_command_error;
    }

    /* call clean and check queue available function to reclaim the
     * descriptors that were processed by FW, the function returns the
     * number of desc available
     */
    /* the clean function called here could be called in a separate thread
     * in case of asynchronous completions
     */
    if (i40e_clean_asq(hw) == 0) {
        i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                   "AQTX: Error queue is full.\n");
        status = I40E_ERR_ADMIN_QUEUE_FULL;
        goto asq_send_command_error;
    }

    /* initialize the temp desc pointer with the right desc */
    desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);

    /* if the desc is available copy the temp desc to the right place */
    i40e_memcpy(desc_on_ring, desc, sizeof(struct i40e_aq_desc),
                I40E_NONDMA_TO_DMA);

    /* if buff is not NULL assume indirect command */
    if (buff != NULL) {
        dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
        /* copy the user buff into the respective DMA buff */
        i40e_memcpy(dma_buff->va, buff, buff_size,
                    I40E_NONDMA_TO_DMA);
        desc_on_ring->datalen = CPU_TO_LE16(buff_size);

        /* Update the address values in the desc with the pa value
         * for respective buffer
         */
        desc_on_ring->params.external.addr_high =
            CPU_TO_LE32(I40E_HI_DWORD(dma_buff->pa));
        desc_on_ring->params.external.addr_low =
            CPU_TO_LE32(I40E_LO_DWORD(dma_buff->pa));
    }

    /* bump the tail */
    i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
    i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
                  buff, buff_size);
    (hw->aq.asq.next_to_use)++;
    if (hw->aq.asq.next_to_use == hw->aq.asq.count)
        hw->aq.asq.next_to_use = 0;
    if (!details->postpone)
        wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);

    /* if cmd_details are not defined or async flag is not set,
     * we need to wait for desc write back
     */
    if (!details->async && !details->postpone) {
        u32 total_delay = 0;

        do {
            /* AQ designers suggest use of head for better
             * timing reliability than DD bit
             */
            if (i40e_asq_done(hw))
                break;
            i40e_usec_delay(50);
            total_delay += 50;
        } while (total_delay < hw->aq.asq_cmd_timeout);
    }

    /* if ready, copy the desc back to temp */
    if (i40e_asq_done(hw)) {
        i40e_memcpy(desc, desc_on_ring, sizeof(struct i40e_aq_desc),
                    I40E_DMA_TO_NONDMA);
        if (buff != NULL)
            i40e_memcpy(buff, dma_buff->va, buff_size,
                        I40E_DMA_TO_NONDMA);
        retval = LE16_TO_CPU(desc->retval);
        if (retval != 0) {
            i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                       "AQTX: Command completed with error 0x%X.\n",
                       retval);

            /* strip off FW internal code */
            retval &= 0xff;
        }
        cmd_completed = true;
        if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
            status = I40E_SUCCESS;
        else if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_EBUSY)
            status = I40E_ERR_NOT_READY;
        else
            status = I40E_ERR_ADMIN_QUEUE_ERROR;
        hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
    }

    i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
               "AQTX: desc and buffer writeback:\n");
    i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);

    /* save writeback aq if requested */
    if (details->wb_desc)
        i40e_memcpy(details->wb_desc, desc_on_ring,
                    sizeof(struct i40e_aq_desc), I40E_DMA_TO_NONDMA);

    /* update the error if time out occurred */
    if ((!cmd_completed) &&
        (!details->async && !details->postpone)) {
#ifdef PF_DRIVER
        if (rd32(hw, hw->aq.asq.len) & I40E_GL_ATQLEN_ATQCRIT_MASK) {
#else
        if (rd32(hw, hw->aq.asq.len) & I40E_VF_ATQLEN1_ATQCRIT_MASK) {
#endif
            i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                       "AQTX: AQ Critical error.\n");
            status = I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
        } else {
            i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                       "AQTX: Writeback timeout.\n");
            status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
        }
    }

asq_send_command_error:
    i40e_release_spinlock(&hw->aq.asq_spinlock);
    return status;
}

/**
 * i40e_fill_default_direct_cmd_desc - AQ descriptor helper function
 * @desc: pointer to the temp descriptor (non DMA mem)
 * @opcode: the opcode can be used to decide which flags to turn off or on
 *
 * Fill the desc with default values
 **/
void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc, u16 opcode)
{
    /* zero out the desc */
    i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc),
                I40E_NONDMA_MEM);
    desc->opcode = CPU_TO_LE16(opcode);
    desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_SI);
}
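/*
 * Callers typically pair the two routines above: fill a descriptor with
 * i40e_fill_default_direct_cmd_desc, optionally set command-specific
 * parameters, then hand it to i40e_asq_send_command. A minimal sketch of a
 * direct (bufferless) command follows; the wrapper name is hypothetical
 * and the opcode parameter stands in for any real i40e_aqc_opc_* value.
 */
static enum i40e_status_code send_direct_cmd(struct i40e_hw *hw, u16 opcode)
{
    struct i40e_aq_desc desc;

    i40e_fill_default_direct_cmd_desc(&desc, opcode);

    /* NULL buffer and zero length mark this as a direct command */
    return i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
}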
/**
 * i40e_clean_arq_element
 * @hw: pointer to the hw struct
 * @e: event info from the receive descriptor, includes any buffers
 * @pending: number of events that could be left to process
 *
 * This function cleans one Admin Receive Queue element and returns
 * the contents through e. It can also return how many events are
 * left to process through 'pending'
 **/
enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
                                             struct i40e_arq_event_info *e,
                                             u16 *pending)
{
    enum i40e_status_code ret_code = I40E_SUCCESS;
    u16 ntc = hw->aq.arq.next_to_clean;
    struct i40e_aq_desc *desc;
    struct i40e_dma_mem *bi;
    u16 desc_idx;
    u16 datalen;
    u16 flags;
    u16 ntu;

    /* pre-clean the event info */
    i40e_memset(&e->desc, 0, sizeof(e->desc), I40E_NONDMA_MEM);

    /* take the lock before we start messing with the ring */
    i40e_acquire_spinlock(&hw->aq.arq_spinlock);

    if (hw->aq.arq.count == 0) {
        i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                   "AQRX: Admin queue not initialized.\n");
        ret_code = I40E_ERR_QUEUE_EMPTY;
        goto clean_arq_element_err;
    }

    /* set next_to_use to head */
#ifdef INTEGRATED_VF
    if (!i40e_is_vf(hw))
        ntu = rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK;
    else
        ntu = rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK;
#else
#ifdef PF_DRIVER
    ntu = rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK;
#endif /* PF_DRIVER */
#ifdef VF_DRIVER
    ntu = rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK;
#endif /* VF_DRIVER */
#endif /* INTEGRATED_VF */
    if (ntu == ntc) {
        /* nothing to do - shouldn't need to update ring's values */
        ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
        goto clean_arq_element_out;
    }

    /* now clean the next descriptor */
    desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
    desc_idx = ntc;

    hw->aq.arq_last_status =
        (enum i40e_admin_queue_err)LE16_TO_CPU(desc->retval);
    flags = LE16_TO_CPU(desc->flags);
    if (flags & I40E_AQ_FLAG_ERR) {
        ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
        i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                   "AQRX: Event received with error 0x%X.\n",
                   hw->aq.arq_last_status);
    }

    i40e_memcpy(&e->desc, desc, sizeof(struct i40e_aq_desc),
                I40E_DMA_TO_NONDMA);
    datalen = LE16_TO_CPU(desc->datalen);
    e->msg_len = min(datalen, e->buf_len);
    if (e->msg_buf != NULL && (e->msg_len != 0))
        i40e_memcpy(e->msg_buf,
                    hw->aq.arq.r.arq_bi[desc_idx].va,
                    e->msg_len, I40E_DMA_TO_NONDMA);

    i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
    i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
                  hw->aq.arq_buf_size);

    /* Restore the original datalen and buffer address in the desc,
     * FW updates datalen to indicate the event message
     * size
     */
    bi = &hw->aq.arq.r.arq_bi[ntc];
    i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc), I40E_DMA_MEM);

    desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
    if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
        desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
    desc->datalen = CPU_TO_LE16((u16)bi->size);
    desc->params.external.addr_high = CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
    desc->params.external.addr_low = CPU_TO_LE32(I40E_LO_DWORD(bi->pa));

    /* set tail = the last cleaned desc index. */
    wr32(hw, hw->aq.arq.tail, ntc);
    /* ntc is updated to tail + 1 */
    ntc++;
    if (ntc == hw->aq.num_arq_entries)
        ntc = 0;
    hw->aq.arq.next_to_clean = ntc;
    hw->aq.arq.next_to_use = ntu;

#ifdef PF_DRIVER
    i40e_nvmupd_check_wait_event(hw, LE16_TO_CPU(e->desc.opcode), &e->desc);
#endif /* PF_DRIVER */
clean_arq_element_out:
    /* Set pending if needed, unlock and return */
    if (pending != NULL)
        *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
clean_arq_element_err:
    i40e_release_spinlock(&hw->aq.arq_spinlock);

    return ret_code;
}
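/*
 * The 'pending' computation above has to handle ring wrap-around: once the
 * cleaned index passes the hardware head, the ring size is added back so
 * the count stays non-negative. A self-contained check of that arithmetic,
 * with illustrative index values (the helper name is hypothetical):
 */
#include <assert.h>
#include <stdint.h>

/* Same wrap-aware arithmetic as the 'pending' computation above. */
static uint16_t ring_pending(uint16_t ntc, uint16_t ntu, uint16_t count)
{
    return (uint16_t)((ntc > ntu ? count : 0) + (ntu - ntc));
}

int main(void)
{
    assert(ring_pending(3, 7, 16) == 4);  /* no wrap: 4 events left */
    assert(ring_pending(15, 2, 16) == 3); /* wrapped: 16 + (2 - 15) = 3 */
    assert(ring_pending(5, 5, 16) == 0);  /* head caught up: none left */
    return 0;
}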
uint8_t HID_Host_ConfigurePipes(USB_ClassInfo_HID_Host_t* const HIDInterfaceInfo,
                                uint16_t ConfigDescriptorSize,
                                void* ConfigDescriptorData)
{
    USB_Descriptor_Endpoint_t*  DataINEndpoint  = NULL;
    USB_Descriptor_Endpoint_t*  DataOUTEndpoint = NULL;
    USB_Descriptor_Interface_t* HIDInterface    = NULL;
    USB_HID_Descriptor_HID_t*   HIDDescriptor   = NULL;

    memset(&HIDInterfaceInfo->State, 0x00, sizeof(HIDInterfaceInfo->State));

    if (DESCRIPTOR_TYPE(ConfigDescriptorData) != DTYPE_Configuration)
      return HID_ENUMERROR_InvalidConfigDescriptor;

    while (!(DataINEndpoint) || !(DataOUTEndpoint))
    {
        if (!(HIDInterface) ||
            USB_GetNextDescriptorComp(&ConfigDescriptorSize, &ConfigDescriptorData,
                                      DCOMP_HID_Host_NextHIDInterfaceEndpoint) != DESCRIPTOR_SEARCH_COMP_Found)
        {
            if (DataINEndpoint || DataOUTEndpoint)
              break;

            do
            {
                if (USB_GetNextDescriptorComp(&ConfigDescriptorSize, &ConfigDescriptorData,
                                              DCOMP_HID_Host_NextHIDInterface) != DESCRIPTOR_SEARCH_COMP_Found)
                {
                    return HID_ENUMERROR_NoCompatibleInterfaceFound;
                }

                HIDInterface = DESCRIPTOR_PCAST(ConfigDescriptorData, USB_Descriptor_Interface_t);
            } while (HIDInterfaceInfo->Config.HIDInterfaceProtocol &&
                     (HIDInterface->Protocol != HIDInterfaceInfo->Config.HIDInterfaceProtocol));

            if (USB_GetNextDescriptorComp(&ConfigDescriptorSize, &ConfigDescriptorData,
                                          DCOMP_HID_Host_NextHIDDescriptor) != DESCRIPTOR_SEARCH_COMP_Found)
            {
                return HID_ENUMERROR_NoCompatibleInterfaceFound;
            }

            HIDDescriptor = DESCRIPTOR_PCAST(ConfigDescriptorData, USB_HID_Descriptor_HID_t);

            DataINEndpoint  = NULL;
            DataOUTEndpoint = NULL;

            continue;
        }

        USB_Descriptor_Endpoint_t* EndpointData = DESCRIPTOR_PCAST(ConfigDescriptorData, USB_Descriptor_Endpoint_t);

        if ((EndpointData->EndpointAddress & ENDPOINT_DIR_MASK) == ENDPOINT_DIR_IN)
          DataINEndpoint  = EndpointData;
        else
          DataOUTEndpoint = EndpointData;
    }

    HIDInterfaceInfo->Config.DataINPipe.Size            = le16_to_cpu(DataINEndpoint->EndpointSize);
    HIDInterfaceInfo->Config.DataINPipe.EndpointAddress = DataINEndpoint->EndpointAddress;
    HIDInterfaceInfo->Config.DataINPipe.Type            = EP_TYPE_INTERRUPT;

    /* The search loop can exit with only an IN endpoint found (the OUT
     * endpoint is optional for HID devices), so guard the dereference */
    if (DataOUTEndpoint != NULL)
    {
        HIDInterfaceInfo->Config.DataOUTPipe.Size            = le16_to_cpu(DataOUTEndpoint->EndpointSize);
        HIDInterfaceInfo->Config.DataOUTPipe.EndpointAddress = DataOUTEndpoint->EndpointAddress;
        HIDInterfaceInfo->Config.DataOUTPipe.Type            = EP_TYPE_INTERRUPT;
    }

    /* This function reports HID_ENUMERROR_* codes, so a failure must not be
     * reported as 'false' (which equals HID_ENUMERROR_NoError) */
    if (!(Pipe_ConfigurePipeTable(&HIDInterfaceInfo->Config.DataINPipe, 1)))
      return HID_ENUMERROR_PipeConfigurationFailed;

    if ((DataOUTEndpoint != NULL) &&
        !(Pipe_ConfigurePipeTable(&HIDInterfaceInfo->Config.DataOUTPipe, 1)))
      return HID_ENUMERROR_PipeConfigurationFailed;

    HIDInterfaceInfo->State.InterfaceNumber      = HIDInterface->InterfaceNumber;
    HIDInterfaceInfo->State.HIDReportSize        = LE16_TO_CPU(HIDDescriptor->HIDReportLength);
    HIDInterfaceInfo->State.SupportsBootProtocol = (HIDInterface->SubClass != HID_CSCP_NonBootProtocol);
    HIDInterfaceInfo->State.LargestReportSize    = 8;
    HIDInterfaceInfo->State.IsActive             = true;

    return HID_ENUMERROR_NoError;
}
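/*
 * For context, a host application typically calls HID_Host_ConfigurePipes
 * from the enumeration-complete event, after fetching the configuration
 * descriptor. A condensed sketch in the style of the LUFA host demos;
 * 'Mouse_HID_Interface' is an application-defined USB_ClassInfo_HID_Host_t
 * instance and the buffer size is illustrative.
 */
void EVENT_USB_Host_DeviceEnumerationComplete(void)
{
    uint16_t ConfigDescriptorSize;
    uint8_t  ConfigDescriptorData[512];

    if (USB_Host_GetDeviceConfigDescriptor(1, &ConfigDescriptorSize, ConfigDescriptorData,
                                           sizeof(ConfigDescriptorData)) != HOST_GETCONFIG_Successful)
    {
        return; /* could not read the configuration descriptor */
    }

    if (HID_Host_ConfigurePipes(&Mouse_HID_Interface,
                                ConfigDescriptorSize, ConfigDescriptorData) != HID_ENUMERROR_NoError)
    {
        return; /* no compatible HID interface, or pipe setup failed */
    }

    USB_Host_SetDeviceConfiguration(1);
}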
AJ_Status AJ_WSL_SPI_HostControlRegisterRead(uint32_t targetRegister, uint8_t increment, uint16_t cbLen, uint8_t* spi_data)
{
    aj_spi_status rc;
    wsl_spi_command send;
    AJ_Status status = AJ_ERR_SPI_READ; // default to the read-failure status for this read routine
    uint8_t pcs = AJ_WSL_SPI_PCS;

    // write the size (setting 0x40 disables address auto-increment)
    AJ_WSL_SPI_RegisterWrite(AJ_WSL_SPI_REG_HOST_CTRL_BYTE_SIZE, cbLen | (increment ? 0 : 0x40));

    // now send the host_control_config register update
    {
        uint16_t externalRegister = 0;
        externalRegister = (1 << 15) | (0 << 14) | (targetRegister); // external access (bit 15), read (bit 14 clear), target register
        externalRegister = CPU_TO_LE16(externalRegister);
        AJ_WSL_SPI_RegisterWrite(AJ_WSL_SPI_REG_HOST_CTRL_CONFIG, externalRegister);
    }

    // get the spi status
    {
        uint16_t spi_16 = 0;
        AJ_WSL_SPI_RegisterRead(AJ_WSL_SPI_REG_SPI_STATUS, (uint8_t*)&spi_16);
        spi_16 = LE16_TO_CPU(spi_16);
    }

    // initialize an SPI CMD structure with the register of interest
    send.cmd_rx = AJ_WSL_SPI_READ;
    send.cmd_reg = AJ_WSL_SPI_INTERNAL;
    send.cmd_addr = AJ_WSL_SPI_REG_HOST_CTRL_RD_PORT;

    // write the command, one byte at a time, in the right order
    rc = AJ_SPI_WRITE(AJ_WSL_SPI_DEVICE, *((uint8_t*)&send + 1), AJ_WSL_SPI_PCS, AJ_WSL_SPI_CONTINUE);
    AJ_ASSERT(rc == SPI_OK);
    rc = AJ_SPI_READ(AJ_WSL_SPI_DEVICE, spi_data, &pcs); // toss the byte clocked back
    AJ_ASSERT(rc == SPI_OK);
    rc = AJ_SPI_WRITE(AJ_WSL_SPI_DEVICE, *(uint8_t*)&send, AJ_WSL_SPI_PCS, AJ_WSL_SPI_END);
    AJ_ASSERT(rc == SPI_OK);
    rc = AJ_SPI_READ(AJ_WSL_SPI_DEVICE, spi_data, &pcs); // toss the byte clocked back
    AJ_ASSERT(rc == SPI_OK);

    // now, read the data back
    if (rc == SPI_OK) {
        while ((rc == SPI_OK) && (cbLen > 1)) {
            // clock out a dummy byte so the target shifts the next data byte back
            rc = AJ_SPI_WRITE(AJ_WSL_SPI_DEVICE, 0, AJ_WSL_SPI_PCS, AJ_WSL_SPI_CONTINUE);
            AJ_ASSERT(rc == SPI_OK);
            rc = AJ_SPI_READ(AJ_WSL_SPI_DEVICE, spi_data, &pcs);
            AJ_ASSERT(rc == SPI_OK);
            spi_data++;
            cbLen = cbLen - 1;
        }
        if (rc == SPI_OK) {
            rc = AJ_SPI_WRITE(AJ_WSL_SPI_DEVICE, 0, AJ_WSL_SPI_PCS, AJ_WSL_SPI_END);
            AJ_ASSERT(rc == SPI_OK);
            rc = AJ_SPI_READ(AJ_WSL_SPI_DEVICE, spi_data, &pcs);
            AJ_ASSERT(rc == SPI_OK);
        }
        if (rc == SPI_OK) {
            status = AJ_OK;
        }
    }

    // clear the rd/wr buffer interrupt
    {
        uint16_t spi_16 = 0x300;
        spi_16 = CPU_TO_LE16(spi_16);
        AJ_WSL_SPI_RegisterWrite(AJ_WSL_SPI_REG_INTR_CAUSE, spi_16);
    }
    return status;
}
uint8_t HID_Host_ConfigurePipes(USB_ClassInfo_HID_Host_t* const HIDInterfaceInfo,
                                uint16_t ConfigDescriptorSize,
                                void* ConfigDescriptorData)
{
    USB_Descriptor_Endpoint_t*  DataINEndpoint  = NULL;
    USB_Descriptor_Endpoint_t*  DataOUTEndpoint = NULL;
    USB_Descriptor_Interface_t* HIDInterface    = NULL;
    USB_HID_Descriptor_HID_t*   HIDDescriptor   = NULL;
    uint8_t portnum = HIDInterfaceInfo->Config.PortNumber;

    memset(&HIDInterfaceInfo->State, 0x00, sizeof(HIDInterfaceInfo->State));

    if (DESCRIPTOR_TYPE(ConfigDescriptorData) != DTYPE_Configuration)
      return HID_ENUMERROR_InvalidConfigDescriptor;

    while (!(DataINEndpoint) || !(DataOUTEndpoint))
    {
        if (!(HIDInterface) ||
            USB_GetNextDescriptorComp(&ConfigDescriptorSize, &ConfigDescriptorData,
                                      DCOMP_HID_Host_NextHIDInterfaceEndpoint) != DESCRIPTOR_SEARCH_COMP_Found)
        {
            if (DataINEndpoint || DataOUTEndpoint)
              break;

            do
            {
                if (USB_GetNextDescriptorComp(&ConfigDescriptorSize, &ConfigDescriptorData,
                                              DCOMP_HID_Host_NextHIDInterface) != DESCRIPTOR_SEARCH_COMP_Found)
                {
                    return HID_ENUMERROR_NoCompatibleInterfaceFound;
                }

                HIDInterface = DESCRIPTOR_PCAST(ConfigDescriptorData, USB_Descriptor_Interface_t);
            } while (HIDInterfaceInfo->Config.HIDInterfaceProtocol &&
                     (HIDInterface->Protocol != HIDInterfaceInfo->Config.HIDInterfaceProtocol));

            if (USB_GetNextDescriptorComp(&ConfigDescriptorSize, &ConfigDescriptorData,
                                          DCOMP_HID_Host_NextHIDDescriptor) != DESCRIPTOR_SEARCH_COMP_Found)
            {
                return HID_ENUMERROR_NoCompatibleInterfaceFound;
            }

            HIDDescriptor = DESCRIPTOR_PCAST(ConfigDescriptorData, USB_HID_Descriptor_HID_t);

            DataINEndpoint  = NULL;
            DataOUTEndpoint = NULL;

            continue;
        }

        USB_Descriptor_Endpoint_t* EndpointData = DESCRIPTOR_PCAST(ConfigDescriptorData, USB_Descriptor_Endpoint_t);

        if ((EndpointData->EndpointAddress & ENDPOINT_DIR_MASK) == ENDPOINT_DIR_IN)
          DataINEndpoint  = EndpointData;
        else
          DataOUTEndpoint = EndpointData;
    }

    for (uint8_t PipeNum = 1; PipeNum < PIPE_TOTAL_PIPES; PipeNum++)
    {
        uint16_t Size;
        uint8_t  Type;
        uint8_t  Token;
        uint8_t  EndpointAddress;
        uint8_t  InterruptPeriod;
        bool     DoubleBanked;

        if (PipeNum == HIDInterfaceInfo->Config.DataINPipeNumber)
        {
            Size            = le16_to_cpu(DataINEndpoint->EndpointSize);
            EndpointAddress = DataINEndpoint->EndpointAddress;
            Token           = PIPE_TOKEN_IN;
            Type            = EP_TYPE_INTERRUPT;
            DoubleBanked    = HIDInterfaceInfo->Config.DataINPipeDoubleBank;
            InterruptPeriod = DataINEndpoint->PollingIntervalMS;

            HIDInterfaceInfo->State.DataINPipeSize = DataINEndpoint->EndpointSize;
        }
        else if (PipeNum == HIDInterfaceInfo->Config.DataOUTPipeNumber)
        {
            if (DataOUTEndpoint == NULL)
              continue;

            Size            = le16_to_cpu(DataOUTEndpoint->EndpointSize);
            EndpointAddress = DataOUTEndpoint->EndpointAddress;
            Token           = PIPE_TOKEN_OUT;
            Type            = EP_TYPE_INTERRUPT;
            DoubleBanked    = HIDInterfaceInfo->Config.DataOUTPipeDoubleBank;
            InterruptPeriod = DataOUTEndpoint->PollingIntervalMS;

            HIDInterfaceInfo->State.DataOUTPipeSize   = DataOUTEndpoint->EndpointSize;
            HIDInterfaceInfo->State.DeviceUsesOUTPipe = true;
        }
        else
        {
            continue;
        }

        if (!(Pipe_ConfigurePipe(portnum, PipeNum, Type, Token, EndpointAddress, Size,
                                 DoubleBanked ? PIPE_BANK_DOUBLE : PIPE_BANK_SINGLE)))
        {
            return HID_ENUMERROR_PipeConfigurationFailed;
        }

        if (InterruptPeriod)
          Pipe_SetInterruptPeriod(InterruptPeriod);
    }

    HIDInterfaceInfo->State.InterfaceNumber      = HIDInterface->InterfaceNumber;
    HIDInterfaceInfo->State.HIDReportSize        = LE16_TO_CPU(HIDDescriptor->HIDReportLength);
    HIDInterfaceInfo->State.SupportsBootProtocol = (HIDInterface->SubClass != HID_CSCP_NonBootProtocol);
    HIDInterfaceInfo->State.LargestReportSize    = 8;
    HIDInterfaceInfo->State.IsActive             = true;

    return HID_ENUMERROR_NoError;
}
/**
 * i40e_cee_to_dcb_v1_config
 * @cee_cfg: pointer to CEE v1 response configuration struct
 * @dcbcfg: DCB configuration struct
 *
 * Convert CEE v1 configuration from firmware to DCB configuration
 **/
static void i40e_cee_to_dcb_v1_config(
    struct i40e_aqc_get_cee_dcb_cfg_v1_resp *cee_cfg,
    struct i40e_dcbx_config *dcbcfg)
{
    u16 status, tlv_status = LE16_TO_CPU(cee_cfg->tlv_status);
    u16 app_prio = LE16_TO_CPU(cee_cfg->oper_app_prio);
    u8 i, tc, err;

    /* CEE PG data to ETS config */
    dcbcfg->etscfg.maxtcs = cee_cfg->oper_num_tc;

    /* Note that the FW creates the oper_prio_tc nibbles reversed
     * from those in the CEE Priority Group sub-TLV.
     */
    for (i = 0; i < 4; i++) {
        tc = (u8)((cee_cfg->oper_prio_tc[i] &
                   I40E_CEE_PGID_PRIO_0_MASK) >>
                  I40E_CEE_PGID_PRIO_0_SHIFT);
        dcbcfg->etscfg.prioritytable[i * 2] = tc;
        tc = (u8)((cee_cfg->oper_prio_tc[i] &
                   I40E_CEE_PGID_PRIO_1_MASK) >>
                  I40E_CEE_PGID_PRIO_1_SHIFT);
        dcbcfg->etscfg.prioritytable[i * 2 + 1] = tc;
    }

    for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
        dcbcfg->etscfg.tcbwtable[i] = cee_cfg->oper_tc_bw[i];

    for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
        if (dcbcfg->etscfg.prioritytable[i] == I40E_CEE_PGID_STRICT) {
            /* Map it to next empty TC */
            dcbcfg->etscfg.prioritytable[i] = cee_cfg->oper_num_tc - 1;
            dcbcfg->etscfg.tsatable[i] = I40E_IEEE_TSA_STRICT;
        } else {
            dcbcfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS;
        }
    }

    /* CEE PFC data to DCB PFC config */
    dcbcfg->pfc.pfcenable = cee_cfg->oper_pfc_en;
    dcbcfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;

    status = (tlv_status & I40E_AQC_CEE_APP_STATUS_MASK) >>
             I40E_AQC_CEE_APP_STATUS_SHIFT;
    err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0;
    /* Add APPs if Error is False */
    if (!err) {
        /* CEE operating configuration supports FCoE/iSCSI/FIP only */
        dcbcfg->numapps = I40E_CEE_OPER_MAX_APPS;

        /* FCoE APP */
        dcbcfg->app[0].priority = (app_prio & I40E_AQC_CEE_APP_FCOE_MASK) >>
                                  I40E_AQC_CEE_APP_FCOE_SHIFT;
        dcbcfg->app[0].selector = I40E_APP_SEL_ETHTYPE;
        dcbcfg->app[0].protocolid = I40E_APP_PROTOID_FCOE;

        /* iSCSI APP */
        dcbcfg->app[1].priority = (app_prio & I40E_AQC_CEE_APP_ISCSI_MASK) >>
                                  I40E_AQC_CEE_APP_ISCSI_SHIFT;
        dcbcfg->app[1].selector = I40E_APP_SEL_TCPIP;
        dcbcfg->app[1].protocolid = I40E_APP_PROTOID_ISCSI;

        /* FIP APP */
        dcbcfg->app[2].priority = (app_prio & I40E_AQC_CEE_APP_FIP_MASK) >>
                                  I40E_AQC_CEE_APP_FIP_SHIFT;
        dcbcfg->app[2].selector = I40E_APP_SEL_ETHTYPE;
        dcbcfg->app[2].protocolid = I40E_APP_PROTOID_FIP;
    }
}
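/*
 * The priority-table loop above unpacks two 4-bit PGIDs per byte, with the
 * nibble pair reversed by firmware relative to the CEE sub-TLV. A
 * self-contained illustration of the unpacking; the mask and shift macros
 * are illustrative stand-ins for the I40E_CEE_PGID_PRIO_* constants.
 */
#include <stdint.h>
#include <stdio.h>

#define PGID_PRIO_0_MASK  0x0Fu /* low nibble: one priority's PGID */
#define PGID_PRIO_0_SHIFT 0
#define PGID_PRIO_1_MASK  0xF0u /* high nibble: the paired priority's PGID */
#define PGID_PRIO_1_SHIFT 4

int main(void)
{
    uint8_t oper_prio_tc = 0x31; /* sample byte: PGIDs 1 and 3 */
    uint8_t prioritytable[2];

    prioritytable[0] = (uint8_t)((oper_prio_tc & PGID_PRIO_0_MASK) >> PGID_PRIO_0_SHIFT);
    prioritytable[1] = (uint8_t)((oper_prio_tc & PGID_PRIO_1_MASK) >> PGID_PRIO_1_SHIFT);

    printf("prio 0 -> TC %u, prio 1 -> TC %u\n", prioritytable[0], prioritytable[1]);
    return 0; /* prints: prio 0 -> TC 1, prio 1 -> TC 3 */
}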
static void write_BOOT_PARAM(void)
{
    uint32_t spi_API = 0;
    uint16_t spi_API16 = 0;

    AJ_AlwaysPrintf(("\n\n**************\nTEST: %s\n\n", __FUNCTION__));

    // read the clock speed value
    spi_API = 0x88888888;
    AJ_WSL_SPI_HostControlRegisterWrite(AJ_WSL_SPI_TARGET_ADDR_READ + 1, FALSE, 4, (uint8_t*)&spi_API);
    spi_API = 0x42424242;
    AJ_WSL_SPI_HostControlRegisterWrite(AJ_WSL_SPI_TARGET_ADDR_READ + 2, FALSE, 4, (uint8_t*)&spi_API);
    spi_API = 0x00000000;
    AJ_WSL_SPI_HostControlRegisterWrite(AJ_WSL_SPI_TARGET_ADDR_READ + 3, FALSE, 4, (uint8_t*)&spi_API);
    spi_API = AJ_WSL_SPI_TARGET_CLOCK_SPEED_ADDR; //0x00428878;
    spi_API = CPU_TO_LE32(spi_API);
    AJ_WSL_SPI_HostControlRegisterWrite(AJ_WSL_SPI_TARGET_ADDR_READ, TRUE, 4, (uint8_t*)&spi_API);

    // now read back the value from the data port.
    AJ_WSL_SPI_HostControlRegisterRead(AJ_WSL_SPI_TARGET_VALUE, TRUE, 4, (uint8_t*)&spi_API);
    spi_API = LE32_TO_CPU(spi_API);
    //AJ_InfoPrintf(("cycles read back was %ld \n", spi_API));

    // read the flash is present value
    {
        // let's try this dance of writing multiple times...
        spi_API = 0x88888888;
        AJ_WSL_SPI_HostControlRegisterWrite(AJ_WSL_SPI_TARGET_ADDR_READ + 1, FALSE, 4, (uint8_t*)&spi_API);
        spi_API = 0x42424242;
        AJ_WSL_SPI_HostControlRegisterWrite(AJ_WSL_SPI_TARGET_ADDR_READ + 2, FALSE, 4, (uint8_t*)&spi_API);
        spi_API = 0x00000000;
        AJ_WSL_SPI_HostControlRegisterWrite(AJ_WSL_SPI_TARGET_ADDR_READ + 3, FALSE, 4, (uint8_t*)&spi_API);
        spi_API = AJ_WSL_SPI_TARGET_FLASH_PRESENT_ADDR; //0x0042880C;
        spi_API = CPU_TO_LE32(spi_API);
        AJ_WSL_SPI_HostControlRegisterWrite(AJ_WSL_SPI_TARGET_ADDR_READ, TRUE, 4, (uint8_t*)&spi_API);

        // now read back the value from the data port.
        AJ_WSL_SPI_HostControlRegisterRead(AJ_WSL_SPI_TARGET_VALUE, TRUE, 4, (uint8_t*)&spi_API);
        spi_API = LE32_TO_CPU(spi_API);
        //AJ_InfoPrintf(("host if flash is present read back was %ld \n", spi_API));
    }

    // now write out the flash_is_present value
    spi_API = 0x00000002;
    spi_API = CPU_TO_LE32(spi_API);
    AJ_WSL_SPI_HostControlRegisterWrite(AJ_WSL_SPI_TARGET_VALUE, TRUE, 4, (uint8_t*)&spi_API);
    spi_API = 0x88888888;
    AJ_WSL_SPI_HostControlRegisterWrite(AJ_WSL_SPI_TARGET_ADDR_WRITE + 1, FALSE, 4, (uint8_t*)&spi_API);
    spi_API = 0x42424242;
    AJ_WSL_SPI_HostControlRegisterWrite(AJ_WSL_SPI_TARGET_ADDR_WRITE + 2, FALSE, 4, (uint8_t*)&spi_API);
    spi_API = 0x00000000;
    AJ_WSL_SPI_HostControlRegisterWrite(AJ_WSL_SPI_TARGET_ADDR_WRITE + 3, FALSE, 4, (uint8_t*)&spi_API);
    spi_API = AJ_WSL_SPI_TARGET_FLASH_PRESENT_ADDR; //0x0042880C;
    spi_API = CPU_TO_LE32(spi_API);
    AJ_WSL_SPI_HostControlRegisterWrite(AJ_WSL_SPI_TARGET_ADDR_WRITE, TRUE, 4, (uint8_t*)&spi_API);

    // read the mbox block size
    spi_API = 0x88888888;
    AJ_WSL_SPI_HostControlRegisterWrite(AJ_WSL_SPI_TARGET_ADDR_READ + 1, FALSE, 4, (uint8_t*)&spi_API);
    spi_API = 0x42424242;
    AJ_WSL_SPI_HostControlRegisterWrite(AJ_WSL_SPI_TARGET_ADDR_READ + 2, FALSE, 4, (uint8_t*)&spi_API);
    spi_API = 0x00000000;
    AJ_WSL_SPI_HostControlRegisterWrite(AJ_WSL_SPI_TARGET_ADDR_READ + 3, FALSE, 4, (uint8_t*)&spi_API);
    spi_API = AJ_WSL_SPI_TARGET_MBOX_BLOCKSZ_ADDR; //0x0042886C;
    spi_API = CPU_TO_LE32(spi_API);
    AJ_WSL_SPI_HostControlRegisterWrite(AJ_WSL_SPI_TARGET_ADDR_READ, TRUE, 4, (uint8_t*)&spi_API);

    // now read back the value from the data port.
    AJ_WSL_SPI_HostControlRegisterRead(AJ_WSL_SPI_TARGET_VALUE, TRUE, 4, (uint8_t*)&spi_API);
    spi_API = LE32_TO_CPU(spi_API);
    AJ_WSL_MBOX_BLOCK_SIZE = spi_API;
    //AJ_InfoPrintf(("block size was %ld \n", spi_API));

    spi_API16 = 0x001f;
    spi_API16 = CPU_TO_LE16(spi_API16);
    AJ_WSL_SPI_RegisterWrite(AJ_WSL_SPI_REG_INTR_ENABLE, spi_API16);

    // wait until the write has been processed.
    spi_API16 = 0;
    while (!(spi_API16 & 1)) {
        AJ_WSL_SPI_RegisterRead(AJ_WSL_SPI_REG_SPI_STATUS, (uint8_t*)&spi_API16);
        spi_API16 = LE16_TO_CPU(spi_API16);
        uint16_t space = 0;
        AJ_WSL_SPI_RegisterRead(AJ_WSL_SPI_REG_WRBUF_SPC_AVA, (uint8_t*)&space);
    }

    // clear the read and write interrupt cause register
    spi_API16 = (1 << 9) | (1 << 8); // use the 16-bit scratch variable; the register is 16 bits wide
    spi_API16 = CPU_TO_LE16(spi_API16);
    AJ_WSL_SPI_RegisterWrite(AJ_WSL_SPI_REG_INTR_CAUSE, spi_API16);

    {
        spi_API16 = 0x1;
        spi_API16 = CPU_TO_LE16(spi_API16);
        AJ_WSL_SPI_RegisterWrite(AJ_WSL_SPI_REG_HOST_CTRL_BYTE_SIZE, spi_API16);

        // waiting seems to allow the following write to succeed and thus enable interrupts.
        // we need something more deterministic.
        AJ_Sleep(1000);

        spi_API16 = 0x00FF;
        spi_API16 = CPU_TO_LE16(spi_API16); // convert the value actually written (the original converted spi_API by mistake)
        AJ_WSL_SPI_HostControlRegisterWrite(AJ_WSL_SPI_CPU_INT_STATUS, FALSE, 1, (uint8_t*)&spi_API16);
    }
}
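/*
 * The scratch-write / address-write / data-port-read sequence repeats three
 * times in write_BOOT_PARAM above; factoring it into a helper makes the
 * intent clearer. A hedged sketch: 'wsl_read_target_u32' is a hypothetical
 * name and the scratch values simply mirror the ones used above. With it,
 * the body reduces to calls like
 * wsl_read_target_u32(AJ_WSL_SPI_TARGET_MBOX_BLOCKSZ_ADDR).
 */
static uint32_t wsl_read_target_u32(uint32_t targetAddr)
{
    uint32_t scratch;
    uint32_t value = 0;

    // prime the scratch registers as the sequence above does
    scratch = 0x88888888;
    AJ_WSL_SPI_HostControlRegisterWrite(AJ_WSL_SPI_TARGET_ADDR_READ + 1, FALSE, 4, (uint8_t*)&scratch);
    scratch = 0x42424242;
    AJ_WSL_SPI_HostControlRegisterWrite(AJ_WSL_SPI_TARGET_ADDR_READ + 2, FALSE, 4, (uint8_t*)&scratch);
    scratch = 0x00000000;
    AJ_WSL_SPI_HostControlRegisterWrite(AJ_WSL_SPI_TARGET_ADDR_READ + 3, FALSE, 4, (uint8_t*)&scratch);

    // latch the target address, then pull the value back through the data port
    scratch = CPU_TO_LE32(targetAddr);
    AJ_WSL_SPI_HostControlRegisterWrite(AJ_WSL_SPI_TARGET_ADDR_READ, TRUE, 4, (uint8_t*)&scratch);
    AJ_WSL_SPI_HostControlRegisterRead(AJ_WSL_SPI_TARGET_VALUE, TRUE, 4, (uint8_t*)&value);
    return LE32_TO_CPU(value);
}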
/**
 * i40e_asq_send_command - send command to Admin Queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptor describing the command (non DMA mem)
 * @buff: buffer to use for indirect commands
 * @buff_size: size of buffer for indirect commands
 * @cmd_details: pointer to command details structure
 *
 * This is the main send command driver routine for the Admin Queue send
 * queue. It runs the queue, cleans the queue, etc
 **/
enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
                                            struct i40e_aq_desc *desc,
                                            void *buff, /* can be NULL */
                                            u16 buff_size,
                                            struct i40e_asq_cmd_details *cmd_details)
{
#ifdef I40E_QV
    struct i40e_aq_desc qv_desc = {0};
    struct i40e_aq_desc *qv_desc_on_ring;
#endif /* I40E_QV */
    enum i40e_status_code status = I40E_SUCCESS;
    struct i40e_dma_mem *dma_buff = NULL;
    struct i40e_asq_cmd_details *details;
    struct i40e_aq_desc *desc_on_ring;
    bool cmd_completed = FALSE;
    u16 retval = 0;
    u32 val = 0;

    val = rd32(hw, hw->aq.asq.head);
    if (val >= hw->aq.num_asq_entries) {
        i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                   "AQTX: head overrun at %d\n", val);
        status = I40E_ERR_QUEUE_EMPTY;
        goto asq_send_command_exit;
    }

    if (hw->aq.asq.count == 0) {
        i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                   "AQTX: Admin queue not initialized.\n");
        status = I40E_ERR_QUEUE_EMPTY;
        goto asq_send_command_exit;
    }

    if (i40e_is_nvm_update_op(desc) && hw->aq.nvm_busy) {
        i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: NVM busy.\n");
        status = I40E_ERR_NVM;
        goto asq_send_command_exit;
    }

    details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
    if (cmd_details) {
        i40e_memcpy(details,
                    cmd_details,
                    sizeof(struct i40e_asq_cmd_details),
                    I40E_NONDMA_TO_NONDMA);

        /* If the cmd_details are defined copy the cookie. The
         * CPU_TO_LE32 is not needed here because the data is ignored
         * by the FW, only used by the driver
         */
        if (details->cookie) {
            desc->cookie_high =
                CPU_TO_LE32(I40E_HI_DWORD(details->cookie));
            desc->cookie_low =
                CPU_TO_LE32(I40E_LO_DWORD(details->cookie));
        }
    } else {
        i40e_memset(details, 0,
                    sizeof(struct i40e_asq_cmd_details),
                    I40E_NONDMA_MEM);
    }

    /* clear requested flags and then set additional flags if defined */
    desc->flags &= ~CPU_TO_LE16(details->flags_dis);
    desc->flags |= CPU_TO_LE16(details->flags_ena);

    i40e_acquire_spinlock(&hw->aq.asq_spinlock);

    if (buff_size > hw->aq.asq_buf_size) {
        i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                   "AQTX: Invalid buffer size: %d.\n",
                   buff_size);
        status = I40E_ERR_INVALID_SIZE;
        goto asq_send_command_error;
    }

    if (details->postpone && !details->async) {
        i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                   "AQTX: Async flag not set along with postpone flag");
        status = I40E_ERR_PARAM;
        goto asq_send_command_error;
    }

    /* call clean and check queue available function to reclaim the
     * descriptors that were processed by FW, the function returns the
     * number of desc available
     */
    /* the clean function called here could be called in a separate thread
     * in case of asynchronous completions
     */
    if (i40e_clean_asq(hw) == 0) {
        i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                   "AQTX: Error queue is full.\n");
        status = I40E_ERR_ADMIN_QUEUE_FULL;
        goto asq_send_command_error;
    }

    /* initialize the temp desc pointer with the right desc */
    desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);

    /* if the desc is available copy the temp desc to the right place */
    i40e_memcpy(desc_on_ring, desc, sizeof(struct i40e_aq_desc),
                I40E_NONDMA_TO_DMA);

#ifdef I40E_QV
    /* copy the descriptor from ring to userspace buffer */
    i40e_memcpy(&qv_desc, desc_on_ring, sizeof(struct i40e_aq_desc),
                I40E_DMA_TO_NONDMA);
    qv_desc_on_ring = desc_on_ring;
    desc_on_ring = &qv_desc;
#endif /* I40E_QV */

    /* if buff is not NULL assume indirect command */
    if (buff != NULL) {
        dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
        /* copy the user buff into the respective DMA buff */
        i40e_memcpy(dma_buff->va, buff, buff_size,
                    I40E_NONDMA_TO_DMA);
        desc_on_ring->datalen = CPU_TO_LE16(buff_size);

        /* Update the address values in the desc with the pa value
         * for respective buffer
         */
        desc_on_ring->params.external.addr_high =
            CPU_TO_LE32(I40E_HI_DWORD(dma_buff->pa));
        desc_on_ring->params.external.addr_low =
            CPU_TO_LE32(I40E_LO_DWORD(dma_buff->pa));
#ifdef I40E_QV
        /* copy the descriptor from userspace buffer to ring */
        i40e_memcpy(qv_desc_on_ring, desc_on_ring,
                    sizeof(struct i40e_aq_desc), I40E_NONDMA_TO_DMA);
#endif /* I40E_QV */
    }

    /* bump the tail */
    i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
    i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
                  buff, buff_size);
    (hw->aq.asq.next_to_use)++;
    if (hw->aq.asq.next_to_use == hw->aq.asq.count)
        hw->aq.asq.next_to_use = 0;
    if (!details->postpone)
        wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);

    /* if cmd_details are not defined or async flag is not set,
     * we need to wait for desc write back
     */
    if (!details->async && !details->postpone) {
        u32 total_delay = 0;
        u32 delay_len = 10;

        do {
#ifdef I40E_QV
            /* copy the descriptor from ring to user buffer */
            i40e_memcpy(desc_on_ring, qv_desc_on_ring,
                        sizeof(struct i40e_aq_desc), I40E_DMA_TO_NONDMA);
#endif /* I40E_QV */
            /* AQ designers suggest use of head for better
             * timing reliability than DD bit
             */
            if (i40e_asq_done(hw))
                break;
            /* ugh! delay while spin_lock */
            i40e_usec_delay(delay_len);
            total_delay += delay_len;
        } while (total_delay < hw->aq.asq_cmd_timeout);
    }

    /* if ready, copy the desc back to temp */
    if (i40e_asq_done(hw)) {
#ifdef I40E_QV
        /* Swap pointer back */
        desc_on_ring = qv_desc_on_ring;
#endif /* I40E_QV */
        i40e_memcpy(desc, desc_on_ring, sizeof(struct i40e_aq_desc),
                    I40E_DMA_TO_NONDMA);
        if (buff != NULL)
            i40e_memcpy(buff, dma_buff->va, buff_size,
                        I40E_DMA_TO_NONDMA);
        retval = LE16_TO_CPU(desc->retval);
        if (retval != 0) {
            i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                       "AQTX: Command completed with error 0x%X.\n",
                       retval);

            /* strip off FW internal code */
            retval &= 0xff;
        }
        cmd_completed = TRUE;
        if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
            status = I40E_SUCCESS;
        else
            status = I40E_ERR_ADMIN_QUEUE_ERROR;
        hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
    }

    i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
               "AQTX: desc and buffer writeback:\n");
    i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);

    /* update the error if time out occurred */
    if ((!cmd_completed) &&
        (!details->async && !details->postpone)) {
        i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                   "AQTX: Writeback timeout.\n");
        status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
    }

    if (!status && i40e_is_nvm_update_op(desc))
        hw->aq.nvm_busy = TRUE;

asq_send_command_error:
    i40e_release_spinlock(&hw->aq.asq_spinlock);
asq_send_command_exit:
    return status;
}
/**
 * i40e_clean_arq_element
 * @hw: pointer to the hw struct
 * @e: event info from the receive descriptor, includes any buffers
 * @pending: number of events that could be left to process
 *
 * This function cleans one Admin Receive Queue element and returns
 * the contents through e. It can also return how many events are
 * left to process through 'pending'
 **/
enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
                                             struct i40e_arq_event_info *e,
                                             u16 *pending)
{
#ifdef I40E_QV
    struct i40e_aq_desc qv_desc = {0};
    struct i40e_aq_desc *qv_desc_on_ring;
#endif /* I40E_QV */
    enum i40e_status_code ret_code = I40E_SUCCESS;
    u16 ntc = hw->aq.arq.next_to_clean;
    struct i40e_aq_desc *desc;
    struct i40e_dma_mem *bi;
    u16 desc_idx;
    u16 datalen;
    u16 flags;
    u16 ntu;

    /* take the lock before we start messing with the ring */
    i40e_acquire_spinlock(&hw->aq.arq_spinlock);

    /* set next_to_use to head */
    ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
    if (ntu == ntc) {
        /* nothing to do - shouldn't need to update ring's values */
        i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                   "AQRX: Queue is empty.\n");
        ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
        goto clean_arq_element_out;
    }

    /* now clean the next descriptor */
    desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
#ifdef I40E_QV
    /* copy the descriptor from ring to userspace buffer */
    i40e_memcpy(&qv_desc, desc, sizeof(struct i40e_aq_desc),
                I40E_DMA_TO_NONDMA);
    qv_desc_on_ring = desc;
    desc = &qv_desc;
#endif /* I40E_QV */
    desc_idx = ntc;

    flags = LE16_TO_CPU(desc->flags);
    if (flags & I40E_AQ_FLAG_ERR) {
        ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
        hw->aq.arq_last_status =
            (enum i40e_admin_queue_err)LE16_TO_CPU(desc->retval);
        i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                   "AQRX: Event received with error 0x%X.\n",
                   hw->aq.arq_last_status);
    }

    i40e_memcpy(&e->desc, desc, sizeof(struct i40e_aq_desc),
                I40E_DMA_TO_NONDMA);
    datalen = LE16_TO_CPU(desc->datalen);
    e->msg_size = min(datalen, e->msg_size);
    if (e->msg_buf != NULL && (e->msg_size != 0))
        i40e_memcpy(e->msg_buf,
                    hw->aq.arq.r.arq_bi[desc_idx].va,
                    e->msg_size, I40E_DMA_TO_NONDMA);

    i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
    i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
                  hw->aq.arq_buf_size);

    /* Restore the original datalen and buffer address in the desc,
     * FW updates datalen to indicate the event message
     * size
     */
    bi = &hw->aq.arq.r.arq_bi[ntc];
    i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc), I40E_DMA_MEM);

    desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
    if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
        desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
    desc->datalen = CPU_TO_LE16((u16)bi->size);
    desc->params.external.addr_high = CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
    desc->params.external.addr_low = CPU_TO_LE32(I40E_LO_DWORD(bi->pa));

#ifdef I40E_QV
    /* copy the descriptor from userspace buffer to ring */
    i40e_memcpy(qv_desc_on_ring, desc, sizeof(struct i40e_aq_desc),
                I40E_NONDMA_TO_DMA);
#endif /* I40E_QV */
    /* set tail = the last cleaned desc index. */
    wr32(hw, hw->aq.arq.tail, ntc);
    /* ntc is updated to tail + 1 */
    ntc++;
    if (ntc == hw->aq.num_arq_entries)
        ntc = 0;
    hw->aq.arq.next_to_clean = ntc;
    hw->aq.arq.next_to_use = ntu;

clean_arq_element_out:
    /* Set pending if needed, unlock and return */
    if (pending != NULL)
        *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
    i40e_release_spinlock(&hw->aq.arq_spinlock);

    if (i40e_is_nvm_update_op(&e->desc)) {
        hw->aq.nvm_busy = FALSE;
        if (hw->aq.nvm_release_on_done) {
            i40e_release_nvm(hw);
            hw->aq.nvm_release_on_done = FALSE;
        }
    }

    return ret_code;
}