/**
 * __wmi_control_rx() - process one received WMI event buffer (legacy adf path)
 * @wmi_handle: WMI layer handle
 * @evt_buf: nbuf carrying a WMI_CMD_HDR followed by the TLV payload
 *
 * Reads the command id, strips the WMI header, validates/pads the event
 * TLVs, then dispatches either to a registered per-event handler (for
 * event ids in a WMI group) or to the fixed SERVICE_READY/READY handlers.
 * The nbuf and any TLV scratch allocation are always released via the
 * common "end" exit path.
 */
void __wmi_control_rx(struct wmi_unified *wmi_handle, wmi_buf_t evt_buf)
{
    u_int32_t id;
    u_int8_t *data;
    u_int32_t len;
    void *wmi_cmd_struct_ptr = NULL;
    int tlv_ok_status = 0;

    id = WMI_GET_FIELD(adf_nbuf_data(evt_buf), WMI_CMD_HDR, COMMANDID);

    /* Strip the WMI command header; bail out if the buffer is too short.
     * wmi_cmd_struct_ptr is still NULL here, so the end path only frees
     * the nbuf. */
    if (adf_nbuf_pull_head(evt_buf, sizeof(WMI_CMD_HDR)) == NULL)
        goto end;

    data = adf_nbuf_data(evt_buf);
    len = adf_nbuf_len(evt_buf);

    /* Validate and pad (if necessary) the TLVs */
    tlv_ok_status = wmitlv_check_and_pad_event_tlvs(wmi_handle->scn_handle,
                        data, len, id, &wmi_cmd_struct_ptr);
    if (tlv_ok_status != 0) {
        /* Fix: id is printed as hex, so use %x (was "0x%d", which
         * printed a decimal value behind a hex prefix). */
        pr_err("%s: Error: id=0x%x, wmitlv_check_and_pad_tlvs ret=%d\n",
               __func__, id, tlv_ok_status);
        goto end;
    }

#ifdef FEATURE_WLAN_D0WOW
    if (wmi_get_d0wow_flag(wmi_handle))
        pr_debug("%s: WMI event ID is 0x%x\n", __func__, id);
#endif

    if (id >= WMI_EVT_GRP_START_ID(WMI_GRP_START)) {
        /* Signed, so the -1 "not registered" sentinel compares without
         * relying on unsigned wrap-around. */
        int idx;

        idx = wmi_unified_get_event_handler_ix(wmi_handle, id);
        if (idx == -1) {
            pr_err("%s : event handler is not registered: event id 0x%x\n",
                   __func__, id);
            goto end;
        }
#ifdef WMI_INTERFACE_EVENT_LOGGING
        adf_os_spin_lock_bh(&wmi_handle->wmi_record_lock);
        /* Exclude 4 bytes of TLV header */
        WMI_EVENT_RECORD(id, ((u_int8_t *)data + 4));
        adf_os_spin_unlock_bh(&wmi_handle->wmi_record_lock);
#endif
        /* Call the WMI registered event handler */
        wmi_handle->event_handler[idx](wmi_handle->scn_handle,
                                       wmi_cmd_struct_ptr, len);
        goto end;
    }

    /* Events below the group range have no registered handler table entry;
     * dispatch the known ones explicitly. */
    switch (id) {
    default:
        pr_info("%s: Unhandled WMI event %d\n", __func__, id);
        break;
    case WMI_SERVICE_READY_EVENTID:
        pr_info("%s: WMI UNIFIED SERVICE READY event\n", __func__);
        wma_rx_service_ready_event(wmi_handle->scn_handle,
                                   wmi_cmd_struct_ptr);
        break;
    case WMI_READY_EVENTID:
        pr_info("%s: WMI UNIFIED READY event\n", __func__);
        wma_rx_ready_event(wmi_handle->scn_handle, wmi_cmd_struct_ptr);
        break;
    }
end:
    /* Common exit: release TLV scratch memory (no-op if NULL) and the nbuf */
    wmitlv_free_allocated_event_tlvs(id, &wmi_cmd_struct_ptr);
    adf_nbuf_free(evt_buf);
}
/**
 * __wmi_control_rx() - process one received WMI event buffer (cdf path)
 * @wmi_handle: WMI layer handle
 * @evt_buf: nbuf carrying a WMI_CMD_HDR followed by the TLV payload
 *
 * Reads the command id, strips the WMI header, validates/pads the event
 * TLVs, then dispatches either to a registered per-event handler or to
 * the fixed SERVICE_READY / SERVICE_READY_EXT / READY handlers.  The
 * nbuf and any TLV scratch allocation are always released via the
 * common "end" exit path.
 */
void __wmi_control_rx(struct wmi_unified *wmi_handle, wmi_buf_t evt_buf)
{
    uint32_t id;
    uint8_t *data;
    uint32_t len;
    void *wmi_cmd_struct_ptr = NULL;
    int tlv_ok_status = 0;

    id = WMI_GET_FIELD(cdf_nbuf_data(evt_buf), WMI_CMD_HDR, COMMANDID);

    /* Strip the WMI command header; bail out if the buffer is too short */
    if (cdf_nbuf_pull_head(evt_buf, sizeof(WMI_CMD_HDR)) == NULL)
        goto end;

    data = cdf_nbuf_data(evt_buf);
    len = cdf_nbuf_len(evt_buf);

    /* Validate and pad (if necessary) the TLVs */
    tlv_ok_status = wmitlv_check_and_pad_event_tlvs(wmi_handle->scn_handle,
                        data, len, id, &wmi_cmd_struct_ptr);
    if (tlv_ok_status != 0) {
        /* Fix: id is printed as hex, so use %x (was "0x%d", which
         * printed a decimal value behind a hex prefix). */
        pr_err("%s: Error: id=0x%x, wmitlv_check_and_pad_tlvs ret=%d\n",
               __func__, id, tlv_ok_status);
        goto end;
    }

    if ((id >= WMI_EVT_GRP_START_ID(WMI_GRP_START)) &&
        /* WMI_SERVICE_READY_EXT_EVENTID is supposed to be part of the
         * WMI_GRP_START group. Since the group is out of space, FW
         * has accomodated this in WMI_GRP_VDEV.
         * WMI_SERVICE_READY_EXT_EVENTID does not have any specific
         * event handler registered. So, we do not want to go through
         * the WMI registered event handler path for this event.
         */
        (id != WMI_SERVICE_READY_EXT_EVENTID)) {
        /* Signed, so the -1 "not registered" sentinel compares without
         * relying on unsigned wrap-around. */
        int idx;

        idx = wmi_unified_get_event_handler_ix(wmi_handle, id);
        if (idx == -1) {
            pr_err("%s : event handler is not registered: event id 0x%x\n",
                   __func__, id);
            goto end;
        }
#ifdef WMI_INTERFACE_EVENT_LOGGING
        cdf_spin_lock_bh(&wmi_handle->wmi_record_lock);
        /* Exclude 4 bytes of TLV header */
        if (id == WMI_MGMT_TX_COMPLETION_EVENTID) {
            WMI_MGMT_EVENT_RECORD(id, ((uint8_t *) data + 4));
        } else {
            WMI_EVENT_RECORD(id, ((uint8_t *) data + 4));
        }
        cdf_spin_unlock_bh(&wmi_handle->wmi_record_lock);
#endif
        /* Call the WMI registered event handler */
        wmi_handle->event_handler[idx] (wmi_handle->scn_handle,
                                        wmi_cmd_struct_ptr, len);
        goto end;
    }

    /* Events with no registered handler table entry */
    switch (id) {
    default:
        pr_info("%s: Unhandled WMI event %d\n", __func__, id);
        break;
    case WMI_SERVICE_READY_EVENTID:
        pr_info("%s: WMI UNIFIED SERVICE READY event\n", __func__);
        wma_rx_service_ready_event(wmi_handle->scn_handle,
                                   wmi_cmd_struct_ptr);
        break;
    case WMI_SERVICE_READY_EXT_EVENTID:
        WMA_LOGA("%s: WMI UNIFIED SERVICE READY Extended event",
                 __func__);
        wma_rx_service_ready_ext_event(wmi_handle->scn_handle,
                                       wmi_cmd_struct_ptr);
        break;
    case WMI_READY_EVENTID:
        pr_info("%s: WMI UNIFIED READY event\n", __func__);
        wma_rx_ready_event(wmi_handle->scn_handle, wmi_cmd_struct_ptr);
        break;
    }
end:
    /* Common exit: release TLV scratch memory (no-op if NULL) and the nbuf */
    wmitlv_free_allocated_event_tlvs(id, &wmi_cmd_struct_ptr);
    cdf_nbuf_free(evt_buf);
}
/*
 * Temporarily added to support older WMI events. We should move all events
 * to unified when the target is ready to support it.
 */
/**
 * wmi_control_rx() - HTC receive-completion callback for the WMI endpoint
 * @ctx: opaque WMI handle registered with HTC
 * @htc_packet: completed HTC packet; pPktContext holds the event nbuf
 *
 * TX_PAUSE and WOW-wakeup events must be handled immediately in this
 * (tasklet) context; all other events are queued on event_queue and
 * processed later by rx_event_work.
 *
 * Fixes over the previous version:
 *  - evt_buf was leaked on the early returns in the tasklet path
 *    (pull_head failure and TLV-validation failure); it is now freed.
 *  - id/data were declared only under the
 *    WMI_INTERFACE_EVENT_LOGGING/!QCA_CONFIG_SMP guard yet used
 *    unconditionally in the tasklet path, which failed to compile when
 *    the guard was false; they are now declared unconditionally.
 */
void wmi_control_rx(void *ctx, HTC_PACKET *htc_packet)
{
    struct wmi_unified *wmi_handle = (struct wmi_unified *)ctx;
    wmi_buf_t evt_buf;
    u_int32_t len;
    void *wmi_cmd_struct_ptr = NULL;
    u_int32_t idx = 0;
    int tlv_ok_status = 0;
    u_int32_t id;
    u_int8_t *data;

    evt_buf = (wmi_buf_t) htc_packet->pPktContext;
    id = WMI_GET_FIELD(adf_nbuf_data(evt_buf), WMI_CMD_HDR, COMMANDID);
    /* TX_PAUSE EVENT should be handled with tasklet context */
    if ((WMI_TX_PAUSE_EVENTID == id) ||
        (WMI_WOW_WAKEUP_HOST_EVENTID == id)) {
        if (adf_nbuf_pull_head(evt_buf, sizeof(WMI_CMD_HDR)) == NULL) {
            /* Buffer too short: drop it (previously leaked) */
            adf_nbuf_free(evt_buf);
            return;
        }

        data = adf_nbuf_data(evt_buf);
        len = adf_nbuf_len(evt_buf);
        tlv_ok_status = wmitlv_check_and_pad_event_tlvs(
                            wmi_handle->scn_handle,
                            data, len, id, &wmi_cmd_struct_ptr);
        if (tlv_ok_status != 0) {
            if (tlv_ok_status == 1) {
                /* Status 1: TLVs acceptable as-is; hand the raw
                 * payload to the handler. */
                wmi_cmd_struct_ptr = data;
            } else {
                /* Malformed event: drop the buffer (previously leaked) */
                adf_nbuf_free(evt_buf);
                return;
            }
        }

        idx = wmi_unified_get_event_handler_ix(wmi_handle, id);
        if (idx == -1) {
            wmitlv_free_allocated_event_tlvs(id, &wmi_cmd_struct_ptr);
            adf_nbuf_free(evt_buf);
            return;
        }
        wmi_handle->event_handler[idx](wmi_handle->scn_handle,
                                       wmi_cmd_struct_ptr, len);
        wmitlv_free_allocated_event_tlvs(id, &wmi_cmd_struct_ptr);
        adf_nbuf_free(evt_buf);
        return;
    }

#ifdef WMI_INTERFACE_EVENT_LOGGING
    id = WMI_GET_FIELD(adf_nbuf_data(evt_buf), WMI_CMD_HDR, COMMANDID);
    data = adf_nbuf_data(evt_buf);

    adf_os_spin_lock_bh(&wmi_handle->wmi_record_lock);
    /* Exclude 4 bytes of TLV header */
    WMI_RX_EVENT_RECORD(id, ((u_int8_t *)data + 4));
    adf_os_spin_unlock_bh(&wmi_handle->wmi_record_lock);
#endif
    /* Everything else is deferred to process context: queue the nbuf
     * (ownership transfers to the work handler) and kick the worker. */
    adf_os_spin_lock_bh(&wmi_handle->eventq_lock);
    adf_nbuf_queue_add(&wmi_handle->event_queue, evt_buf);
    adf_os_spin_unlock_bh(&wmi_handle->eventq_lock);
    schedule_work(&wmi_handle->rx_event_work);
}
/*
 * Helper function to validate the TLVs coming for an event/command and also
 * pad data to TLVs if necessary.
 * Walks the raw TLV stream in param_struc_ptr, checks each TLV's tag against
 * the expected attribute table for wmi_cmd_event_id, and fills the
 * wmitlv_cmd_param_info array (*wmi_cmd_struct_ptr) with a pointer and
 * element count per TLV.  Where the incoming structure size differs from the
 * expected size, elements are padded (zero-filled) or truncated, either into
 * a freshly allocated buffer (dynamic-alloc builds) or in place within the
 * incoming buffer (NO_DYNAMIC_MEM_ALLOC builds).
 * NOTE(review): the incoming buffer contents originate from firmware and are
 * only partially validated here — lengths read from the stream are trusted in
 * several places (see notes below).
 * Return 0 if success. <0 if failure.
 */
static int
wmitlv_check_and_pad_tlvs(void *os_handle, void *param_struc_ptr,
                          A_UINT32 param_buf_len, A_UINT32 is_cmd_id,
                          A_UINT32 wmi_cmd_event_id, void **wmi_cmd_struct_ptr)
{
    wmitlv_attributes_struc attr_struct_ptr;
    A_UINT32 buf_idx = 0;            /* byte offset of the current TLV in the stream */
    A_UINT32 tlv_index = 0;          /* ordinal of the current TLV (attribute-table order) */
    A_UINT32 num_of_elems = 0;
    int tlv_size_diff = 0;           /* incoming size minus expected size; sign picks pad vs truncate */
    A_UINT8 *buf_ptr = (unsigned char *)param_struc_ptr;
    wmitlv_cmd_param_info *cmd_param_tlvs_ptr = NULL;
    A_UINT32 remaining_expected_tlvs = 0xFFFFFFFF;
    A_UINT32 len_wmi_cmd_struct_buf;

    /* Get the number of TLVs for this command/event */
    if (wmitlv_get_attributes
        (is_cmd_id, wmi_cmd_event_id, WMITLV_GET_ATTRIB_NUM_TLVS,
         &attr_struct_ptr) != 0) {
        wmi_tlv_print_error
            ("%s: ERROR: Couldn't get expected number of TLVs for Cmd=%d\n",
             __func__, wmi_cmd_event_id);
        return -1;
    }

    /* NOTE: the returned number of TLVs is in "attr_struct_ptr.cmd_num_tlv" */
    /* Create base structure of format wmi_cmd_event_id##_param_tlvs */
    len_wmi_cmd_struct_buf =
        attr_struct_ptr.cmd_num_tlv * sizeof(wmitlv_cmd_param_info);
#ifndef NO_DYNAMIC_MEM_ALLOC
    /* Dynamic memory allocation supported */
    wmi_tlv_os_mem_alloc(os_handle, *wmi_cmd_struct_ptr,
                         len_wmi_cmd_struct_buf);
#else
    /* Dynamic memory allocation is not supported. Use the buffer
     * g_wmi_static_cmd_param_info_buf, which should be set using
     * wmi_tlv_set_static_param_tlv_buf(), for base structure of format
     * wmi_cmd_event_id##_param_tlvs */
    *wmi_cmd_struct_ptr = g_wmi_static_cmd_param_info_buf;
    if (attr_struct_ptr.cmd_num_tlv > g_wmi_static_max_cmd_param_tlvs) {
        /* Error: Expecting more TLVs that accomodated for static structure */
        wmi_tlv_print_error
            ("%s: Error: Expecting more TLVs that accomodated for static structure. Expected:%d Accomodated:%d\n",
             __func__, attr_struct_ptr.cmd_num_tlv,
             g_wmi_static_max_cmd_param_tlvs);
        return -1;
    }
#endif
    if (*wmi_cmd_struct_ptr == NULL) {
        /* Error: unable to alloc memory */
        wmi_tlv_print_error
            ("%s: Error: unable to alloc memory (size=%d) for TLV\n",
             __func__, len_wmi_cmd_struct_buf);
        return -1;
    }

    cmd_param_tlvs_ptr = (wmitlv_cmd_param_info *) *wmi_cmd_struct_ptr;
    wmi_tlv_OS_MEMZERO(cmd_param_tlvs_ptr, len_wmi_cmd_struct_buf);
    remaining_expected_tlvs = attr_struct_ptr.cmd_num_tlv;

    /* Main walk: one iteration per TLV, while there is at least a TLV
     * header's worth of buffer left and more TLVs are expected. */
    while (((buf_idx + WMI_TLV_HDR_SIZE) <= param_buf_len) &&
           (remaining_expected_tlvs)) {
        A_UINT32 curr_tlv_tag = WMITLV_GET_TLVTAG(WMITLV_GET_HDR(buf_ptr));
        A_UINT32 curr_tlv_len = WMITLV_GET_TLVLEN(WMITLV_GET_HDR(buf_ptr));
        int num_padding_bytes = 0;

        /* Get the attributes of the TLV with the given order in "tlv_index" */
        wmi_tlv_OS_MEMZERO(&attr_struct_ptr,
                           sizeof(wmitlv_attributes_struc));
        if (wmitlv_get_attributes
            (is_cmd_id, wmi_cmd_event_id, tlv_index,
             &attr_struct_ptr) != 0) {
            wmi_tlv_print_error
                ("%s: ERROR: No TLV attributes found for Cmd=%d Tag_order=%d\n",
                 __func__, wmi_cmd_event_id, tlv_index);
            goto Error_wmitlv_check_and_pad_tlvs;
        }

        /* Found the TLV that we wanted */
        wmi_tlv_print_verbose("%s: [tlv %d]: tag=%d, len=%d\n",
                              __func__, tlv_index, curr_tlv_tag,
                              curr_tlv_len);

        /* Validating Tag order: the stream must present TLVs in exactly
         * the order the attribute table declares them. */
        if (curr_tlv_tag != attr_struct_ptr.tag_id) {
            wmi_tlv_print_error
                ("%s: ERROR: TLV has wrong tag in order for Cmd=0x%x. Given=%d, Expected=%d.\n",
                 __func__, wmi_cmd_event_id, curr_tlv_tag,
                 attr_struct_ptr.tag_id);
            goto Error_wmitlv_check_and_pad_tlvs;
        }

        if ((curr_tlv_tag >= WMITLV_TAG_FIRST_ARRAY_ENUM) &&
            (curr_tlv_tag <= WMITLV_TAG_LAST_ARRAY_ENUM)) {
            /* Current Tag is an array of some kind. */
            /* Skip the TLV header of this array; from here on buf_ptr
             * points at the array payload. */
            buf_ptr += WMI_TLV_HDR_SIZE;
            buf_idx += WMI_TLV_HDR_SIZE;
        } else {
            /* Non-array TLV: the header is accounted into curr_tlv_len
             * instead of being skipped. */
            curr_tlv_len += WMI_TLV_HDR_SIZE;
        }

        if (attr_struct_ptr.tag_varied_size == WMITLV_SIZE_FIX) {
            /* This TLV is fixed length */
            if (WMITLV_ARR_SIZE_INVALID == attr_struct_ptr.tag_array_size) {
                /* Scalar (non-array) fixed-size TLV */
                tlv_size_diff =
                    curr_tlv_len - attr_struct_ptr.tag_struct_size;
                num_of_elems =
                    (curr_tlv_len > WMI_TLV_HDR_SIZE) ? 1 : 0;
            } else {
                /* Fixed-size array TLV with a known element count */
                tlv_size_diff =
                    curr_tlv_len -
                    (attr_struct_ptr.tag_struct_size *
                     attr_struct_ptr.tag_array_size);
                num_of_elems = attr_struct_ptr.tag_array_size;
            }
        } else {
            /* This TLV has a variable number of elements */
            if (WMITLV_TAG_ARRAY_STRUC == attr_struct_ptr.tag_id) {
                A_UINT32 in_tlv_len = 0;

                if (curr_tlv_len != 0) {
                    /* Element size is taken from the first inner TLV
                     * header.  NOTE(review): in_tlv_len is
                     * firmware-supplied; it is non-zero only because
                     * WMI_TLV_HDR_SIZE is added, and num_of_elems below
                     * trusts it — verify upstream bounds checking. */
                    in_tlv_len =
                        WMITLV_GET_TLVLEN(WMITLV_GET_HDR(buf_ptr));
                    in_tlv_len += WMI_TLV_HDR_SIZE;
                    tlv_size_diff =
                        in_tlv_len - attr_struct_ptr.tag_struct_size;
                    num_of_elems = curr_tlv_len / in_tlv_len;
                    wmi_tlv_print_verbose
                        ("%s: WARN: TLV array of structures in_tlv_len=%d struct_size:%d diff:%d num_of_elems=%d \n",
                         __func__, in_tlv_len,
                         attr_struct_ptr.tag_struct_size, tlv_size_diff,
                         num_of_elems);
                } else {
                    /* Empty array */
                    tlv_size_diff = 0;
                    num_of_elems = 0;
                }
            } else if ((WMITLV_TAG_ARRAY_UINT32 ==
                        attr_struct_ptr.tag_id) ||
                       (WMITLV_TAG_ARRAY_BYTE ==
                        attr_struct_ptr.tag_id) ||
                       (WMITLV_TAG_ARRAY_FIXED_STRUC ==
                        attr_struct_ptr.tag_id)) {
                /* Arrays of fixed-size elements never need padding */
                tlv_size_diff = 0;
                num_of_elems =
                    curr_tlv_len / attr_struct_ptr.tag_struct_size;
            } else {
                wmi_tlv_print_error
                    ("%s ERROR Need to handle this tag ID for variable length %d\n",
                     __func__, attr_struct_ptr.tag_id);
                goto Error_wmitlv_check_and_pad_tlvs;
            }
        }

        if ((WMITLV_TAG_ARRAY_STRUC == attr_struct_ptr.tag_id) &&
            (tlv_size_diff != 0)) {
            /* Array-of-structures whose element size differs from what
             * this host build expects: each element must be individually
             * padded or truncated. */
            void *new_tlv_buf = NULL;
            A_UINT8 *tlv_buf_ptr = NULL;
            A_UINT32 in_tlv_len;
            A_UINT32 i;

            if (attr_struct_ptr.tag_varied_size == WMITLV_SIZE_FIX) {
                /* This is not allowed. The tag WMITLV_TAG_ARRAY_STRUC
                 * can only be used with variable-length structure array
                 * should not have a fixed number of elements
                 * (contradicting). Use WMITLV_TAG_ARRAY_FIXED_STRUC tag
                 * for fixed size structure array (where structure never
                 * change without breaking compatibility) */
                wmi_tlv_print_error
                    ("%s: ERROR: TLV (tag=%d) should be variable-length and not fixed length\n",
                     __func__, curr_tlv_tag);
                goto Error_wmitlv_check_and_pad_tlvs;
            }

            /* Warning: Needs to allocate a larger structure and pad with
             * zeros */
            wmi_tlv_print_error
                ("%s: WARN: TLV array of structures needs padding. tlv_size_diff=%d\n",
                 __func__, tlv_size_diff);

            /* incoming structure length (re-read from the first inner
             * TLV header, same source as above) */
            in_tlv_len =
                WMITLV_GET_TLVLEN(WMITLV_GET_HDR(buf_ptr)) +
                WMI_TLV_HDR_SIZE;
#ifndef NO_DYNAMIC_MEM_ALLOC
            /* Dynamic-alloc build: copy each element into a fresh,
             * zeroed buffer of the expected element size. */
            wmi_tlv_os_mem_alloc(os_handle, new_tlv_buf,
                                 (num_of_elems *
                                  attr_struct_ptr.tag_struct_size));
            if (new_tlv_buf == NULL) {
                /* Error: unable to alloc memory */
                wmi_tlv_print_error
                    ("%s: Error: unable to alloc memory (size=%d) for padding the TLV array %d\n",
                     __func__,
                     (num_of_elems * attr_struct_ptr.tag_struct_size),
                     curr_tlv_tag);
                goto Error_wmitlv_check_and_pad_tlvs;
            }
            wmi_tlv_OS_MEMZERO(new_tlv_buf,
                               (num_of_elems *
                                attr_struct_ptr.tag_struct_size));
            tlv_buf_ptr = (A_UINT8 *) new_tlv_buf;
            for (i = 0; i < num_of_elems; i++) {
                if (tlv_size_diff > 0) {
                    /* Incoming structure size is greater than expected
                     * structure size. so copy the number of bytes equal
                     * to expected structure size */
                    wmi_tlv_OS_MEMCPY(tlv_buf_ptr,
                                      (void *)(buf_ptr +
                                               i * in_tlv_len),
                                      attr_struct_ptr.tag_struct_size);
                } else {
                    /* Incoming structure size is smaller than expected
                     * structure size. so copy the number of bytes equal
                     * to incoming structure size (other bytes would be
                     * zeroes) */
                    wmi_tlv_OS_MEMCPY(tlv_buf_ptr,
                                      (void *)(buf_ptr +
                                               i * in_tlv_len),
                                      in_tlv_len);
                }
                tlv_buf_ptr += attr_struct_ptr.tag_struct_size;
            }
#else
            /* Static build: rearrange elements in place inside the
             * incoming buffer.  NOTE(review): the in-place MEMMOVEs
             * assume the caller's buffer has room to grow past
             * param_buf_len in the padding case — confirm against the
             * buffer-sizing contract of the static-alloc build. */
            {
                A_UINT8 *src_addr;
                A_UINT8 *dst_addr;
                A_UINT32 buf_mov_len;

                if (tlv_size_diff < 0) {
                    /* Incoming structure size is smaller than expected
                     * size then this needs padding for each element in
                     * the array */

                    /* Find amount of bytes to be padded for one
                     * element */
                    num_padding_bytes = tlv_size_diff * -1;

                    /* Move subsequent TLVs by number of bytes to be
                     * padded for all elements */
                    if (param_buf_len > (buf_idx + curr_tlv_len)) {
                        src_addr = buf_ptr + curr_tlv_len;
                        dst_addr =
                            buf_ptr + curr_tlv_len +
                            (num_padding_bytes * num_of_elems);
                        buf_mov_len =
                            param_buf_len - (buf_idx + curr_tlv_len);

                        wmi_tlv_OS_MEMMOVE(dst_addr, src_addr,
                                           buf_mov_len);
                    }

                    /* Move subsequent elements of array down by number
                     * of bytes to be padded for one element and alse set
                     * padding bytes to zero */
                    tlv_buf_ptr = buf_ptr;
                    for (i = 0; i < num_of_elems; i++) {
                        src_addr = tlv_buf_ptr + in_tlv_len;
                        if (i != (num_of_elems - 1)) {
                            /* Need not move anything for last element
                             * in the array */
                            dst_addr =
                                tlv_buf_ptr + in_tlv_len +
                                num_padding_bytes;
                            buf_mov_len =
                                curr_tlv_len - ((i + 1) * in_tlv_len);

                            wmi_tlv_OS_MEMMOVE(dst_addr, src_addr,
                                               buf_mov_len);
                        }

                        /* Set the padding bytes to zeroes */
                        wmi_tlv_OS_MEMZERO(src_addr,
                                           num_padding_bytes);

                        tlv_buf_ptr +=
                            attr_struct_ptr.tag_struct_size;
                    }

                    /* Update the number of padding bytes to total number
                     * of bytes padded for all elements in the array */
                    num_padding_bytes =
                        num_padding_bytes * num_of_elems;

                    new_tlv_buf = buf_ptr;
                } else {
                    /* Incoming structure size is greater than expected
                     * size then this needs shrinking for each element in
                     * the array */

                    /* Find amount of bytes to be shrinked for one
                     * element.  NOTE(review): tlv_size_diff > 0 here, so
                     * num_padding_bytes is negative and the "moves" below
                     * shift data toward lower addresses — the later
                     * buf_ptr += curr_tlv_len + num_padding_bytes then
                     * advances by the shrunken size. */
                    num_padding_bytes = tlv_size_diff * -1;

                    /* Move subsequent elements of array up by number of
                     * bytes to be shrinked for one element */
                    tlv_buf_ptr = buf_ptr;
                    for (i = 0; i < (num_of_elems - 1); i++) {
                        src_addr = tlv_buf_ptr + in_tlv_len;
                        dst_addr =
                            tlv_buf_ptr + in_tlv_len +
                            num_padding_bytes;
                        buf_mov_len =
                            curr_tlv_len - ((i + 1) * in_tlv_len);

                        wmi_tlv_OS_MEMMOVE(dst_addr, src_addr,
                                           buf_mov_len);

                        tlv_buf_ptr +=
                            attr_struct_ptr.tag_struct_size;
                    }

                    /* Move subsequent TLVs by number of bytes to be
                     * shrinked for all elements */
                    if (param_buf_len > (buf_idx + curr_tlv_len)) {
                        src_addr = buf_ptr + curr_tlv_len;
                        dst_addr =
                            buf_ptr + curr_tlv_len +
                            (num_padding_bytes * num_of_elems);
                        buf_mov_len =
                            param_buf_len - (buf_idx + curr_tlv_len);

                        wmi_tlv_OS_MEMMOVE(dst_addr, src_addr,
                                           buf_mov_len);
                    }

                    /* Update the number of padding bytes to total number
                     * of bytes shrinked for all elements in the array */
                    num_padding_bytes =
                        num_padding_bytes * num_of_elems;

                    new_tlv_buf = buf_ptr;
                }
            }
#endif
            cmd_param_tlvs_ptr[tlv_index].tlv_ptr = new_tlv_buf;
            cmd_param_tlvs_ptr[tlv_index].num_elements = num_of_elems;
            cmd_param_tlvs_ptr[tlv_index].buf_is_allocated = 1;    /* Indicates that buffer is allocated */
        } else if (tlv_size_diff >= 0) {
            /* Incoming TLV is the expected size or larger: point
             * directly into the stream (no copy). */
            /* Warning: some parameter truncation */
            if (tlv_size_diff > 0) {
                wmi_tlv_print_verbose
                    ("%s: WARN: TLV truncated. tlv_size_diff=%d, curr_tlv_len=%d\n",
                     __func__, tlv_size_diff, curr_tlv_len);
            }
            /* TODO: this next line needs more comments and explanation.
             * A variable-size TLV with zero payload yields a NULL
             * pointer rather than a pointer to an empty region. */
            cmd_param_tlvs_ptr[tlv_index].tlv_ptr =
                (attr_struct_ptr.tag_varied_size && !curr_tlv_len) ?
                NULL : (void *)buf_ptr;
            cmd_param_tlvs_ptr[tlv_index].num_elements = num_of_elems;
            cmd_param_tlvs_ptr[tlv_index].buf_is_allocated = 0;    /* Indicates that buffer is not allocated */
        } else {
            /* Incoming non-array TLV is smaller than expected: pad a
             * single structure up to the expected size. */
            void *new_tlv_buf = NULL;

            /* Warning: Needs to allocate a larger structure and pad with
             * zeros */
            wmi_tlv_print_verbose
                ("%s: WARN: TLV needs padding. tlv_size_diff=%d\n",
                 __func__, tlv_size_diff);
#ifndef NO_DYNAMIC_MEM_ALLOC
            /* Dynamic memory allocation is supported */
            wmi_tlv_os_mem_alloc(os_handle, new_tlv_buf,
                                 (curr_tlv_len - tlv_size_diff));
            if (new_tlv_buf == NULL) {
                /* Error: unable to alloc memory */
                wmi_tlv_print_error
                    ("%s: Error: unable to alloc memory (size=%d) for padding the TLV %d\n",
                     __func__, (curr_tlv_len - tlv_size_diff),
                     curr_tlv_tag);
                goto Error_wmitlv_check_and_pad_tlvs;
            }
            wmi_tlv_OS_MEMZERO(new_tlv_buf,
                               (curr_tlv_len - tlv_size_diff));
            wmi_tlv_OS_MEMCPY(new_tlv_buf, (void *)buf_ptr,
                              curr_tlv_len);
#else
            /* Dynamic memory allocation is not supported. Padding has to
             * be done with in the existing buffer assuming we have
             * enough space to grow */
            {
                /* Note: tlv_size_diff is a value less than zero */
                /* Move the Subsequent TLVs by amount of bytes needs to
                 * be padded */
                A_UINT8 *src_addr;
                A_UINT8 *dst_addr;
                A_UINT32 src_len;

                num_padding_bytes = (tlv_size_diff * -1);

                src_addr = buf_ptr + curr_tlv_len;
                dst_addr = buf_ptr + curr_tlv_len + num_padding_bytes;
                src_len = param_buf_len - (buf_idx + curr_tlv_len);

                wmi_tlv_OS_MEMMOVE(dst_addr, src_addr, src_len);

                /* Set the padding bytes to zeroes */
                wmi_tlv_OS_MEMZERO(src_addr, num_padding_bytes);

                new_tlv_buf = buf_ptr;
            }
#endif
            cmd_param_tlvs_ptr[tlv_index].tlv_ptr = new_tlv_buf;
            cmd_param_tlvs_ptr[tlv_index].num_elements = num_of_elems;
            cmd_param_tlvs_ptr[tlv_index].buf_is_allocated = 1;    /* Indicates that buffer is allocated */
        }

        /* Advance past this TLV (including any in-place padding) */
        tlv_index++;
        remaining_expected_tlvs--;
        buf_ptr += curr_tlv_len + num_padding_bytes;
        buf_idx += curr_tlv_len + num_padding_bytes;
    }

    return (0);

Error_wmitlv_check_and_pad_tlvs:
    /* Release any per-TLV buffers allocated so far and reset the caller's
     * pointer so a failed parse leaves no dangling state. */
    if (is_cmd_id) {
        wmitlv_free_allocated_command_tlvs(wmi_cmd_event_id,
                                           wmi_cmd_struct_ptr);
    } else {
        wmitlv_free_allocated_event_tlvs(wmi_cmd_event_id,
                                         wmi_cmd_struct_ptr);
    }
    *wmi_cmd_struct_ptr = NULL;
    return (-1);
}