int ext_gpio_request(int gpio_no)
{
    mutex_lock(&ext_mutex_lock);
    ASSERT(ext_epub != NULL);

    if (atomic_read(&ext_epub->sip->state) != SIP_RUN) {
        mutex_unlock(&ext_mutex_lock);
        esp_dbg(ESP_DBG_ERROR, "%s esp state is not ok\n", __func__);
        return -3;
    }

    if (gpio_no >= EXT_GPIO_MAX_NUM || gpio_no < 0) {
        mutex_unlock(&ext_mutex_lock);
        esp_dbg(ESP_DBG_ERROR, "%s unknown gpio num\n", __func__);
        return -1;
    }

    if (gpio_list[gpio_no].gpio_mode != EXT_GPIO_MODE_DISABLE) {
        mutex_unlock(&ext_mutex_lock);
        esp_dbg(ESP_DBG_ERROR, "%s gpio is already in use\n", __func__);
        return -2;
    } else {
        /* mark the pin as requested but not yet configured */
        gpio_list[gpio_no].gpio_mode = EXT_GPIO_MODE_MAX;
        mutex_unlock(&ext_mutex_lock);
        return 0;
    }
}
void sif_disable_irq(struct esp_pub *epub)
{
    int err;
    struct esp_sdio_ctrl *sctrl = (struct esp_sdio_ctrl *)epub->sif;
    int i = 0;

    if (atomic_read(&sctrl->irq_installed) == 0)
        return;

    sdio_claim_host(sctrl->func);

    /* wait for a running irq handler to finish, give up after 400 tries */
    while (atomic_read(&sctrl->irq_handling)) {
        sdio_release_host(sctrl->func);
        schedule_timeout(HZ / 100);
        sdio_claim_host(sctrl->func);

        if (i++ >= 400) {
            esp_dbg(ESP_DBG_ERROR, "%s force to stop irq\n", __func__);
            break;
        }
    }

    err = sdio_release_irq(sctrl->func);
    if (err) {
        esp_dbg(ESP_DBG_ERROR, "%s release irq failed\n", __func__);
    }

    atomic_set(&sctrl->irq_installed, 0);
    sdio_release_host(sctrl->func);
}
int ext_irq_ack(int gpio_no)
{
    int ret;

    mutex_lock(&ext_mutex_lock);

    if (gpio_no >= EXT_GPIO_MAX_NUM || gpio_no < 0) {
        esp_dbg(ESP_DBG_ERROR, "%s unknown gpio num\n", __func__);
        mutex_unlock(&ext_mutex_lock);
        return -1;
    }

    if (gpio_list[gpio_no].gpio_mode != EXT_GPIO_MODE_INTR_POSEDGE &&
        gpio_list[gpio_no].gpio_mode != EXT_GPIO_MODE_INTR_NEGEDGE &&
        gpio_list[gpio_no].gpio_mode != EXT_GPIO_MODE_INTR_LOLEVEL &&
        gpio_list[gpio_no].gpio_mode != EXT_GPIO_MODE_INTR_HILEVEL) {
        esp_dbg(ESP_DBG_ERROR, "%s gpio mode is not intr mode\n", __func__);
        mutex_unlock(&ext_mutex_lock);
        return -2;
    }

    sif_lock_bus(ext_epub);
    sif_raw_dummy_read(ext_epub);
    ret = sif_set_gpio_output(ext_epub, 0x00, 1 << gpio_no);
    sif_unlock_bus(ext_epub);

    if (ret) {
        esp_dbg(ESP_DBG_ERROR, "%s gpio intr ack error\n", __func__);
        mutex_unlock(&ext_mutex_lock);
        return ret;
    }

    mutex_unlock(&ext_mutex_lock);
    return 0;
}
int ext_gpio_get_state(int gpio_no)
{
    int ret;
    u16 state;
    u16 mask;

    mutex_lock(&ext_mutex_lock);

    if (gpio_no >= EXT_GPIO_MAX_NUM || gpio_no < 0) {
        esp_dbg(ESP_DBG_ERROR, "%s unknown gpio num\n", __func__);
        mutex_unlock(&ext_mutex_lock);
        return -1;
    }

    if (gpio_list[gpio_no].gpio_mode == EXT_GPIO_MODE_OUTPUT) {
        state = gpio_list[gpio_no].gpio_state;
    } else if (gpio_list[gpio_no].gpio_mode == EXT_GPIO_MODE_INPUT) {
        sif_lock_bus(ext_epub);
        sif_raw_dummy_read(ext_epub);
        ret = sif_get_gpio_input(ext_epub, &mask, &state);
        sif_unlock_bus(ext_epub);
        if (ret) {
            esp_dbg(ESP_DBG_ERROR, "%s get gpio_input state error\n", __func__);
            mutex_unlock(&ext_mutex_lock);
            return ret;
        }
    } else {
        esp_dbg(ESP_DBG_ERROR, "%s gpio mode is not input or output\n", __func__);
        mutex_unlock(&ext_mutex_lock);
        return -2;
    }

    mutex_unlock(&ext_mutex_lock);
    return (state & (1 << gpio_no)) ? 1 : 0;
}
void show_status(void)
{
    int i = 0;

    for (i = 0; i < MAX_PENDING_INTR_LIST; i++)
        esp_dbg(ESP_DBG_ERROR, "status[%d] = [0x%04x]\n", i,
                esp_pending_intr_list.pending_intr_list[i]);

    esp_dbg(ESP_DBG_ERROR, "start_pos[%d]\n", esp_pending_intr_list.start_pos);
    esp_dbg(ESP_DBG_ERROR, "end_pos[%d]\n", esp_pending_intr_list.end_pos);
    esp_dbg(ESP_DBG_ERROR, "curr_num[%d]\n", esp_pending_intr_list.curr_num);
}
static void esp_tx_ba_session_op(struct esp_sip *sip, struct esp_node *node,
                                 trc_ampdu_state_t state, u8 tid)
{
    struct esp_tx_tid *txtid;

    txtid = &node->tid[tid];

    if (state == TRC_TX_AMPDU_STOPPED) {
        if (txtid->state == ESP_TID_STATE_OPERATIONAL) {
            esp_dbg(ESP_DBG_TXAMPDU, "%s tid %d TXAMPDU GOT STOP EVT\n", __func__, tid);

            spin_lock_bh(&sip->epub->tx_ampdu_lock);
            txtid->state = ESP_TID_STATE_WAIT_STOP;
            spin_unlock_bh(&sip->epub->tx_ampdu_lock);

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 28))
            ieee80211_stop_tx_ba_session(sip->epub->hw, node->addr, (u16)tid, WLAN_BACK_INITIATOR);
#elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 32))
            ieee80211_stop_tx_ba_session(sip->epub->hw, node->sta->addr, (u16)tid, WLAN_BACK_INITIATOR);
#elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 35))
            ieee80211_stop_tx_ba_session(node->sta, (u16)tid, WLAN_BACK_INITIATOR);
#else
            ieee80211_stop_tx_ba_session(node->sta, (u16)tid);
#endif /* KERNEL_VERSION 2.6.39 */
        } else {
            esp_dbg(ESP_DBG_TXAMPDU, "%s tid %d TXAMPDU GOT STOP EVT IN WRONG STATE %d\n", __func__, tid, txtid->state);
        }
    } else if (state == TRC_TX_AMPDU_OPERATIONAL) {
        if (txtid->state == ESP_TID_STATE_STOP) {
            esp_dbg(ESP_DBG_TXAMPDU, "%s tid %d TXAMPDU GOT OPERATIONAL\n", __func__, tid);

            spin_lock_bh(&sip->epub->tx_ampdu_lock);
            txtid->state = ESP_TID_STATE_TRIGGER;
            spin_unlock_bh(&sip->epub->tx_ampdu_lock);

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 28))
            ieee80211_start_tx_ba_session(sip->epub->hw, node->addr, tid);
#elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 32))
            ieee80211_start_tx_ba_session(sip->epub->hw, node->sta->addr, tid);
#elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 37))
            ieee80211_start_tx_ba_session(node->sta, (u16)tid);
#else
            ieee80211_start_tx_ba_session(node->sta, (u16)tid, 0);
#endif /* KERNEL_VERSION 2.6.39 */
        } else if (txtid->state == ESP_TID_STATE_OPERATIONAL) {
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 28))
            sip_send_ampdu_action(sip->epub, SIP_AMPDU_TX_OPERATIONAL, node->addr, tid, node->ifidx, 0);
#else
            sip_send_ampdu_action(sip->epub, SIP_AMPDU_TX_OPERATIONAL, node->sta->addr, tid, node->ifidx, 0);
#endif
        } else {
            esp_dbg(ESP_DBG_TXAMPDU, "%s tid %d TXAMPDU GOT OPERATIONAL EVT IN WRONG STATE %d\n", __func__, tid, txtid->state);
        }
    }
}
void sip_send_chip_init(struct esp_sip *sip)
{
    size_t size = 0;
#ifndef HAS_INIT_DATA
    const struct firmware *fw_entry;
    u8 *esp_init_data = NULL;
    int ret = 0;

#ifdef ANDROID
    ret = android_request_firmware(&fw_entry, ESP_INIT_NAME, sip->epub->dev);
#else
    ret = request_firmware(&fw_entry, ESP_INIT_NAME, sip->epub->dev);
#endif /* ANDROID */

    if (ret) {
        esp_dbg(ESP_DBG_ERROR, "%s =============ERROR! NO INIT DATA!!=================\n", __func__);
        return;
    }

    esp_init_data = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);
    size = fw_entry->size;

#ifdef ANDROID
    android_release_firmware(fw_entry);
#else
    release_firmware(fw_entry);
#endif /* ANDROID */

    if (esp_init_data == NULL) {
        esp_dbg(ESP_DBG_ERROR, "%s =============ERROR! NO MEMORY!!=================\n", __func__);
        return;
    }
#else
    size = sizeof(esp_init_data);
#endif /* !HAS_INIT_DATA */

#ifdef ANDROID
    //show_init_buf(esp_init_data,size);
    fix_init_data(esp_init_data, size);
    //show_init_buf(esp_init_data,size);
#endif

    atomic_sub(1, &sip->tx_credits);
    sip_send_cmd(sip, SIP_CMD_INIT, size, (void *)esp_init_data);

#ifndef HAS_INIT_DATA
    kfree(esp_init_data);
#endif /* !HAS_INIT_DATA */
}
static int esdio_power_on(struct esp_sdio_ctrl *sctrl)
{
    int err = 0;

    if (sctrl->off == false)
        return err;

    sdio_claim_host(sctrl->func);
    err = sdio_enable_func(sctrl->func);

    if (err) {
        esp_dbg(ESP_DBG_ERROR, "Unable to enable sdio func: %d\n", err);
        sdio_release_host(sctrl->func);
        return err;
    }

    sdio_release_host(sctrl->func);

    /* ensure device is up */
    msleep(5);

    sctrl->off = false;

    return err;
}
int sip_send_ps_config(struct esp_pub *epub, struct esp_ps *ps)
{
    struct sip_cmd_ps *pscmd = NULL;
    struct sk_buff *skb = NULL;
    struct sip_hdr *shdr = NULL;

    skb = sip_alloc_ctrl_skbuf(epub->sip, sizeof(struct sip_cmd_ps) + sizeof(struct sip_hdr), SIP_CMD_PS);
    if (!skb)
        return -1;

    shdr = (struct sip_hdr *)skb->data;
    pscmd = (struct sip_cmd_ps *)(skb->data + sizeof(struct sip_hdr));

    pscmd->dtim_period = ps->dtim_period;
    pscmd->max_sleep_period = ps->max_sleep_period;

#if 0
    if (atomic_read(&ps->state) == ESP_PM_TURNING_ON) {
        pscmd->on = 1;
        SIP_HDR_SET_PM_TURNING_ON(shdr);
    } else if (atomic_read(&ps->state) == ESP_PM_TURNING_OFF) {
        pscmd->on = 0;
        SIP_HDR_SET_PM_TURNING_OFF(shdr);
    } else {
        esp_dbg(ESP_DBG_ERROR, "%s PM WRONG STATE %d\n", __func__, atomic_read(&ps->state));
        ASSERT(0);
    }
#endif

    return sip_cmd_enqueue(epub->sip, skb);
}
void esp_tx_work(struct work_struct *work)
{
    int i;
    u16 tmp_intr_status_reg;

    esp_dbg(ESP_DBG_TRACE, "%s enter\n", __func__);

    spin_lock(&esp_pending_intr_list.spin_lock);

    /* assert cycle queue is not empty */
    ASSERT(esp_pending_intr_list.curr_num > 0);

    /* pop the oldest pending interrupt status word */
    tmp_intr_status_reg = esp_pending_intr_list.pending_intr_list[esp_pending_intr_list.start_pos];
    esp_pending_intr_list.pending_intr_list[esp_pending_intr_list.start_pos] = 0x0000;
    esp_pending_intr_list.start_pos = (esp_pending_intr_list.start_pos + 1) % MAX_PENDING_INTR_LIST;
    esp_pending_intr_list.curr_num--;

    spin_unlock(&esp_pending_intr_list.spin_lock);

    /* dispatch to the registered handler of each asserted gpio */
    for (i = 0; i < EXT_GPIO_MAX_NUM; i++) {
        if (tmp_intr_status_reg & (1 << i) && (gpio_list[i].irq_handler))
            gpio_list[i].irq_handler();
    }

    spin_lock(&esp_pending_intr_list.spin_lock);
    if (esp_pending_intr_list.curr_num > 0)
        queue_work(ext_irq_wkq, &ext_irq_work);
    spin_unlock(&esp_pending_intr_list.spin_lock);
}
static int esp_sdio_dummy_probe(struct sdio_func *func, const struct sdio_device_id *id)
{
    esp_dbg(ESP_DBG_ERROR, "%s enter\n", __func__);
    up(&esp_powerup_sem);
    return 0;
}
void esp_debugfs_exit(void)
{
    esp_dbg(ESP_DBG, "esp debugfs exit\n");
    debugfs_remove_recursive(esp_debugfs_root);
}
int esp_debugfs_init(void)
{
    esp_dbg(ESP_DBG, "esp debugfs init\n");

    esp_debugfs_root = debugfs_create_dir("esp_debug", NULL);
    if (IS_ERR_OR_NULL(esp_debugfs_root))
        return -ENOENT;

    return 0;
}
void sip_scandone_process(struct esp_sip *sip, struct sip_evt_scan_report *scan_report)
{
    struct esp_pub *epub = sip->epub;

    esp_dbg(ESP_DBG_TRACE, "eagle hw scan report\n");

    if (epub->wl.scan_req) {
        hw_scan_done(epub, scan_report->aborted);
        epub->wl.scan_req = NULL;
    }
}
void ext_gpio_deinit(void)
{
    esp_dbg(ESP_DBG_LOG, "%s enter\n", __func__);

    ext_epub = NULL;
#ifdef EXT_GPIO_OPS
    unregister_ext_gpio_ops();
#endif
    cancel_work_sync(&ext_irq_work);
    destroy_workqueue(ext_irq_wkq);
}
int ext_gpio_set_output_state(int gpio_no, int state)
{
    int ret;

    mutex_lock(&ext_mutex_lock);

    if (gpio_no >= EXT_GPIO_MAX_NUM || gpio_no < 0) {
        mutex_unlock(&ext_mutex_lock);
        esp_dbg(ESP_DBG_ERROR, "%s unknown gpio num\n", __func__);
        return -1;
    }

    if (gpio_list[gpio_no].gpio_mode != EXT_GPIO_MODE_OUTPUT) {
        mutex_unlock(&ext_mutex_lock);
        esp_dbg(ESP_DBG_ERROR, "%s gpio is not in output mode, request the gpio and set output mode first\n", __func__);
        return -2;
    }

    if (state != EXT_GPIO_STATE_LOW && state != EXT_GPIO_STATE_HIGH) {
        mutex_unlock(&ext_mutex_lock);
        esp_dbg(ESP_DBG_ERROR, "%s gpio state unknown\n", __func__);
        return -3;
    }

    sif_lock_bus(ext_epub);
    sif_raw_dummy_read(ext_epub);
    ret = sif_set_gpio_output(ext_epub, 1 << gpio_no, state << gpio_no);
    sif_unlock_bus(ext_epub);

    if (ret) {
        esp_dbg(ESP_DBG_ERROR, "%s gpio state set error\n", __func__);
        mutex_unlock(&ext_mutex_lock);
        return ret;
    }

    gpio_list[gpio_no].gpio_state = state;
    mutex_unlock(&ext_mutex_lock);
    return 0;
}
int sip_send_config(struct esp_pub *epub, struct ieee80211_conf *conf)
{
    struct sk_buff *skb = NULL;
    struct sip_cmd_config *configcmd;

    skb = sip_alloc_ctrl_skbuf(epub->sip, sizeof(struct sip_cmd_config) + sizeof(struct sip_hdr), SIP_CMD_CONFIG);
    if (!skb)
        return -EINVAL;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0))
    esp_dbg(ESP_DBG_TRACE, "%s config center freq %d\n", __func__, conf->chandef.chan->center_freq);
#else
    esp_dbg(ESP_DBG_TRACE, "%s config center freq %d\n", __func__, conf->channel->center_freq);
#endif

    configcmd = (struct sip_cmd_config *)(skb->data + sizeof(struct sip_hdr));
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0))
    configcmd->center_freq = conf->chandef.chan->center_freq;
#else
    configcmd->center_freq = conf->channel->center_freq;
#endif
    configcmd->duration = 0;

    return sip_cmd_enqueue(epub->sip, skb);
}
int ext_gpio_init(struct esp_pub *epub)
{
    esp_dbg(ESP_DBG_LOG, "%s enter\n", __func__);

    ext_epub = epub;
    ASSERT(ext_epub != NULL);

    ext_irq_wkq = create_singlethread_workqueue("esp_ext_irq_wkq");
    if (ext_irq_wkq == NULL) {
        esp_dbg(ESP_DBG_ERROR, "%s create workqueue error\n", __func__);
        return -2;
    }

#ifdef EXT_GPIO_OPS
    register_ext_gpio_ops(&ext_gpio_ops);
#endif

    INIT_WORK(&ext_irq_work, esp_tx_work);
    mutex_init(&ext_mutex_lock);

    return 0;
}
int ext_gpio_release(int gpio_no)
{
    int ret;

    mutex_lock(&ext_mutex_lock);
    ASSERT(ext_epub != NULL);

    if (atomic_read(&ext_epub->sip->state) != SIP_RUN) {
        mutex_unlock(&ext_mutex_lock);
        esp_dbg(ESP_DBG_ERROR, "%s esp state is not ok\n", __func__);
        return -3;
    }

    if (gpio_no >= EXT_GPIO_MAX_NUM || gpio_no < 0) {
        mutex_unlock(&ext_mutex_lock);
        esp_dbg(ESP_DBG_ERROR, "%s unknown gpio num\n", __func__);
        return -1;
    }

    sif_lock_bus(ext_epub);
    sif_raw_dummy_read(ext_epub);
    ret = sif_config_gpio_mode(ext_epub, (u8)gpio_no, EXT_GPIO_MODE_DISABLE);
    sif_unlock_bus(ext_epub);

    if (ret) {
        esp_dbg(ESP_DBG_ERROR, "%s gpio release error\n", __func__);
        mutex_unlock(&ext_mutex_lock);
        return ret;
    }

    gpio_list[gpio_no].gpio_mode = EXT_GPIO_MODE_DISABLE;
    gpio_list[gpio_no].gpio_state = EXT_GPIO_STATE_IDLE;
    gpio_list[gpio_no].irq_handler = NULL;
    intr_mask_reg &= ~(1 << gpio_no);

    mutex_unlock(&ext_mutex_lock);
    return 0;
}
struct dentry *esp_debugfs_add_sub_dir(const char *name)
{
    struct dentry *sub_dir = NULL;

    sub_dir = debugfs_create_dir(name, esp_debugfs_root);
    if (!sub_dir)
        goto Fail;

    return sub_dir;

Fail:
    debugfs_remove_recursive(esp_debugfs_root);
    esp_debugfs_root = NULL;
    esp_dbg(ESP_DBG_ERROR, "%s failed, debugfs root removed; dir name: %s\n", __FUNCTION__, name);
    return NULL;
}
int ext_gpio_get_mode(int gpio_no)
{
    int gpio_mode;

    mutex_lock(&ext_mutex_lock);

    if (gpio_no >= EXT_GPIO_MAX_NUM || gpio_no < 0) {
        esp_dbg(ESP_DBG_ERROR, "%s unknown gpio num\n", __func__);
        mutex_unlock(&ext_mutex_lock);
        return -1;
    }

    gpio_mode = gpio_list[gpio_no].gpio_mode;

    mutex_unlock(&ext_mutex_lock);
    return gpio_mode;
}
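/*
 * Illustrative sketch, not part of the driver: one possible caller-side use
 * of the external GPIO API above (ext_gpio_request / ext_gpio_get_mode /
 * ext_gpio_set_output_state / ext_gpio_release). It assumes the pin has been
 * put into EXT_GPIO_MODE_OUTPUT by a mode-configuration call that is not
 * shown in this section; return conventions follow the functions above
 * (0 on success, negative on failure).
 */
static int example_blink_once(int gpio_no)
{
    int ret;

    ret = ext_gpio_request(gpio_no);            /* reserve the pin */
    if (ret)
        return ret;

    /* only drive the pin if it is actually configured as an output */
    if (ext_gpio_get_mode(gpio_no) == EXT_GPIO_MODE_OUTPUT) {
        ret = ext_gpio_set_output_state(gpio_no, EXT_GPIO_STATE_HIGH);
        if (ret == 0)
            ret = ext_gpio_set_output_state(gpio_no, EXT_GPIO_STATE_LOW);
    }

    ext_gpio_release(gpio_no);                  /* always give the pin back */
    return ret;
}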
static void /*__exit*/ esp_sdio_exit(void)
{
    esp_dbg(ESP_SHOW, "%s \n", __func__);

    esp_debugfs_exit();
    esp_unregister_early_suspend();
    sdio_unregister_driver(&esp_sdio_driver);
    sif_platform_rescan_card(0);

#ifndef FPGA_DEBUG
    sif_platform_target_poweroff();
#endif /* !FPGA_DEBUG */

    esp_wakelock_destroy();
}
void sif_enable_irq(struct esp_pub *epub)
{
    int err;
    struct esp_sdio_ctrl *sctrl = NULL;

    sctrl = (struct esp_sdio_ctrl *)epub->sif;

    sdio_claim_host(sctrl->func);

    err = sdio_claim_irq(sctrl->func, sif_dsr);
    if (err)
        esp_dbg(ESP_DBG_ERROR, "sif %s failed\n", __func__);

    atomic_set(&epub->sip->state, SIP_BOOT);
    atomic_set(&sctrl->irq_installed, 1);

    sdio_release_host(sctrl->func);
}
struct dentry *esp_dump_var(const char *name, struct dentry *parent, void *value, esp_type type)
{
    struct dentry *rc = NULL;
    umode_t mode = 0644;

    if (!esp_debugfs_root)
        return NULL;

    if (!parent)
        parent = esp_debugfs_root;

    switch (type) {
    case ESP_U8:
        rc = debugfs_create_u8(name, mode, parent, (u8 *)value);
        break;
    case ESP_U16:
        rc = debugfs_create_u16(name, mode, parent, (u16 *)value);
        break;
    case ESP_U32:
        rc = debugfs_create_u32(name, mode, parent, (u32 *)value);
        break;
    case ESP_U64:
        rc = debugfs_create_u64(name, mode, parent, (u64 *)value);
        break;
    case ESP_BOOL:
        rc = debugfs_create_bool(name, mode, parent, (u32 *)value);
        break;
    default: /* fall back to u32 */
        rc = debugfs_create_u32(name, mode, parent, (u32 *)value);
    }

    if (!rc)
        goto Fail;
    else
        return rc;

Fail:
    debugfs_remove_recursive(esp_debugfs_root);
    esp_debugfs_root = NULL;
    esp_dbg(ESP_DBG_ERROR, "%s failed, debugfs root removed; var name: %s\n", __FUNCTION__, name);
    return NULL;
}
void sif_set_clock(struct sdio_func *func, int clk)
{
    struct mmc_host *host = NULL;
    struct mmc_card *card = NULL;

    card = func->card;
    host = card->host;

    sdio_claim_host(func);

    /* currently only set clock */
    host->ios.clock = clk * 1000000;

    esp_dbg(ESP_SHOW, "%s clock is %u\n", __func__, host->ios.clock);

    if (host->ios.clock > host->f_max)
        host->ios.clock = host->f_max;

    host->ops->set_ios(host, &host->ios);

    mdelay(2);

    sdio_release_host(func);
}
struct dentry *esp_dump(const char *name, struct dentry *parent, void *data, int size)
{
    struct dentry *rc;
    umode_t mode = 0644;

    if (!esp_debugfs_root)
        return NULL;

    if (!parent)
        parent = esp_debugfs_root;

    rc = debugfs_create_file(name, mode, parent, data, &esp_debugfs_fops);
    if (!rc)
        goto Fail;
    else
        return rc;

Fail:
    debugfs_remove_recursive(esp_debugfs_root);
    esp_debugfs_root = NULL;
    esp_dbg(ESP_DBG_ERROR, "%s failed, debugfs root removed; var name: %s\n", __FUNCTION__, name);
    return NULL;
}
struct dentry *esp_dump_array(const char *name, struct dentry *parent, struct debugfs_blob_wrapper *blob)
{
    struct dentry *rc = NULL;
    umode_t mode = 0644;

    if (!esp_debugfs_root)
        return NULL;

    if (!parent)
        parent = esp_debugfs_root;

    rc = debugfs_create_blob(name, mode, parent, blob);
    if (!rc)
        goto Fail;
    else
        return rc;

Fail:
    debugfs_remove_recursive(esp_debugfs_root);
    esp_debugfs_root = NULL;
    esp_dbg(ESP_DBG_ERROR, "%s failed, debugfs root removed; var name: %s\n", __FUNCTION__, name);
    return NULL;
}
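/*
 * Illustrative sketch, not part of the driver: how the debugfs helpers above
 * might be wired up at init time. The names example_debugfs_setup, dbg_dir,
 * tx_credits_dbg and fw_blob are hypothetical; the calls and the esp_type
 * value are the ones defined in this section.
 */
static u32 tx_credits_dbg;
static struct debugfs_blob_wrapper fw_blob;

static int example_debugfs_setup(void)
{
    struct dentry *dbg_dir;

    if (esp_debugfs_init())
        return -ENOENT;

    dbg_dir = esp_debugfs_add_sub_dir("example");
    if (!dbg_dir)
        return -ENOENT;

    /* expose a counter and a binary blob under the sub-directory */
    if (!esp_dump_var("tx_credits", dbg_dir, &tx_credits_dbg, ESP_U32))
        return -ENOENT;
    if (!esp_dump_array("fw_blob", dbg_dir, &fw_blob))
        return -ENOENT;

    return 0;
}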
void ext_gpio_int_process(u16 value)
{
    if (value == 0x00)
        return;

    esp_dbg(ESP_DBG_TRACE, "%s enter\n", __func__);

    /* intr cycle queue is full, wait */
    while (esp_pending_intr_list.curr_num >= MAX_PENDING_INTR_LIST) {
        udelay(1);
    }

    spin_lock(&esp_pending_intr_list.spin_lock);

    ASSERT(esp_pending_intr_list.curr_num < MAX_PENDING_INTR_LIST);

    esp_pending_intr_list.pending_intr_list[esp_pending_intr_list.end_pos] = value;
    esp_pending_intr_list.end_pos = (esp_pending_intr_list.end_pos + 1) % MAX_PENDING_INTR_LIST;
    esp_pending_intr_list.curr_num++;

    queue_work(ext_irq_wkq, &ext_irq_work);

    spin_unlock(&esp_pending_intr_list.spin_lock);
}
int sip_parse_events(struct esp_sip *sip, u8 *buf)
{
    struct sip_hdr *hdr = (struct sip_hdr *)buf;

    switch (hdr->c_evtid) {
    case SIP_EVT_TARGET_ON: {
        /* use rx work queue to send... */
        if (atomic_read(&sip->state) == SIP_PREPARE_BOOT || atomic_read(&sip->state) == SIP_BOOT) {
            atomic_set(&sip->state, SIP_SEND_INIT);
            queue_work(sip->epub->esp_wkq, &sip->rx_process_work);
        } else {
            esp_dbg(ESP_DBG_ERROR, "%s boot during wrong state %d\n", __func__, atomic_read(&sip->state));
        }
        break;
    }

    case SIP_EVT_BOOTUP: {
        struct sip_evt_bootup2 *bootup_evt = (struct sip_evt_bootup2 *)(buf + SIP_CTRL_HDR_LEN);

        if (sip->rawbuf)
            kfree(sip->rawbuf);

        sip_post_init(sip, bootup_evt);

        if (gl_bootup_cplx)
            complete(gl_bootup_cplx);

        break;
    }

    case SIP_EVT_RESETTING: {
        sip->epub->wait_reset = 1;
        if (gl_bootup_cplx)
            complete(gl_bootup_cplx);
        break;
    }

    case SIP_EVT_SLEEP: {
        //atomic_set(&sip->epub->ps.state, ESP_PM_ON);
        break;
    }

    case SIP_EVT_TXIDLE: {
        //struct sip_evt_txidle *txidle = (struct sip_evt_txidle *)(buf + SIP_CTRL_HDR_LEN);
        //sip_txdone_clear(sip, txidle->last_seq);
        break;
    }

#ifndef FAST_TX_STATUS
    case SIP_EVT_TX_STATUS: {
        struct sip_evt_tx_report *report = (struct sip_evt_tx_report *)(buf + SIP_CTRL_HDR_LEN);
        sip_txdoneq_process(sip, report);
        break;
    }
#endif /* FAST_TX_STATUS */

    case SIP_EVT_SCAN_RESULT: {
        struct sip_evt_scan_report *report = (struct sip_evt_scan_report *)(buf + SIP_CTRL_HDR_LEN);

        if (atomic_read(&sip->epub->wl.off)) {
            esp_dbg(ESP_DBG_ERROR, "%s scan result while wlan off\n", __func__);
            return 0;
        }
        sip_scandone_process(sip, report);

        break;
    }

    case SIP_EVT_ROC: {
        struct sip_evt_roc *report = (struct sip_evt_roc *)(buf + SIP_CTRL_HDR_LEN);
        esp_rocdone_process(sip->epub->hw, report);
        break;
    }

#ifdef ESP_RX_COPYBACK_TEST
    case SIP_EVT_COPYBACK: {
        u32 len = hdr->len - SIP_CTRL_HDR_LEN;

        esp_dbg(ESP_DBG_TRACE, "%s copyback len %d seq %u\n", __func__, len, hdr->seq);

        memcpy(copyback_buf + copyback_offset, pkt->buf + SIP_CTRL_HDR_LEN, len);
        copyback_offset += len;
        //show_buf(pkt->buf, 256);

        //how about totlen % 256 == 0??
        if (hdr->hdr.len < 256) {
            //sip_show_copyback_buf();
            kfree(copyback_buf);
        }
    }
        break;
#endif /* ESP_RX_COPYBACK_TEST */

    case SIP_EVT_CREDIT_RPT:
        break;

#ifdef TEST_MODE
    case SIP_EVT_WAKEUP: {
        u8 check_str[12];
        struct sip_evt_wakeup *wakeup_evt = (struct sip_evt_wakeup *)(buf + SIP_CTRL_HDR_LEN);

        sprintf((char *)&check_str, "%d", wakeup_evt->check_data);
        esp_test_cmd_event(TEST_CMD_WAKEUP, (char *)&check_str);
        break;
    }

    case SIP_EVT_DEBUG: {
        u8 check_str[100];
        int i;
        char *ptr_str = (char *)&check_str;
        struct sip_evt_debug *debug_evt = (struct sip_evt_debug *)(buf + SIP_CTRL_HDR_LEN);

        for (i = 0; i < debug_evt->len; i++)
            ptr_str += sprintf(ptr_str, "0x%x%s", debug_evt->results[i], i == debug_evt->len - 1 ? "" : " ");

        esp_test_cmd_event(TEST_CMD_DEBUG, (char *)&check_str);
        break;
    }

    case SIP_EVT_LOOPBACK: {
        u8 check_str[12];
        struct sip_evt_loopback *loopback_evt = (struct sip_evt_loopback *)(buf + SIP_CTRL_HDR_LEN);

        esp_dbg(ESP_DBG_LOG, "%s loopback len %d seq %u\n", __func__, hdr->len, hdr->seq);

        if (loopback_evt->pack_id != get_loopback_id()) {
            sprintf((char *)&check_str, "seq id error %d, expect %d", loopback_evt->pack_id, get_loopback_id());
            esp_test_cmd_event(TEST_CMD_LOOPBACK, (char *)&check_str);
        }

        if ((loopback_evt->pack_id + 1) < get_loopback_num()) {
            inc_loopback_id();
            sip_send_loopback_mblk(sip, loopback_evt->txlen, loopback_evt->rxlen, get_loopback_id());
        } else {
            sprintf((char *)&check_str, "test over!");
            esp_test_cmd_event(TEST_CMD_LOOPBACK, (char *)&check_str);
        }
        break;
    }
#endif /* TEST_MODE */

    case SIP_EVT_SNPRINTF_TO_HOST: {
        u8 *p = (buf + sizeof(struct sip_hdr) + sizeof(u16));
        u16 *len = (u16 *)(buf + sizeof(struct sip_hdr));
        char test_res_str[560];

        sprintf(test_res_str, "esp_host:%llx\nesp_target: %.*s", DRIVER_VER, *len, p);

        esp_dbg(ESP_SHOW, "%s\n", test_res_str);
#ifdef ANDROID
        if (*len && sip->epub->sdio_state == ESP_SDIO_STATE_FIRST_INIT) {
            char filename[256];

            if (mod_eagle_path_get() == NULL)
                sprintf(filename, "%s/%s", FWPATH, "test_results");
            else
                sprintf(filename, "%s/%s", mod_eagle_path_get(), "test_results");

            android_readwrite_file(filename, NULL, test_res_str, strlen(test_res_str));
        }
#endif
        break;
    }

    case SIP_EVT_TRC_AMPDU: {
        struct sip_evt_trc_ampdu *ep = (struct sip_evt_trc_ampdu *)(buf + SIP_CTRL_HDR_LEN);
        struct esp_node *node = NULL;
        int i = 0;

        if (atomic_read(&sip->epub->wl.off)) {
            esp_dbg(ESP_DBG_ERROR, "%s trc ampdu event while wlan off\n", __func__);
            return 0;
        }

        node = esp_get_node_by_addr(sip->epub, ep->addr);
        if (node == NULL)
            break;
#if 0
        esp_tx_ba_session_op(sip, node, ep->state, ep->tid);
#else
        for (i = 0; i < 8; i++) {
            if (ep->tid & (1 << i)) {
                esp_tx_ba_session_op(sip, node, ep->state, i);
            }
        }
#endif
        break;
    }

    case SIP_EVT_EP: {
        char *ep = (char *)(buf + SIP_CTRL_HDR_LEN);
        static int counter = 0;

        esp_dbg(ESP_ATE, "%s EVT_EP \n\n", __func__);
        if (counter++ < 2) {
            esp_dbg(ESP_ATE, "ATE: %s \n", ep);
        }

        esp_test_ate_done_cb(ep);

        break;
    }

    case SIP_EVT_INIT_EP: {
        char *ep = (char *)(buf + SIP_CTRL_HDR_LEN);
        esp_dbg(ESP_ATE, "Phy Init: %s \n", ep);
        break;
    }

    case SIP_EVT_NOISEFLOOR: {
        struct sip_evt_noisefloor *ep = (struct sip_evt_noisefloor *)(buf + SIP_CTRL_HDR_LEN);
        atomic_set(&sip->noise_floor, ep->noise_floor);
        break;
    }

    default:
        break;
    }

    return 0;
}