/*
 * seq_file ->start handler for the RX perf-stat debugfs file.
 *
 * Returns SEQ_START_TOKEN on the very first read (so ->show can emit a
 * header), NULL when *pos is at or past SQN_DFS_PERF_STAT_SIZE (end of
 * file), otherwise a kmalloc'ed u32 iterator holding the current index.
 *
 * NOTE(review): the kmalloc'ed iterator is presumably released by the
 * matching ->stop handler (not visible in this chunk) -- confirm, or
 * the allocation leaks on every read session.
 */
static void *sqn_dfs_perf_rx_seq_start(struct seq_file *seq, loff_t *pos)
{
	u32 *idx = 0;

	sqn_pr_enter();

	if (0 == *pos) {
		/* first call of a read session */
		sqn_pr_dbg("zero pos\n");
		idx = SEQ_START_TOKEN;
		goto out;
	} else if (SQN_DFS_PERF_STAT_SIZE <= *pos) {
		/* indicate beyond end of file position */
		sqn_pr_dbg("beyond end of file position %llu\n", *pos);
		idx = 0;
		goto out;
	}

	idx = kmalloc(sizeof(u32), GFP_KERNEL);
	if (!idx) {
		sqn_pr_dbg("failed to alloc seq_file iterator\n");
		goto out;
	}
	*idx = *pos;
	sqn_pr_dbg("start pos %u\n", *idx);
out:
	sqn_pr_leave();
	return idx;
}
/*
 * seq_file ->next handler for the RX perf-stat debugfs file.
 *
 * Advances *pos and returns a kmalloc'ed u32 iterator holding the new
 * index, or NULL when the index reaches SQN_DFS_PERF_STAT_SIZE (end of
 * sequence).
 *
 * Fix: when the end of the sequence was reached the iterator pointer
 * was simply overwritten with 0, leaking the kmalloc'ed buffer --
 * seq_read() calls ->stop() with the NULL returned from here, so
 * nothing else could free it.  Free it before returning NULL.
 */
static void *sqn_dfs_perf_rx_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	u32 *idx = 0;

	sqn_pr_enter();

	++(*pos);

	if (SEQ_START_TOKEN == v) {
		/* header was just shown: allocate the real iterator */
		idx = kmalloc(sizeof(u32), GFP_KERNEL);
		if (!idx) {
			sqn_pr_dbg("failed to alloc seq_file iterator\n");
			goto out;
		}
	} else {
		idx = v;
	}

	*idx = *pos - 1;
	sqn_pr_dbg("idx %u, pos %llu\n", *idx, *pos);

	if (*idx >= SQN_DFS_PERF_STAT_SIZE) {
		/* indicate end of sequence */
		sqn_pr_dbg("end of sequence, idx %u\n", *idx);
		kfree(idx);	/* fix: don't leak the iterator */
		idx = 0;
	}
out:
	sqn_pr_leave();
	return idx;
}
int init_thp(struct net_device* dev) { sqn_pr_enter(); #if THP_DEBUG printk(KERN_WARNING "init_thp +\n"); #endif if (0 == this_device) { if(init_procfs_handler()) { return -1; } if(init_thp_devfile()) return -1; /* Don't call init_thp_handler() here, it will be called from * probe() before interrupts are enabled, to ensure that we will * catch all THP packets as soon as they appear */ /* if (init_thp_handler(dev)) */ /* return -1; */ this_device = dev; sqn_pr_info("KTHP initialized\n"); } #if THP_DEBUG printk(KERN_WARNING "init_thp -\n"); #endif sqn_pr_leave(); return 0; }
/*
 * Handle a "memcpy" tag from the bootstrapper image: convert its
 * big-endian header fields to CPU byte order and write the payload to
 * the card over SDIO.
 *
 * Returns the write_data() status (0 on success).
 */
static int sqn_handle_memcpy_tag(struct sdio_func *func
	, struct sqn_tag_memcpy *mcpy_tag)
{
	int status;

	sqn_pr_enter();

	/* Bootstrapper file data is always stored big endian */
	mcpy_tag->address = be32_to_cpu(mcpy_tag->address);
	mcpy_tag->access_size = be32_to_cpu(mcpy_tag->access_size);
	mcpy_tag->data_size = be32_to_cpu(mcpy_tag->data_size);

	sqn_pr_dbg("address: 0x%02X access_size: %u data_size: %u\n"
		, mcpy_tag->address, mcpy_tag->access_size
		, mcpy_tag->data_size);

	status = write_data(func, mcpy_tag->address, mcpy_tag->data
		, mcpy_tag->data_size, mcpy_tag->access_size);

	sqn_pr_leave();
	return status;
}
/*
 * poll/select handler for the THP char device.
 *
 * Reports POLLERR once the device is gone, POLLIN | POLLRDNORM when
 * queued THP packets are available, and 0 (nothing to read) otherwise.
 */
static unsigned int thp_poll(struct file *filp, poll_table *wait)
{
	unsigned int events = 0;

	sqn_pr_enter();

	poll_wait(filp, &to_sqntool_wait, wait);

	if (0 == this_device) {
		/* device was removed while the file is still open */
		printk(KERN_WARNING "thp_poll() device removed\n");
		events = POLLERR;
	} else if (!skb_queue_empty(&to_sqntool_queue)) {
		events = (POLLIN | POLLRDNORM);
	}

	sqn_pr_leave();
	return events;
}
int sqn_rx_process(struct net_device *dev, struct sk_buff *skb) { int rc = 0; struct sqn_private *priv = netdev_priv(dev); #if DRIVER_DEBUG printk(KERN_WARNING "sqn_rx_process \n"); #endif sqn_pr_enter(); dev->last_rx = jiffies; skb->protocol = eth_type_trans(skb, dev); skb->dev = dev; priv->stats.rx_packets++; priv->stats.rx_bytes += skb->len; #if SKB_DEBUG sqn_pr_info("%s: push skb [0x%p] to kernel, users %d\n", __func__, skb, atomic_read(&skb->users)); #endif netif_rx(skb); /* netif_receive_skb(skb); */ sqn_pr_leave(); return rc; }
/*
 * Create the driver's procfs directory and its entries
 * (interface-name and driver-revision read handlers).
 *
 * Returns 0 on success, 1 on failure.
 *
 * NOTE(review): on proc_mkdir() failure this calls
 * remove_proc_entry(PROC_DIR_NAME) even though nothing was created,
 * and the directory was created under the name `procfs_dir` -- confirm
 * PROC_DIR_NAME and procfs_dir refer to the same entry.
 * NOTE(review): if install_entry() fails, the already-created directory
 * is not removed here -- verify the caller performs that cleanup.
 */
int init_procfs_handler(void)
{
	sqn_pr_enter();

	kthp_proc_dir = proc_mkdir(procfs_dir, NULL);
	if (kthp_proc_dir) {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30)
		/* proc_dir_entry lost its ->owner field in 2.6.30 */
		kthp_proc_dir->owner = THIS_MODULE;
#endif
	} else {
		remove_proc_entry(PROC_DIR_NAME, NULL);
		return 1;
	}

	if (install_entry(IFACE_FILENAME, ifacename_read)
			|| install_entry(DRV_REVISION, drvrev_read)) {
		return 1;
	}

#if THP_DEBUG
	printk(KERN_WARNING "drvrev_read -\n");
#endif

	sqn_pr_leave();
	return 0;
}
/*
 * Parse a textual MAC address of the form "XX:XX:XX:XX:XX:XX" (':' or
 * '-' delimiters) from @data into 6 binary bytes in @result.
 *
 * Returns 0 on success, -1 on malformed input.
 *
 * Side effect (kept from the original): the last byte of @data is
 * overwritten with '\0' so the string can be logged.
 *
 * Fix: the original only rejected length == 0, then unconditionally
 * indexed data[2..14] for the delimiter check and read data[i + 1] in
 * the conversion loop -- out-of-bounds reads for short buffers.  A
 * textual MAC is exactly 17 characters, so require at least that, and
 * bound the loop so the low-nibble read stays inside the buffer.
 */
static int get_mac_addr_from_str(u8 *data, u32 length, u8 *result)
{
	int rv = 0;
	int i = 0;

	sqn_pr_enter();

	/* "XX:XX:XX:XX:XX:XX" is 17 bytes; shorter input would make the
	 * delimiter checks below read past the buffer */
	if (length < 17) {
		rv = -1;
		goto out;
	}

	/*
	 * Check if we have delimiters on appropriate places:
	 *
	 * X X : X X : X X : X X : X  X  :  X  X
	 * 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16
	 */
	if (!(
		(':' == data[2] || '-' == data[2])
		&& (':' == data[5] || '-' == data[5])
		&& (':' == data[8] || '-' == data[8])
		&& (':' == data[11] || '-' == data[11])
		&& (':' == data[14] || '-' == data[14])
	)) {
		sqn_pr_err("can't get mac address from firmware"
			" - incorrect mac address\n");
		rv = -1;
		goto out;
	}

	i = 0;
	while (i + 1 < length) {	/* fix: data[i + 1] must be in bounds */
		int high = 0;
		int low = 0;
		if ((high = char_to_int(data[i])) >= 0
				&& (low = char_to_int(data[i + 1])) >= 0) {
			result[i/3] = low;
			result[i/3] |= high << 4;
		} else {
			sqn_pr_err("can't get mac address from firmware"
				" - incorrect mac address\n");
			rv = -1;
			goto out;
		}
		i += 3;	/* skip the delimiter */
	}

out:
	if (length > 0) {
		/* NUL-terminate in place so the string can be printed */
		data[length - 1] = 0;
		sqn_pr_dbg("mac addr string: %s\n", data);
	}
	sqn_pr_leave();
	return rv;
}
/* Detach this driver's early-suspend callbacks from the Android PM core. */
void unregister_android_earlysuspend(void)
{
	sqn_pr_enter();

	unregister_early_suspend(&sqn_early_suspend_desc);

	sqn_pr_leave();
}
int thp_handler(struct sk_buff *skb, struct net_device *pDev, struct packet_type *pPt) #endif { struct sk_buff *skb_thp = 0; struct ethhdr *eth = 0; sqn_pr_enter(); /* We need only ETH_P_802_2 protocol packets with THP mac address */ eth = skb2ethhdr(skb); if(ntohs(skb->protocol) != ETH_P_802_2 || !is_thp_packet(eth->h_dest)) { //for DDTM, drop all NOT THP packets if(drop_packet) { sqn_pr_dbg("HTC CODE: drop packet for DDTM\n"); skb->pkt_type = PACKET_OTHERHOST; } goto not_thp_out; } skb_thp = skb_clone(skb, GFP_ATOMIC); /* Bugz 22554: strip CRC at the end of packet */ skb_trim(skb_thp, skb_thp->len - 4); #if THP_TRACE sqn_pr_info("%s: RX packet, len = %d\n", __func__, skb_thp->len); #endif sqn_pr_dbg("RX THP packet, length %d\n", skb_thp->len); skb_queue_tail(&to_sqntool_queue, skb_thp); if(skb_queue_len(&to_sqntool_queue) == 256){ skb_thp = skb_dequeue(&to_sqntool_queue); kfree_skb(skb_thp); } wake_up_interruptible(&to_sqntool_wait); //Wake up wait queue thp_out: dev_kfree_skb_any(skb); sqn_pr_leave(); return NET_RX_DROP; not_thp_out: dev_kfree_skb_any(skb); sqn_pr_leave(); return NET_RX_SUCCESS; }
/* net_device ->get_stats hook: expose the per-device counters. */
static struct net_device_stats *sqn_get_stats(struct net_device *dev)
{
	struct sqn_private *priv = netdev_priv(dev);

	sqn_pr_enter();
	sqn_pr_leave();

	return &priv->stats;
}
/*
 * Handle the bootloader's MAC-address tag and store the resulting
 * address in priv->mac_addr.
 *
 * Returns 0 on success, -1 on a malformed tag.
 *
 * Fix: the return value of get_mac_addr_from_str() was ignored, so a
 * malformed address was silently accepted and garbage was installed as
 * the MAC; parse errors are now propagated.
 */
static int sqn_handle_mac_addr_tag(struct sdio_func *func, u8 *data, u32 length)
{
	int rv = 0;
	struct sqn_private *priv =
		((struct sqn_sdio_card *) sdio_get_drvdata(func))->priv;

	sqn_pr_enter();

	/*
	 * This tag could contain one or two mac addresses in string
	 * form, delimited by some symbol (space or something else).
	 * Each mac address written as a string has constant length.
	 * Thus we can determine the number of mac addresses by the
	 * length of the tag:
	 *
	 * mac addr length in string form: XX:XX:XX:XX:XX:XX = 17 bytes
	 * tag length: 17 bytes [ + 1 byte + 17 bytes ]
	 */
#define MAC_ADDR_STRING_LEN 17

	/*
	 * If we have only one mac addr we should increment it by one
	 * and use it.
	 * If we have two mac addresses we should use a second one.
	 */
	if (MAC_ADDR_STRING_LEN <= length
			&& length < 2 * MAC_ADDR_STRING_LEN + 1) {
		/* we have only one mac addr */
		sqn_pr_dbg("single mac address\n");
		rv = get_mac_addr_from_str(data, length, priv->mac_addr);
		if (rv)		/* fix: don't use a garbage address */
			goto out;
		++(priv->mac_addr[ETH_ALEN - 1]);
	} else if (2 * MAC_ADDR_STRING_LEN + 1 == length) {
		/* we have two macs */
		sqn_pr_dbg("two mac addresses, using second\n");
		rv = get_mac_addr_from_str(data + MAC_ADDR_STRING_LEN + 1
			, length - (MAC_ADDR_STRING_LEN + 1), priv->mac_addr);
		if (rv)		/* fix: don't use a garbage address */
			goto out;
	} else {
		/* incorrect data length */
		sqn_pr_err("can't get mac address from bootloader"
			" - incorrect mac address length\n");
		rv = -1;
		goto out;
	}

	sqn_pr_info("setting MAC address from bootloader: "
		"%02x:%02x:%02x:%02x:%02x:%02x\n", priv->mac_addr[0]
		, priv->mac_addr[1], priv->mac_addr[2], priv->mac_addr[3]
		, priv->mac_addr[4], priv->mac_addr[5]);

out:
	sqn_pr_leave();
	return rv;
}
/** sqn_load_firmware - loads firmware to card
 * @func: SDIO function, used to transfer data via SDIO interface,
 *	 also used to obtain pointer to device structure.
 *
 * But now the only work it does - is loading of bootstrapper to card,
 * because firmware is supposed to be loaded by a userspace program.
 *
 * Returns 0 on success, otherwise the request_firmware()/SDIO error.
 */
int sqn_load_firmware(struct sdio_func *func)
{
	int rv = 0;
	const struct firmware *fw = 0;
	/* Create a local firmware_name with path to replace original
	 * global firmware_name -- Tony Wu. */
	const char *firmware_name = "../../../data/wimax/Boot.bin";
	struct sqn_sdio_card *sqn_card = sdio_get_drvdata(func);

	sqn_pr_enter();

	sqn_pr_info("trying to find bootloader image: \"%s\"\n", firmware_name);
	if ((rv = request_firmware(&fw, firmware_name, &func->dev)))
		goto out;

	if (SQN_1130 == sqn_card->version) {
		sdio_claim_host(func);
		/* properly setup registers for firmware loading */
		sqn_pr_dbg("setting up SQN_H_SDRAM_NO_EMR register\n");
		sdio_writeb(func, 0, SQN_H_SDRAM_NO_EMR, &rv);
		if (rv) {
			sdio_release_host(func);
			goto out;
		}
		sqn_pr_dbg("setting up SQN_H_SDRAMCTL_RSTN register\n");
		sdio_writeb(func, 1, SQN_H_SDRAMCTL_RSTN, &rv);
		sdio_release_host(func);
		if (rv)
			goto out;
	}

	sqn_pr_info("loading bootloader to the card...\n");
	if ((rv = sqn_load_bootstrapper(func, (u8 *) fw->data, fw->size)))
		goto out;

	/* boot the card */
	sqn_pr_info("bootting the card...\n");
	sdio_claim_host(func);		/* by daniel */
	sdio_writeb(func, 1, SQN_H_CRSTN, &rv);
	sdio_release_host(func);	/* by daniel */
	if (rv)
		goto out;
	sqn_pr_info(" done\n");

out:
	/* To avoid kzalloc leakage in /drivers/base/firmware_class.c */
	if (fw) {
		release_firmware(fw);
		fw = NULL;
	}
	sqn_pr_leave();
	return rv;
}
/* net_device ->tx_timeout hook: log the watchdog hit; no recovery here. */
static void sqn_tx_timeout(struct net_device *dev)
{
	sqn_pr_enter();

	sqn_pr_err("TX watch dog timeout\n");

	sqn_pr_leave();
}
/*
 * Android late-resume callback: turn host-wakeup off (arg 0) now that
 * the system is awake again.
 */
static void sqn_handle_android_late_resume(struct early_suspend *h)
{
	sqn_pr_enter();
	sqn_pr_info("%s: enter\n", __func__);

	mmc_wimax_enable_host_wakeup(0);

	sqn_pr_info("%s: leave\n", __func__);
	sqn_pr_leave();
}
void sqn_dfs_cleanup(void) { sqn_pr_enter(); #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) debugfs_remove_recursive(sqn_dfs_rootdir); #endif sqn_dfs_rootdir = 0; sqn_pr_leave(); }
struct sqn_private *sqn_add_card(void *card, struct device *realdev) { struct sqn_private *priv = 0; u8 dummy_wimax_mac_addr[ETH_ALEN] = { 0x00, 0x16, 0x08, 0x00, 0x06, 0x53 }; /* Allocate an Ethernet device and register it */ struct net_device *dev = alloc_netdev(sizeof(struct sqn_private), "wimax%d", ether_setup); sqn_pr_enter(); if (!dev) { sqn_pr_err("init wimaxX device failed\n"); goto done; } priv = netdev_priv(dev); memset(priv, 0, sizeof(struct sqn_private)); /* * Use dummy WiMAX mac address for development version (boot from * flash) of WiMAX SDIO cards. Production cards use mac address from * firmware which is loaded by driver. Random ethernet address can't be * used if IPv4 convergence layer is enabled on WiMAX base station. */ memcpy(priv->mac_addr, dummy_wimax_mac_addr, ETH_ALEN); spin_lock_init(&priv->drv_lock); /* Fill the private stucture */ priv->dev = dev; priv->card = card; /* Setup the OS Interface to our functions */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29) dev->open = sqn_dev_open; dev->stop = sqn_dev_stop; dev->hard_start_xmit = sqn_hard_start_xmit; dev->tx_timeout = sqn_tx_timeout; dev->get_stats = sqn_get_stats; #else dev->netdev_ops = &sqn_netdev_ops; #endif /* TODO: Make multicast possible */ dev->flags &= ~IFF_MULTICAST; //wimax interface mtu must be 1400 (in spec) dev->mtu = 1400; SET_NETDEV_DEV(dev, realdev); done: sqn_pr_leave(); return priv; }
/* Unregister the net_device belonging to this card.  Always returns 0. */
int sqn_stop_card(struct sqn_private *priv)
{
	struct net_device *netdev = priv->dev;

	sqn_pr_enter();

	unregister_netdev(netdev);

	sqn_pr_leave();
	return 0;
}
/*
 * Ask the firmware to wake up on behalf of the host; thin wrapper
 * around sqn_wakeup_fw().  Returns its status.
 *
 * Fix: removed the stray ';' after the closing brace -- an empty
 * file-scope declaration is not valid ISO C90 and triggers -Wpedantic.
 */
int sqn_notify_host_wakeup(struct sdio_func *func)
{
	int rv = 0;

	sqn_pr_enter();

	rv = sqn_wakeup_fw(func);

	sqn_pr_leave();
	return rv;
}
/*
 * Request the given firmware power mode via a THSP_SET_FW_POWER_MODE
 * message.  Returns the helper's status.
 */
static int sqn_set_fw_power_mode(struct sdio_func *func
	, enum sqn_fw_power_mode pm)
{
	int status;

	sqn_pr_enter();

	status = sqn_set_power_mode_helper(func, THSP_SET_FW_POWER_MODE, pm);

	sqn_pr_leave();
	return status;
}
/*
 * ->open handler for the RX perf-stat debugfs file: attach the
 * seq_file operations.  Returns the seq_open() status.
 */
static int sqn_dfs_perf_rx_open(struct inode *i, struct file *f)
{
	int status;

	sqn_pr_enter();

	status = seq_open(f, &sqn_dfs_perf_rx_seq_ops);

	sqn_pr_leave();
	return status;
}
/*
 * Allocate a receive buffer of at most @size bytes into *buf.
 *
 * If @size is at least 8KB an 8KB buffer is allocated, otherwise a
 * buffer of exactly @size.  Returns the size of the buffer actually
 * allocated, or 0 on allocation failure (*buf is then NULL).
 *
 * NOTE(review): assumes the caller initialized *buf to NULL before the
 * call -- the retry loop that used to fill it first has been removed;
 * confirm against callers.
 *
 * Fix: the small-size path returned @size even when kmalloc() failed,
 * so callers saw a non-zero "allocated" size with *buf == NULL.
 */
static size_t sqn_alloc_big_buffer(u8 **buf, size_t size, gfp_t gfp_flags)
{
	size_t real_size = size;

	sqn_pr_enter();

	if (!(*buf)) {
		real_size = 8 * 1024;
		if (size >= real_size) {
			*buf = kmalloc(real_size, gfp_flags);
			/* signal failure by returning 0 */
			if (!(*buf))
				real_size = 0;
		} else {
			/* never hand back a buffer bigger than requested */
			*buf = kmalloc(size, gfp_flags);
			real_size = *buf ? size : 0;	/* fix: report failure */
		}
	}

	sqn_pr_leave();
	return real_size;
}
/* Drop the cached "last request" skb, if any, under its spinlock. */
static void free_last_request(void)
{
	sqn_pr_enter();

	spin_lock(&g_last_request_lock);
	if (g_last_request_skb != 0) {
		dev_kfree_skb_any(g_last_request_skb);
		g_last_request_skb = 0;
	}
	spin_unlock(&g_last_request_lock);

	sqn_pr_leave();
}
/*
 * file_operations ->release handler for the THP char device: clear the
 * single-open flag and purge any queued THP packets.  Always returns 0.
 *
 * Fix: declared as int instead of ssize_t -- struct file_operations
 * expects `int (*release)(struct inode *, struct file *)`, so the old
 * prototype did not match the fops slot.
 */
static int thp_release(struct inode *inode, struct file *filp)
{
	sqn_pr_enter();

	once_open_flag = 0;

	if (!skb_queue_empty(&to_sqntool_queue))
		skb_queue_purge(&to_sqntool_queue);

	sqn_pr_leave();
	return 0;
}
/* net_device ->stop hook: halt the TX queue under the driver lock. */
static int sqn_dev_stop(struct net_device *dev)
{
	struct sqn_private *priv = netdev_priv(dev);

	sqn_pr_enter();

	spin_lock(&priv->drv_lock);
	netif_stop_queue(dev);
	spin_unlock(&priv->drv_lock);

	sqn_pr_leave();
	return 0;
}
/*
 * Stop the TX worker thread and wake anyone sleeping on the TX wait
 * queue.  Always returns 0.
 */
int sqn_stop_tx_thread(struct sqn_private *priv)
{
	sqn_pr_enter();

	kthread_stop(priv->tx_thread);
	wake_up_interruptible(&priv->tx_waitq);

	sqn_pr_leave();
	return 0;
}
static void handle_sqn_state_change_msg(struct sqn_private *priv , struct sqn_lsp_packet *lsp) { struct sqn_sdio_card *card = priv->card; struct sk_buff *skb_reply = 0; unsigned long irq_flags = 0; const int card_state = ntohl(lsp->lsp_header.u.fw_state.state); sqn_pr_enter(); switch (card_state) { case LSP_SQN_ACTIVE: sqn_pr_info("card switched to ACTIVE state (OPT)\n"); spin_lock_irqsave(&priv->drv_lock, irq_flags); card->is_card_sleeps = 0; spin_unlock_irqrestore(&priv->drv_lock, irq_flags); break; case LSP_SQN_IDLE: sqn_pr_info("card switched to IDLE state (LPM)\n"); spin_lock_irqsave(&priv->drv_lock, irq_flags); card->is_card_sleeps = 1; spin_unlock_irqrestore(&priv->drv_lock, irq_flags); break; case LSP_SQN_DROPPED: sqn_pr_info("card switched to DROPPED state (LPM)\n"); spin_lock_irqsave(&priv->drv_lock, irq_flags); card->is_card_sleeps = 1; spin_unlock_irqrestore(&priv->drv_lock, irq_flags); break; case LSP_SQN_REENTRY: sqn_pr_info("card switched to REENTRY state (LPM)\n"); spin_lock_irqsave(&priv->drv_lock, irq_flags); card->is_card_sleeps = 1; spin_unlock_irqrestore(&priv->drv_lock, irq_flags); break; default: sqn_pr_info("card switched to UNSUPPORTED mode %d/0x%x\n" , card_state, card_state); spin_lock_irqsave(&priv->drv_lock, irq_flags); card->is_card_sleeps = 0; spin_unlock_irqrestore(&priv->drv_lock, irq_flags); break; } skb_reply = construct_lsp_packet(THSP_SQN_STATE_CHANGE_REPLY , ntohl(lsp->lsp_header.u.thp_avl.tid), 0); if (0 != (skb_reply = sqn_sdio_prepare_skb_for_tx(skb_reply))) sqn_sdio_tx_skb(card, skb_reply, 0); wake_up_interruptible(&g_card_sleep_waitq); sqn_pr_leave(); }
/*
 * Handle an incoming LSP "THP available" message from the firmware.
 *
 * Under the driver lock: if the TX queue is empty, reply FINISHED and
 * mark the card asleep; otherwise reply ACK and mark it awake.  The
 * reply is sent back over SDIO, waiters on g_card_sleep_waitq are
 * woken, and the netdev TX queue is restarted if it was stopped.
 *
 * NOTE(review): gHostWakeupFWEvent appears to gate a one-shot
 * "dump next TX packet" debug action -- confirm its exact semantics.
 */
static void handle_thp_avl_msg(struct sqn_private *priv
	, struct sqn_lsp_packet *lsp)
{
	struct sqn_sdio_card *card = priv->card;
	struct sk_buff *skb_reply = 0;
	enum sqn_thp_available_reply thp_rpl;
	unsigned long irq_flags = 0;

	sqn_pr_enter();

	spin_lock_irqsave(&priv->drv_lock, irq_flags);
	/* if (card->is_card_sleeps) { */
	if (priv->is_tx_queue_empty(priv)) {
		/* nothing pending: let the card go to sleep */
		if (mmc_wimax_get_sdio_lsp_log()) {
			sqn_pr_info("TX queue empty, thp_rpl=FINISH\n");
		}
		/* sqn_pr_dbg("card was asleep, thp_rpl=FINISH\n"); */
		thp_rpl = LSP_THPA_FINISHED;
		card->is_card_sleeps = 1;
		gHostWakeupFWEvent = 0;
	/* } else if (priv->is_tx_queue_empty(priv)) { */
	/*	sqn_pr_dbg("card was not asleep and tx_queue is empty, thp_rpl=FINISHED\n"); */
	/*	thp_rpl = LSP_THPA_FINISHED; */
	/*	card->is_card_sleeps = 1; */
	} else {
		/* data still queued: keep the card awake */
		/* sqn_pr_info("card was not asleep but tx_queue is no empty, thp_rpl=EXIT\n"); */
		if (mmc_wimax_get_sdio_lsp_log()) {
			sqn_pr_info("TX queue not empty, thp_rpl=ACK\n");
		}
		/* sqn_pr_dbg("card was not asleep, thp_rpl=ACK\n"); */
		thp_rpl = LSP_THPA_ACK;
		card->is_card_sleeps = 0;
	}
	spin_unlock_irqrestore(&priv->drv_lock, irq_flags);

	/* echo the transaction id back in the reply */
	skb_reply = construct_lsp_packet(THSP_THP_AVAILABLE_REPLY
		, ntohl(lsp->lsp_header.u.thp_avl.tid)
		, thp_rpl);
	if (0 != (skb_reply = sqn_sdio_prepare_skb_for_tx(skb_reply)))
		sqn_sdio_tx_skb(card, skb_reply, 0);

	wake_up_interruptible(&g_card_sleep_waitq);

	if (netif_queue_stopped(priv->dev))
		netif_wake_queue(priv->dev);

	if (!card->is_card_sleeps && !gHostWakeupFWEvent) {
		gHostWakeupFWEvent = 1;
		/* Dump next TX packet after LSP ThpAvailableReply(ACK); */
	}

	sqn_pr_leave();
}
/*
 * Mark the pending power-management request as complete (under the
 * driver lock) and wake anyone blocked on the PM wait queue.
 */
static void signal_pm_request_completion(struct sqn_private *priv)
{
	struct sqn_sdio_card *card = priv->card;
	unsigned long flags = 0;

	sqn_pr_enter();

	spin_lock_irqsave(&priv->drv_lock, flags);
	card->pm_complete = 1;
	spin_unlock_irqrestore(&priv->drv_lock, flags);

	wake_up_interruptible(&card->pm_waitq);

	sqn_pr_leave();
}
/*
 * Clear the card's sleep flag (under the driver lock) and wake anyone
 * waiting on the global card-sleep wait queue.
 */
void signal_card_sleep_completion(struct sqn_private *priv)
{
	struct sqn_sdio_card *card = priv->card;
	unsigned long flags = 0;

	sqn_pr_enter();

	spin_lock_irqsave(&priv->drv_lock, flags);
	card->is_card_sleeps = 0;
	spin_unlock_irqrestore(&priv->drv_lock, flags);

	wake_up_interruptible(&g_card_sleep_waitq);

	sqn_pr_dbg("card sleep completion is signaled\n");
	sqn_pr_leave();
}