/*
 * Character-device open entry point for the ipp control device.
 * Admits only privileged callers, enforces the single-minor /
 * single-opener policy, and allocates the initial data buffer array.
 */
/*ARGSUSED*/
static int
ippctl_open(dev_t *devp, int flag, int otyp, cred_t *credp)
{
	/* Starting length of the data buffer array. */
	enum { IPPCTL_INITIAL_BUFS = 4 };
	minor_t minor = getminor(*devp);

	DBG0(DBG_CBOPS, "open\n");

	/* Only allow privileged users to open our device. */
	if (secpolicy_net_config(credp, B_FALSE) != 0) {
		DBG0(DBG_CBOPS, "not privileged user\n");
		return (EPERM);
	}

	/* Sanity check other arguments. */
	if (minor != 0) {
		DBG0(DBG_CBOPS, "bad minor\n");
		return (ENXIO);
	}
	if (otyp != OTYP_CHR) {
		DBG0(DBG_CBOPS, "bad device type\n");
		return (EINVAL);
	}

	/* This is also a single dev_t driver: one opener at a time. */
	mutex_enter(&ippctl_lock);
	if (ippctl_busy) {
		mutex_exit(&ippctl_lock);
		return (EBUSY);
	}
	ippctl_busy = B_TRUE;
	mutex_exit(&ippctl_lock);

	/* Allocate the data buffer array at its starting length. */
	ippctl_alloc(IPPCTL_INITIAL_BUFS);

	DBG0(DBG_CBOPS, "success\n");

	return (0);
}
/*
 * Allocate a receive skb of the requested size and hand back both the
 * skb (via pkt_priv) and its data area (via buffer).
 * Returns 0 on success or -ENOMEM if no skb could be allocated.
 */
int get_rx_buffers(void *priv, void **pkt_priv, void **buffer, int size)
{
	struct net_device *dev = (struct net_device *)priv;
	struct sk_buff *rx_skb;

	DBG0("[%s] dev:%s\n", __func__, dev->name);

	rx_skb = __dev_alloc_skb(size, GFP_ATOMIC);
	if (!rx_skb) {
		DBG0("%s: unable to alloc skb\n", __func__);
		return -ENOMEM;
	}

	/* TODO skb_reserve(skb, NET_IP_ALIGN); for ethernet mode */

	/* Populate some params now. */
	rx_skb->dev = dev;
	skb_set_network_header(rx_skb, 0);

	/* done with skb setup, return the buffer pointer. */
	*pkt_priv = rx_skb;
	*buffer = skb_put(rx_skb, size);

	return 0;
}
static int __rmnet_open(struct net_device *dev) { int r; struct rmnet_private *p = netdev_priv(dev); DBG0("[%s] __rmnet_open()\n", dev->name); if (p->device_up == DEVICE_UNINITIALIZED) { r = msm_bam_dmux_open(p->ch_id, dev, bam_notify); if (r < 0) { DBG0("%s: ch=%d failed with rc %d\n", __func__, p->ch_id, r); return -ENODEV; } r = platform_driver_register(p->bam_pdev); if (r) { pr_err("%s: bam pdev registration failed n=%d rc=%d\n", __func__, p->ch_id, r); msm_bam_dmux_close(p->ch_id); return r; } } p->device_up = DEVICE_ACTIVE; return 0; }
/*
 * Character-device close entry point: release the data buffer array
 * and clear the single-opener flag so the device can be opened again.
 */
/*ARGSUSED*/
static int
ippctl_close(dev_t dev, int flag, int otyp, cred_t *credp)
{
	minor_t minor = getminor(dev);

	DBG0(DBG_CBOPS, "close\n");

	ASSERT(minor == 0);

	/* Free the data buffer array. */
	ippctl_free();

	/* Mark the device available again. */
	mutex_enter(&ippctl_lock);
	ippctl_busy = B_FALSE;
	mutex_exit(&ippctl_lock);

	DBG0(DBG_CBOPS, "success\n");

	return (0);
}
static int bam_rmnet_probe(struct platform_device *pdev) { int i; char name[BAM_DMUX_CH_NAME_MAX_LEN]; struct rmnet_private *p; DBG0("[%s] bam_rmnet_probe()\n", pdev->name); for (i = 0; i < RMNET_DEVICE_COUNT; ++i) { scnprintf(name, BAM_DMUX_CH_NAME_MAX_LEN, "bam_dmux_ch_%d", i); if (!strncmp(pdev->name, name, BAM_DMUX_CH_NAME_MAX_LEN)) break; } p = netdev_priv(netdevs[i]); if (p->in_reset) { DBG0("[%s] is reset\n", pdev->name); p->in_reset = 0; msm_bam_dmux_open(p->ch_id, netdevs[i], bam_notify); netif_carrier_on(netdevs[i]); netif_start_queue(netdevs[i]); } return 0; }
/*
 * Dissect an ICEP Request message body starting at 'offset' and attach
 * the decoded fields to the protocol tree / INFO column.
 */
static void dissect_icep_request(tvbuff_t *tvb, guint32 offset,
	packet_info *pinfo, proto_tree *icep_tree, proto_item* icep_item)
{
	/* p. 612, chapter 23.3.2:
	 *
	 * struct RequestData {
	 *     int requestID;
	 *     Ice::Identity id;
	 *     Ice::StringSeq facet;
	 *     string operation;
	 *     byte mode;
	 *     Ice::Context context;
	 *     Encapsulation params;
	 * }
	 */
	proto_item *msg_item;
	proto_tree *msg_tree;
	gint32 consumed = 0;
	guint32 reqid;

	DBG0("dissect request\n");

	/* check for req id */
	if (!tvb_bytes_exist(tvb, offset, 4)) {
		expert_add_info_format(pinfo, icep_item, &ei_icep_length,
			"too short header");
		col_append_str(pinfo->cinfo, COL_INFO, " (too short header)");
		return;
	}

	/* got at least 4 bytes */

	/* create display subtree for this message type */
	reqid = tvb_get_letohl(tvb, offset);
	msg_item = proto_tree_add_text(icep_tree, tvb, offset, -1,
		"Request Message Body");
	msg_tree = proto_item_add_subtree(msg_item, ett_icep_msg);

	proto_tree_add_item(msg_tree, hf_icep_request_id, tvb, offset, 4,
		ENC_LITTLE_ENDIAN);

	/* request id 0 marks a oneway invocation */
	if (reqid == 0)
		col_append_str(pinfo->cinfo, COL_INFO, "(oneway):");
	else
		col_append_fstr(pinfo->cinfo, COL_INFO, "(%d):",
			tvb_get_letohl(tvb, offset));

	offset += 4;
	DBG0("consumed --> 4\n");

	dissect_icep_request_common(tvb, offset, pinfo, msg_tree, msg_item,
		&consumed);

	if (consumed == -1)
		return;

	/*offset += consumed;*/
	DBG1("consumed --> %d\n", consumed);
}
static int rmnet_ioctl_extended(struct net_device *dev, struct ifreq *ifr) { struct rmnet_ioctl_extended_s ext_cmd; int rc = 0; struct usbnet *unet = netdev_priv(dev); rc = copy_from_user(&ext_cmd, ifr->ifr_ifru.ifru_data, sizeof(struct rmnet_ioctl_extended_s)); if (rc) { DBG0("%s(): copy_from_user() failed\n", __func__); return rc; } switch (ext_cmd.extended_ioctl) { case RMNET_IOCTL_GET_SUPPORTED_FEATURES: ext_cmd.u.data = 0; break; case RMNET_IOCTL_SET_MRU: if (test_bit(EVENT_DEV_OPEN, &unet->flags)) return -EBUSY; /* 16K max */ if ((size_t)ext_cmd.u.data > 0x4000) return -EINVAL; unet->rx_urb_size = (size_t) ext_cmd.u.data; DBG0("[%s] rmnet_ioctl(): SET MRU to %u\n", dev->name, unet->rx_urb_size); break; case RMNET_IOCTL_GET_MRU: ext_cmd.u.data = (uint32_t)unet->rx_urb_size; break; case RMNET_IOCTL_GET_DRIVER_NAME: strlcpy(ext_cmd.u.if_name, unet->driver_name, sizeof(ext_cmd.u.if_name)); break; case RMNET_IOCTL_GET_EPID: ext_cmd.u.data = unet->intf->cur_altsetting->desc.bInterfaceNumber; break; case RMNET_IOCTL_SET_SLEEP_STATE: rmnet_usb_disable_hsic_autosuspend(unet, ext_cmd.u.data); break; } rc = copy_to_user(ifr->ifr_ifru.ifru_data, &ext_cmd, sizeof(struct rmnet_ioctl_extended_s)); if (rc) DBG0("%s(): copy_to_user() failed\n", __func__); return rc; }
/*
 * Open helper: the interface may come up only while the underlying
 * platform device is active.
 */
static int __rmnet_open(struct net_device *dev)
{
	struct rmnet_private *p = netdev_priv(dev);

	DBG0("[%s] __rmnet_open()\n", dev->name);

	if (p->device_state != DEVICE_ACTIVE) {
		DBG0("[%s] Platform inactive\n", dev->name);
		return -ENODEV;
	}

	return 0;
}
/*
 * Device detach entry point.  Only DDI_DETACH is supported; suspend
 * requests are refused.  Tears down the minor node, the global lock
 * and the cached devinfo pointer.
 */
/*ARGSUSED*/
static int
ippctl_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	/* DDI_PM_SUSPEND / DDI_SUSPEND and anything else fail. */
	if (cmd != DDI_DETACH)
		return (DDI_FAILURE);

	DBG0(DBG_DEVOPS, "DDI_DETACH\n");

	ASSERT(dip == ippctl_dip);

	ddi_remove_minor_node(dip, NULL);
	mutex_destroy(&ippctl_lock);
	ippctl_dip = NULL;

	return (DDI_SUCCESS);
}
/*
 * Module info entry point: report module information from the module
 * linkage structure.
 */
int
_info(struct modinfo *modinfop)
{
	DBG0(DBG_MODLINK, "calling mod_info\n");
	return (mod_info(&modlinkage, modinfop));
}
static int bam_rmnet_remove(struct platform_device *pdev) { int i; char name[BAM_DMUX_CH_NAME_MAX_LEN]; struct rmnet_private *p; DBG0("[%s] bam_rmnet_remove()\n", pdev->name); for (i = 0; i < RMNET_DEVICE_COUNT; ++i) { scnprintf(name, BAM_DMUX_CH_NAME_MAX_LEN, "bam_dmux_ch_%d", i); if (!strncmp(pdev->name, name, BAM_DMUX_CH_NAME_MAX_LEN)) break; } p = netdev_priv(netdevs[i]); p->in_reset = 1; if (p->waiting_for_ul_skb != NULL) { dev_kfree_skb_any(p->waiting_for_ul_skb); p->waiting_for_ul_skb = NULL; } msm_bam_dmux_close(p->ch_id); netif_carrier_off(netdevs[i]); netif_stop_queue(netdevs[i]); return 0; }
/*
 * Deferred teardown for a BAM rmnet channel, run from a workqueue.
 * Unregisters the channel's platform driver (if still registered) and
 * frees the work container allocated by whoever scheduled this work.
 */
static void _rmnet_free_bam_later(struct work_struct *work)
{
	struct rmnet_free_bam_work *fwork;

	fwork = container_of(work, struct rmnet_free_bam_work, work);

	DBG0("%s: unregister_netdev, done", __func__);
	if (bam_rmnet_drivers[fwork->ch_id].remove) {
		platform_driver_unregister(&bam_rmnet_drivers[fwork->ch_id]);
		bam_rmnet_drivers[fwork->ch_id].remove = NULL;
	}
	DBG0("%s: free_netdev, done", __func__);

	/*
	 * Fix: free the containing structure, not the embedded
	 * work_struct pointer.  kfree(work) was only correct by
	 * accident if 'work' happened to be the first member of
	 * struct rmnet_free_bam_work.
	 */
	kfree(fwork);
}
END_TEST

/*
 * Benchmark: run 10000 forward/inverse FFT round trips over the
 * bliss_fft_12289_512 parameter set, report the elapsed thread CPU
 * time, and verify the data survives every round trip unchanged.
 */
START_TEST(test_bliss_fft_speed)
{
	bliss_fft_t *fft;
	struct timespec start, stop;
	uint16_t n = bliss_fft_12289_512.n;
	uint32_t x[n], X[n];
	int i, m, count = 10000;

	/* fill the input with a recognisable ramp */
	for (i = 0; i < n; i++)
	{
		x[i] = i;
	}
	fft = bliss_fft_create(&bliss_fft_12289_512);

	clock_gettime(CLOCK_THREAD_CPUTIME_ID, &start);
	for (m = 0; m < count; m++)
	{
		/* forward transform x -> X, then inverse X -> x */
		fft->transform(fft, x, X, FALSE);
		fft->transform(fft, X, x, TRUE);
	}
	clock_gettime(CLOCK_THREAD_CPUTIME_ID, &stop);

	/* elapsed thread CPU time converted to milliseconds */
	DBG0(DBG_LIB, "%d FFT loops in %d ms\n", count,
		(stop.tv_nsec - start.tv_nsec) / 1000000 +
		(stop.tv_sec - start.tv_sec) * 1000);

	/* every round trip must reproduce the original ramp exactly */
	for (i = 0; i < n; i++)
	{
		ck_assert(x[i] == i);
	}
	fft->destroy(fft);
}
static void bam_write_done(void *dev, struct sk_buff *skb) { struct rmnet_private *p = netdev_priv(dev); u32 opmode = p->operation_mode; unsigned long flags; DBG1("%s: write complete\n", __func__); if (RMNET_IS_MODE_IP(opmode) || count_this_packet(skb->data, skb->len)) { p->stats.tx_packets++; p->stats.tx_bytes += skb->len; #ifdef CONFIG_MSM_RMNET_DEBUG p->wakeups_xmit += rmnet_cause_wakeup(p); #endif } DBG1("[%s] Tx packet #%lu len=%d mark=0x%x\n", ((struct net_device *)(dev))->name, p->stats.tx_packets, skb->len, skb->mark); dev_kfree_skb_any(skb); spin_lock_irqsave(&p->tx_queue_lock, flags); if (netif_queue_stopped(dev) && msm_bam_dmux_is_ch_low(p->ch_id)) { DBG0("%s: Low WM hit, waking queue=%p\n", __func__, skb); netif_wake_queue(dev); } spin_unlock_irqrestore(&p->tx_queue_lock, flags); }
/*
 * Network device stop hook: quiesce the transmit queue.
 */
static int rmnet_stop(struct net_device *dev)
{
	DBG0("[%s] rmnet_stop()\n", dev->name);

	netif_stop_queue(dev);

	return 0;
}
static int __rmnet_open(struct net_device *dev) { int r; struct rmnet_private *p = netdev_priv(dev); DBG0("[%s] __rmnet_open()\n", dev->name); if (p->device_up == DEVICE_UNINITIALIZED) { r = msm_bam_dmux_open(p->ch_id, dev, bam_notify); if (r < 0) { DBG0("%s: ch=%d failed with rc %d\n", __func__, p->ch_id, r); return -ENODEV; } } p->device_up = DEVICE_ACTIVE; return 0; }
/*
 * MTU change hook: accept any value in [0, RMNET_DATA_LEN].
 */
static int rmnet_change_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu < 0 || new_mtu > RMNET_DATA_LEN)
		return -EINVAL;

	DBG0("[%s] MTU change: old=%d new=%d\n",
		dev->name, dev->mtu, new_mtu);
	dev->mtu = new_mtu;

	return 0;
}
/*
 * Network device stop hook: quiesce the transmit queue and make sure
 * the transmit tasklet is no longer running.
 */
static int rmnet_stop(struct net_device *dev)
{
	struct rmnet_private *p = netdev_priv(dev);

	DBG0("[%s] rmnet_stop()\n", dev->name);

	netif_stop_queue(dev);
	tasklet_kill(&p->tsklt);

	return 0;
}
/*
 * Network device open hook: bring up the underlying channel and, on
 * success, start the transmit queue.
 */
static int rmnet_open(struct net_device *dev)
{
	int rc;

	DBG0("[%s] rmnet_open()\n", dev->name);

	rc = __rmnet_open(dev);
	if (!rc)
		netif_start_queue(dev);

	return rc;
}
/*
 * Module load entry point: install the module linkage.
 */
int
_init(void)
{
	int rc = mod_install(&modlinkage);

	if (rc != 0)
		DBG0(DBG_MODLINK, "mod_install failed\n");

	return (rc);
}
/*
 * Module unload entry point: remove the module linkage.
 */
int
_fini(void)
{
	int rc = mod_remove(&modlinkage);

	if (rc != 0)
		DBG0(DBG_MODLINK, "mod_remove failed\n");

	return (rc);
}
/*
 * Close helper.  The underlying port is deliberately never closed once
 * opened (re-opening makes the remote side hang); the device is merely
 * marked inactive.  Returns -EBADF if the device was never up.
 */
static int __rmnet_close(struct net_device *dev)
{
	struct rmnet_private *p = netdev_priv(dev);

	DBG0("[%s] __rmnet_close()\n", dev->name);

	if (!p->device_up)
		return -EBADF;

	/* do not close rmnet port once up,  this causes
	   remote side to hang if tried to open again */
	p->device_up = DEVICE_INACTIVE;
	return 0;
}
static void smd_net_notify(void *_dev, unsigned event) { struct rmnet_private *p = netdev_priv((struct net_device *)_dev); switch (event) { case SMD_EVENT_DATA: spin_lock(&p->lock); if (p->skb && (smd_write_avail(p->ch) >= p->skb->len)) { smd_disable_read_intr(p->ch); tasklet_hi_schedule(&p->tsklt); } spin_unlock(&p->lock); if (smd_read_avail(p->ch) && (smd_read_avail(p->ch) >= smd_cur_packet_size(p->ch))) { smd_net_data_tasklet.data = (unsigned long) _dev; tasklet_schedule(&smd_net_data_tasklet); } break; case SMD_EVENT_OPEN: DBG0("%s: opening SMD port\n", __func__); netif_carrier_on(_dev); if (netif_queue_stopped(_dev)) { DBG0("%s: re-starting if queue\n", __func__); netif_wake_queue(_dev); } break; case SMD_EVENT_CLOSE: DBG0("%s: closing SMD port\n", __func__); netif_carrier_off(_dev); break; } }
static void smux_write_done(void *dev, const void *meta_data) { struct rmnet_private *p = netdev_priv(dev); u32 opmode; struct sk_buff *skb = NULL; const struct smux_meta_write *write_meta_info = meta_data; unsigned long flags; if (!dev || !write_meta_info) { DBG1("%s: ERR:invalid WRITE_DONE callback recieved", __func__); return; } skb = (struct sk_buff *) write_meta_info->pkt_priv; if (!skb) { DBG1("%s: ERR:skb pointer NULL in WRITE_DONE" " CALLBACK", __func__); return; } spin_lock_irqsave(&p->lock, flags); opmode = p->operation_mode; spin_unlock_irqrestore(&p->lock, flags); DBG1("%s: write complete\n", __func__); if (RMNET_IS_MODE_IP(opmode) || count_this_packet(skb->data, skb->len)) { p->stats.tx_packets++; p->stats.tx_bytes += skb->len; #ifdef CONFIG_MSM_RMNET_DEBUG p->wakeups_xmit += rmnet_cause_wakeup(p); #endif } DBG1("[%s] Tx packet #%lu len=%d mark=0x%x\n", ((struct net_device *)(dev))->name, p->stats.tx_packets, skb->len, skb->mark); dev_kfree_skb_any(skb); spin_lock_irqsave(&p->tx_queue_lock, flags); if (netif_queue_stopped(dev) && msm_smux_is_ch_low(p->ch_id)) { DBG0("%s: Low WM hit, waking queue=%p\n", __func__, skb); netif_wake_queue(dev); } spin_unlock_irqrestore(&p->tx_queue_lock, flags); }
/*
 * Hand the next packed nvlist buffer in the array back to the caller.
 * Advances the 'read index' and reports the length of the buffer after
 * this one (zero when this is the last), so libipp knows whether to
 * issue another data ioctl.  Returns ENOENT when the read index has
 * caught up with the write index.
 */
static int
ippctl_data(char **dbufp, size_t *dbuflenp, size_t *nextbuflenp)
{
	int ri;

	DBG0(DBG_CBOPS, "called\n");

	/*
	 * The 'read index' having caught the 'write index' means the
	 * array holds no more buffers.
	 */
	ri = ippctl_rindex;
	if (ri == ippctl_windex)
		return (ENOENT);

	/* Extract the buffer details. It is a pre-packed nvlist. */
	*dbufp = ippctl_array[ri].buf;
	*dbuflenp = ippctl_array[ri].buflen;

	DBG2(DBG_CBOPS, "accessing nvlist[%d], length %lu\n", ri, *dbuflenp);
	ASSERT(*dbufp != NULL);

	/*
	 * Advance the read index and report the following buffer's
	 * length (if any) back to the caller.
	 */
	ri++;
	*nextbuflenp = (ri < ippctl_windex) ? ippctl_array[ri].buflen : 0;
	ippctl_rindex = ri;

	return (0);
}
/*
 * Queue a key message for the HAL reader: store it as a length byte
 * followed by the payload at the head of the circular key message
 * buffer, then wake any sleeping reader.  Oversized messages are
 * logged and dropped.
 */
void messageToKeyHAL(BYTE *p, UINT length)
{
	UINT hx;

	/* One byte per slot is reserved for the length prefix. */
	if (length > KEY_MESSAGE_BUFF_LENGTH - 1) {
		DBG0("\nKEY_MESSAGE_BUFF_LENGTH");
		return;
	}

	down(&pGlobalHardwareInfo->semKeyMessage);

	hx = pGlobalHardwareInfo->keyMessageBuffHx;
	pGlobalHardwareInfo->keyMessageBuff[hx][0] = length;
	memcpy(&pGlobalHardwareInfo->keyMessageBuff[hx][1], p, length);

	/* Advance (and wrap) the head index. */
	if (++pGlobalHardwareInfo->keyMessageBuffHx >= KEY_MESSAGE_BUFF_SIZE)
		pGlobalHardwareInfo->keyMessageBuffHx = 0;

	up(&pGlobalHardwareInfo->semKeyMessage);

	wake_up_interruptible(&pGlobalHardwareInfo->read_wait);
}
/*
 * Bring up the SDIO data channel for this device; the DMUX channel is
 * opened only if the device is not already up.
 */
static int __rmnet_open(struct net_device *dev)
{
	struct rmnet_private *p = netdev_priv(dev);
	int rc;

	DBG0("[%s] __rmnet_open()\n", dev->name);

	if (!p->device_up) {
		rc = msm_sdio_dmux_open(p->ch_id, dev,
					sdio_recv_notify, sdio_write_done);
		if (rc < 0)
			return -ENODEV;
	}

	p->device_up = DEVICE_ACTIVE;
	return 0;
}
/*
 * Device attach entry point.  Only DDI_ATTACH is supported; resume
 * requests are refused.  Enforces the single-instance policy, creates
 * the "ctl" minor node and initialises the global state.
 */
static int
ippctl_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	/* DDI_PM_RESUME / DDI_RESUME and anything else fail. */
	if (cmd != DDI_ATTACH)
		return (DDI_FAILURE);

	DBG0(DBG_DEVOPS, "DDI_ATTACH\n");

	/* This is strictly a single instance driver. */
	if (ippctl_dip != NULL)
		return (DDI_FAILURE);

	/* Create minor node. */
	if (ddi_create_minor_node(dip, "ctl", S_IFCHR, 0, DDI_PSEUDO,
	    0) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * No need for per-instance structure, just store vital data in
	 * globals.
	 */
	ippctl_dip = dip;
	mutex_init(&ippctl_lock, NULL, MUTEX_DRIVER, NULL);
	ippctl_busy = B_FALSE;

	return (DDI_SUCCESS);
}
/*
 * Network device transmit hook.  Hands the skb to the SDIO DMUX path
 * and stops the queue when the channel reaches its high watermark.
 * Always returns 0 (NETDEV_TX_OK); the skb is consumed on every path.
 */
static int rmnet_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct rmnet_private *p = netdev_priv(dev);

	if (netif_queue_stopped(dev)) {
		pr_err("[%s]fatal: rmnet_xmit called when "
			"netif_queue is stopped", dev->name);
		/*
		 * Fix: returning NETDEV_TX_OK tells the core the skb
		 * was consumed, but the original returned without
		 * freeing it - an skb leak.  Treat this defensive path
		 * as a drop.
		 */
		dev_kfree_skb_any(skb);
		return 0;
	}

	_rmnet_xmit(skb, dev);

	if (msm_sdio_dmux_is_ch_full(p->ch_id)) {
		netif_stop_queue(dev);
		DBG0("%s: High WM hit, stopping queue=%p\n", __func__, skb);
	}

	return 0;
}
/*
 * Queue a service message for the HAL reader: store it as a length
 * byte followed by the payload at the head of the circular service
 * message buffer, then wake any sleeping reader.  Oversized messages
 * are logged and dropped.
 */
void messageToServiceHAL(BYTE *p, UINT length)
{
	UINT hx;

	/* One byte per slot is reserved for the length prefix. */
	if (length > SERVICE_MESSAGE_BUFF_LENGTH - 1) {
		DBG0("\nSERVICE_MESSAGE_BUFF_LENGTH");
		return;
	}

	down(&pGlobalHardwareInfo->semServiceMessage);

	hx = pGlobalHardwareInfo->serviceMessageBuffHx;
	pGlobalHardwareInfo->serviceMessageBuff[hx][0] = length;
	memcpy(&pGlobalHardwareInfo->serviceMessageBuff[hx][1], p, length);

	/* Advance (and wrap) the head index. */
	if (++pGlobalHardwareInfo->serviceMessageBuffHx >=
	    SERVICE_MESSAGE_BUFF_SIZE)
		pGlobalHardwareInfo->serviceMessageBuffHx = 0;

	up(&pGlobalHardwareInfo->semServiceMessage);

	wake_up_interruptible(&pGlobalHardwareInfo->read_wait);
	DBG("qqqq1");
}