/*
 * bluetooth_set_power - rfkill hook toggling BCM4329 BT power rails.
 * @data:  unused callback cookie
 * @state: requested rfkill user state
 *
 * UNBLOCKED: configures UART pins, raises WLAN_BT_EN (shared regulator
 * enable), waits 100 ms, then releases BT_nRST and waits 50 ms — the
 * delays follow the bcm4329 power-up sequence and must not be shortened.
 * SOFT_BLOCKED: reverse order; WLAN_BT_EN is only dropped when WLAN is
 * also down (GPIO_WLAN_nRST low), since the rail is shared with WLAN.
 *
 * Always returns 0, even for an unknown state (only logged) and even if
 * enable_irq_wake()/disable_irq_wake() fail — failures are best-effort.
 */
static int bluetooth_set_power(void *data, enum rfkill_user_states state)
{
	int ret = 0;
	int irq;
	/* BT Host Wake IRQ */
	irq = IRQ_BT_HOST_WAKE;
	switch (state) {
	case RFKILL_USER_STATE_UNBLOCKED:
		pr_debug("[BT] Device Powering ON\n");
		s3c_setup_uart_cfg_gpio(0);
		if (gpio_is_valid(GPIO_WLAN_BT_EN))
			gpio_direction_output(GPIO_WLAN_BT_EN,
					      GPIO_LEVEL_HIGH);
		if (gpio_is_valid(GPIO_BT_nRST))
			gpio_direction_output(GPIO_BT_nRST, GPIO_LEVEL_LOW);
		pr_debug("[BT] GPIO_BT_nRST = %d\n",
			 gpio_get_value(GPIO_BT_nRST));
		/* Set GPIO_BT_WLAN_REG_ON high */
		s3c_gpio_setpull(GPIO_WLAN_BT_EN, S3C_GPIO_PULL_NONE);
		gpio_set_value(GPIO_WLAN_BT_EN, GPIO_LEVEL_HIGH);
		/* keep the enable asserted across sleep states */
		s3c_gpio_slp_cfgpin(GPIO_WLAN_BT_EN, S3C_GPIO_SLP_OUT1);
		s3c_gpio_slp_setpull_updown(GPIO_WLAN_BT_EN,
					    S3C_GPIO_PULL_NONE);
		pr_debug("[BT] GPIO_WLAN_BT_EN = %d\n",
			 gpio_get_value(GPIO_WLAN_BT_EN));
		/*
		 * FIXME sleep should be enabled disabled since the device is
		 * not booting if its enabled
		 */
		/*
		 * 100msec, delay between reg_on & rst.
		 * (bcm4329 powerup sequence)
		 */
		msleep(100);
		/* Set GPIO_BT_nRST high */
		s3c_gpio_setpull(GPIO_BT_nRST, S3C_GPIO_PULL_NONE);
		gpio_set_value(GPIO_BT_nRST, GPIO_LEVEL_HIGH);
		s3c_gpio_slp_cfgpin(GPIO_BT_nRST, S3C_GPIO_SLP_OUT1);
		s3c_gpio_slp_setpull_updown(GPIO_BT_nRST, S3C_GPIO_PULL_NONE);
		pr_debug("[BT] GPIO_BT_nRST = %d\n",
			 gpio_get_value(GPIO_BT_nRST));
		/*
		 * 50msec, delay after bt rst
		 * (bcm4329 powerup sequence)
		 */
		msleep(50);
		/* arm the host-wake IRQ as a wakeup source; failure is only logged */
		ret = enable_irq_wake(irq);
		if (ret < 0)
			pr_err("[BT] set wakeup src failed\n");
		enable_irq(irq);
		break;
	case RFKILL_USER_STATE_SOFT_BLOCKED:
		pr_debug("[BT] Device Powering OFF\n");
#ifdef CONFIG_CPU_DIDLE
		bt_running = false;
#endif
		ret = disable_irq_wake(irq);
		if (ret < 0)
			pr_err("[BT] unset wakeup src failed\n");
		disable_irq(irq);
		wake_unlock(&rfkill_wake_lock);
		/* drop reset first, then (conditionally) the shared enable */
		s3c_gpio_setpull(GPIO_BT_nRST, S3C_GPIO_PULL_NONE);
		gpio_set_value(GPIO_BT_nRST, GPIO_LEVEL_LOW);
		s3c_gpio_slp_cfgpin(GPIO_BT_nRST, S3C_GPIO_SLP_OUT0);
		s3c_gpio_slp_setpull_updown(GPIO_BT_nRST, S3C_GPIO_PULL_NONE);
		pr_debug("[BT] GPIO_BT_nRST = %d\n",
			 gpio_get_value(GPIO_BT_nRST));
		/* WLAN_BT_EN is shared with WLAN: only drop it if WLAN is in reset */
		if (gpio_get_value(GPIO_WLAN_nRST) == 0) {
			s3c_gpio_setpull(GPIO_WLAN_BT_EN, S3C_GPIO_PULL_NONE);
			gpio_set_value(GPIO_WLAN_BT_EN, GPIO_LEVEL_LOW);
			s3c_gpio_slp_cfgpin(GPIO_WLAN_BT_EN,
					    S3C_GPIO_SLP_OUT0);
			s3c_gpio_slp_setpull_updown(GPIO_WLAN_BT_EN,
						    S3C_GPIO_PULL_NONE);
			pr_debug("[BT] GPIO_WLAN_BT_EN = %d\n",
				 gpio_get_value(GPIO_WLAN_BT_EN));
		}
		break;
	default:
		pr_err("[BT] Bad bluetooth rfkill state %d\n", state);
	}
	return 0;
}
/*
 * gumstix_udc_init - board hook for UDC setup.
 *
 * Intentionally a no-op on Gumstix apart from the debug trace: the
 * device controller is not brought up by this board file.
 */
static void gumstix_udc_init(void)
{
	pr_debug("Gumstix udc is disabled\n");
}
/*
 * msm8960_shutdown - PCM shutdown callback; trace-only.
 * @substream: the stream being torn down
 *
 * Logs the substream name and direction; no hardware teardown is done here.
 */
static void msm8960_shutdown(struct snd_pcm_substream *substream)
{
	const char *name = substream->name;
	int dir = substream->stream;

	pr_debug("%s(): substream = %s stream = %d\n", __func__, name, dir);
}
/*
 * mtk_dl1_awb_remove - platform driver remove hook.
 * @pdev: platform device being removed
 *
 * Unregisters the ASoC platform bound to this device. Always succeeds.
 */
static int mtk_dl1_awb_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;

	pr_debug("%s\n", __func__);
	snd_soc_unregister_platform(dev);
	return 0;
}
/*
 * Validate changes from /proc interface.
 *
 * Reads/writes the ping group range as two gids in the caller's user
 * namespace. On write, invalid gids fail with -EINVAL; an inverted
 * range is collapsed to the "nobody may ping" sentinel (1, 0).
 */
static int ipv4_ping_group_range(struct ctl_table *table, int write,
				 void __user *buffer,
				 size_t *lenp, loff_t *ppos)
{
	struct user_namespace *user_ns = current_user_ns();
	int ret;
	gid_t urange[2];
	kgid_t low, high;
	/* shadow table so min/max clamping applies to the gid pair */
	struct ctl_table tmp = {
		.data = &urange,
		.maxlen = sizeof(urange),
		.mode = table->mode,
		.extra1 = &ip_ping_group_range_min,
		.extra2 = &ip_ping_group_range_max,
	};

	inet_get_ping_group_range_table(table, &low, &high);
	urange[0] = from_kgid_munged(user_ns, low);
	urange[1] = from_kgid_munged(user_ns, high);
	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);

	if (write && ret == 0) {
		low = make_kgid(user_ns, urange[0]);
		high = make_kgid(user_ns, urange[1]);
		if (!gid_valid(low) || !gid_valid(high))
			return -EINVAL;
		if (urange[1] < urange[0] || gid_lt(high, low)) {
			/* inverted range: disable ping sockets entirely */
			low = make_kgid(&init_user_ns, 1);
			high = make_kgid(&init_user_ns, 0);
		}
		set_ping_group_range(table, low, high);
	}

	return ret;
}

/*
 * Write handler for ip_fwd_update_priority: on a successful write,
 * notify interested parties (e.g. drivers) that the policy changed.
 */
static int ipv4_fwd_update_priority(struct ctl_table *table, int write,
				    void __user *buffer,
				    size_t *lenp, loff_t *ppos)
{
	struct net *net;
	int ret;

	/* recover the owning netns from the embedded sysctl data pointer */
	net = container_of(table->data, struct net,
			   ipv4.sysctl_ip_fwd_update_priority);
	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (write && ret == 0)
		call_netevent_notifiers(NETEVENT_IPV4_FWD_UPDATE_PRIORITY_UPDATE,
					net);

	return ret;
}

/*
 * Read/set the per-netns default TCP congestion control by name.
 * Writes validate the name via tcp_set_default_congestion_control().
 */
static int proc_tcp_congestion_control(struct ctl_table *ctl, int write,
				       void __user *buffer,
				       size_t *lenp, loff_t *ppos)
{
	struct net *net = container_of(ctl->data, struct net,
				       ipv4.tcp_congestion_control);
	char val[TCP_CA_NAME_MAX];
	/* stack-backed shadow table so proc_dostring works on a bounded buf */
	struct ctl_table tbl = {
		.data = val,
		.maxlen = TCP_CA_NAME_MAX,
	};
	int ret;

	tcp_get_default_congestion_control(net, val);

	ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
	if (write && ret == 0)
		ret = tcp_set_default_congestion_control(net, val);
	return ret;
}

/* Read-only: list the congestion control algorithms currently loaded. */
static int proc_tcp_available_congestion_control(struct ctl_table *ctl,
						 int write,
						 void __user *buffer,
						 size_t *lenp, loff_t *ppos)
{
	struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX, };
	int ret;

	tbl.data = kmalloc(tbl.maxlen, GFP_USER);
	if (!tbl.data)
		return -ENOMEM;
	tcp_get_available_congestion_control(tbl.data, TCP_CA_BUF_MAX);
	ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
	kfree(tbl.data);
	return ret;
}

/*
 * Read or set the list of congestion control algorithms that
 * non-privileged users may select.
 */
static int proc_allowed_congestion_control(struct ctl_table *ctl,
					   int write,
					   void __user *buffer, size_t *lenp,
					   loff_t *ppos)
{
	struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX };
	int ret;

	tbl.data = kmalloc(tbl.maxlen, GFP_USER);
	if (!tbl.data)
		return -ENOMEM;

	tcp_get_allowed_congestion_control(tbl.data, tbl.maxlen);
	ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
	if (write && ret == 0)
		ret = tcp_set_allowed_congestion_control(tbl.data);
	kfree(tbl.data);
	return ret;
}

/*
 * Read/set the TCP Fast Open key as "xxxxxxxx-xxxxxxxx-xxxxxxxx-xxxxxxxx".
 * The stored key is little-endian; the proc representation is host-order
 * hex words, hence the le32 conversions in both directions.
 */
static int proc_tcp_fastopen_key(struct ctl_table *table, int write,
				 void __user *buffer, size_t *lenp,
				 loff_t *ppos)
{
	struct net *net = container_of(table->data, struct net,
				       ipv4.sysctl_tcp_fastopen);
	struct ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
	struct tcp_fastopen_context *ctxt;
	u32 user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */
	__le32 key[4];
	int ret, i;

	tbl.data = kmalloc(tbl.maxlen, GFP_KERNEL);
	if (!tbl.data)
		return -ENOMEM;

	rcu_read_lock();
	ctxt = rcu_dereference(net->ipv4.tcp_fastopen_ctx);
	if (ctxt)
		memcpy(key, ctxt->key, TCP_FASTOPEN_KEY_LENGTH);
	else
		memset(key, 0, sizeof(key));
	rcu_read_unlock();

	for (i = 0; i < ARRAY_SIZE(key); i++)
		user_key[i] = le32_to_cpu(key[i]);

	snprintf(tbl.data, tbl.maxlen, "%08x-%08x-%08x-%08x",
		 user_key[0], user_key[1], user_key[2], user_key[3]);
	ret = proc_dostring(&tbl, write, buffer, lenp, ppos);

	if (write && ret == 0) {
		if (sscanf(tbl.data, "%x-%x-%x-%x", user_key, user_key + 1,
			   user_key + 2, user_key + 3) != 4) {
			ret = -EINVAL;
			goto bad_key;
		}

		for (i = 0; i < ARRAY_SIZE(user_key); i++)
			key[i] = cpu_to_le32(user_key[i]);

		tcp_fastopen_reset_cipher(net, NULL, key,
					  TCP_FASTOPEN_KEY_LENGTH);
	}

bad_key:
	pr_debug("proc FO key set 0x%x-%x-%x-%x <- 0x%s: %u\n",
		 user_key[0], user_key[1], user_key[2], user_key[3],
		 (char *)tbl.data, ret);
	kfree(tbl.data);
	return ret;
}

/*
 * Flip the early-demux handler pointer for @protocol on or off,
 * for both IPv4 and (when built) IPv6.
 */
static void proc_configure_early_demux(int enabled, int protocol)
{
	struct net_protocol *ipprot;
#if IS_ENABLED(CONFIG_IPV6)
	struct inet6_protocol *ip6prot;
#endif

	rcu_read_lock();

	ipprot = rcu_dereference(inet_protos[protocol]);
	if (ipprot)
		ipprot->early_demux = enabled ? ipprot->early_demux_handler :
						NULL;

#if IS_ENABLED(CONFIG_IPV6)
	ip6prot = rcu_dereference(inet6_protos[protocol]);
	if (ip6prot)
		ip6prot->early_demux = enabled ? ip6prot->early_demux_handler :
						 NULL;
#endif
	rcu_read_unlock();
}

/* Write handler for tcp_early_demux: apply the new setting immediately. */
static int proc_tcp_early_demux(struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int ret = 0;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (write && !ret) {
		int enabled = init_net.ipv4.sysctl_tcp_early_demux;

		proc_configure_early_demux(enabled, IPPROTO_TCP);
	}

	return ret;
}

/* Write handler for udp_early_demux: apply the new setting immediately. */
static int proc_udp_early_demux(struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int ret = 0;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (write && !ret) {
		int enabled = init_net.ipv4.sysctl_udp_early_demux;

		proc_configure_early_demux(enabled, IPPROTO_UDP);
	}

	return ret;
}
/**
 * sst_alloc_stream_ctp - Send msg for a new stream ID
 *
 * @params: stream params (actually a struct snd_sst_params, passed as char *)
 * @block: driver block to be filled with the drv/msg ids for the reply
 *
 * Builds an IPC_IA_ALLOC_STREAM message for the firmware from the
 * caller-supplied stream/alloc parameters, initializes the local
 * stream context, queues the message and kicks the IPC dispatcher.
 *
 * Returns the (positive) stream id on success, -EBUSY if the caller
 * supplied an invalid stream id, or -ENOMEM if the message cannot be
 * allocated. Note the call is asynchronous: the firmware reply is
 * matched later via @block.
 */
int sst_alloc_stream_ctp(char *params, struct sst_block *block)
{
	struct ipc_post *msg = NULL;
	struct snd_sst_alloc_params alloc_param;
	unsigned int pcm_slot = 0x03, num_ch;	/* slot mask: both slots */
	int str_id;
	struct snd_sst_params *str_params;
	struct snd_sst_stream_params *sparams;
	struct snd_sst_alloc_params_ext *aparams;
	struct stream_info *str_info;
	unsigned int stream_ops, device;
	unsigned long irq_flags;
	u8 codec;

	pr_debug("In %s\n", __func__);
	BUG_ON(!params);

	str_params = (struct snd_sst_params *)params;
	stream_ops = str_params->ops;
	codec = str_params->codec;
	device = str_params->device_type;
	sparams = &str_params->sparams;
	aparams = &str_params->aparams;
	num_ch = sst_get_num_channel(str_params);

	pr_debug("period_size = %d\n", aparams->frag_size);
	pr_debug("ring_buf_addr = 0x%x\n", aparams->ring_buf_info[0].addr);
	pr_debug("ring_buf_size = %d\n", aparams->ring_buf_info[0].size);
	pr_debug("In alloc device_type=%d\n", str_params->device_type);
	pr_debug("In alloc sg_count =%d\n", aparams->sg_count);

	str_id = str_params->stream_id;
	if (str_id <= 0)
		return -EBUSY;

	/*allocate device type context*/
	sst_init_stream(&sst_drv_ctx->streams[str_id], codec,
			str_id, stream_ops, pcm_slot);
	/* send msg to FW to allocate a stream */
	if (sst_create_ipc_msg(&msg, true))
		return -ENOMEM;

	alloc_param.str_type.codec_type = codec;
	alloc_param.str_type.str_type = str_params->stream_type;
	alloc_param.str_type.operation = stream_ops;
	alloc_param.str_type.protected_str = 0;	/* non drm */
	alloc_param.str_type.time_slots = pcm_slot;
	alloc_param.str_type.reserved = 0;
	alloc_param.str_type.result = 0;
	memcpy(&alloc_param.stream_params, sparams,
	       sizeof(struct snd_sst_stream_params));
	memcpy(&alloc_param.alloc_params, aparams,
	       sizeof(struct snd_sst_alloc_params_ext));
	/* record ids so the IPC reply can be matched back to this request */
	block->drv_id = str_id;
	block->msg_id = IPC_IA_ALLOC_STREAM;
	sst_fill_header(&msg->header, IPC_IA_ALLOC_STREAM, 1, str_id);
	msg->header.part.data = sizeof(alloc_param) + sizeof(u32);
	/* mailbox layout: 32-bit header word followed by the alloc params */
	memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
	memcpy(msg->mailbox_data + sizeof(u32), &alloc_param,
	       sizeof(alloc_param));
	str_info = &sst_drv_ctx->streams[str_id];
	str_info->num_ch = num_ch;
	spin_lock_irqsave(&sst_drv_ctx->ipc_spin_lock, irq_flags);
	list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
	spin_unlock_irqrestore(&sst_drv_ctx->ipc_spin_lock, irq_flags);
	sst_drv_ctx->ops->post_message(&sst_drv_ctx->ipc_post_msg_wq);
	return str_id;
}
/**
 * sst_free_stream - Frees a stream
 * @str_id: stream ID
 *
 * Marks the stream STREAM_UN_INIT, sends a free-stream IPC to the
 * firmware (mrfld 64-bit path or legacy 32-bit path depending on
 * use_32bit_ops), waits for completion, then cleans the local stream
 * context.
 *
 * Returns 0 (or the sst_wait_timeout() result) on success, -ENODEV if
 * the DSP is uninitialized, -EINVAL for a bad stream id, -EBADRQC if
 * the stream was already uninitialized.
 */
int sst_free_stream(int str_id)
{
	int retval = 0;
	unsigned int pvt_id;
	struct ipc_post *msg = NULL;
	struct stream_info *str_info;
	struct intel_sst_ops *ops;
	unsigned long irq_flags;
	struct ipc_dsp_hdr dsp_hdr;
	struct sst_block *block;

	pr_debug("SST DBG:sst_free_stream for %d\n", str_id);

	/* bail out early if the DSP was never (or is no longer) initialized */
	mutex_lock(&sst_drv_ctx->sst_lock);
	if (sst_drv_ctx->sst_state == SST_UN_INIT) {
		mutex_unlock(&sst_drv_ctx->sst_lock);
		return -ENODEV;
	}
	mutex_unlock(&sst_drv_ctx->sst_lock);
	str_info = get_stream_info(str_id);
	if (!str_info)
		return -EINVAL;
	ops = sst_drv_ctx->ops;

	mutex_lock(&str_info->lock);
	if (str_info->status != STREAM_UN_INIT) {
		/* claim the stream under the lock, then drop it before IPC */
		str_info->prev = str_info->status;
		str_info->status = STREAM_UN_INIT;
		mutex_unlock(&str_info->lock);

		if (!sst_drv_ctx->use_32bit_ops) {
			/* Merrifield-style message: DSP header in mailbox */
			pvt_id = sst_assign_pvt_id(sst_drv_ctx);
			retval = sst_create_block_and_ipc_msg(&msg, true,
					sst_drv_ctx, &block, IPC_CMD, pvt_id);
			if (retval)
				return retval;
			sst_fill_header_mrfld(&msg->mrfld_header, IPC_CMD,
					      str_info->task_id, 1, pvt_id);
			msg->mrfld_header.p.header_low_payload =
					sizeof(dsp_hdr);
			sst_fill_header_dsp(&dsp_hdr, IPC_IA_FREE_STREAM_MRFLD,
					    str_info->pipe_id, 0);
			memcpy(msg->mailbox_data, &dsp_hdr, sizeof(dsp_hdr));
		} else {
			/* legacy 32-bit message addressed by stream id */
			retval = sst_create_block_and_ipc_msg(&msg, false,
					sst_drv_ctx, &block,
					IPC_IA_FREE_STREAM, str_id);
			if (retval)
				return retval;
			sst_fill_header(&msg->header, IPC_IA_FREE_STREAM,
					0, str_id);
		}
		spin_lock_irqsave(&sst_drv_ctx->ipc_spin_lock, irq_flags);
		list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
		spin_unlock_irqrestore(&sst_drv_ctx->ipc_spin_lock, irq_flags);
		if (!sst_drv_ctx->use_32bit_ops) {
			/*FIXME: do we need to wake up drain stream here,
			 * how to get the pvt_id and msg_id */
		} else {
			/* release any waiter blocked on a pending drain */
			sst_wake_up_block(sst_drv_ctx, 0, str_id,
					  IPC_IA_DRAIN_STREAM, NULL, 0);
		}
		ops->post_message(&sst_drv_ctx->ipc_post_msg_wq);
		retval = sst_wait_timeout(sst_drv_ctx, block);
		pr_debug("sst: wait for free returned %d\n", retval);
		mutex_lock(&sst_drv_ctx->stream_lock);
		sst_clean_stream(str_info);
		mutex_unlock(&sst_drv_ctx->stream_lock);
		pr_debug("SST DBG:Stream freed\n");
		sst_free_block(sst_drv_ctx, block);
	} else {
		mutex_unlock(&str_info->lock);
		retval = -EBADRQC;
		pr_debug("SST DBG:BADQRC for stream\n");
	}

	return retval;
}
/**
 * pcd_init - initializes the PCD (peripheral controller driver) portion
 * of the DWC OTG driver.
 * @_dev: the platform device carrying the dwc_otg device data
 *
 * Sets up DMA masks, the core PCD, the gadget wrapper and endpoints,
 * the core interrupt, the VBUS/cable-detect machinery (heavily config
 * dependent) and finally registers the gadget UDC. If no cable is
 * present at probe time, the UDC is shut down again to save power.
 *
 * Returns 0 on success or a negative errno.
 *
 * NOTE(review): the error paths after wake_lock_init()/request_irq()
 * do not unwind earlier allocations (wake lock, pcd, irq) — presumably
 * acceptable because probe failure here is fatal for the platform;
 * confirm before reusing this pattern.
 */
int pcd_init(struct platform_device *_dev)
{
	dwc_otg_device_t *otg_dev = platform_get_drvdata(_dev);
	struct sprd_usb_platform_data *pdata = _dev->dev.platform_data;
	int retval = 0;
	int irq;
	int plug_irq;

	DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, _dev);

	device_dwc_otg = &(_dev->dev);
	/* only advertise DMA capability when the core has DMA enabled */
	if (dwc_otg_is_dma_enable(otg_dev->core_if)) {
		_dev->dev.dma_mask = &dwc_otg_pcd_dmamask;
		_dev->dev.coherent_dma_mask = dwc_otg_pcd_dmamask;
	} else {
		_dev->dev.dma_mask = (void *)0;
		_dev->dev.coherent_dma_mask = 0;
	}

	wake_lock_init(&usb_wake_lock, WAKE_LOCK_SUSPEND, "usb_work");
	// wake_lock(&usb_wake_lock);
	otg_dev->pcd = dwc_otg_pcd_init(otg_dev->core_if);

	if (!otg_dev->pcd) {
		DWC_ERROR("dwc_otg_pcd_init failed\n");
		return -ENOMEM;
	}

	gadget_wrapper = alloc_wrapper(_dev);

	/*
	 * Initialize EP structures
	 */
	gadget_add_eps(gadget_wrapper);
	/*
	 * Setup interupt handler
	 */
	irq = platform_get_irq(_dev, 0);
	DWC_DEBUGPL(DBG_ANY, "registering handler for irq%d\n", irq);
	retval = request_irq(irq, dwc_otg_pcd_irq, IRQF_SHARED,
			     gadget_wrapper->gadget.name, otg_dev->pcd);
	//SA_SHIRQ, gadget_wrapper->gadget.name,
	if (retval != 0) {
		DWC_ERROR("request of irq%d failed\n", irq);
		free_wrapper(gadget_wrapper);
		return -EBUSY;
	}
	/*
	 * initialize a timer for checking cable type.
	 */
#ifdef USB_SETUP_TIMEOUT_RESTART
	{
		setup_timer(&setup_transfer_timer, setup_transfer_timer_fun,
			    (unsigned long)gadget_wrapper);
		setup_transfer_timer_start = 0;
	}
#endif
	INIT_DELAYED_WORK(&gadget_wrapper->cable2pc, cable2pc_detect_works);
	gadget_wrapper->cable2pc_wq =
		create_singlethread_workqueue("usb 2 pc wq");
#ifdef CONFIG_USB_EXTERNAL_DETECT
	/* external (MUIC/OTG) detection: delegate cable events */
	register_otg_func(NULL, dwc_peripheral_start, otg_dev);
#else
	/*
	 * setup usb cable detect interupt
	 */
#ifndef CONFIG_MFD_SM5504
	{
		plug_irq = usb_alloc_vbus_irq(pdata->gpio_chgdet);
		if (plug_irq < 0) {
			pr_warning("cannot alloc vbus irq\n");
			return -EBUSY;
		}
		usb_set_vbus_irq_type(plug_irq, VBUS_PLUG_IN);
#ifdef CONFIG_SC_FPGA
		/* FPGA bring-up has no real VBUS sensing; assume powered */
		gadget_wrapper->vbus = 1;
#else
		gadget_wrapper->vbus = usb_get_vbus_state();
#endif
		pr_info("now usb vbus is :%d\n", gadget_wrapper->vbus);
		retval = request_irq(plug_irq, usb_detect_handler,
				     IRQF_SHARED | IRQF_NO_SUSPEND,
				     "usb detect", otg_dev->pcd);
#ifndef CONFIG_MUIC_CABLE_DETECT
		disable_irq(plug_irq);
#endif
	}
	//gadget_wrapper->vbus = 1;//used when debug in FPGA, which doesn't have vbus operation
#endif
	spin_lock_init(&gadget_wrapper->lock);
#ifdef CONFIG_SC_FPGA
	gadget_wrapper->vbus = 1;
#endif
	INIT_WORK(&gadget_wrapper->detect_work, usb_detect_works);
	gadget_wrapper->detect_wq =
		create_singlethread_workqueue("usb detect wq");
#endif
	/*
	 * register a switch device for sending pnp message,
	 * for the user app need be notified immediately
	 * when plug in & plug out happen;
	 */
	gadget_wrapper->sdev.name = "charger_cable";
	retval = switch_dev_register(&gadget_wrapper->sdev);
	if (retval) {
		pr_warning("register switch dev error:%s\n", __func__);
	}

	dwc_otg_pcd_start(gadget_wrapper->pcd, &fops);
	/*
	 * dwc driver is ok, check if the cable is insert, if no,
	 * shutdown udc for saving power.
	 */
	if (!gadget_wrapper->vbus) {
		pr_debug("vbus is not power now \n");
		gadget_wrapper->udc_startup = 1;
		__udc_shutdown();
	}
	gadget_wrapper->udc_startup = gadget_wrapper->vbus;
	gadget_wrapper->enabled = 0;

	retval = usb_add_gadget_udc(&_dev->dev, &gadget_wrapper->gadget);
	if (!retval)
		return retval;

	return retval;
}
/*
 * boot_secondary - bring up one secondary CPU.
 * @cpu:  Linux logical CPU id to start
 * @idle: idle task for the new CPU (unused here)
 *
 * First-time ("cold") boot sets the secondary start address via SCM and
 * releases the core from reset; after that only the pen-release
 * handshake is needed. The secondary clears pen_release to signal it is
 * running; we poll for that for up to 1 s.
 *
 * Returns 0 if the CPU came up (pen_release was reset), -ENOSYS otherwise.
 */
int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	int ret;
	int flag = 0;
	unsigned long timeout;

	pr_debug("Starting secondary CPU %d\n", cpu);

	/* Set preset_lpj to avoid subsequent lpj recalculations */
	preset_lpj = loops_per_jiffy;

	if (cpu > 0 && cpu < ARRAY_SIZE(cold_boot_flags))
		flag = cold_boot_flags[cpu];
	else
		__WARN();

	if (per_cpu(cold_boot_done, cpu) == false) {
		ret = scm_set_boot_addr((void *)
					virt_to_phys(msm_secondary_startup),
					flag);
		if (ret == 0)
			release_secondary(cpu);
		else
			printk(KERN_DEBUG "Failed to set secondary core boot "
					  "address\n");
		/* marked done even on failure so we never re-run SCM setup */
		per_cpu(cold_boot_done, cpu) = true;
	}

	/*
	 * set synchronisation state between this boot processor
	 * and the secondary one
	 */
	spin_lock(&boot_lock);

	/*
	 * The secondary processor is waiting to be released from
	 * the holding pen - release it, then wait for it to flag
	 * that it has been released by resetting pen_release.
	 *
	 * Note that "pen_release" is the hardware CPU ID, whereas
	 * "cpu" is Linux's internal ID.
	 */
	write_pen_release(cpu_logical_map(cpu));

	/*
	 * Send the secondary CPU a soft interrupt, thereby causing
	 * the boot monitor to read the system wide flags register,
	 * and branch to the address found there.
	 */
	gic_raise_softirq(cpumask_of(cpu), 1);

	timeout = jiffies + (1 * HZ);
	while (time_before(jiffies, timeout)) {
		smp_rmb();	/* observe the secondary's write to pen_release */
		if (pen_release == -1)
			break;

		udelay(10);
	}

	/*
	 * now the secondary core is starting up let it run its
	 * calibrations, then wait for it to finish
	 */
	spin_unlock(&boot_lock);

	return pen_release != -1 ? -ENOSYS : 0;
}
/**
 * kim_int_recv - receive function called during firmware download
 * firmware download responses on different UART drivers
 * have been observed to come in bursts of different
 * tty_receive and hence the logic
 *
 * Stateful parser: bytes may arrive split across many calls, so the
 * partially assembled packet lives in kim_gdata (rx_skb/rx_state/
 * rx_count) between invocations. Only HCI event packets (0x04) are
 * accepted; any other leading byte is skipped one byte at a time.
 */
void kim_int_recv(struct kim_data_s *kim_gdata,
		  const unsigned char *data, long count)
{
	const unsigned char *ptr;
	int len = 0, type = 0;
	unsigned char *plen;

	pr_debug("%s", __func__);
	/* Decode received bytes here */
	ptr = data;
	if (unlikely(ptr == NULL)) {
		pr_err(" received null from TTY ");
		return;
	}

	while (count) {
		if (kim_gdata->rx_count) {
			/* still collecting bytes for the current packet */
			len = min_t(unsigned int, kim_gdata->rx_count, count);
			memcpy(skb_put(kim_gdata->rx_skb, len), ptr, len);
			kim_gdata->rx_count -= len;
			count -= len;
			ptr += len;

			if (kim_gdata->rx_count)
				continue;

			/* Check ST RX state machine , where are we? */
			switch (kim_gdata->rx_state) {
				/* Waiting for complete packet ? */
			case ST_W4_DATA:
				pr_debug("Complete pkt received");
				validate_firmware_response(kim_gdata);
				kim_gdata->rx_state = ST_W4_PACKET_TYPE;
				kim_gdata->rx_skb = NULL;
				continue;
				/* Waiting for Bluetooth event header ? */
			case ST_W4_HEADER:
				/* byte 1 of the event header is the payload length */
				plen =
				(unsigned char *)&kim_gdata->rx_skb->data[1];
				pr_debug("event hdr: plen 0x%02x\n", *plen);
				kim_check_data_len(kim_gdata, *plen);
				continue;
			}	/* end of switch */
		}		/* end of if rx_state */
		switch (*ptr) {
			/* Bluetooth event packet? */
		case 0x04:
			kim_gdata->rx_state = ST_W4_HEADER;
			kim_gdata->rx_count = 2;	/* event header is 2 bytes */
			type = *ptr;
			break;
		default:
			pr_info("unknown packet\n");
			ptr++;
			count--;
			continue;
		}
		ptr++;
		count--;
		kim_gdata->rx_skb = alloc_skb(1024+8, GFP_ATOMIC);
		if (!kim_gdata->rx_skb) {
			pr_err("can't allocate mem for new packet");
			kim_gdata->rx_state = ST_W4_PACKET_TYPE;
			kim_gdata->rx_count = 0;
			return;
		}
		skb_reserve(kim_gdata->rx_skb, 8);
		kim_gdata->rx_skb->cb[0] = 4;
		kim_gdata->rx_skb->cb[1] = 0;
	}
}
/*
 * brcmf_c_show_host_event - pretty-print a firmware host event for debug.
 * @event:      event message header (fields are big-endian on the wire)
 * @event_data: trailing event payload
 *
 * Maps the numeric event id to a name via the static table, then logs a
 * per-event-type summary. BRCMF_E_TRACE payloads are dumped line by line
 * (NOTE: the payload is NUL-terminated in place, so event_data is
 * modified). Remaining payload bytes are hex-dumped at the end.
 */
static void brcmf_c_show_host_event(struct brcmf_event_msg *event,
				    void *event_data)
{
	uint i, status, reason;
	bool group = false, flush_txq = false, link = false;
	char *auth_str, *event_name;
	unsigned char *buf;
	char err_msg[256], eabuf[ETHER_ADDR_STR_LEN];
	static struct {
		uint event;
		char *event_name;
	} event_names[] = {
		{ BRCMF_E_SET_SSID, "SET_SSID"},
		{ BRCMF_E_JOIN, "JOIN"},
		{ BRCMF_E_START, "START"},
		{ BRCMF_E_AUTH, "AUTH"},
		{ BRCMF_E_AUTH_IND, "AUTH_IND"},
		{ BRCMF_E_DEAUTH, "DEAUTH"},
		{ BRCMF_E_DEAUTH_IND, "DEAUTH_IND"},
		{ BRCMF_E_ASSOC, "ASSOC"},
		{ BRCMF_E_ASSOC_IND, "ASSOC_IND"},
		{ BRCMF_E_REASSOC, "REASSOC"},
		{ BRCMF_E_REASSOC_IND, "REASSOC_IND"},
		{ BRCMF_E_DISASSOC, "DISASSOC"},
		{ BRCMF_E_DISASSOC_IND, "DISASSOC_IND"},
		{ BRCMF_E_QUIET_START, "START_QUIET"},
		{ BRCMF_E_QUIET_END, "END_QUIET"},
		{ BRCMF_E_BEACON_RX, "BEACON_RX"},
		{ BRCMF_E_LINK, "LINK"},
		{ BRCMF_E_MIC_ERROR, "MIC_ERROR"},
		{ BRCMF_E_NDIS_LINK, "NDIS_LINK"},
		{ BRCMF_E_ROAM, "ROAM"},
		{ BRCMF_E_TXFAIL, "TXFAIL"},
		{ BRCMF_E_PMKID_CACHE, "PMKID_CACHE"},
		{ BRCMF_E_RETROGRADE_TSF, "RETROGRADE_TSF"},
		{ BRCMF_E_PRUNE, "PRUNE"},
		{ BRCMF_E_AUTOAUTH, "AUTOAUTH"},
		{ BRCMF_E_EAPOL_MSG, "EAPOL_MSG"},
		{ BRCMF_E_SCAN_COMPLETE, "SCAN_COMPLETE"},
		{ BRCMF_E_ADDTS_IND, "ADDTS_IND"},
		{ BRCMF_E_DELTS_IND, "DELTS_IND"},
		{ BRCMF_E_BCNSENT_IND, "BCNSENT_IND"},
		{ BRCMF_E_BCNRX_MSG, "BCNRX_MSG"},
		{ BRCMF_E_BCNLOST_MSG, "BCNLOST_MSG"},
		{ BRCMF_E_ROAM_PREP, "ROAM_PREP"},
		{ BRCMF_E_PFN_NET_FOUND, "PNO_NET_FOUND"},
		{ BRCMF_E_PFN_NET_LOST, "PNO_NET_LOST"},
		{ BRCMF_E_RESET_COMPLETE, "RESET_COMPLETE"},
		{ BRCMF_E_JOIN_START, "JOIN_START"},
		{ BRCMF_E_ROAM_START, "ROAM_START"},
		{ BRCMF_E_ASSOC_START, "ASSOC_START"},
		{ BRCMF_E_IBSS_ASSOC, "IBSS_ASSOC"},
		{ BRCMF_E_RADIO, "RADIO"},
		{ BRCMF_E_PSM_WATCHDOG, "PSM_WATCHDOG"},
		{ BRCMF_E_PROBREQ_MSG, "PROBREQ_MSG"},
		{ BRCMF_E_SCAN_CONFIRM_IND, "SCAN_CONFIRM_IND"},
		{ BRCMF_E_PSK_SUP, "PSK_SUP"},
		{ BRCMF_E_COUNTRY_CODE_CHANGED, "COUNTRY_CODE_CHANGED"},
		{ BRCMF_E_EXCEEDED_MEDIUM_TIME, "EXCEEDED_MEDIUM_TIME"},
		{ BRCMF_E_ICV_ERROR, "ICV_ERROR"},
		{ BRCMF_E_UNICAST_DECODE_ERROR, "UNICAST_DECODE_ERROR"},
		{ BRCMF_E_MULTICAST_DECODE_ERROR, "MULTICAST_DECODE_ERROR"},
		{ BRCMF_E_TRACE, "TRACE"},
		{ BRCMF_E_ACTION_FRAME, "ACTION FRAME"},
		{ BRCMF_E_ACTION_FRAME_COMPLETE, "ACTION FRAME TX COMPLETE"},
		{ BRCMF_E_IF, "IF"},
		{ BRCMF_E_RSSI, "RSSI"},
		{ BRCMF_E_PFN_SCAN_COMPLETE, "SCAN_COMPLETE"}
	};
	uint event_type, flags, auth_type, datalen;
	static u32 seqnum_prev;	/* last TRACE sequence number seen */
	struct msgtrace_hdr hdr;
	u32 nblost;
	char *s, *p;

	/* wire format is big-endian; convert everything up front */
	event_type = be32_to_cpu(event->event_type);
	flags = be16_to_cpu(event->flags);
	status = be32_to_cpu(event->status);
	reason = be32_to_cpu(event->reason);
	auth_type = be32_to_cpu(event->auth_type);
	datalen = be32_to_cpu(event->datalen);
	sprintf(eabuf, "%pM", event->addr);

	event_name = "UNKNOWN";
	for (i = 0; i < ARRAY_SIZE(event_names); i++) {
		if (event_names[i].event == event_type)
			event_name = event_names[i].event_name;
	}

	brcmf_dbg(EVENT, "EVENT: %s, event ID = %d\n", event_name, event_type);
	brcmf_dbg(EVENT, "flags 0x%04x, status %d, reason %d, auth_type %d MAC %s\n",
		  flags, status, reason, auth_type, eabuf);

	if (flags & BRCMF_EVENT_MSG_LINK)
		link = true;
	if (flags & BRCMF_EVENT_MSG_GROUP)
		group = true;
	if (flags & BRCMF_EVENT_MSG_FLUSHTXQ)
		flush_txq = true;

	switch (event_type) {
	case BRCMF_E_START:
	case BRCMF_E_DEAUTH:
	case BRCMF_E_DISASSOC:
		brcmf_dbg(EVENT, "MACEVENT: %s, MAC %s\n", event_name, eabuf);
		break;

	case BRCMF_E_ASSOC_IND:
	case BRCMF_E_REASSOC_IND:
		brcmf_dbg(EVENT, "MACEVENT: %s, MAC %s\n", event_name, eabuf);
		break;

	case BRCMF_E_ASSOC:
	case BRCMF_E_REASSOC:
		if (status == BRCMF_E_STATUS_SUCCESS)
			brcmf_dbg(EVENT, "MACEVENT: %s, MAC %s, SUCCESS\n",
				  event_name, eabuf);
		else if (status == BRCMF_E_STATUS_TIMEOUT)
			brcmf_dbg(EVENT, "MACEVENT: %s, MAC %s, TIMEOUT\n",
				  event_name, eabuf);
		else if (status == BRCMF_E_STATUS_FAIL)
			brcmf_dbg(EVENT, "MACEVENT: %s, MAC %s, FAILURE, reason %d\n",
				  event_name, eabuf, (int)reason);
		else
			brcmf_dbg(EVENT, "MACEVENT: %s, MAC %s, unexpected status %d\n",
				  event_name, eabuf, (int)status);
		break;

	case BRCMF_E_DEAUTH_IND:
	case BRCMF_E_DISASSOC_IND:
		brcmf_dbg(EVENT, "MACEVENT: %s, MAC %s, reason %d\n",
			  event_name, eabuf, (int)reason);
		break;

	case BRCMF_E_AUTH:
	case BRCMF_E_AUTH_IND:
		if (auth_type == WLAN_AUTH_OPEN)
			auth_str = "Open System";
		else if (auth_type == WLAN_AUTH_SHARED_KEY)
			auth_str = "Shared Key";
		else {
			sprintf(err_msg, "AUTH unknown: %d", (int)auth_type);
			auth_str = err_msg;
		}
		if (event_type == BRCMF_E_AUTH_IND)
			brcmf_dbg(EVENT, "MACEVENT: %s, MAC %s, %s\n",
				  event_name, eabuf, auth_str);
		else if (status == BRCMF_E_STATUS_SUCCESS)
			brcmf_dbg(EVENT, "MACEVENT: %s, MAC %s, %s, SUCCESS\n",
				  event_name, eabuf, auth_str);
		else if (status == BRCMF_E_STATUS_TIMEOUT)
			brcmf_dbg(EVENT, "MACEVENT: %s, MAC %s, %s, TIMEOUT\n",
				  event_name, eabuf, auth_str);
		else if (status == BRCMF_E_STATUS_FAIL) {
			brcmf_dbg(EVENT, "MACEVENT: %s, MAC %s, %s, FAILURE, reason %d\n",
				  event_name, eabuf, auth_str, (int)reason);
		}
		break;

	case BRCMF_E_JOIN:
	case BRCMF_E_ROAM:
	case BRCMF_E_SET_SSID:
		if (status == BRCMF_E_STATUS_SUCCESS)
			brcmf_dbg(EVENT, "MACEVENT: %s, MAC %s\n",
				  event_name, eabuf);
		else if (status == BRCMF_E_STATUS_FAIL)
			brcmf_dbg(EVENT, "MACEVENT: %s, failed\n", event_name);
		else if (status == BRCMF_E_STATUS_NO_NETWORKS)
			brcmf_dbg(EVENT, "MACEVENT: %s, no networks found\n",
				  event_name);
		else
			brcmf_dbg(EVENT, "MACEVENT: %s, unexpected status %d\n",
				  event_name, (int)status);
		break;

	case BRCMF_E_BEACON_RX:
		if (status == BRCMF_E_STATUS_SUCCESS)
			brcmf_dbg(EVENT, "MACEVENT: %s, SUCCESS\n", event_name);
		else if (status == BRCMF_E_STATUS_FAIL)
			brcmf_dbg(EVENT, "MACEVENT: %s, FAIL\n", event_name);
		else
			brcmf_dbg(EVENT, "MACEVENT: %s, status %d\n",
				  event_name, status);
		break;

	case BRCMF_E_LINK:
		brcmf_dbg(EVENT, "MACEVENT: %s %s\n",
			  event_name, link ? "UP" : "DOWN");
		break;

	case BRCMF_E_MIC_ERROR:
		brcmf_dbg(EVENT, "MACEVENT: %s, MAC %s, Group %d, Flush %d\n",
			  event_name, eabuf, group, flush_txq);
		break;

	case BRCMF_E_ICV_ERROR:
	case BRCMF_E_UNICAST_DECODE_ERROR:
	case BRCMF_E_MULTICAST_DECODE_ERROR:
		brcmf_dbg(EVENT, "MACEVENT: %s, MAC %s\n", event_name, eabuf);
		break;

	case BRCMF_E_TXFAIL:
		brcmf_dbg(EVENT, "MACEVENT: %s, RA %s\n", event_name, eabuf);
		break;

	case BRCMF_E_SCAN_COMPLETE:
	case BRCMF_E_PMKID_CACHE:
		brcmf_dbg(EVENT, "MACEVENT: %s\n", event_name);
		break;

	case BRCMF_E_PFN_NET_FOUND:
	case BRCMF_E_PFN_NET_LOST:
	case BRCMF_E_PFN_SCAN_COMPLETE:
		brcmf_dbg(EVENT, "PNOEVENT: %s\n", event_name);
		break;

	case BRCMF_E_PSK_SUP:
	case BRCMF_E_PRUNE:
		brcmf_dbg(EVENT, "MACEVENT: %s, status %d, reason %d\n",
			  event_name, (int)status, (int)reason);
		break;

	case BRCMF_E_TRACE:
		buf = (unsigned char *) event_data;
		memcpy(&hdr, buf, sizeof(struct msgtrace_hdr));

		if (hdr.version != MSGTRACE_VERSION) {
			brcmf_dbg(ERROR,
				  "MACEVENT: %s [unsupported version --> brcmf"
				  " version:%d dongle version:%d]\n",
				  event_name, MSGTRACE_VERSION, hdr.version);
			/* skip the hex dump of an unparseable payload */
			datalen = 0;
			break;
		}

		/* NUL-terminate the trace text in place for the line splitter */
		*(buf + sizeof(struct msgtrace_hdr) + be16_to_cpu(hdr.len)) =
		    '\0';

		if (be32_to_cpu(hdr.discarded_bytes) ||
		    be32_to_cpu(hdr.discarded_printf))
			brcmf_dbg(ERROR,
				  "WLC_E_TRACE: [Discarded traces in dongle -->"
				  " discarded_bytes %d discarded_printf %d]\n",
				  be32_to_cpu(hdr.discarded_bytes),
				  be32_to_cpu(hdr.discarded_printf));

		nblost = be32_to_cpu(hdr.seqnum) - seqnum_prev - 1;
		if (nblost > 0)
			brcmf_dbg(ERROR, "WLC_E_TRACE: [Event lost --> seqnum "
				  " %d nblost %d\n",
				  be32_to_cpu(hdr.seqnum), nblost);
		seqnum_prev = be32_to_cpu(hdr.seqnum);

		/* emit one pr_debug per newline-delimited trace line */
		p = (char *)&buf[sizeof(struct msgtrace_hdr)];
		while ((s = strstr(p, "\n")) != NULL) {
			*s = '\0';
			pr_debug("%s\n", p);
			p = s + 1;
		}
		pr_debug("%s\n", p);

		/* datalen is already dumped via the trace lines above */
		datalen = 0;
		break;

	case BRCMF_E_RSSI:
		brcmf_dbg(EVENT, "MACEVENT: %s %d\n",
			  event_name, be32_to_cpu(*((__be32 *)event_data)));
		break;

	default:
		brcmf_dbg(EVENT,
			  "MACEVENT: %s %d, MAC %s, status %d, reason %d, "
			  "auth %d\n", event_name, event_type, eabuf,
			  (int)status, (int)reason, (int)auth_type);
		break;
	}

	/* show any event payload not consumed above as a hex dump */
	if (datalen) {
		buf = (unsigned char *) event_data;
		brcmf_dbg(EVENT, " data (%d) : ", datalen);
		for (i = 0; i < datalen; i++)
			brcmf_dbg(EVENT, " 0x%02x ", *buf++);
		brcmf_dbg(EVENT, "\n");
	}
}
/*
 * nat_rtp_rtcp - NAT an H.245 RTP/RTCP channel announcement.
 *
 * Registers paired RTP (even port) / RTCP (odd port) expectations for a
 * NATed H.323 call, reusing ports previously allocated for the same
 * logical channel when available, and rewrites the transport address in
 * the signalling payload to the NATed port.
 *
 * Returns 0 on success or when no expectation slot/port is available
 * (the packet passes un-NATed), -1 if rewriting the payload fails.
 */
static int nat_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct,
			enum ip_conntrack_info ctinfo,
			unsigned char **data, int dataoff,
			H245_TransportAddress *taddr,
			__be16 port, __be16 rtp_port,
			struct nf_conntrack_expect *rtp_exp,
			struct nf_conntrack_expect *rtcp_exp)
{
	struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info;
	int dir = CTINFO2DIR(ctinfo);
	int i;
	u_int16_t nated_port;

	/* Set expectations for NAT */
	rtp_exp->saved_proto.udp.port = rtp_exp->tuple.dst.u.udp.port;
	rtp_exp->expectfn = nf_nat_follow_master;
	rtp_exp->dir = !dir;
	rtcp_exp->saved_proto.udp.port = rtcp_exp->tuple.dst.u.udp.port;
	rtcp_exp->expectfn = nf_nat_follow_master;
	rtcp_exp->dir = !dir;

	/* Lookup existing expects */
	for (i = 0; i < H323_RTP_CHANNEL_MAX; i++) {
		if (info->rtp_port[i][dir] == rtp_port) {
			/* Expected */

			/* Use allocated ports first. This will refresh
			 * the expects */
			rtp_exp->tuple.dst.u.udp.port = info->rtp_port[i][dir];
			rtcp_exp->tuple.dst.u.udp.port =
			    htons(ntohs(info->rtp_port[i][dir]) + 1);
			break;
		} else if (info->rtp_port[i][dir] == 0) {
			/* Not expected */
			break;
		}
	}

	/* Run out of expectations */
	if (i >= H323_RTP_CHANNEL_MAX) {
		if (net_ratelimit())
			pr_notice("nf_nat_h323: out of expectations\n");
		return 0;
	}

	/* Try to get a pair of ports. */
	/* steps by 2 so RTP stays even and RTCP = RTP + 1; stops on u16 wrap */
	for (nated_port = ntohs(rtp_exp->tuple.dst.u.udp.port);
	     nated_port != 0; nated_port += 2) {
		rtp_exp->tuple.dst.u.udp.port = htons(nated_port);
		if (nf_ct_expect_related(rtp_exp) == 0) {
			rtcp_exp->tuple.dst.u.udp.port =
			    htons(nated_port + 1);
			if (nf_ct_expect_related(rtcp_exp) == 0)
				break;
			nf_ct_unexpect_related(rtp_exp);
		}
	}

	if (nated_port == 0) {	/* No port available */
		if (net_ratelimit())
			pr_notice("nf_nat_h323: out of RTP ports\n");
		return 0;
	}

	/* Modify signal */
	/* (port & htons(1)) distinguishes the RTCP (odd) announcement */
	if (set_h245_addr(skb, data, dataoff, taddr,
			  &ct->tuplehash[!dir].tuple.dst.u3,
			  htons((port & htons(1)) ? nated_port + 1 :
						    nated_port)) == 0) {
		/* Save ports */
		info->rtp_port[i][dir] = rtp_port;
		info->rtp_port[i][!dir] = htons(nated_port);
	} else {
		nf_ct_unexpect_related(rtp_exp);
		nf_ct_unexpect_related(rtcp_exp);
		return -1;
	}

	/* Success */
	pr_debug("nf_nat_h323: expect RTP %pI4:%hu->%pI4:%hu\n",
		 &rtp_exp->tuple.src.u3.ip,
		 ntohs(rtp_exp->tuple.src.u.udp.port),
		 &rtp_exp->tuple.dst.u3.ip,
		 ntohs(rtp_exp->tuple.dst.u.udp.port));
	pr_debug("nf_nat_h323: expect RTCP %pI4:%hu->%pI4:%hu\n",
		 &rtcp_exp->tuple.src.u3.ip,
		 ntohs(rtcp_exp->tuple.src.u.udp.port),
		 &rtcp_exp->tuple.dst.u3.ip,
		 ntohs(rtcp_exp->tuple.dst.u.udp.port));

	return 0;
}
/**
 * omap2_dpll_round_rate - round a target rate for an OMAP DPLL
 * @clk: struct clk * for a DPLL
 * @target_rate: desired DPLL clock rate
 *
 * Given a DPLL and a desired target rate, round the target rate to a
 * possible, programmable rate for this DPLL. Attempts to select the
 * minimum possible n. Stores the computed (m, n) in the DPLL's
 * dpll_data structure so set_rate() will not need to call this
 * (expensive) function again. Returns ~0 if the target rate cannot
 * be rounded, or the rounded rate upon success.
 *
 * Successful roundings are memoized in dd->rate_cache; a cache hit
 * returns immediately. A result is only accepted (and returned) when
 * it is within 0.01% of the target; otherwise 0 is returned.
 */
long omap2_dpll_round_rate(struct clk *clk, unsigned long target_rate)
{
	int m, n, r, scaled_max_m;
	unsigned long scaled_rt_rp;
	unsigned long new_rate = 0;
	struct dpll_data *dd;
	unsigned long bestrate = 0, diff, bestdiff = ULONG_MAX;
	int bestm = 0, bestn = 0;
	struct dpll_rate_list *rs, *rate_cache;

	if (!clk || !clk->dpll_data)
		return ~0;

	dd = clk->dpll_data;

	/* fast path: previously rounded rates are cached */
	rate_cache = dd->rate_cache;
	for (rs = rate_cache; rs; rs = rs->next)
		if (rs->target_rate == target_rate) {
			dd->last_rounded_m = rs->m;
			dd->last_rounded_n = rs->n;
			dd->last_rounded_rate = rs->actual_rate;
			return rs->actual_rate;
		}

	pr_debug("clock: %s: starting DPLL round_rate, target rate %ld\n",
		 clk->name, target_rate);

	/* scale up so m can be computed without losing precision */
	scaled_rt_rp = target_rate / (dd->clk_ref->rate / DPLL_SCALE_FACTOR);
	scaled_max_m = dd->max_multiplier * DPLL_SCALE_FACTOR;

	dd->last_rounded_rate = 0;

	for (n = dd->min_divider; n <= dd->max_divider; n++) {
		/* Is the (input clk, divider) pair valid for the DPLL? */
		r = _dpll_test_fint(clk, n);
		if (r == DPLL_FINT_UNDERFLOW)
			break;
		else if (r == DPLL_FINT_INVALID)
			continue;

		/* Compute the scaled DPLL multiplier, based on the divider */
		m = scaled_rt_rp * n;

		/*
		 * Since we're counting n up, a m overflow means we
		 * can bail out completely (since as n increases in
		 * the next iteration, there's no way that m can
		 * increase beyond the current m)
		 */
		if (m > scaled_max_m)
			break;

		r = _dpll_test_mult(&m, n, &new_rate, target_rate,
				    dd->clk_ref->rate);

		/* m can't be set low enough for this n - try with a larger n */
		if (r == DPLL_MULT_UNDERFLOW)
			continue;

#ifdef DEBUG
		pr_err("clock: target=%ld %s: m = %d: n = %d: new_rate = %ld\n",
		       target_rate, clk->name, m, n, new_rate);
#endif

		if (target_rate > new_rate)
			diff = target_rate - new_rate;
		else
			diff = new_rate - target_rate;
		if (diff < bestdiff) {
			bestm = m;
			bestn = n;
			bestrate = new_rate;
			bestdiff = diff;
		}
		if (new_rate == target_rate)
			break;
	}

	/*
	 * The following if verifies that the new frequency is within 0.01% of
	 * the target frequency.
	 */
	if (bestdiff < (ULONG_MAX / 10000) &&
	    ((bestdiff * 10000) / target_rate) < 1) {
		dd->last_rounded_m = bestm;
		dd->last_rounded_n = bestn;
		dd->last_rounded_rate = bestrate;
		rs = kzalloc(sizeof (struct dpll_rate_list), GFP_ATOMIC);
		if (rs) {
			rs->m = bestm;
			rs->n = bestn;
			rs->target_rate = target_rate;
			rs->actual_rate = bestrate;
			/*
			 * only insert if the cache head is unchanged since we
			 * sampled it — otherwise discard to avoid corrupting
			 * a concurrently-updated list
			 */
			if (rate_cache == dd->rate_cache) {
				rs->next = dd->rate_cache;
				dd->rate_cache = rs;
			} else
				kzfree(rs);
		}
		return bestrate;
	} else
		return 0;
}
/**
 * usb_hcd_pxa27x_probe - initialize pxa27x-based HCDs
 * Context: !in_interrupt()
 *
 * Allocates basic resources for this USB host controller, and
 * then invokes the start() method for the HCD associated with it
 * through the hotplug entry's driver_data.
 *
 * Resources are acquired in order (hcd, mem region, ioremap, clock)
 * and released in reverse through the err3/err2/err1 labels.
 * Returns 0 on success or a negative errno.
 */
int usb_hcd_pxa27x_probe (const struct hc_driver *driver,
			  struct platform_device *pdev)
{
	int retval;
	struct usb_hcd *hcd;
	struct pxaohci_platform_data *inf;

	inf = pdev->dev.platform_data;

	if (!inf)
		return -ENODEV;

	if (pdev->resource[1].flags != IORESOURCE_IRQ) {
		pr_debug ("resource[1] is not IORESOURCE_IRQ");
		return -ENOMEM;
	}

	hcd = usb_create_hcd (driver, &pdev->dev, "pxa27x");
	if (!hcd)
		return -ENOMEM;
	hcd->rsrc_start = pdev->resource[0].start;
	hcd->rsrc_len = pdev->resource[0].end - pdev->resource[0].start + 1;

	if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
		pr_debug("request_mem_region failed");
		retval = -EBUSY;
		goto err1;
	}

	hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
	if (!hcd->regs) {
		pr_debug("ioremap failed");
		retval = -ENOMEM;
		goto err2;
	}

	if ((retval = pxa27x_start_hc(&pdev->dev)) < 0) {
		pr_debug("pxa27x_start_hc failed");
		goto err3;
	}

	/* Select Power Management Mode */
	pxa27x_ohci_select_pmm(inf->port_mode);

	if (inf->power_budget)
		hcd->power_budget = inf->power_budget;

	ohci_hcd_init(hcd_to_ohci(hcd));

	retval = usb_add_hcd(hcd, pdev->resource[1].start, IRQF_DISABLED);
	if (retval == 0)
		return retval;

	/* usb_add_hcd failed: unwind everything acquired above */
	pxa27x_stop_hc(&pdev->dev);
 err3:
	iounmap(hcd->regs);
 err2:
	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
 err1:
	usb_put_hcd(hcd);
	return retval;
}
/*
 * sst_alloc_stream_mfld - ask the SST firmware (Medfield) to allocate a stream
 * @params: caller-supplied buffer actually holding a struct snd_sst_params
 * @block: tracking block for the pending IPC reply (drv_id/msg_id filled here)
 *
 * Builds PCM parameters from @params, initialises the driver-side stream
 * context indexed by the device type, and queues an IPC_IA_ALLOC_STREAM
 * message on the dispatch list for the firmware.
 *
 * Returns the stream id (== device type) on success, -ENOMEM on allocation
 * failure. NOTE(review): if sst_create_ipc_msg() fails, the stream slot has
 * already been initialised by sst_init_stream() and is not torn down here —
 * confirm callers tolerate that.
 */
int sst_alloc_stream_mfld(char *params, struct sst_block *block)
{
	struct ipc_post *msg = NULL;
	struct snd_sst_alloc_params_mfld alloc_param;
	struct snd_sst_params *str_params;
	struct stream_info *str_info;
	unsigned int stream_ops, device;
	struct snd_sst_stream_params_mfld *sparams;
	unsigned int pcm_slot = 0, num_ch, pcm_wd_sz, sfreq;
	int str_id;
	u8 codec;
	u32 rb_size, rb_addr, period_count;
	unsigned long irq_flags;

	pr_debug("In %s\n", __func__);

	BUG_ON(!params);
	str_params = (struct snd_sst_params *)params;

	/* Pull the fields the firmware message needs out of the request. */
	stream_ops = str_params->ops;
	codec = str_params->codec;
	device = str_params->device_type;
	num_ch = sst_get_num_channel(str_params);
	sfreq = sst_get_sfreq(str_params);
	pcm_wd_sz = sst_get_wdsize(str_params);
	rb_size = str_params->aparams.ring_buf_info[0].size;
	rb_addr = str_params->aparams.ring_buf_info[0].addr;
	/* frag_size is in bytes; period_count divides by 4 per FW convention
	 * visible here — units beyond that are not shown in this file. */
	period_count = str_params->aparams.frag_size / 4;

	pr_debug("period_size = %d\n", period_count);
	pr_debug("ring_buf_addr = 0x%x\n", rb_addr);
	pr_debug("ring_buf_size = %d\n", rb_size);
	pr_debug("device_type=%d\n", device);
	pr_debug("sfreq =%d\n", sfreq);
	pr_debug("stream_ops%d codec%d device%d\n", stream_ops, codec, device);

	sparams = kzalloc(sizeof(*sparams), GFP_KERNEL);
	if (!sparams) {
		pr_err("Unable to allocate snd_sst_stream_params\n");
		return -ENOMEM;
	}

	sparams->uc.pcm_params.codec = codec;
	sparams->uc.pcm_params.num_chan = num_ch;
	sparams->uc.pcm_params.pcm_wd_sz = pcm_wd_sz;
	sparams->uc.pcm_params.reserved = 0;
	sparams->uc.pcm_params.sfreq = sfreq;
	sparams->uc.pcm_params.ring_buffer_size = rb_size;
	sparams->uc.pcm_params.period_count = period_count;
	sparams->uc.pcm_params.ring_buffer_addr = rb_addr;

	/* The stream id is simply the device type on this platform. */
	mutex_lock(&sst_drv_ctx->stream_lock);
	str_id = device;
	mutex_unlock(&sst_drv_ctx->stream_lock);
	pr_debug("slot %x\n", pcm_slot);

	/*allocate device type context*/
	sst_init_stream(&sst_drv_ctx->streams[str_id], codec,
			str_id, stream_ops, pcm_slot);
	/* send msg to FW to allocate a stream */
	if (sst_create_ipc_msg(&msg, true)) {
		kfree(sparams);
		return -ENOMEM;
	}

	alloc_param.str_type.codec_type = codec;
	alloc_param.str_type.str_type = SST_STREAM_TYPE_MUSIC;
	alloc_param.str_type.operation = stream_ops;
	alloc_param.str_type.protected_str = 0; /* non drm */
	alloc_param.str_type.time_slots = pcm_slot;
	alloc_param.str_type.reserved = 0;
	alloc_param.str_type.result = 0;

	/* Tell the reply tracker which message to match. */
	block->drv_id = str_id;
	block->msg_id = IPC_IA_ALLOC_STREAM;
	sst_fill_header(&msg->header, IPC_IA_ALLOC_STREAM, 1, str_id);
	/* Payload = alloc params plus the leading copy of the header word. */
	msg->header.part.data = sizeof(alloc_param) + sizeof(u32);
	memcpy(&alloc_param.stream_params, sparams, sizeof(*sparams));
	kfree(sparams);

	/* Mailbox layout: 32-bit header word first, then the alloc params. */
	memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
	memcpy(msg->mailbox_data + sizeof(u32), &alloc_param,
			sizeof(alloc_param));
	str_info = &sst_drv_ctx->streams[str_id];
	spin_lock_irqsave(&sst_drv_ctx->ipc_spin_lock, irq_flags);
	list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
	spin_unlock_irqrestore(&sst_drv_ctx->ipc_spin_lock, irq_flags);
	sst_drv_ctx->ops->post_message(&sst_drv_ctx->ipc_post_msg_wq);
	pr_debug("SST DBG:alloc stream done\n");
	return str_id;
}
/*
 * check_temp - periodic thermal poll driving cpufreq mitigation.
 *
 * Reads the configured TSENS sensor, runs core control, then walks the
 * frequency table one freq_step at a time: down when the temperature is at
 * or above the limit, back up once it drops below the hysteresis band.
 * Re-arms itself via delayed work for as long as mitigation is enabled.
 */
static void __cpuinit check_temp(struct work_struct *work)
{
	static int limit_init;		/* set once the freq table is fetched */
	struct tsens_device tsens_dev;
	long cur_temp = 0;
	uint32_t new_limit = limited_max_freq;
	int cpu = 0;
	int err = 0;

	tsens_dev.sensor_num = msm_thermal_info.sensor_id;
	err = tsens_get_temp(&tsens_dev, &cur_temp);
	if (err) {
		pr_debug("%s: Unable to read TSENS sensor %d\n",
				KBUILD_MODNAME, tsens_dev.sensor_num);
		goto reschedule;
	}

	/* Lazily fetch the cpufreq table; retry next poll on failure. */
	if (!limit_init) {
		err = msm_thermal_get_freq_table();
		if (err)
			goto reschedule;
		limit_init = 1;
	}

	do_core_control(cur_temp);

	if (cur_temp >= msm_thermal_info.limit_temp_degC) {
		/* Too hot: step the ceiling down, clamped at the floor. */
		if (limit_idx == limit_idx_low)
			goto reschedule;
		limit_idx -= msm_thermal_info.freq_step;
		if (limit_idx < limit_idx_low)
			limit_idx = limit_idx_low;
		new_limit = table[limit_idx].frequency;
	} else if (cur_temp < msm_thermal_info.limit_temp_degC -
			msm_thermal_info.temp_hysteresis_degC) {
		/* Cooled past hysteresis: step back up toward no limit. */
		if (limit_idx == limit_idx_high)
			goto reschedule;
		limit_idx += msm_thermal_info.freq_step;
		if (limit_idx >= limit_idx_high) {
			limit_idx = limit_idx_high;
			new_limit = MSM_CPUFREQ_NO_LIMIT;
		} else {
			new_limit = table[limit_idx].frequency;
		}
	}

	/* Nothing changed this cycle — skip the per-cpu update. */
	if (new_limit == limited_max_freq)
		goto reschedule;

	for_each_possible_cpu(cpu) {
		err = update_cpu_max_freq(cpu, new_limit);
		if (err)
			pr_debug(
			"%s: Unable to limit cpu%d max freq to %d\n",
					KBUILD_MODNAME, cpu, new_limit);
	}

reschedule:
	if (enabled)
		schedule_delayed_work(&check_temp_work,
				msecs_to_jiffies(msm_thermal_info.poll_ms));
}
/*
 * sst_send_byte_stream_mrfld - forward a raw byte-stream command to the DSP
 * @sbytes: actually a struct snd_sst_bytes_v2 describing the command
 *
 * Copies the caller's bytes into an IPC mailbox message and queues it. If
 * bytes->block is set, waits for the firmware reply and (for
 * SND_SST_BYTES_GET) copies the reply payload back into bytes->bytes.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or the firmware
 * error from sst_wait_timeout().
 *
 * NOTE(review): bytes->len originates from userspace and is not bounded
 * here before the mailbox memcpy — the original comment below acknowledges
 * the missing validation; confirm the platform driver checks it.
 */
int sst_send_byte_stream_mrfld(void *sbytes)
{
	struct ipc_post *msg = NULL;
	struct snd_sst_bytes_v2 *bytes = (struct snd_sst_bytes_v2 *) sbytes;
	unsigned long irq_flags;
	u32 length;
	int pvt_id, ret = 0;
	struct sst_block *block = NULL;

	pr_debug("%s:\ntype:%d\nipc_msg:%x\nblock:%d\ntask_id:%x\npipe: %d\nlength:%d\n",
		__func__, bytes->type, bytes->ipc_msg, bytes->block,
		bytes->task_id, bytes->pipe_id, bytes->len);
	/* need some err check as this is user data, perhaps move this to the
	 * platform driver and pass the struct */
	if (sst_create_ipc_msg(&msg, true))
		return -ENOMEM;

	pvt_id = sst_assign_pvt_id(sst_drv_ctx);
	sst_fill_header_mrfld(&msg->mrfld_header, bytes->ipc_msg,
			bytes->task_id, 1, pvt_id);
	/* Ask the FW for a reply only when the caller wants to block. */
	msg->mrfld_header.p.header_high.part.res_rqd = bytes->block;
	length = bytes->len;
	msg->mrfld_header.p.header_low_payload = length;
	pr_debug("length is %d\n", length);
	memcpy(msg->mailbox_data, &bytes->bytes, bytes->len);
	if (bytes->block) {
		block = sst_create_block(sst_drv_ctx, bytes->ipc_msg, pvt_id);
		if (block == NULL) {
			/* NOTE(review): msg came from sst_create_ipc_msg();
			 * verify plain kfree() is the matching release. */
			kfree(msg);
			return -ENOMEM;
		}
	}
	spin_lock_irqsave(&sst_drv_ctx->ipc_spin_lock, irq_flags);
	list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
	spin_unlock_irqrestore(&sst_drv_ctx->ipc_spin_lock, irq_flags);
	sst_drv_ctx->ops->post_message(&sst_drv_ctx->ipc_post_msg_wq);
	pr_debug("msg->mrfld_header.p.header_low_payload:%d",
			msg->mrfld_header.p.header_low_payload);
	if (bytes->block) {
		ret = sst_wait_timeout(sst_drv_ctx, block);
		if (ret) {
			pr_err("%s: fw returned err %d\n", __func__, ret);
			sst_free_block(sst_drv_ctx, block);
			return ret;
		}
	}
	if (bytes->type == SND_SST_BYTES_GET) {
		/* copy the reply and send back
		 * we need to update only sz and payload
		 */
		if (bytes->block) {
			unsigned char *r = block->data;
			pr_debug("read back %d bytes", bytes->len);
			memcpy(bytes->bytes, r, bytes->len);
		}
	}
	if (bytes->block)
		sst_free_block(sst_drv_ctx, block);
	return 0;
}
static int axp_pinctrl_parse_pin_cfg(struct platform_device *pdev) { int mainkey_count; int mainkey_idx; /* get main key count */ mainkey_count = script_get_main_key_count(); pr_debug("mainkey total count : %d\n", mainkey_count); for (mainkey_idx = 0; mainkey_idx < mainkey_count; mainkey_idx++) { char *mainkey_name; script_item_u *pin_list; int pin_count; int pin_index; int map_index; struct pinctrl_map *maps; /* get main key name by index */ mainkey_name = script_get_main_key_name(mainkey_idx); if (!mainkey_name) { /* get mainkey name failed */ pr_debug("get mainkey [%s] name failed\n", mainkey_name); continue; } /* get main-key(device) pin configuration */ pin_count = script_get_pio_list(mainkey_name, &pin_list); pr_debug("mainkey name : %s, pin count : %d\n", mainkey_name, pin_count); if (pin_count == 0) { /* the mainkey have no pin configuration */ continue; } /* allocate pinctrl_map table, * max map table size = pin count * 2 : * mux map and config map. */ maps = kzalloc(sizeof(*maps) * (pin_count * 2), GFP_KERNEL); if (!maps) { pr_err("allocate memory for sunxi pinctrl map table failed\n"); return -ENOMEM; } map_index = 0; for (pin_index = 0; pin_index < pin_count; pin_index++) { /* convert struct sunxi_pin_cfg to struct pinctrl_map */ map_index += axp_pin_cfg_to_pin_map(pdev, &(pin_list[pin_index].gpio), &(maps[map_index]), mainkey_name); } if (map_index) { /* register maps to pinctrl */ pr_debug("map mainkey [%s] to pinctrl, map number [%d]\n", mainkey_name, map_index); pinctrl_register_mappings(maps, map_index); } /* free pinctrl_map table directly, * pinctrl subsytem will dup this map table */ kfree(maps); } return 0; }
/**
 * sst_resume_stream - Send msg for resuming stream
 * @str_id: stream ID
 *
 * This function is called by any function which wants to resume
 * an already paused stream.
 *
 * Returns 0 on success, -EINVAL for an unknown/invalid stream id,
 * -EBADRQC if the stream is in neither RUNNING nor PAUSED state, or
 * a firmware error from sst_wait_timeout().
 */
int sst_resume_stream(int str_id)
{
	int retval = 0;
	struct ipc_post *msg = NULL;
	struct stream_info *str_info;
	struct intel_sst_ops *ops;
	unsigned long irq_flags;
	struct sst_block *block = NULL;
	int pvt_id, len;
	struct ipc_dsp_hdr dsp_hdr;

	pr_debug("SST DBG:sst_resume_stream for %d\n", str_id);
	str_info = get_stream_info(str_id);
	if (!str_info)
		return -EINVAL;
	ops = sst_drv_ctx->ops;
	/* Already running: nothing to do. */
	if (str_info->status == STREAM_RUNNING)
		return 0;
	if (str_info->status == STREAM_PAUSED) {
		if (!sst_drv_ctx->use_32bit_ops) {
			/* 64-bit (Merrifield) IPC path: DSP header in the
			 * mailbox payload, response required. */
			pvt_id = sst_assign_pvt_id(sst_drv_ctx);
			retval = sst_create_block_and_ipc_msg(&msg, true,
					sst_drv_ctx, &block, IPC_CMD, pvt_id);
			if (retval)
				return retval;
			sst_fill_header_mrfld(&msg->mrfld_header,
					IPC_CMD, str_info->task_id, 1, pvt_id);
			msg->mrfld_header.p.header_high.part.res_rqd = 1;
			len = sizeof(dsp_hdr);
			msg->mrfld_header.p.header_low_payload = len;
			sst_fill_header_dsp(&dsp_hdr,
					IPC_IA_RESUME_STREAM_MRFLD,
					str_info->pipe_id, 0);
			memcpy(msg->mailbox_data, &dsp_hdr, sizeof(dsp_hdr));
		} else {
			/* Legacy 32-bit IPC path: plain resume message. */
			retval = sst_create_block_and_ipc_msg(&msg, false,
					sst_drv_ctx, &block,
					IPC_IA_RESUME_STREAM, str_id);
			if (retval)
				return retval;
			sst_fill_header(&msg->header, IPC_IA_RESUME_STREAM,
					0, str_id);
		}
		spin_lock_irqsave(&sst_drv_ctx->ipc_spin_lock, irq_flags);
		list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
		spin_unlock_irqrestore(&sst_drv_ctx->ipc_spin_lock, irq_flags);
		ops->post_message(&sst_drv_ctx->ipc_post_msg_wq);
		/* Block for the firmware's ack before updating local state. */
		retval = sst_wait_timeout(sst_drv_ctx, block);
		sst_free_block(sst_drv_ctx, block);
		if (!retval) {
			/* Restore the pre-pause state. */
			if (str_info->prev == STREAM_RUNNING)
				str_info->status = STREAM_RUNNING;
			else
				str_info->status = STREAM_INIT;
			str_info->prev = STREAM_PAUSED;
		} else if (retval == -SST_ERR_INVALID_STREAM_ID) {
			/* Firmware no longer knows this stream: drop our
			 * local context as well. */
			retval = -EINVAL;
			mutex_lock(&sst_drv_ctx->stream_lock);
			sst_clean_stream(str_info);
			mutex_unlock(&sst_drv_ctx->stream_lock);
		}
	} else {
		retval = -EBADRQC;
		pr_err("SST ERR: BADQRC for stream\n");
	}

	return retval;
}
/**
 * usb_hcd_ppc_soc_probe - initialize On-Chip HCDs
 * Context: !in_interrupt()
 *
 * Allocates basic resources for this USB host controller: the IRQ and
 * MMIO region from the platform device, the HCD itself, its DMA buffer
 * pools and the interrupt line; then registers the bus and calls the
 * driver's start() method.
 *
 * Store this function in the HCD's struct pci_driver as probe().
 *
 * Returns 0 on success (with *hcd_out set) or a negative errno; resources
 * are unwound via the err1..err4 labels in reverse acquisition order.
 */
static int usb_hcd_ppc_soc_probe(const struct hc_driver *driver,
			  struct usb_hcd **hcd_out,
			  struct platform_device *pdev)
{
	int retval;
	struct usb_hcd *hcd = 0;
	struct ohci_hcd *ohci;
	struct resource *res;
	int irq;

	pr_debug("initializing PPC-SOC USB Controller\n");

	/* Both an IRQ and a register window are mandatory. */
	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res) {
		pr_debug(__FILE__ ": no irq\n");
		return -ENODEV;
	}
	irq = res->start;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		pr_debug(__FILE__ ": no reg addr\n");
		return -ENODEV;
	}
	if (!request_mem_region(res->start, res->end - res->start + 1,
				hcd_name)) {
		pr_debug(__FILE__ ": request_mem_region failed\n");
		return -EBUSY;
	}

	hcd = driver->hcd_alloc ();
	if (!hcd){
		pr_debug(__FILE__ ": hcd_alloc failed\n");
		retval = -ENOMEM;
		goto err1;
	}
	ohci = hcd_to_ohci(hcd);

	/* PPC SoC OHCI registers are big-endian. */
	ohci->flags |= OHCI_BIG_ENDIAN;

	hcd->driver = (struct hc_driver *) driver;
	hcd->description = driver->description;
	hcd->irq = irq;
	hcd->regs = (struct ohci_regs *) ioremap(res->start,
			res->end - res->start + 1);
	if (!hcd->regs) {
		pr_debug(__FILE__ ": ioremap failed\n");
		retval = -ENOMEM;
		goto err2;
	}

	hcd->self.controller = &pdev->dev;

	retval = hcd_buffer_create(hcd);
	if (retval) {
		pr_debug(__FILE__ ": pool alloc fail\n");
		goto err3;
	}

	retval = request_irq(hcd->irq, usb_hcd_irq, SA_INTERRUPT,
				hcd_name, hcd);
	if (retval) {
		pr_debug(__FILE__ ": request_irq failed, returned %d\n",
				retval);
		/* NOTE(review): the real request_irq error is replaced by
		 * -EBUSY here — confirm that loss of detail is intended. */
		retval = -EBUSY;
		goto err4;
	}

	info("%s (PPC-SOC) at 0x%p, irq %d\n",
	     hcd_name, hcd->regs, hcd->irq);

	usb_bus_init(&hcd->self);
	hcd->self.op = &usb_hcd_operations;
	hcd->self.release = & usb_hcd_release;
	hcd->self.hcpriv = (void *) hcd;
	hcd->self.bus_name = "PPC-SOC USB";
	hcd->product_desc = "PPC-SOC OHCI";

	INIT_LIST_HEAD(&hcd->dev_list);

	usb_register_bus(&hcd->self);

	if ((retval = driver->start(hcd)) < 0) {
		/* start() failed after registration: the remove helper
		 * undoes everything acquired above. */
		usb_hcd_ppc_soc_remove(hcd, pdev);
		return retval;
	}

	*hcd_out = hcd;
	return 0;

 err4:
	hcd_buffer_destroy(hcd);
 err3:
	iounmap(hcd->regs);
 err2:
	/* NOTE(review): nothing in this function sets drvdata before this
	 * point — presumably cleared defensively; verify. */
	dev_set_drvdata(&pdev->dev, NULL);
 err1:
	pr_debug("Removing PPC-SOC USB Controller\n");
	release_mem_region(res->start, res->end - res->start + 1);
	return retval;
}
/*
 * help - netfilter conntrack helper for IRC DCC commands.
 * @skb: packet being tracked
 * @protoff: offset of the transport (TCP) header within @skb
 * @ct: the IRC control connection's conntrack entry
 * @ctinfo: direction/state of @skb relative to @ct
 *
 * Scans client->server IRC traffic for "\1DCC <proto> <ip> <port>\1"
 * commands and registers an expectation for the advertised data
 * connection so it can be tracked (and NATed via nf_nat_irc_hook when
 * loaded). Returns NF_ACCEPT normally, NF_DROP only when an expectation
 * cannot be allocated or registered.
 */
static int help(struct sk_buff *skb, unsigned int protoff,
		struct nf_conn *ct, enum ip_conntrack_info ctinfo)
{
	unsigned int dataoff;
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct tcphdr _tcph;
	const char *data_limit;
	char *data, *ib_ptr;
	int dir = CTINFO2DIR(ctinfo);
	struct nf_conntrack_expect *exp;
	struct nf_conntrack_tuple *tuple;
	__be32 dcc_ip;
	u_int16_t dcc_port;
	__be16 port;
	int i, ret = NF_ACCEPT;
	char *addr_beg_p, *addr_end_p;
	typeof(nf_nat_irc_hook) nf_nat_irc;

	/* If packet is coming from IRC server */
	if (dir == IP_CT_DIR_REPLY)
		return NF_ACCEPT;

	/* Until there's been traffic both ways, don't look in packets. */
	if (ctinfo != IP_CT_ESTABLISHED && ctinfo != IP_CT_ESTABLISHED_REPLY)
		return NF_ACCEPT;

	/* Not a full tcp header? */
	th = skb_header_pointer(skb, protoff, sizeof(_tcph), &_tcph);
	if (th == NULL)
		return NF_ACCEPT;

	/* No data? */
	dataoff = protoff + th->doff*4;
	if (dataoff >= skb->len)
		return NF_ACCEPT;

	/* irc_buffer is a shared linearization scratch buffer; the lock
	 * protects it for the whole scan below. */
	spin_lock_bh(&irc_buffer_lock);
	ib_ptr = skb_header_pointer(skb, dataoff, skb->len - dataoff,
				    irc_buffer);
	BUG_ON(ib_ptr == NULL);

	data = ib_ptr;
	data_limit = ib_ptr + skb->len - dataoff;

	/* strlen("\1DCC SENT t AAAAAAAA P\1\n")=24
	 * 5+MINMATCHLEN+strlen("t AAAAAAAA P\1\n")=14 */
	while (data < data_limit - (19 + MINMATCHLEN)) {
		if (memcmp(data, "\1DCC ", 5)) {
			data++;
			continue;
		}
		data += 5;
		/* we have at least (19+MINMATCHLEN)-5 bytes valid data left */

		iph = ip_hdr(skb);
		pr_debug("DCC found in master %pI4:%u %pI4:%u\n",
			 &iph->saddr, ntohs(th->source),
			 &iph->daddr, ntohs(th->dest));

		for (i = 0; i < ARRAY_SIZE(dccprotos); i++) {
			if (memcmp(data, dccprotos[i], strlen(dccprotos[i]))) {
				/* no match */
				continue;
			}
			data += strlen(dccprotos[i]);
			pr_debug("DCC %s detected\n", dccprotos[i]);

			/* we have at least
			 * (19+MINMATCHLEN)-5-dccprotos[i].matchlen bytes valid
			 * data left (== 14/13 bytes) */
			if (parse_dcc(data, data_limit, &dcc_ip,
				      &dcc_port, &addr_beg_p, &addr_end_p)) {
				pr_debug("unable to parse dcc command\n");
				continue;
			}

			pr_debug("DCC bound ip/port: %pI4:%u\n",
				 &dcc_ip, dcc_port);

			/* dcc_ip can be the internal OR external (NAT'ed) IP */
			tuple = &ct->tuplehash[dir].tuple;
			if (tuple->src.u3.ip != dcc_ip &&
			    tuple->dst.u3.ip != dcc_ip) {
				/* Advertised IP matches neither side of the
				 * control connection: likely forged. */
				net_warn_ratelimited("Forged DCC command from %pI4: %pI4:%u\n",
						     &tuple->src.u3.ip,
						     &dcc_ip, dcc_port);
				continue;
			}

			exp = nf_ct_expect_alloc(ct);
			if (exp == NULL) {
				nf_ct_helper_log(skb, ct,
						 "cannot alloc expectation");
				ret = NF_DROP;
				goto out;
			}
			/* Expect the data connection toward the client,
			 * from any source, on the advertised port. */
			tuple = &ct->tuplehash[!dir].tuple;
			port = htons(dcc_port);
			nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT,
					  tuple->src.l3num,
					  NULL, &tuple->dst.u3,
					  IPPROTO_TCP, NULL, &port);

			nf_nat_irc = rcu_dereference(nf_nat_irc_hook);
			if (nf_nat_irc && ct->status & IPS_NAT_MASK)
				/* NAT helper rewrites the address text in the
				 * payload and registers the expectation. */
				ret = nf_nat_irc(skb, ctinfo, protoff,
						 addr_beg_p - ib_ptr,
						 addr_end_p - addr_beg_p,
						 exp);
			else if (nf_ct_expect_related(exp) != 0) {
				nf_ct_helper_log(skb, ct,
						 "cannot add expectation");
				ret = NF_DROP;
			}
			nf_ct_expect_put(exp);
			goto out;
		}
	}
 out:
	spin_unlock_bh(&irc_buffer_lock);
	return ret;
}
static int msm_dai_q6_auxpcm_prepare(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { struct msm_dai_q6_dai_data *dai_data = dev_get_drvdata(dai->dev); int rc = 0; struct msm_dai_auxpcm_pdata *auxpcm_pdata = (struct msm_dai_auxpcm_pdata *) dai->dev->platform_data; mutex_lock(&aux_pcm_mutex); if (aux_pcm_count == 2) { dev_dbg(dai->dev, "%s(): dai->id %d aux_pcm_count is 2. Just" " return.\n", __func__, dai->id); mutex_unlock(&aux_pcm_mutex); return 0; } else if (aux_pcm_count > 2) { dev_err(dai->dev, "%s(): ERROR: dai->id %d" " aux_pcm_count = %d > 2\n", __func__, dai->id, aux_pcm_count); mutex_unlock(&aux_pcm_mutex); return 0; } aux_pcm_count++; if (aux_pcm_count == 2) { dev_dbg(dai->dev, "%s(): dai->id %d aux_pcm_count = %d after " " increment\n", __func__, dai->id, aux_pcm_count); mutex_unlock(&aux_pcm_mutex); return 0; } pr_debug("%s:dai->id:%d aux_pcm_count = %d. opening afe\n", __func__, dai->id, aux_pcm_count); rc = afe_q6_interface_prepare(); if (IS_ERR_VALUE(rc)) dev_err(dai->dev, "fail to open AFE APR\n"); /* * For AUX PCM Interface the below sequence of clk * settings and afe_open is a strict requirement. * * Also using afe_open instead of afe_port_start_nowait * to make sure the port is open before deasserting the * clock line. This is required because pcm register is * not written before clock deassert. Hence the hw does * not get updated with new setting if the below clock * assert/deasset and afe_open sequence is not followed. */ clk_reset(pcm_clk, CLK_RESET_ASSERT); afe_open(PCM_RX, &dai_data->port_config, dai_data->rate); afe_open(PCM_TX, &dai_data->port_config, dai_data->rate); rc = clk_set_rate(pcm_clk, auxpcm_pdata->pcm_clk_rate); if (rc < 0) { pr_err("%s: clk_set_rate failed\n", __func__); return rc; } clk_enable(pcm_clk); //HTC_AUD++ //There is downlink no sound when receiving a MT-call. //Adding a delay to solve this issue. 
usleep(15000); //HTC_AUD-- clk_reset(pcm_clk, CLK_RESET_DEASSERT); mutex_unlock(&aux_pcm_mutex); return rc; }
static long acdb_ioctl(struct file *f, unsigned int cmd, unsigned long arg) { int32_t result = 0; int32_t size; int32_t map_fd; uint32_t topology; uint32_t data[MAX_IOCTL_DATA]; struct msm_spk_prot_status prot_status; struct msm_spk_prot_status acdb_spk_status; pr_debug("%s\n", __func__); mutex_lock(&acdb_data.acdb_mutex); switch (cmd) { case AUDIO_REGISTER_PMEM: pr_debug("AUDIO_REGISTER_PMEM\n"); result = deregister_memory(); if (result < 0) pr_err("%s: deregister_memory failed returned %d!\n", __func__, result); if (copy_from_user(&map_fd, (void *)arg, sizeof(map_fd))) { pr_err("%s: fail to copy memory handle!\n", __func__); result = -EFAULT; } else { acdb_data.map_handle = map_fd; result = register_memory(); } goto done; case AUDIO_DEREGISTER_PMEM: pr_debug("AUDIO_DEREGISTER_PMEM\n"); result = deregister_memory(); goto done; case AUDIO_SET_VOICE_RX_TOPOLOGY: if (copy_from_user(&topology, (void *)arg, sizeof(topology))) { pr_err("%s: fail to copy topology!\n", __func__); result = -EFAULT; } store_voice_rx_topology(topology); goto done; case AUDIO_SET_VOICE_TX_TOPOLOGY: if (copy_from_user(&topology, (void *)arg, sizeof(topology))) { pr_err("%s: fail to copy topology!\n", __func__); result = -EFAULT; } store_voice_tx_topology(topology); goto done; case AUDIO_SET_ADM_RX_TOPOLOGY: if (copy_from_user(&topology, (void *)arg, sizeof(topology))) { pr_err("%s: fail to copy topology!\n", __func__); result = -EFAULT; } store_adm_rx_topology(topology); goto done; case AUDIO_SET_ADM_TX_TOPOLOGY: if (copy_from_user(&topology, (void *)arg, sizeof(topology))) { pr_err("%s: fail to copy topology!\n", __func__); result = -EFAULT; } store_adm_tx_topology(topology); goto done; case AUDIO_SET_ASM_TOPOLOGY: if (copy_from_user(&topology, (void *)arg, sizeof(topology))) { pr_err("%s: fail to copy topology!\n", __func__); result = -EFAULT; } store_asm_topology(topology); goto done; case AUDIO_SET_SPEAKER_PROT: if (copy_from_user(&acdb_data.spk_prot_cfg, (void *)arg, 
sizeof(acdb_data.spk_prot_cfg))) { pr_err("%s fail to copy spk_prot_cfg\n", __func__); result = -EFAULT; } goto done; case AUDIO_GET_SPEAKER_PROT: /*Indicates calibration was succesfull*/ if (acdb_data.spk_prot_cfg.mode == MSM_SPKR_PROT_CALIBRATED) { prot_status.r0 = acdb_data.spk_prot_cfg.r0; prot_status.status = 0; } else if (acdb_data.spk_prot_cfg.mode == MSM_SPKR_PROT_CALIBRATION_IN_PROGRESS) { /*Call AFE to query the status*/ acdb_spk_status.status = -EINVAL; acdb_spk_status.r0 = -1; get_spk_protection_status(&acdb_spk_status); prot_status.r0 = acdb_spk_status.r0; prot_status.status = acdb_spk_status.status; if (!acdb_spk_status.status) { acdb_data.spk_prot_cfg.mode = MSM_SPKR_PROT_CALIBRATED; acdb_data.spk_prot_cfg.r0 = prot_status.r0; } } else { /*Indicates calibration data is invalid*/ prot_status.status = -EINVAL; prot_status.r0 = -1; } if (copy_to_user((void *)arg, &prot_status, sizeof(prot_status))) { pr_err("%s: Failed to update prot_status\n", __func__); } goto done; case AUDIO_REGISTER_VOCPROC_VOL_TABLE: result = register_vocvol_table(); goto done; case AUDIO_DEREGISTER_VOCPROC_VOL_TABLE: result = deregister_vocvol_table(); goto done; case AUDIO_SET_HW_DELAY_RX: result = store_hw_delay(RX_CAL, (void *)arg); goto done; case AUDIO_SET_HW_DELAY_TX: result = store_hw_delay(TX_CAL, (void *)arg); goto done; } if (copy_from_user(&size, (void *) arg, sizeof(size))) { result = -EFAULT; goto done; } if ((size <= 0) || (size > sizeof(data))) { pr_err("%s: Invalid size sent to driver: %d\n", __func__, size); result = -EFAULT; goto done; } switch (cmd) { case AUDIO_SET_VOCPROC_COL_CAL: result = store_voice_col_data(VOCPROC_CAL, size, (uint32_t *)arg); goto done; case AUDIO_SET_VOCSTRM_COL_CAL: result = store_voice_col_data(VOCSTRM_CAL, size, (uint32_t *)arg); goto done; case AUDIO_SET_VOCVOL_COL_CAL: result = store_voice_col_data(VOCVOL_CAL, size, (uint32_t *)arg); goto done; } if (copy_from_user(data, (void *)(arg + sizeof(size)), size)) { pr_err("%s: fail to 
copy table size %d\n", __func__, size); result = -EFAULT; goto done; } if (data == NULL) { pr_err("%s: NULL pointer sent to driver!\n", __func__); result = -EFAULT; goto done; } if (size > sizeof(struct cal_block)) pr_err("%s: More cal data for ioctl 0x%x then expected, size received: %d\n", __func__, cmd, size); switch (cmd) { case AUDIO_SET_AUDPROC_TX_CAL: result = store_audproc_cal(TX_CAL, (struct cal_block *)data); goto done; case AUDIO_SET_AUDPROC_RX_CAL: result = store_audproc_cal(RX_CAL, (struct cal_block *)data); goto done; case AUDIO_SET_AUDPROC_TX_STREAM_CAL: result = store_audstrm_cal(TX_CAL, (struct cal_block *)data); goto done; case AUDIO_SET_AUDPROC_RX_STREAM_CAL: result = store_audstrm_cal(RX_CAL, (struct cal_block *)data); goto done; case AUDIO_SET_AUDPROC_TX_VOL_CAL: result = store_audvol_cal(TX_CAL, (struct cal_block *)data); goto done; case AUDIO_SET_AUDPROC_RX_VOL_CAL: result = store_audvol_cal(RX_CAL, (struct cal_block *)data); goto done; case AUDIO_SET_AFE_TX_CAL: result = store_afe_cal(TX_CAL, (struct cal_block *)data); goto done; case AUDIO_SET_AFE_RX_CAL: result = store_afe_cal(RX_CAL, (struct cal_block *)data); goto done; case AUDIO_SET_VOCPROC_CAL: result = store_vocproc_cal((struct cal_block *)data); goto done; case AUDIO_SET_VOCPROC_STREAM_CAL: result = store_vocstrm_cal((struct cal_block *)data); goto done; case AUDIO_SET_VOCPROC_VOL_CAL: result = store_vocvol_cal((struct cal_block *)data); goto done; case AUDIO_SET_VOCPROC_DEV_CFG_CAL: result = store_vocproc_dev_cfg_cal((struct cal_block *)data); goto done; case AUDIO_SET_SIDETONE_CAL: store_sidetone_cal((struct sidetone_cal *)data); goto done; case AUDIO_SET_ANC_CAL: result = store_anc_cal((struct cal_block *)data); goto done; case AUDIO_SET_LSM_CAL: result = store_lsm_cal((struct cal_block *)data); goto done; case AUDIO_SET_ADM_CUSTOM_TOPOLOGY: result = store_adm_custom_topology((struct cal_block *)data); goto done; case AUDIO_SET_ASM_CUSTOM_TOPOLOGY: result = 
store_asm_custom_topology((struct cal_block *)data); goto done; case AUDIO_SET_AANC_CAL: result = store_aanc_cal((struct cal_block *)data); goto done; default: pr_err("ACDB=> ACDB ioctl not found!\n"); result = -EFAULT; goto done; } done: mutex_unlock(&acdb_data.acdb_mutex); return result; }
/*
 * Package up a bounce condition.
 *
 * Entered when the VFP hardware refuses (bounces) an instruction; decides
 * whether to restart/emulate the trigger instruction and optionally a
 * queued second instruction (FPINST2), raising any resulting exceptions.
 * NOTE(review): exits via preempt_enable() without a visible matching
 * preempt_disable() — presumably the undef-instruction entry path disables
 * preemption; confirm against the caller.
 */
void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
{
	u32 fpscr, orig_fpscr, fpsid, exceptions;

	pr_debug("VFP: bounce: trigger %08x fpexc %08x\n", trigger, fpexc);

	/*
	 * At this point, FPEXC can have the following configuration:
	 *
	 *  EX DEX IXE
	 *  0   1   x   - synchronous exception
	 *  1   x   0   - asynchronous exception
	 *  1   x   1   - sychronous on VFP subarch 1 and asynchronous on later
	 *  0   0   1   - synchronous on VFP9 (non-standard subarch 1
	 *                implementation), undefined otherwise
	 *
	 * Clear various bits and enable access to the VFP so we can
	 * handle the bounce.
	 */
	fmxr(FPEXC, fpexc & ~(FPEXC_EX|FPEXC_DEX|FPEXC_FP2V|FPEXC_VV|FPEXC_TRAP_MASK));

	fpsid = fmrx(FPSID);
	orig_fpscr = fpscr = fmrx(FPSCR);

	/*
	 * Check for the special VFP subarch 1 and FPSCR.IXE bit case
	 */
	if ((fpsid & FPSID_ARCH_MASK) == (1 << FPSID_ARCH_BIT)
	    && (fpscr & FPSCR_IXE)) {
		/*
		 * Synchronous exception, emulate the trigger instruction
		 */
		goto emulate;
	}

	if (fpexc & FPEXC_EX) {
#ifndef CONFIG_CPU_FEROCEON
		/*
		 * Asynchronous exception. The instruction is read from FPINST
		 * and the interrupted instruction has to be restarted.
		 */
		trigger = fmrx(FPINST);
		regs->ARM_pc -= 4;
#endif
	} else if (!(fpexc & FPEXC_DEX)) {
		/*
		 * Illegal combination of bits. It can be caused by an
		 * unallocated VFP instruction but with FPSCR.IXE set and not
		 * on VFP subarch 1.
		 */
		vfp_raise_exceptions(VFP_EXCEPTION_ERROR, trigger, fpscr, regs);
		goto exit;
	}

	/*
	 * Modify fpscr to indicate the number of iterations remaining.
	 * If FPEXC.EX is 0, FPEXC.DEX is 1 and the FPEXC.VV bit indicates
	 * whether FPEXC.VECITR or FPSCR.LEN is used.
	 */
	if (fpexc & (FPEXC_EX | FPEXC_VV)) {
		u32 len;

		len = fpexc + (1 << FPEXC_LENGTH_BIT);

		fpscr &= ~FPSCR_LENGTH_MASK;
		fpscr |= (len & FPEXC_LENGTH_MASK) << (FPSCR_LENGTH_BIT - FPEXC_LENGTH_BIT);
	}

	/*
	 * Handle the first FP instruction.  We used to take note of the
	 * FPEXC bounce reason, but this appears to be unreliable.
	 * Emulate the bounced instruction instead.
	 */
	exceptions = vfp_emulate_instruction(trigger, fpscr, regs);
	if (exceptions)
		vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs);

	/*
	 * If there isn't a second FP instruction, exit now. Note that
	 * the FPEXC.FP2V bit is valid only if FPEXC.EX is 1.
	 *
	 * NOTE(review): this XOR test only takes the FP2V path when fpexc
	 * equals exactly EX|FP2V, i.e. any other set bit skips FPINST2.
	 * Later upstream kernels use a masked compare of the two bits —
	 * confirm whether that stricter behaviour is intended here.
	 */
	if (fpexc ^ (FPEXC_EX | FPEXC_FP2V))
		goto exit;

	/*
	 * The barrier() here prevents fpinst2 being read
	 * before the condition above.
	 */
	barrier();
	trigger = fmrx(FPINST2);

 emulate:
	exceptions = vfp_emulate_instruction(trigger, orig_fpscr, regs);
	if (exceptions)
		vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs);
 exit:
	preempt_enable();
}
/*
 * gumstix_mmc_init - board-init hook for MMC on Gumstix.
 *
 * MMC is not wired up in this configuration; the hook only logs that the
 * controller is left disabled.
 */
static void __init gumstix_mmc_init(void)
{
	pr_debug("Gumstix mmc disabled\n");
}
static void octeon_cf_set_dmamode(struct ata_port *ap, struct ata_device *dev) { struct octeon_cf_data *ocd = dev->link->ap->dev->platform_data; union cvmx_mio_boot_dma_timx dma_tim; unsigned int oe_a; unsigned int oe_n; unsigned int dma_ackh; unsigned int dma_arq; unsigned int pause; unsigned int T0, Tkr, Td; unsigned int tim_mult; const struct ata_timing *timing; timing = ata_timing_find_mode(dev->dma_mode); T0 = timing->cycle; Td = timing->active; Tkr = timing->recover; dma_ackh = timing->dmack_hold; dma_tim.u64 = 0; /* dma_tim.s.tim_mult = 0 --> 4x */ tim_mult = 4; /* not spec'ed, value in eclocks, not affected by tim_mult */ dma_arq = 8; pause = 25 - dma_arq * 1000 / (octeon_get_clock_rate() / 1000000); /* Tz */ oe_a = Td; /* Tkr from cf spec, lengthened to meet T0 */ oe_n = max(T0 - oe_a, Tkr); dma_tim.s.dmack_pi = 1; dma_tim.s.oe_n = ns_to_tim_reg(tim_mult, oe_n); dma_tim.s.oe_a = ns_to_tim_reg(tim_mult, oe_a); /* * This is tI, C.F. spec. says 0, but Sony CF card requires * more, we use 20 nS. */ dma_tim.s.dmack_s = ns_to_tim_reg(tim_mult, 20); dma_tim.s.dmack_h = ns_to_tim_reg(tim_mult, dma_ackh); dma_tim.s.dmarq = dma_arq; dma_tim.s.pause = ns_to_tim_reg(tim_mult, pause); dma_tim.s.rd_dly = 0; /* Sample right on edge */ /* writes only */ dma_tim.s.we_n = ns_to_tim_reg(tim_mult, oe_n); dma_tim.s.we_a = ns_to_tim_reg(tim_mult, oe_a); pr_debug("ns to ticks (mult %d) of %d is: %d\n", tim_mult, 60, ns_to_tim_reg(tim_mult, 60)); pr_debug("oe_n: %d, oe_a: %d, dmack_s: %d, dmack_h: " "%d, dmarq: %d, pause: %d\n", dma_tim.s.oe_n, dma_tim.s.oe_a, dma_tim.s.dmack_s, dma_tim.s.dmack_h, dma_tim.s.dmarq, dma_tim.s.pause); cvmx_write_csr(CVMX_MIO_BOOT_DMA_TIMX(ocd->dma_engine), dma_tim.u64); }
/*
 * gumstix_bluetooth_init - board-init hook for Bluetooth on Gumstix.
 *
 * Bluetooth is not enabled in this configuration; the hook only logs that
 * it is disabled.
 */
static void gumstix_bluetooth_init(void)
{
	pr_debug("Gumstix Bluetooth is disabled\n");
}
/*
 * sst_alloc_stream_mrfld - ask the SST firmware (Merrifield) to allocate
 * a stream.
 * @params: caller-supplied buffer actually holding a struct snd_sst_params
 * @block: tracking block for the pending IPC reply (drv_id/msg_id set here)
 *
 * Copies the stream/codec parameters into an IPC_IA_ALLOC_STREAM_MRFLD
 * message (DSP header followed by the alloc params in the mailbox),
 * records pipe/task ids in the driver's stream table, and queues the
 * message for the firmware.
 *
 * Returns the stream id taken from @params on success, -ENOMEM on
 * allocation failure. NOTE(review): the stream-table entry has already
 * been written when sst_create_ipc_msg() fails and is not rolled back —
 * confirm callers tolerate that.
 */
int sst_alloc_stream_mrfld(char *params, struct sst_block *block)
{
	struct ipc_post *msg = NULL;
	struct snd_sst_alloc_mrfld alloc_param;
	struct ipc_dsp_hdr dsp_hdr;
	struct snd_sst_params *str_params;
	struct snd_sst_tstamp fw_tstamp;
	unsigned int str_id, pipe_id, pvt_id, task_id;
	u32 len = 0;
	struct stream_info *str_info;
	unsigned long irq_flags;
	int i, num_ch;

	pr_debug("In %s\n", __func__);
	BUG_ON(!params);

	str_params = (struct snd_sst_params *)params;
	memset(&alloc_param, 0, sizeof(alloc_param));
	alloc_param.operation = str_params->ops;
	alloc_param.codec_type = str_params->codec;
	alloc_param.sg_count = str_params->aparams.sg_count;
	alloc_param.ring_buf_info[0].addr =
		str_params->aparams.ring_buf_info[0].addr;
	alloc_param.ring_buf_info[0].size =
		str_params->aparams.ring_buf_info[0].size;
	alloc_param.frag_size = str_params->aparams.frag_size;

	memcpy(&alloc_param.codec_params, &str_params->sparams,
			sizeof(struct snd_sst_stream_params));

	/* fill channel map params for multichannel support.
	 * Ideally channel map should be received from upper layers
	 * for multichannel support.
	 * Currently hardcoding as per FW reqm.
	 */
	num_ch = sst_get_num_channel(str_params);
	for (i = 0; i < 8; i++) {
		/* identity map for the first num_ch channels, 0xFF = unused */
		if (i < num_ch)
			alloc_param.codec_params.uc.pcm_params.channel_map[i] = i;
		else
			alloc_param.codec_params.uc.pcm_params.channel_map[i] = 0xFF;
	}

	str_id = str_params->stream_id;
	pipe_id = str_params->device_type;
	task_id = str_params->task;
	/* Record routing info in the driver's stream table. */
	sst_drv_ctx->streams[str_id].pipe_id = pipe_id;
	sst_drv_ctx->streams[str_id].task_id = task_id;
	sst_drv_ctx->streams[str_id].num_ch = num_ch;

	pvt_id = sst_assign_pvt_id(sst_drv_ctx);
	/* Per-stream timestamp slot inside the shared mailbox region. */
	alloc_param.ts = (struct snd_sst_tstamp *)(sst_drv_ctx->mailbox_add +
			sst_drv_ctx->tstamp + (str_id * sizeof(fw_tstamp)));

	pr_debug("alloc tstamp location = 0x%p\n", alloc_param.ts);
	pr_debug("assigned pipe id 0x%x to task %d\n", pipe_id, task_id);

	/*allocate device type context*/
	sst_init_stream(&sst_drv_ctx->streams[str_id],
			alloc_param.codec_type, str_id,
			alloc_param.operation, 0);

	/* send msg to FW to allocate a stream */
	if (sst_create_ipc_msg(&msg, true))
		return -ENOMEM;

	/* The reply is matched on the private id, not the stream id. */
	block->drv_id = pvt_id;
	block->msg_id = IPC_CMD;

	sst_fill_header_mrfld(&msg->mrfld_header, IPC_CMD,
			task_id, 1, pvt_id);
	pr_debug("header:%x\n", msg->mrfld_header.p.header_high.full);
	msg->mrfld_header.p.header_high.part.res_rqd = 1;

	/* Mailbox layout: DSP header first, then the alloc params.
	 * (len mirrors the payload size and is otherwise unused.) */
	len = msg->mrfld_header.p.header_low_payload =
			sizeof(alloc_param) + sizeof(dsp_hdr);
	sst_fill_header_dsp(&dsp_hdr, IPC_IA_ALLOC_STREAM_MRFLD,
			pipe_id, sizeof(alloc_param));
	memcpy(msg->mailbox_data, &dsp_hdr, sizeof(dsp_hdr));
	memcpy(msg->mailbox_data + sizeof(dsp_hdr), &alloc_param,
			sizeof(alloc_param));
	str_info = &sst_drv_ctx->streams[str_id];
	pr_debug("header:%x\n", msg->mrfld_header.p.header_high.full);
	pr_debug("response rqd: %x",
			msg->mrfld_header.p.header_high.part.res_rqd);
	spin_lock_irqsave(&sst_drv_ctx->ipc_spin_lock, irq_flags);
	list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
	spin_unlock_irqrestore(&sst_drv_ctx->ipc_spin_lock, irq_flags);
	pr_debug("calling post_message\n");
	sst_drv_ctx->ops->post_message(&sst_drv_ctx->ipc_post_msg_wq);

	return str_id;
}
static void msm8960_enable_ext_spk_amp_gpio(u32 spk_amp_gpio) { int ret = 0; struct pm_gpio param = { .direction = PM_GPIO_DIR_OUT, .output_buffer = PM_GPIO_OUT_BUF_CMOS, .output_value = 1, .pull = PM_GPIO_PULL_NO, .vin_sel = PM_GPIO_VIN_S4, .out_strength = PM_GPIO_STRENGTH_MED, .function = PM_GPIO_FUNC_NORMAL, }; if (spk_amp_gpio == bottom_spk_pamp_gpio) { ret = gpio_request(bottom_spk_pamp_gpio, "BOTTOM_SPK_AMP"); if (ret) { pr_err("%s: Error requesting BOTTOM SPK AMP GPIO %u\n", __func__, bottom_spk_pamp_gpio); return; } ret = pm8xxx_gpio_config(bottom_spk_pamp_gpio, ¶m); if (ret) pr_err("%s: Failed to configure Bottom Spk Ampl" " gpio %u\n", __func__, bottom_spk_pamp_gpio); else { pr_debug("%s: enable Bottom spkr amp gpio\n", __func__); gpio_direction_output(bottom_spk_pamp_gpio, 1); } } else if (spk_amp_gpio == top_spk_pamp_gpio) { ret = gpio_request(top_spk_pamp_gpio, "TOP_SPK_AMP"); if (ret) { pr_err("%s: Error requesting GPIO %d\n", __func__, top_spk_pamp_gpio); return; } ret = pm8xxx_gpio_config(top_spk_pamp_gpio, ¶m); if (ret) pr_err("%s: Failed to configure Top Spk Ampl" " gpio %u\n", __func__, top_spk_pamp_gpio); else { pr_debug("%s: enable Top spkr amp gpio\n", __func__); gpio_direction_output(top_spk_pamp_gpio, 1); } } else { pr_err("%s: ERROR : Invalid External Speaker Ampl GPIO." " gpio = %u\n", __func__, spk_amp_gpio); return; } } static void msm8960_ext_spk_power_amp_on(u32 spk) { if (spk & (BOTTOM_SPK_AMP_POS | BOTTOM_SPK_AMP_NEG)) { if ((msm8960_ext_bottom_spk_pamp & BOTTOM_SPK_AMP_POS) && (msm8960_ext_bottom_spk_pamp & BOTTOM_SPK_AMP_NEG)) { pr_debug("%s() External Bottom Speaker Ampl already " "turned on. 
spk = 0x%08x\n", __func__, spk); return; } msm8960_ext_bottom_spk_pamp |= spk; if ((msm8960_ext_bottom_spk_pamp & BOTTOM_SPK_AMP_POS) && (msm8960_ext_bottom_spk_pamp & BOTTOM_SPK_AMP_NEG)) { msm8960_enable_ext_spk_amp_gpio(bottom_spk_pamp_gpio); pr_debug("%s: slepping 4 ms after turning on external " " Bottom Speaker Ampl\n", __func__); usleep_range(4000, 4000); } } else if (spk & (TOP_SPK_AMP_POS | TOP_SPK_AMP_NEG)) { if ((msm8960_ext_top_spk_pamp & TOP_SPK_AMP_POS) && (msm8960_ext_top_spk_pamp & TOP_SPK_AMP_NEG)) { pr_debug("%s() External Top Speaker Ampl already" "turned on. spk = 0x%08x\n", __func__, spk); return; } msm8960_ext_top_spk_pamp |= spk; if ((msm8960_ext_top_spk_pamp & TOP_SPK_AMP_POS) && (msm8960_ext_top_spk_pamp & TOP_SPK_AMP_NEG)) { msm8960_enable_ext_spk_amp_gpio(top_spk_pamp_gpio); pr_debug("%s: sleeping 4 ms after turning on " " external Top Speaker Ampl\n", __func__); usleep_range(4000, 4000); } } else { pr_err("%s: ERROR : Invalid External Speaker Ampl. spk = 0x%08x\n", __func__, spk); return; } } static void msm8960_ext_spk_power_amp_off(u32 spk) { if (spk & (BOTTOM_SPK_AMP_POS | BOTTOM_SPK_AMP_NEG)) { if (!msm8960_ext_bottom_spk_pamp) return; gpio_direction_output(bottom_spk_pamp_gpio, 0); gpio_free(bottom_spk_pamp_gpio); msm8960_ext_bottom_spk_pamp = 0; pr_debug("%s: sleeping 4 ms after turning off external Bottom" " Speaker Ampl\n", __func__); usleep_range(4000, 4000); } else if (spk & (TOP_SPK_AMP_POS | TOP_SPK_AMP_NEG)) { if (!msm8960_ext_top_spk_pamp) return; gpio_direction_output(top_spk_pamp_gpio, 0); gpio_free(top_spk_pamp_gpio); msm8960_ext_top_spk_pamp = 0; pr_debug("%s: sleeping 4 ms after turning off external Top" " Spkaker Ampl\n", __func__); usleep_range(4000, 4000); } else { pr_err("%s: ERROR : Invalid Ext Spk Ampl. 
spk = 0x%08x\n", __func__, spk); return; } } static void msm8960_ext_control(struct snd_soc_codec *codec) { struct snd_soc_dapm_context *dapm = &codec->dapm; mutex_lock(&dapm->codec->mutex); pr_debug("%s: msm8960_spk_control = %d", __func__, msm8960_spk_control); if (msm8960_spk_control == MSM8960_SPK_ON) { snd_soc_dapm_enable_pin(dapm, "Ext Spk Bottom Pos"); snd_soc_dapm_enable_pin(dapm, "Ext Spk Bottom Neg"); snd_soc_dapm_enable_pin(dapm, "Ext Spk Top Pos"); snd_soc_dapm_enable_pin(dapm, "Ext Spk Top Neg"); } else { snd_soc_dapm_disable_pin(dapm, "Ext Spk Bottom Pos"); snd_soc_dapm_disable_pin(dapm, "Ext Spk Bottom Neg"); snd_soc_dapm_disable_pin(dapm, "Ext Spk Top Pos"); snd_soc_dapm_disable_pin(dapm, "Ext Spk Top Neg"); } snd_soc_dapm_sync(dapm); mutex_unlock(&dapm->codec->mutex); } static int msm8960_get_spk(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { pr_debug("%s: msm8960_spk_control = %d", __func__, msm8960_spk_control); ucontrol->value.integer.value[0] = msm8960_spk_control; return 0; }
static int __init aries_rfkill_probe(struct platform_device *pdev) { int irq; int ret; /* Initialize wake locks */ wake_lock_init(&rfkill_wake_lock, WAKE_LOCK_SUSPEND, "bt_host_wake"); ret = gpio_request(GPIO_WLAN_BT_EN, "GPB"); if (ret < 0) { pr_err("[BT] Failed to request GPIO_WLAN_BT_EN!\n"); goto err_req_gpio_wlan_bt_en; } ret = gpio_request(GPIO_BT_nRST, "GPB"); if (ret < 0) { pr_err("[BT] Failed to request GPIO_BT_nRST!\n"); goto err_req_gpio_bt_nrst; } /* BT Host Wake IRQ */ irq = IRQ_BT_HOST_WAKE; ret = request_irq(irq, bt_host_wake_irq_handler, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, "bt_host_wake_irq_handler", NULL); if (ret < 0) { pr_err("[BT] Request_irq failed\n"); goto err_req_irq; } disable_irq(irq); bt_rfk = rfkill_alloc(bt_name, &pdev->dev, RFKILL_TYPE_BLUETOOTH, &bt_rfkill_ops, NULL); if (!bt_rfk) { pr_err("[BT] bt_rfk : rfkill_alloc is failed\n"); ret = -ENOMEM; goto err_alloc; } rfkill_init_sw_state(bt_rfk, 0); pr_debug("[BT] rfkill_register(bt_rfk)\n"); ret = rfkill_register(bt_rfk); if (ret) { pr_debug("********ERROR IN REGISTERING THE RFKILL********\n"); goto err_register; } rfkill_set_sw_state(bt_rfk, 1); bluetooth_set_power(NULL, RFKILL_USER_STATE_SOFT_BLOCKED); return ret; err_register: rfkill_destroy(bt_rfk); err_alloc: free_irq(irq, NULL); err_req_irq: gpio_free(GPIO_BT_nRST); err_req_gpio_bt_nrst: gpio_free(GPIO_WLAN_BT_EN); err_req_gpio_wlan_bt_en: return ret; }