int read_psh_data(struct psh_ia_priv *ia_data) { #if 0 struct psh_ext_if *psh_if_info = (struct psh_ext_if *)ia_data->platform_priv; int cur_read = 0, ret = 0; struct frame_head fh; struct timespec t1,t2,t3,t4; struct spi_message msg; struct spi_transfer xfer_fh = { .rx_buf = (void *)&fh, .len = sizeof(fh) }; struct spi_transfer xfer_payload = { .rx_buf = (void *)&psh_if_info->psh_frame }; int gpio_val = -1; int sequent_dummy = 0; static int loop = 0; #ifdef ENABLE_RPM /* We may need to zero all the buffer */ pm_runtime_get_sync(&psh_if_info->pshc->dev); #endif psh_if_info->gpio_psh_int = GPIO_PSH_INT; gpio_val = gpio_get_value(psh_if_info->gpio_psh_int); dev_dbg(&psh_if_info->pshc->dev, "%s, gpio_val=%d\n", __func__, gpio_val); /* Loop read till error or no more data */ while (!gpio_get_value(psh_if_info->gpio_psh_int)) { char *ptr; int len; if (ia_data->cmd_in_progress == CMD_RESET) break; else if (ia_data->cmd_in_progress != CMD_INVALID) schedule(); if (sequent_dummy >= 2) { /* something wrong, check FW */ dev_dbg(&psh_if_info->pshc->dev, "2 sequent dummy frame header read!"); break; } spi_message_init(&msg); spi_message_add_tail(&xfer_fh, &msg); ktime_get_ts(&t1); ret = spi_sync(psh_if_info->pshc, &msg); ktime_get_ts(&t3); if (ret) { dev_err(&psh_if_info->pshc->dev, "Read frame header error!" 
" ret=%d\n", ret); loop++; break; } dev_dbg(&psh_if_info->pshc->dev, "sign=0x%x(0x4853), len=%d\n", fh.sign, fh.length); if (fh.sign == LBUF_CELL_SIGN) { if (fh.length > LBUF_MAX_CELL_SIZE) { dev_err(&psh_if_info->pshc->dev, "frame size is too big!\n"); ret = -EPERM; break; } sequent_dummy = 0; } else { if (fh.sign || fh.length) { dev_err(&psh_if_info->pshc->dev, "wrong fh (0x%x, 0x%x)\n", fh.sign, fh.length); ret = -EPERM; break; } sequent_dummy++; continue; } //len = frame_size(fh.length) - sizeof(fh); len = fh.length; xfer_payload.len = len; dev_dbg(&psh_if_info->pshc->dev, "%s xfer_payload.len=%d\n", __func__, len); spi_message_init(&msg); spi_message_add_tail(&xfer_payload, &msg); ret = spi_sync(psh_if_info->pshc, &msg); if (ret) { dev_err(&psh_if_info->pshc->dev, "Read main frame error!" " ret=%d\n", ret); break; } ptr = psh_if_info->psh_frame; ktime_get_ts(&t4); //dump_cmd_resp(ptr, len); while (len > 0) { struct cmd_resp *resp = (struct cmd_resp *)ptr; u32 size = sizeof(*resp) + resp->data_len; ret = ia_handle_frame(ia_data, ptr, size); if (ret > 0) { cur_read += ret; if (cur_read > 250) { cur_read = 0; sysfs_notify(&psh_if_info->pshc->dev.kobj, NULL, "data_size"); } } //ptr += frame_size(size); //len -= frame_size(size); ptr += size; len -= size; } } #ifdef ENABLE_RPM pm_runtime_mark_last_busy(&psh_if_info->pshc->dev); pm_runtime_put_autosuspend(&psh_if_info->pshc->dev); #endif if (cur_read){ sysfs_notify(&psh_if_info->pshc->dev.kobj, NULL, "data_size"); ktime_get_ts(&t2); long elapsedTime_t12 = timespec_to_ns(&t2) - timespec_to_ns(&t1); long elapsedTime_t13 = timespec_to_ns(&t3) - timespec_to_ns(&t1); long elapsedTime_t34 = timespec_to_ns(&t4) - timespec_to_ns(&t3); long elapsedTime_t42 = timespec_to_ns(&t2) - timespec_to_ns(&t4); dev_dbg(&psh_if_info->pshc->dev, "elapsedTime_t12 = %lld ns, t13 = %lld ns, t34 = %lld ns, t42 = %lld ns\n", elapsedTime_t12, elapsedTime_t13, elapsedTime_t34, elapsedTime_t42); } if (loop > 8) { queue_work(psh_if_info->wq, 
&psh_if_info->work); loop = 0; } return ret; #else return 0; #endif } #define CMD_START_STREAMING (3) #define CMD_STOP_STREAMING (4) void dump_tx_buf(struct ia_cmd *cmd, int len) { struct sensor_cfg_param *stream_cfg; printk(KERN_DEBUG "%s, tran_id=%d, cmd_id=%d, sensor_id=%d\n", __func__, cmd->tran_id, cmd->cmd_id, cmd->sensor_id); if(cmd->cmd_id == CMD_START_STREAMING){ stream_cfg = (struct sensor_cfg_param *)cmd->param; printk(KERN_DEBUG"sample_freq=%d, buffer_delay=%d\n", stream_cfg->sample_freq, stream_cfg->buff_delay); } } #define HOST2PSH_PACKET_LEN (16) #if 0 int process_send_cmd(struct psh_ia_priv *ia_data, int ch, struct ia_cmd *cmd, int len) { struct psh_ext_if *psh_if_info = (struct psh_ext_if *)ia_data->platform_priv; int ret = 0; int i = 0; char cmd_buf[HOST2PSH_PACKET_LEN]; // fix host2psh package len to 16 len = HOST2PSH_PACKET_LEN; memset(cmd_buf, '\0', HOST2PSH_PACKET_LEN); memcpy(cmd_buf, (char *)cmd, len); struct spi_message msg; struct spi_transfer xfer = { .len = len, .tx_buf = (void *)cmd_buf }; for(i=0; i<len; i++) dev_dbg(&psh_if_info->pshc->dev," %d ", cmd_buf[i]); dev_dbg(&psh_if_info->pshc->dev,"\n"); pm_runtime_get_sync(&psh_if_info->pshc->dev); /* Host needs to reset FW for each boot up by sending CMD_RESET Once FW reboot, host will enable interrupt and wait data from sensorhub */ /* process_send_cmd+0x11f/0x1d0 ia_send_cmd+0x7f/0x140 ia_start_control+0xe5/0x1a0 dev_attr_store+0x18/0x30 sysfs_write_file+0xe7/0x160 vfs_write+0xbe/0x1e0 SyS_write+0x4d/0xa0 ia32_do_call+0x13/0x13 */ if (ch == 0 && cmd->cmd_id == CMD_RESET) { if (psh_if_info->irq_disabled == 0) { disable_irq(psh_if_info->pshc->irq); psh_if_info->irq_disabled = 1; dev_info(&psh_if_info->pshc->dev, "%s disable irq %d\n", psh_if_info->pshc->irq); } /* first send soft reset to disable sensors running, or sensor I2C bus may hang */ spi_message_init(&msg); spi_message_add_tail(&xfer, &msg); ret = spi_sync(psh_if_info->pshc, &msg); msleep(200); 
gpio_set_value(psh_if_info->gpio_psh_rst, 0); usleep_range(10000, 10000); gpio_set_value(psh_if_info->gpio_psh_rst, 1); /* wait for pshfw to run */ msleep(1000); if (psh_if_info->irq_disabled == 1) { dev_info(&psh_if_info->pshc->dev, "%s enable irq %d\n", psh_if_info->pshc->irq); enable_irq(psh_if_info->pshc->irq); psh_if_info->irq_disabled = 0; } } else if (ch == 0 && cmd->cmd_id == CMD_FW_UPDATE) { if (psh_if_info->irq_disabled == 0) { dev_info(&psh_if_info->pshc->dev, "%s disable irq %d\n", psh_if_info->pshc->irq); disable_irq(psh_if_info->pshc->irq); psh_if_info->irq_disabled = 1; } msleep(1000); ret = 0; goto exit; } else if (ch == 0 && psh_if_info->irq_disabled == 1) { /* prevent sending command during firmware updating, * or update will fail. */ ret = -EPERM; goto exit; } while(!gpio_get_value(psh_if_info->gpio_psh_int)){ msleep(1); } spi_message_init(&msg); spi_message_add_tail(&xfer, &msg); ret = spi_sync(psh_if_info->pshc, &msg); dump_tx_buf(cmd, len); if (ret) { dev_err(&psh_if_info->pshc->dev, "sendcmd through spi fail!\n"); ret = -EIO; } else { ret = 0; } #ifdef DRV_POLLING_MODE if(cmd->cmd_id == CMD_START_STREAMING) { dev_err(&psh_if_info->pshc->dev, "%s start_stream\n", __func__); queue_delayed_work(psh_if_info->wq, &psh_if_info->dwork, POLLING_HZ); } else if (cmd->cmd_id == CMD_STOP_STREAMING) { dev_err(&psh_if_info->pshc->dev, "%s stop_stream\n", __func__); cancel_delayed_work(&psh_if_info->dwork); } #endif pm_runtime_mark_last_busy(&psh_if_info->pshc->dev); exit: pm_runtime_put_autosuspend(&psh_if_info->pshc->dev); return ret; } #else static void build_transfer_buffer(void* lp_new_package_buffer, void* lp_buffer, int buffer_size) { struct frame_head* lp_fh = (struct frame_head*)lp_new_package_buffer; INIT_FRAME_HEAD(lp_fh, buffer_size); memcpy(lp_fh + 1, lp_buffer, buffer_size); return; } /*buffer size is just fh's palyload total size, not include fh head size*/ struct send_list_entry* build_send_list_entry(void* lp_buffer, int buffer_size) { 
struct send_list_entry* lp_new_entry = NULL; int total_size = buffer_size + SIZE_OF_FRAME_HEAD; if (!lp_buffer || total_size > MAX_SEND_DATA_SIZE) { return NULL; } lp_new_entry = kzalloc(sizeof(struct send_list_entry), GFP_KERNEL); if (lp_new_entry) { build_transfer_buffer(lp_new_entry->data, lp_buffer, buffer_size); lp_new_entry->used_size = total_size; lp_new_entry->debug_index = 0; } return lp_new_entry; } void insert_send_data_entry_to_list(struct psh_ext_if* lp_psh_if_info, struct send_list_entry* lp_new_entry) { mutex_lock(&lp_psh_if_info->send_data_list.lock); //add new to tail of the list list_add(&lp_new_entry->list_entry, lp_psh_if_info->send_data_list.head.prev); mutex_unlock(&lp_psh_if_info->send_data_list.lock); } struct send_list_entry* remove_send_data_entry_from_list(struct psh_ext_if* lp_psh_if_info) { struct send_list_entry* lp_removed_entry; mutex_lock(&lp_psh_if_info->send_data_list.lock); lp_removed_entry = NULL; if (!list_empty(&lp_psh_if_info->send_data_list.head)) { lp_removed_entry = list_entry(lp_psh_if_info->send_data_list.head.next, struct send_list_entry, list_entry); list_del(lp_psh_if_info->send_data_list.head.next); } mutex_unlock(&lp_psh_if_info->send_data_list.lock); return lp_removed_entry; } /* The len is actual cmd size include parameter sizes*/ int process_send_cmd(struct psh_ia_priv *ia_data, int ch, struct ia_cmd *cmd, int len) { struct psh_ext_if *psh_if_info = (struct psh_ext_if *)ia_data->platform_priv; int ret = 0; int i = 0; char cmd_buf[HOST2PSH_PACKET_LEN]; struct send_list_entry* lp_new_entry = NULL; /*yy: remove this limitation*/ // fix host2psh package len to 16 //len = HOST2PSH_PACKET_LEN; //len += (sizeof(struct ia_cmd) - CMD_PARAM_MAX_SIZE); // memset(cmd_buf, '\0', HOST2PSH_PACKET_LEN); // memcpy(cmd_buf, (char *)cmd, len); lp_new_entry = build_send_list_entry(cmd, len); if (!lp_new_entry) { dev_err(&psh_if_info->pshc->dev," drop send data becuause no enough memory.\n"); return -1; } for(i=0; i<len; i++) 
dev_dbg(&psh_if_info->pshc->dev," %d ", cmd_buf[i]); dev_dbg(&psh_if_info->pshc->dev,"\n"); pm_runtime_get_sync(&psh_if_info->pshc->dev); /* Host needs to reset FW for each boot up by sending CMD_RESET Once FW reboot, host will enable interrupt and wait data from sensorhub */ /* process_send_cmd+0x11f/0x1d0 ia_send_cmd+0x7f/0x140 ia_start_control+0xe5/0x1a0 dev_attr_store+0x18/0x30 sysfs_write_file+0xe7/0x160 vfs_write+0xbe/0x1e0 SyS_write+0x4d/0xa0 ia32_do_call+0x13/0x13 */ /* put the send data entry to send list and request delay worker */ insert_send_data_entry_to_list(psh_if_info, lp_new_entry); /* #ifdef DRV_POLLING_MODE if(cmd->cmd_id == CMD_START_STREAMING) { dev_err(&psh_if_info->pshc->dev, "%s start_stream\n", __func__); psh_if_info->task_flag = TASK_FLAG_REQUEST_LOOP; queue_delayed_work(psh_if_info->wq, &psh_if_info->dwork, POLLING_HZ); } else if (cmd->cmd_id == CMD_STOP_STREAMING) { dev_err(&psh_if_info->pshc->dev, "%s stop_stream\n", __func__); cancel_delayed_work(&psh_if_info->dwork); } #endif */ pm_runtime_mark_last_busy(&psh_if_info->pshc->dev); pm_runtime_put_autosuspend(&psh_if_info->pshc->dev); return ret; } /* The len is actual cmd size include parameter sizes please make sure you already paused pulling thread before you call this api! */ int process_send_cmd_sync(struct psh_ia_priv *ia_data, int ch, struct ia_cmd *cmd, int len) { int ret = -1; struct psh_ext_if *psh_if_info = (struct psh_ext_if *)ia_data->platform_priv; struct send_list_entry* lp_new_entry = NULL; //len += (sizeof(struct ia_cmd) - CMD_PARAM_MAX_SIZE); pm_runtime_get_sync(&psh_if_info->pshc->dev); lp_new_entry = build_send_list_entry(cmd, len); if (lp_new_entry) { ret = do_transfer(psh_if_info, lp_new_entry->data, lp_new_entry->used_size, NULL); kfree(lp_new_entry); } pm_runtime_mark_last_busy(&psh_if_info->pshc->dev); pm_runtime_put_autosuspend(&psh_if_info->pshc->dev); return ret; }
int do_setup_ddr(struct device *dev) { struct psh_ia_priv *ia_data = (struct psh_ia_priv *)dev_get_drvdata(dev); struct psh_plt_priv *plt_priv = (struct psh_plt_priv *)ia_data->platform_priv; uintptr_t ddr_phy = plt_priv->ddr_phy; uintptr_t imr2_phy = plt_priv->imr2_phy; const struct firmware *fw_entry; struct ia_cmd cmd_user = { .cmd_id = CMD_SETUP_DDR, .sensor_id = 0, }; static int fw_load_done; int load_default = 0; char fname[40]; if (fw_load_done) return 0; #ifdef VPROG2_SENSOR intel_scu_ipc_msic_vprog2(1); msleep(500); #endif again: if (!request_firmware(&fw_entry, fname, dev)) { if (!fw_entry) return -ENOMEM; psh_debug("psh fw size %d virt:0x%p\n", (int)fw_entry->size, fw_entry->data); if (fw_entry->size > APP_IMR_SIZE) { psh_err("psh fw size too big\n"); } else { struct ia_cmd cmd = { .cmd_id = CMD_RESET, .sensor_id = 0, }; memcpy(plt_priv->imr2, fw_entry->data, fw_entry->size); *(uintptr_t *)(&cmd.param) = imr2_phy; cmd.tran_id = 0x1; if (process_send_cmd(ia_data, PSH2IA_CHANNEL3, &cmd, 7)) return -1; ia_data->load_in_progress = 1; wait_for_completion_timeout(&ia_data->cmd_load_comp, 3 * HZ); fw_load_done = 1; } release_firmware(fw_entry); } else { psh_err("cannot find psh firmware(%s)\n", fname); if (!load_default) { psh_err("try to load default psh.bin\n"); snprintf(fname, 20, "psh.bin"); load_default = 1; goto again; } } ia_lbuf_read_reset(ia_data->lbuf); *(unsigned long *)(&cmd_user.param) = ddr_phy; return ia_send_cmd(ia_data, &cmd_user, 7); } static void psh2ia_channel_handle(u32 msg, u32 param, void *data) { struct pci_dev *pdev = (struct pci_dev *)data; struct psh_ia_priv *ia_data = (struct psh_ia_priv *)dev_get_drvdata(&pdev->dev); struct psh_plt_priv *plt_priv = (struct psh_plt_priv *)ia_data->platform_priv; u8 *dbuf = NULL; u16 size = 0; if (unlikely(ia_data->load_in_progress)) { ia_data->load_in_progress = 0; complete(&ia_data->cmd_load_comp); return; } while (!ia_lbuf_read_next(ia_data, &plt_priv->lbuf, &dbuf, &size)) { 
ia_handle_frame(ia_data, dbuf, size); } sysfs_notify(&pdev->dev.kobj, NULL, "data_size"); } static int psh_imr_init(struct pci_dev *pdev, int imr_src, uintptr_t *phy_addr, void **virt_addr, unsigned size, int bar) { struct page *pg; void __iomem *mem; int ret = 0; unsigned long start = 0, len; if (imr_src == imr_allocate) { /* dynamic alloct memory region */ pg = alloc_pages(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO, get_order(size)); if (!pg) { dev_err(&pdev->dev, "can not allocate app page imr buffer\n"); ret = -ENOMEM; goto err; } *phy_addr = page_to_phys(pg); *virt_addr = page_address(pg); } else if (imr_src == imr_pci_shim) { /* dedicate isolated memory region */ start = pci_resource_start(pdev, bar); len = pci_resource_len(pdev, bar); if (!start || !len) { dev_err(&pdev->dev, "bar %d address not set\n", bar); ret = -EINVAL; goto err; } ret = pci_request_region(pdev, bar, "psh"); if (ret) { dev_err(&pdev->dev, "failed to request psh region " "0x%lx-0x%lx\n", start, (unsigned long)pci_resource_end(pdev, bar)); goto err; } mem = ioremap_nocache(start, len); if (!mem) { dev_err(&pdev->dev, "can not ioremap app imr address\n"); ret = -EINVAL; goto err_ioremap; } *phy_addr = start; *virt_addr = (void *)mem; } else { dev_err(&pdev->dev, "Invalid chip imr source\n"); ret = -EINVAL; goto err; } return 0; err_ioremap: pci_release_region(pdev, bar); err: return ret; } static void psh_imr_free(int imr_src, void *virt_addr, unsigned size) { if (imr_src == imr_allocate) __free_pages(virt_to_page(virt_addr), get_order(size)); else if (imr_src == imr_pci_shim) iounmap((void __iomem *)virt_addr); } static int psh_probe(struct pci_dev *pdev, const struct pci_device_id *id) { int ret = -1; struct psh_ia_priv *ia_data; struct psh_plt_priv *plt_priv; ret = pci_enable_device(pdev); if (ret) { dev_err(&pdev->dev, "fail to enable psh pci device\n"); goto pci_err; } plt_priv = kzalloc(sizeof(*plt_priv), GFP_KERNEL); if (!plt_priv) { dev_err(&pdev->dev, "can not allocate plt_priv\n"); goto 
plt_err; } switch (intel_mid_identify_cpu()) { case INTEL_MID_CPU_CHIP_TANGIER: if (intel_mid_soc_stepping() == 0) plt_priv->imr_src = imr_allocate; else plt_priv->imr_src = imr_pci_shim; break; case INTEL_MID_CPU_CHIP_ANNIEDALE: plt_priv->imr_src = imr_pci_shim; break; default: dev_err(&pdev->dev, "error memory region\n"); goto psh_imr2_err; break; } /* init IMR2 */ ret = psh_imr_init(pdev, plt_priv->imr_src, &plt_priv->imr2_phy, &plt_priv->imr2, APP_IMR_SIZE, 0); if (ret) goto psh_imr2_err; /* init IMR3 */ ret = psh_imr_init(pdev, plt_priv->imr_src, &plt_priv->ddr_phy, &plt_priv->ddr, BUF_IA_DDR_SIZE, 1); if (ret) goto psh_ddr_err; ret = psh_ia_common_init(&pdev->dev, &ia_data); if (ret) { dev_err(&pdev->dev, "fail to init psh_ia_common\n"); goto psh_ia_err; } ia_lbuf_read_init(&plt_priv->lbuf, plt_priv->ddr, BUF_IA_DDR_SIZE, NULL); ia_data->lbuf = &plt_priv->lbuf; plt_priv->hwmon_dev = hwmon_device_register(&pdev->dev); if (!plt_priv->hwmon_dev) { dev_err(&pdev->dev, "fail to register hwmon device\n"); goto hwmon_err; } ia_data->platform_priv = plt_priv; ret = intel_psh_ipc_bind(PSH_RECV_CH0, psh2ia_channel_handle, pdev); if (ret) { dev_err(&pdev->dev, "fail to bind channel\n"); goto irq_err; } /* just put this dev into suspend status always, since this is fake */ pm_runtime_put_noidle(&pdev->dev); pm_runtime_allow(&pdev->dev); return 0; irq_err: hwmon_device_unregister(plt_priv->hwmon_dev); hwmon_err: psh_ia_common_deinit(&pdev->dev); psh_ia_err: psh_imr_free(plt_priv->imr_src, plt_priv->ddr, BUF_IA_DDR_SIZE); psh_ddr_err: psh_imr_free(plt_priv->imr_src, plt_priv->imr2, APP_IMR_SIZE); psh_imr2_err: kfree(plt_priv); plt_err: pci_dev_put(pdev); pci_err: return ret; }
/*
 * process_received_data() - dispatch a buffer of concatenated PSH frames.
 * @lp_psh_if_info: interface state (used for dev logging / sysfs notify)
 * @lp_buffer:      raw receive buffer: a sequence of frame_head-prefixed
 *                  frames, each frame containing one or more cmd_resp
 *                  records back to back
 * @buffer_size:    total number of valid bytes in @lp_buffer
 * @receive_ts_ns:  receive timestamp forwarded to ia_handle_frame()
 *
 * Outer loop walks frame_head + payload units; inner loop walks the
 * cmd_resp records inside one frame's payload.  Every ~250 bytes of
 * handled response data, userspace is poked via the "data_size" sysfs
 * attribute so the daemon fetches data before the buffer fills.
 */
static void process_received_data(struct psh_ext_if *lp_psh_if_info,
	u8* lp_buffer, int buffer_size, u64 receive_ts_ns)
{
	int ret_value;
	int cur_read = 0;        /* bytes handled since the last notify */
	int processed_size = 0;  /* bytes of @lp_buffer consumed so far */
	int total_buffer_size;
	int one_frame_data_size; /* payload length of the current frame */
	struct psh_ia_priv* lp_ia_data = lp_psh_if_info->ia_data;
	/* One pointer viewed both as a frame header and as raw bytes. */
	union {
		struct frame_head* lp_fh;
		u8* lp_u8;
	}check_ptr;

	check_ptr.lp_u8 = lp_buffer;
	total_buffer_size = buffer_size;
	while(processed_size < total_buffer_size) {
		/* Payload starts right after the frame header. */
		lp_buffer = (u8*)(check_ptr.lp_fh + 1);
		buffer_size = check_ptr.lp_fh->length;
		one_frame_data_size = check_ptr.lp_fh->length;
		cur_read = 0;
		/* Walk the cmd_resp records packed in this frame's payload. */
		while (buffer_size > 0) {
			struct cmd_resp *resp = (struct cmd_resp *)lp_buffer;
			u32 size = sizeof(*resp) + resp->data_len;
			ret_value = ia_handle_frame(lp_ia_data, lp_buffer,
				size, receive_ts_ns);
			if (ret_value > 0) {
				cur_read += ret_value;
				/* Nudge the daemon before data piles up. */
				if (cur_read > 250) {
					cur_read = 0;
					sysfs_notify(&lp_psh_if_info->pshc->dev.kobj,
						NULL, "data_size");
					dev_err(&lp_psh_if_info->pshc->dev,
						"request daemon to fetch data\n");
				}
			}
			lp_buffer += size;
			buffer_size -= size;
		}
		/* Flush a final notify for any remainder below the threshold. */
		if (cur_read) {
			sysfs_notify(&lp_psh_if_info->pshc->dev.kobj,
				NULL, "data_size");
		}
		/* Advance past this frame: header plus its payload. */
		check_ptr.lp_u8 += (SIZE_OF_FRAME_HEAD + one_frame_data_size);
		processed_size += (SIZE_OF_FRAME_HEAD + one_frame_data_size);
	}
}