static int start_ipc(struct link_device *ld, struct io_device *iod) { struct sk_buff *skb; char data[1] = {'a'}; int err; struct usb_link_device *usb_ld = to_usb_link_device(ld); struct link_pm_data *pm_data = usb_ld->link_pm_data; struct device *dev = &usb_ld->usbdev->dev; struct if_usb_devdata *pipe_data = &usb_ld->devdata[IF_USB_FMT_EP]; if (!usb_ld->if_usb_connected) { mif_err("HSIC not connected, skip start ipc\n"); err = -ENODEV; goto exit; } retry: if (ld->mc->phone_state != STATE_ONLINE) { mif_err("MODEM is not online, skip start ipc\n"); err = -ENODEV; goto exit; } /* check usb runtime pm first */ if (dev->power.runtime_status != RPM_ACTIVE) { if (!pm_data->resume_requested) { mif_debug("QW PM\n"); INIT_COMPLETION(pm_data->active_done); queue_delayed_work(pm_data->wq, &pm_data->link_pm_work, 0); } mif_debug("Wait pm\n"); err = wait_for_completion_timeout(&pm_data->active_done, msecs_to_jiffies(500)); /* timeout or -ERESTARTSYS */ if (err <= 0) goto retry; } pm_runtime_get_sync(dev); mif_err("send 'a'\n"); skb = alloc_skb(16, GFP_ATOMIC); if (unlikely(!skb)) { pm_runtime_put(dev); return -ENOMEM; } memcpy(skb_put(skb, 1), data, 1); skbpriv(skb)->iod = iod; skbpriv(skb)->ld = ld; if (!usb_ld->if_usb_connected || !usb_ld->usbdev) return -ENODEV; usb_mark_last_busy(usb_ld->usbdev); err = usb_tx_urb_with_skb(usb_ld->usbdev, skb, pipe_data); if (err < 0) { mif_err("usb_tx_urb fail\n"); dev_kfree_skb_any(skb); } pm_runtime_put(dev); exit: return err; }
/*
 * __issue_avs_command() - issue one command to the AVS co-processor mailbox.
 * @priv:    driver state (mailbox and interrupt register bases)
 * @cmd:     command code to write to the mailbox
 * @is_send: true if @args carries parameters to send with the command,
 *           false if @args should receive values returned by the firmware
 * @args:    optional array of AVS_MAX_CMD_ARGS words (may be NULL)
 *
 * Serialized by priv->sem. Returns 0 on success, -EAGAIN if the
 * co-processor is busy, -ETIMEDOUT if it did not answer in time, a
 * firmware status converted to an errno, or the error from an
 * interrupted semaphore acquisition.
 */
static int __issue_avs_command(struct private_data *priv, int cmd, bool is_send,
			       u32 args[])
{
	unsigned long time_left = msecs_to_jiffies(AVS_TIMEOUT);
	void __iomem *base = priv->base;
	unsigned int i;
	int ret;
	u32 val;

	ret = down_interruptible(&priv->sem);
	if (ret)
		return ret;

	/*
	 * Make sure no other command is currently running: cmd is 0 if AVS
	 * co-processor is idle. Due to the guard above, we should almost never
	 * have to wait here.
	 */
	for (i = 0, val = 1; val != 0 && i < AVS_LOOP_LIMIT; i++)
		val = readl(base + AVS_MBOX_COMMAND);

	/*
	 * Give the caller a chance to retry if AVS is busy. Fix: test the
	 * value actually read instead of "i == AVS_LOOP_LIMIT" — the loop
	 * also exits with i == AVS_LOOP_LIMIT when the mailbox drains on
	 * the final iteration, which the original misreported as busy.
	 */
	if (val != 0) {
		ret = -EAGAIN;
		goto out;
	}

	/* Clear status before we begin. */
	writel(AVS_STATUS_CLEAR, base + AVS_MBOX_STATUS);

	/* We need to send arguments for this command. */
	if (args && is_send) {
		for (i = 0; i < AVS_MAX_CMD_ARGS; i++)
			writel(args[i], base + AVS_MBOX_PARAM(i));
	}

	/* Protect from spurious interrupts. */
	reinit_completion(&priv->done);

	/* Now issue the command & tell firmware to wake up to process it. */
	writel(cmd, base + AVS_MBOX_COMMAND);
	writel(AVS_CPU_L2_INT_MASK, priv->avs_intr_base + AVS_CPU_L2_SET0);

	/* Wait for AVS co-processor to finish processing the command. */
	time_left = wait_for_completion_timeout(&priv->done, time_left);

	/*
	 * If the AVS status is not in the expected range, it means AVS didn't
	 * complete our command in time, and we return an error. Also, if there
	 * is no "time left", we timed out waiting for the interrupt.
	 */
	val = readl(base + AVS_MBOX_STATUS);
	if (time_left == 0 || val == 0 || val > AVS_STATUS_MAX) {
		dev_err(priv->dev, "AVS command %#x didn't complete in time\n",
			cmd);
		dev_err(priv->dev, " Time left: %u ms, AVS status: %#x\n",
			jiffies_to_msecs(time_left), val);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* This command returned arguments, so we read them back. */
	if (args && !is_send) {
		for (i = 0; i < AVS_MAX_CMD_ARGS; i++)
			args[i] = readl(base + AVS_MBOX_PARAM(i));
	}

	/* Clear status to tell AVS co-processor we are done. */
	writel(AVS_STATUS_CLEAR, base + AVS_MBOX_STATUS);

	/* Convert firmware errors to errno's as much as possible. */
	switch (val) {
	case AVS_STATUS_INVALID:
		ret = -EINVAL;
		break;
	case AVS_STATUS_NO_SUPP:
		ret = -ENOTSUPP;
		break;
	case AVS_STATUS_NO_MAP:
		ret = -ENOENT;
		break;
	case AVS_STATUS_MAP_SET:
		ret = -EEXIST;
		break;
	case AVS_STATUS_FAILURE:
		ret = -EIO;
		break;
	}

out:
	up(&priv->sem);

	return ret;
}
/*
 * lsm330dlc_gyro_fifo_self_test() - FIFO watermark / zero-rate self test.
 * @data:           driver state (i2c client, fifo_data buffer, cal_data)
 * @cal_pass:       out: set to 1 if calibration data was stored, 0 on an
 *                  out-of-range sample
 * @zero_rate_data: out: last zero-rate reading per axis, scaled by
 *                  175/10000 (presumably 17.5 mdps/LSB at 500 dps —
 *                  TODO confirm against the datasheet)
 *
 * Configures the gyro for FIFO mode, collects a FIFO full of samples
 * twice, verifies every sample lies inside [MIN_ZERO_RATE, MAX_ZERO_RATE],
 * and, if the two zero-rate readings agree within +/-5 on all axes,
 * averages the samples into cal_data and writes it to
 * CALIBRATION_FILE_PATH.
 *
 * Return: 1 on success, 0 on failure, 2 to request a retry.
 * NOTE(review): one path returns a negative errno when the calibration
 * file cannot be opened, which is inconsistent with the 1/0/2
 * convention — confirm callers handle it.
 */
static int lsm330dlc_gyro_fifo_self_test(struct lsm330dlc_gyro_data *data,
	u8 *cal_pass, s16 *zero_rate_data)
{
	struct gyro_t raw_data;
	int err;
	int i, j;
	s16 raw[3] = {0,}, zero_rate_delta[3] = {0,};
	int sum_raw[3] = {0,};
	bool zero_rate_read_2nd = false;
	u8 reg[5];
	u8 fifo_pass = 2;	/* 1: success, 0: fail, 2: retry */
	u8 status_reg;
	struct file *cal_filp = NULL;
	mm_segment_t old_fs;

	/* fifo mode, enable interrupt, 500dps */
	reg[0] = 0x6F;
	reg[1] = 0x00;
	reg[2] = 0x04;
	reg[3] = 0x90;
	reg[4] = 0x40;

	/* Retry the control-register block write up to 10 times. */
	for (i = 0; i < 10; i++) {
		err = i2c_smbus_write_i2c_block_data(data->client,
			CTRL_REG1 | AC, sizeof(reg), reg);
		if (err >= 0)
			break;
	}
	if (err < 0) {
		pr_err("%s: CTRL_REGs i2c writing failed\n", __func__);
		goto exit;
	}

	/* Power up, wait for 800ms for stable output */
	msleep(800);

read_zero_rate_again:
	/* Drain any stale samples by dropping to bypass mode first. */
	for (i = 0; i < 10; i++) {
		err = i2c_smbus_write_byte_data(data->client,
				FIFO_CTRL_REG, BYPASS_MODE);
		if (err >= 0)
			break;
	}
	if (err < 0) {
		pr_err("%s : failed to set bypass_mode\n", __func__);
		goto exit;
	}

	/* Re-arm FIFO mode with the test watermark level. */
	for (i = 0; i < 10; i++) {
		err = i2c_smbus_write_byte_data(data->client,
			FIFO_CTRL_REG, FIFO_MODE | FIFO_TEST_WTM);
		if (err >= 0)
			break;
	}
	if (err < 0) {
		pr_err("%s: failed to set fifo_mode\n", __func__);
		goto exit;
	}

	/* if interrupt mode */
	if (!data->enable && data->interruptible) {
		enable_irq(data->client->irq);
		err = wait_for_completion_timeout(&data->data_ready, 5*HZ);
		msleep(200);
		if (err <= 0) {
			disable_irq(data->client->irq);
			if (!err)
				pr_err("%s: wait timed out\n", __func__);
			goto exit;
		}
	/* if polling mode */
	} else
		msleep(200);

	/* check out watermark status */
	status_reg = i2c_smbus_read_byte_data(data->client, FIFO_SRC_REG);
	if (!(status_reg & 0x80)) {
		pr_err("%s: Watermark level is not enough(0x%x)\n",
			__func__, status_reg);
		goto exit;
	}

	/* read fifo entries */
	err = lsm330dlc_gyro_read_values(data->client,
			&raw_data, FIFO_TEST_WTM + 2);
	if (err < 0) {
		pr_err("%s: lsm330dlc_gyro_read_values() failed\n",
			__func__);
		goto exit;
	}

	/* print out fifo data */
	printk(KERN_INFO "[gyro_self_test] fifo data\n");
	/* Each sample is sizeof(raw_data) bytes: x, y, z as
	 * little-endian s16 pairs in fifo_data. */
	for (i = 0; i < sizeof(raw_data) * (FIFO_TEST_WTM + 1);
		i += sizeof(raw_data)) {
		raw[0] = (data->fifo_data[i+1] << 8)
				| data->fifo_data[i];
		raw[1] = (data->fifo_data[i+3] << 8)
				| data->fifo_data[i+2];
		raw[2] = (data->fifo_data[i+5] << 8)
				| data->fifo_data[i+4];
		pr_info("%2dth: %8d %8d %8d\n", i/6, raw[0], raw[1], raw[2]);

		/* for calibration of gyro sensor data */
		sum_raw[0] += raw[0];
		sum_raw[1] += raw[1];
		sum_raw[2] += raw[2];

		/* A single out-of-window sample fails the whole test. */
		for (j = 0; j < 3; j++) {
			if (raw[j] < MIN_ZERO_RATE || raw[j] > MAX_ZERO_RATE) {
				pr_err("%s: %dth data(%d) is out of zero-rate",
					__func__, i/6, raw[j]);
				pr_err("%s: fifo test failed\n", __func__);
				fifo_pass = 0;
				*cal_pass = 0;
				goto exit;
			}
		}
	}

	/* zero_rate_data */
	zero_rate_data[0] = raw[0] * 175 / 10000;
	zero_rate_data[1] = raw[1] * 175 / 10000;
	zero_rate_data[2] = raw[2] * 175 / 10000;

	if (zero_rate_read_2nd == true) {
		/* check zero_rate second time */
		zero_rate_delta[0] -= zero_rate_data[0];
		zero_rate_delta[1] -= zero_rate_data[1];
		zero_rate_delta[2] -= zero_rate_data[2];
		pr_info("[gyro_self_test] zero rate second: %8d %8d %8d\n",
			zero_rate_data[0], zero_rate_data[1],
			zero_rate_data[2]);
		pr_info("[gyro_self_test] zero rate delta: %8d %8d %8d\n",
			zero_rate_delta[0], zero_rate_delta[1],
			zero_rate_delta[2]);

		/* Both readings must agree within +/-5 on every axis. */
		if ((-5 < zero_rate_delta[0] && zero_rate_delta[0] < 5) &&
			(-5 < zero_rate_delta[1] && zero_rate_delta[1] < 5) &&
			(-5 < zero_rate_delta[2] && zero_rate_delta[2] < 5)) {
			/* calibration of gyro sensor data */
			data->cal_data.x = sum_raw[0]/(FIFO_TEST_WTM + 1);
			data->cal_data.y = sum_raw[1]/(FIFO_TEST_WTM + 1);
			data->cal_data.z = sum_raw[2]/(FIFO_TEST_WTM + 1);
			pr_info("%s: cal data (%d,%d,%d)\n", __func__,
				data->cal_data.x, data->cal_data.y,
				data->cal_data.z);

			/* save cal data */
			old_fs = get_fs();
			set_fs(KERNEL_DS);

			cal_filp = filp_open(CALIBRATION_FILE_PATH,
					O_CREAT | O_TRUNC | O_WRONLY, 0666);
			if (IS_ERR(cal_filp)) {
				pr_err("%s: Can't open calibration file\n",
					__func__);
				set_fs(old_fs);
				err = PTR_ERR(cal_filp);
				/* NOTE(review): negative errno return here
				 * breaks the usual 1/0/2 result codes. */
				return err;
			}

			err = cal_filp->f_op->write(cal_filp,
				(char *)&data->cal_data, 3 * sizeof(s16),
					&cal_filp->f_pos);
			if (err != 3 * sizeof(s16)) {
				pr_err("%s: Can't write the cal data to file\n",
					__func__);
				err = -EIO;
			}

			filp_close(cal_filp, current->files);
			set_fs(old_fs);
			*cal_pass = 1;
		} /* else calibration is failed */
	} else {
		/* check zero_rate first time, go to check again */
		zero_rate_read_2nd = true;
		sum_raw[0] = 0;
		sum_raw[1] = 0;
		sum_raw[2] = 0;
		zero_rate_delta[0] = zero_rate_data[0];
		zero_rate_delta[1] = zero_rate_data[1];
		zero_rate_delta[2] = zero_rate_data[2];
		pr_info("[gyro_self_test] zero rate first: %8d %8d %8d\n",
			zero_rate_data[0], zero_rate_data[1],
			zero_rate_data[2]);
		goto read_zero_rate_again;
	}

	fifo_pass = 1;
exit:
	/* 1: success, 0: fail, 2: retry */
	return fifo_pass;
}
/*
 * at91_do_twi_transfer() - run one prepared message on the TWI controller
 * and wait for it to complete.
 * @dev: controller state; dev->msg, dev->buf_len, dev->use_dma and
 *       dev->use_alt_cmd must already describe the transfer.
 *
 * Return: 0 on success; -ETIMEDOUT (after re-initializing the bus),
 * -EREMOTEIO on NACK, -EIO on over/underrun or locked TX, -EPROTO on a
 * bad SMBus block length. On error, any in-flight DMA is cleaned up and
 * a locked transmitter is flushed/unlocked.
 */
static int at91_do_twi_transfer(struct at91_twi_dev *dev)
{
	int ret;
	unsigned long time_left;
	bool has_unre_flag = dev->pdata->has_unre_flag;
	bool has_alt_cmd = dev->pdata->has_alt_cmd;

	/*
	 * WARNING: the TXCOMP bit in the Status Register is NOT a clear on
	 * read flag but shows the state of the transmission at the time the
	 * Status Register is read. According to the programmer datasheet,
	 * TXCOMP is set when both holding register and internal shifter are
	 * empty and STOP condition has been sent.
	 * Consequently, we should enable NACK interrupt rather than TXCOMP to
	 * detect transmission failure.
	 * Indeed let's take the case of an i2c write command using DMA.
	 * Whenever the slave doesn't acknowledge a byte, the LOCK, NACK and
	 * TXCOMP bits are set together into the Status Register.
	 * LOCK is a clear on write bit, which is set to prevent the DMA
	 * controller from sending new data on the i2c bus after a NACK
	 * condition has happened. Once locked, this i2c peripheral stops
	 * triggering the DMA controller for new data but it is more than
	 * likely that a new DMA transaction is already in progress, writing
	 * into the Transmit Holding Register. Since the peripheral is locked,
	 * these new data won't be sent to the i2c bus but they will remain
	 * into the Transmit Holding Register, so TXCOMP bit is cleared.
	 * Then when the interrupt handler is called, the Status Register is
	 * read: the TXCOMP bit is clear but NACK bit is still set. The driver
	 * manage the error properly, without waiting for timeout.
	 * This case can be reproduced easyly when writing into an at24 eeprom.
	 *
	 * Besides, the TXCOMP bit is already set before the i2c transaction
	 * has been started. For read transactions, this bit is cleared when
	 * writing the START bit into the Control Register. So the
	 * corresponding interrupt can safely be enabled just after.
	 * However for write transactions managed by the CPU, we first write
	 * into THR, so TXCOMP is cleared. Then we can safely enable TXCOMP
	 * interrupt. If TXCOMP interrupt were enabled before writing into THR,
	 * the interrupt handler would be called immediately and the i2c command
	 * would be reported as completed.
	 * Also when a write transaction is managed by the DMA controller,
	 * enabling the TXCOMP interrupt in this function may lead to a race
	 * condition since we don't know whether the TXCOMP interrupt is enabled
	 * before or after the DMA has started to write into THR. So the TXCOMP
	 * interrupt is enabled later by at91_twi_write_data_dma_callback().
	 * Immediately after in that DMA callback, if the alternative command
	 * mode is not used, we still need to send the STOP condition manually
	 * writing the corresponding bit into the Control Register.
	 */

	dev_dbg(dev->dev, "transfer: %s %zu bytes.\n",
		(dev->msg->flags & I2C_M_RD) ? "read" : "write", dev->buf_len);

	reinit_completion(&dev->cmd_complete);
	dev->transfer_status = 0;

	/* Clear pending interrupts, such as NACK. */
	at91_twi_read(dev, AT91_TWI_SR);

	if (dev->fifo_size) {
		unsigned fifo_mr = at91_twi_read(dev, AT91_TWI_FMR);

		/* Reset FIFO mode register */
		fifo_mr &= ~(AT91_TWI_FMR_TXRDYM_MASK |
			     AT91_TWI_FMR_RXRDYM_MASK);
		fifo_mr |= AT91_TWI_FMR_TXRDYM(AT91_TWI_ONE_DATA);
		fifo_mr |= AT91_TWI_FMR_RXRDYM(AT91_TWI_ONE_DATA);
		at91_twi_write(dev, AT91_TWI_FMR, fifo_mr);

		/* Flush FIFOs */
		at91_twi_write(dev, AT91_TWI_CR,
			       AT91_TWI_THRCLR | AT91_TWI_RHRCLR);
	}

	if (!dev->buf_len) {
		/* Zero-length transfer (SMBus quick command). */
		at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_QUICK);
		at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP);
	} else if (dev->msg->flags & I2C_M_RD) {
		unsigned start_flags = AT91_TWI_START;

		/* if only one byte is to be read, immediately stop transfer */
		if (!dev->use_alt_cmd && dev->buf_len <= 1 &&
		    !(dev->msg->flags & I2C_M_RECV_LEN))
			start_flags |= AT91_TWI_STOP;
		at91_twi_write(dev, AT91_TWI_CR, start_flags);
		/*
		 * When using dma without alternative command mode, the last
		 * byte has to be read manually in order to not send the stop
		 * command too late and then to receive extra data.
		 * In practice, there are some issues if you use the dma to
		 * read n-1 bytes because of latency.
		 * Reading n-2 bytes with dma and the two last ones manually
		 * seems to be the best solution.
		 */
		if (dev->use_dma && (dev->buf_len > AT91_I2C_DMA_THRESHOLD)) {
			at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_NACK);
			at91_twi_read_data_dma(dev);
		} else {
			at91_twi_write(dev, AT91_TWI_IER,
				       AT91_TWI_TXCOMP |
				       AT91_TWI_NACK |
				       AT91_TWI_RXRDY);
		}
	} else {
		if (dev->use_dma && (dev->buf_len > AT91_I2C_DMA_THRESHOLD)) {
			at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_NACK);
			at91_twi_write_data_dma(dev);
		} else {
			/* CPU-driven write: load THR first, then enable
			 * TXCOMP (see the rationale above). */
			at91_twi_write_next_byte(dev);
			at91_twi_write(dev, AT91_TWI_IER,
				       AT91_TWI_TXCOMP |
				       AT91_TWI_NACK |
				       AT91_TWI_TXRDY);
		}
	}

	time_left = wait_for_completion_timeout(&dev->cmd_complete,
						dev->adapter.timeout);
	if (time_left == 0) {
		dev->transfer_status |= at91_twi_read(dev, AT91_TWI_SR);
		dev_err(dev->dev, "controller timed out\n");
		at91_init_twi_bus(dev);
		ret = -ETIMEDOUT;
		goto error;
	}
	if (dev->transfer_status & AT91_TWI_NACK) {
		dev_dbg(dev->dev, "received nack\n");
		ret = -EREMOTEIO;
		goto error;
	}
	if (dev->transfer_status & AT91_TWI_OVRE) {
		dev_err(dev->dev, "overrun while reading\n");
		ret = -EIO;
		goto error;
	}
	if (has_unre_flag && dev->transfer_status & AT91_TWI_UNRE) {
		dev_err(dev->dev, "underrun while writing\n");
		ret = -EIO;
		goto error;
	}
	if ((has_alt_cmd || dev->fifo_size) &&
	    (dev->transfer_status & AT91_TWI_LOCK)) {
		dev_err(dev->dev, "tx locked\n");
		ret = -EIO;
		goto error;
	}
	if (dev->recv_len_abort) {
		dev_err(dev->dev, "invalid smbus block length recvd\n");
		ret = -EPROTO;
		goto error;
	}

	dev_dbg(dev->dev, "transfer complete\n");

	return 0;

error:
	/* first stop DMA transfer if still in progress */
	at91_twi_dma_cleanup(dev);
	/* then flush THR/FIFO and unlock TX if locked */
	if ((has_alt_cmd || dev->fifo_size) &&
	    (dev->transfer_status & AT91_TWI_LOCK)) {
		dev_dbg(dev->dev, "unlock tx\n");
		at91_twi_write(dev, AT91_TWI_CR,
			       AT91_TWI_THRCLR | AT91_TWI_LOCKCLR);
	}

	return ret;
}
static int mdss_dsi_cmd_dma_tx(struct mdss_dsi_ctrl_pdata *ctrl, struct dsi_buf *tp) { int len, ret = 0; int domain = MDSS_IOMMU_DOMAIN_UNSECURE; char *bp; unsigned long size, addr; bp = tp->data; len = ALIGN(tp->len, 4); size = ALIGN(tp->len, SZ_4K); if (is_mdss_iommu_attached()) { int ret = msm_iommu_map_contig_buffer(tp->dmap, mdss_get_iommu_domain(domain), 0, size, SZ_4K, 0, &(addr)); if (IS_ERR_VALUE(ret)) { pr_err("unable to map dma memory to iommu(%d)\n", ret); return -ENOMEM; } } else { addr = tp->dmap; } INIT_COMPLETION(ctrl->dma_comp); if (ctrl->shared_pdata.broadcast_enable) if ((ctrl->ndx == DSI_CTRL_1) && (left_ctrl_pdata != NULL)) { MIPI_OUTP(left_ctrl_pdata->ctrl_base + 0x048, addr); MIPI_OUTP(left_ctrl_pdata->ctrl_base + 0x04c, len); } MIPI_OUTP((ctrl->ctrl_base) + 0x048, addr); MIPI_OUTP((ctrl->ctrl_base) + 0x04c, len); wmb(); if (ctrl->shared_pdata.broadcast_enable) if ((ctrl->ndx == DSI_CTRL_1) && (left_ctrl_pdata != NULL)) { MIPI_OUTP(left_ctrl_pdata->ctrl_base + 0x090, 0x01); } MIPI_OUTP((ctrl->ctrl_base) + 0x090, 0x01); /* trigger */ wmb(); ret = wait_for_completion_timeout(&ctrl->dma_comp, msecs_to_jiffies(DMA_TX_TIMEOUT)); if (ret == 0) ret = -ETIMEDOUT; else ret = tp->len; if (is_mdss_iommu_attached()) msm_iommu_unmap_contig_buffer(addr, mdss_get_iommu_domain(domain), 0, size); return ret; }
/*
 * xfer_read() - perform the read phase of an I2C transfer (Ingenic JZ).
 * @i2c:      controller state shared with the interrupt handler
 * @buf:      destination buffer, zeroed before the transfer
 * @len:      number of bytes to read
 * @end_type: whether this message ends with a STOP condition
 *
 * Programs the RX FIFO threshold, drains stale data, issues the read
 * command sequence, unmasks the relevant interrupts and sleeps until the
 * ISR signals completion or the computed timeout expires.
 *
 * Return: 0 on success; -ETIMEDOUT, -ENXIO, -EIO or -EAGAIN on failure
 * (the controller is reset on any failure).
 */
static inline int xfer_read(struct i2c_jz *i2c, unsigned char *buf,
			    int len, enum msg_end_type end_type)
{
	int ret = 0;
	long timeout;
	unsigned short tmp;
	unsigned int wait_complete_timeout_ms;

	/*
	 * Worst-case transfer time: 9 bit-times per byte at the bus rate,
	 * doubled for margin, plus a fixed configurable slack.
	 */
	wait_complete_timeout_ms =
	    len * 1000 * 9 * 2 / i2c->rate + CONFIG_I2C_JZV10_WAIT_MS;

#ifdef CONFIG_I2C_DEBUG_INFO
	if (i2c->debug > DEBUG_WARN)
		dev_info(&(i2c->adap.dev),
			 "%s, Begin read msg, want to read length is %d\n",
			 __func__, len);
#endif

	memset(buf, 0, len);

	/* Stash the request so the interrupt handler can fill the buffer. */
	i2c->rd_len = len;
	i2c->len = len;
	i2c->rbuf = buf;
	i2c->r_end_type = end_type;

	i2c_readl(i2c, I2C_CSTP);	/* clear STP bit */
	i2c_readl(i2c, I2C_CTXOF);	/* clear TXOF bit */
	i2c_readl(i2c, I2C_CTXABRT);	/* clear TXABRT bit */

	/* RX threshold: fire once the whole (short) message has arrived,
	 * otherwise at the fixed fill level. */
	if (len <= I2C_FIFO_LEN) {
		i2c_writel(i2c, I2C_RXTL, len - 1);
	} else {
		i2c_writel(i2c, I2C_RXTL, RX_LEVEL);
	}

	/* Drain any stale bytes left in the RX FIFO. */
	while (i2c_readl(i2c, I2C_STA) & I2C_STA_RFNE) {
		i2c_readl(i2c, I2C_DC);
	}

	if (i2c_send_rcmd(i2c))
		BUG();

	/* Unmask RX-full and TX-abort (plus STOP when it ends the msg). */
	tmp = I2C_INTM_MRXFL | I2C_INTM_MTXABT;
	if (end_type == MSG_END_STOP)
		tmp |= I2C_INTM_MISTP;
	i2c_writel(i2c, I2C_INTM, tmp);

	timeout = wait_for_completion_timeout(&i2c->complete,
					      msecs_to_jiffies
					      (wait_complete_timeout_ms));
	if (!timeout) {
		dev_err(&(i2c->adap.dev), "--I2C irq read timeout\n");
#ifdef I2C_DEBUG
		i2c_jz_dump_regs(i2c);
#endif
		ret = -ETIMEDOUT;
	}

	/* Map abort-source bits to errno values. */
	tmp = i2c_readl(i2c, I2C_TXABRT);
	if (tmp) {
		txabrt(i2c, tmp);
		if (tmp > 0x1 && tmp < 0x10)
			ret = -ENXIO;
		else
			ret = -EIO;
		// ABRT_GCALL_READ
		if (tmp & (1 << 5)) {
			ret = -EAGAIN;
		}
		i2c_readl(i2c, I2C_CTXABRT);
	}
	if (ret < 0)
		i2c_jz_reset(i2c);

#ifdef CONFIG_I2C_DEBUG_INFO
	if (i2c->debug > DEBUG_WARN)
		dev_info(&(i2c->adap.dev), "%s, Reading msg over\n",
			 __func__);
#endif

	return ret;
}
int arizona_set_fll(struct arizona_fll *fll, int source, unsigned int Fref, unsigned int Fout) { struct arizona *arizona = fll->arizona; struct arizona_fll_cfg cfg, sync; unsigned int reg, val; int syncsrc; bool ena; int ret; ret = regmap_read(arizona->regmap, fll->base + 1, ®); if (ret != 0) { arizona_fll_err(fll, "Failed to read current state: %d\n", ret); return ret; } ena = reg & ARIZONA_FLL1_ENA; if (Fout) { /* Do we have a 32kHz reference? */ regmap_read(arizona->regmap, ARIZONA_CLOCK_32K_1, &val); switch (val & ARIZONA_CLK_32K_SRC_MASK) { case ARIZONA_CLK_SRC_MCLK1: case ARIZONA_CLK_SRC_MCLK2: syncsrc = val & ARIZONA_CLK_32K_SRC_MASK; break; default: syncsrc = -1; } if (source == syncsrc) syncsrc = -1; if (syncsrc >= 0) { ret = arizona_calc_fll(fll, &sync, Fref, Fout); if (ret != 0) return ret; ret = arizona_calc_fll(fll, &cfg, 32768, Fout); if (ret != 0) return ret; } else { ret = arizona_calc_fll(fll, &cfg, Fref, Fout); if (ret != 0) return ret; } } else { regmap_update_bits(arizona->regmap, fll->base + 1, ARIZONA_FLL1_ENA, 0); regmap_update_bits(arizona->regmap, fll->base + 0x11, ARIZONA_FLL1_SYNC_ENA, 0); if (ena) pm_runtime_put_autosuspend(arizona->dev); return 0; } regmap_update_bits(arizona->regmap, fll->base + 5, ARIZONA_FLL1_OUTDIV_MASK, cfg.outdiv << ARIZONA_FLL1_OUTDIV_SHIFT); if (syncsrc >= 0) { arizona_apply_fll(arizona, fll->base, &cfg, syncsrc); arizona_apply_fll(arizona, fll->base + 0x10, &sync, source); } else { arizona_apply_fll(arizona, fll->base, &cfg, source); } if (!ena) pm_runtime_get(arizona->dev); /* Clear any pending completions */ try_wait_for_completion(&fll->ok); regmap_update_bits(arizona->regmap, fll->base + 1, ARIZONA_FLL1_ENA, ARIZONA_FLL1_ENA); if (syncsrc >= 0) regmap_update_bits(arizona->regmap, fll->base + 0x11, ARIZONA_FLL1_SYNC_ENA, ARIZONA_FLL1_SYNC_ENA); ret = wait_for_completion_timeout(&fll->ok, msecs_to_jiffies(25)); if (ret == 0) arizona_fll_warn(fll, "Timed out waiting for lock\n"); return 0; }
/*
 * This function implements the USB_PORT_FEAT_TEST handling of the
 * SINGLE_STEP_SET_FEATURE test mode as defined in the Embedded
 * High-Speed Electrical Test (EHSET) specification. This simply
 * issues a GetDescriptor control transfer, with an inserted 15-second
 * delay after the end of the SETUP stage and before the IN token of
 * the DATA stage is set. The idea is that this gives the test operator
 * enough time to configure the oscilloscope to perform a measurement
 * of the response time between the DATA and ACK packets that follow.
 *
 * Return: 0 on success; -ENODEV if no device is on the port, -ENOMEM on
 * allocation failure, -ETIMEDOUT if a stage does not complete within 2s,
 * or the submit error from the xHCI layer.
 */
static int xhci_ehset_single_step_set_feature(struct usb_hcd *hcd, int port)
{
	int retval = -ENOMEM;
	struct usb_ctrlrequest *dr;
	struct urb *urb;
	struct usb_device *udev;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct usb_device_descriptor *buf;
	unsigned long flags;
	DECLARE_COMPLETION_ONSTACK(done);

	/* Obtain udev of the rhub's child port */
	udev = usb_hub_find_child(hcd->self.root_hub, port);
	if (!udev) {
		xhci_err(xhci, "No device attached to the RootHub\n");
		return -ENODEV;
	}
	buf = kmalloc(USB_DT_DEVICE_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL);
	if (!dr) {
		kfree(buf);
		return -ENOMEM;
	}

	/* Fill Setup packet for GetDescriptor */
	dr->bRequestType = USB_DIR_IN;
	dr->bRequest = USB_REQ_GET_DESCRIPTOR;
	dr->wValue = cpu_to_le16(USB_DT_DEVICE << 8);
	dr->wIndex = 0;
	dr->wLength = cpu_to_le16(USB_DT_DEVICE_SIZE);
	urb = xhci_request_single_step_set_feature_urb(udev, dr, buf, &done);
	if (!urb)
		goto cleanup;

	/* Now complete just the SETUP stage */
	spin_lock_irqsave(&xhci->lock, flags);
	retval = xhci_submit_single_step_set_feature(hcd, urb, 1);
	spin_unlock_irqrestore(&xhci->lock, flags);
	if (retval)
		goto out1;

	if (!wait_for_completion_timeout(&done, msecs_to_jiffies(2000))) {
		usb_kill_urb(urb);
		retval = -ETIMEDOUT;
		xhci_err(xhci, "%s SETUP stage timed out on ep0\n", __func__);
		goto out1;
	}

	/* Sleep for 15 seconds; HC will send SOFs during this period */
	msleep(15 * 1000);

	/* Complete remaining DATA and status stages. Re-use same URB */
	urb->status = -EINPROGRESS;
	/* Re-arm the URB: take the references a fresh submit would take
	 * (this bypasses the normal usb_submit_urb() path). */
	usb_get_urb(urb);
	atomic_inc(&urb->use_count);
	atomic_inc(&urb->dev->urbnum);

	spin_lock_irqsave(&xhci->lock, flags);
	retval = xhci_submit_single_step_set_feature(hcd, urb, 0);
	spin_unlock_irqrestore(&xhci->lock, flags);
	if (!retval && !wait_for_completion_timeout(&done,
						msecs_to_jiffies(2000))) {
		usb_kill_urb(urb);
		retval = -ETIMEDOUT;
		xhci_err(xhci, "%s IN stage timed out on ep0\n", __func__);
	}
out1:
	usb_free_urb(urb);
cleanup:
	kfree(dr);
	kfree(buf);
	return retval;
}
/*
 * omap2_onenand_wait() - wait for the current OneNAND operation to finish.
 * @mtd:   MTD device
 * @state: operation in progress (FL_RESETING, FL_READING, or other)
 *
 * Strategy depends on @state:
 *  - FL_RESETING: brief busy-wait polling of the interrupt register;
 *  - FL_READING:  poll with the interrupt output masked, with a 20ms
 *    budget extended (up to 3x) while the controller reports ONGO;
 *  - otherwise:   enable the interrupt output and sleep on the GPIO IRQ
 *    completion, again retrying while the controller is busy.
 * Afterwards, interrupt/controller status and ECC results are checked.
 *
 * Return: 0 on success, -EIO on controller error or timeout, -EBADMSG on
 * an uncorrectable ECC error (correctable errors only bump ecc_stats).
 */
static int omap2_onenand_wait(struct mtd_info *mtd, int state)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	unsigned int intr = 0;
	unsigned int ctrl;
	unsigned long timeout;
	u32 syscfg;

	if (state == FL_RESETING) {
		int i;

		/* Reset is quick: busy-wait up to ~20us for INT_MASTER. */
		for (i = 0; i < 20; i++) {
			udelay(1);
			intr = read_reg(c, ONENAND_REG_INTERRUPT);
			if (intr & ONENAND_INT_MASTER)
				break;
		}
		ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
		if (ctrl & ONENAND_CTRL_ERROR) {
			wait_err("controller error", state, ctrl, intr);
			return -EIO;
		}
		if (!(intr & ONENAND_INT_RESET)) {
			wait_err("timeout", state, ctrl, intr);
			return -EIO;
		}
		return 0;
	}

	if (state != FL_READING) {
		int result;

		/* Turn interrupts on */
		syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
		if (!(syscfg & ONENAND_SYS_CFG1_IOBE)) {
			syscfg |= ONENAND_SYS_CFG1_IOBE;
			write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);
			if (cpu_is_omap34xx())
				/* Add a delay to let GPIO settle */
				syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
		}

		INIT_COMPLETION(c->irq_done);
		if (c->gpio_irq) {
			result = gpio_get_value(c->gpio_irq);
			if (result == -1) {
				ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
				intr = read_reg(c, ONENAND_REG_INTERRUPT);
				wait_err("gpio error", state, ctrl, intr);
				return -EIO;
			}
		} else
			result = 0;
		if (result == 0) {
			int retry_cnt = 0;
retry:
			result = wait_for_completion_timeout(&c->irq_done,
						    msecs_to_jiffies(20));
			if (result == 0) {
				/* Timeout after 20ms */
				ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
				if (ctrl & ONENAND_CTRL_ONGO) {
					/*
					 * The operation seems to be still going
					 * so give it some more time.
					 */
					retry_cnt += 1;
					if (retry_cnt < 3)
						goto retry;
					intr = read_reg(c,
							ONENAND_REG_INTERRUPT);
					wait_err("timeout", state, ctrl, intr);
					return -EIO;
				}
				intr = read_reg(c, ONENAND_REG_INTERRUPT);
				if ((intr & ONENAND_INT_MASTER) == 0)
					wait_warn("timeout", state, ctrl, intr);
			}
		}
	} else {
		int retry_cnt = 0;

		/* Turn interrupts off */
		syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
		syscfg &= ~ONENAND_SYS_CFG1_IOBE;
		write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);

		timeout = jiffies + msecs_to_jiffies(20);
		while (1) {
			if (time_before(jiffies, timeout)) {
				intr = read_reg(c, ONENAND_REG_INTERRUPT);
				if (intr & ONENAND_INT_MASTER)
					break;
			} else {
				/* Timeout after 20ms */
				ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
				if (ctrl & ONENAND_CTRL_ONGO) {
					/*
					 * The operation seems to be still going
					 * so give it some more time.
					 */
					retry_cnt += 1;
					if (retry_cnt < 3) {
						timeout = jiffies +
							msecs_to_jiffies(20);
						continue;
					}
				}
				break;
			}
		}
	}

	intr = read_reg(c, ONENAND_REG_INTERRUPT);
	ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);

	if (intr & ONENAND_INT_READ) {
		int ecc = read_reg(c, ONENAND_REG_ECC_STATUS);

		if (ecc) {
			unsigned int addr1, addr8;

			addr1 = read_reg(c, ONENAND_REG_START_ADDRESS1);
			addr8 = read_reg(c, ONENAND_REG_START_ADDRESS8);
			if (ecc & ONENAND_ECC_2BIT_ALL) {
				printk(KERN_ERR "onenand_wait: ECC error = "
				       "0x%04x, addr1 %#x, addr8 %#x\n",
				       ecc, addr1, addr8);
				mtd->ecc_stats.failed++;
				return -EBADMSG;
			} else if (ecc & ONENAND_ECC_1BIT_ALL) {
				printk(KERN_NOTICE "onenand_wait: correctable "
				       "ECC error = 0x%04x, addr1 %#x, "
				       "addr8 %#x\n", ecc, addr1, addr8);
				mtd->ecc_stats.corrected++;
			}
		}
	} else if (state == FL_READING) {
		wait_err("timeout", state, ctrl, intr);
		return -EIO;
	}

	if (ctrl & ONENAND_CTRL_ERROR) {
		wait_err("controller error", state, ctrl, intr);
		if (ctrl & ONENAND_CTRL_LOCK)
			printk(KERN_ERR "onenand_wait: "
			       "Device is write protected!!!\n");
		return -EIO;
	}

	if (ctrl & 0xFE9F)
		wait_warn("unexpected controller status", state, ctrl, intr);

	return 0;
}
/*
 * uniphier_fi2c_master_xfer_one() - execute a single i2c_msg on the FI2C.
 * @adap: I2C adapter
 * @msg:  message to transfer (I2C_M_RD selects receive)
 * @stop: issue a STOP condition after this message
 *
 * Publishes the transfer state in @priv for the interrupt handler,
 * resets the FIFOs, programs TX or RX setup, raises the START condition
 * and sleeps until the ISR completes the transfer or the adapter timeout
 * expires. If the ISR deferred STOP completion, the status register is
 * polled briefly for the bus to go idle.
 *
 * Return: 0 on success, -ETIMEDOUT on timeout (after bus recovery), the
 * poll error if the STOP never completed, or the error code the ISR
 * recorded in priv->error.
 */
static int uniphier_fi2c_master_xfer_one(struct i2c_adapter *adap,
					 struct i2c_msg *msg, bool stop)
{
	struct uniphier_fi2c_priv *priv = i2c_get_adapdata(adap);
	bool is_read = msg->flags & I2C_M_RD;
	unsigned long time_left;

	dev_dbg(&adap->dev, "%s: addr=0x%02x, len=%d, stop=%d\n",
		is_read ? "receive" : "transmit", msg->addr, msg->len, stop);

	/* Shared state consumed by the interrupt handler. */
	priv->len = msg->len;
	priv->buf = msg->buf;
	priv->enabled_irqs = UNIPHIER_FI2C_INT_FAULTS;
	priv->error = 0;
	priv->flags = 0;

	if (stop)
		priv->flags |= UNIPHIER_FI2C_STOP;

	reinit_completion(&priv->comp);
	uniphier_fi2c_clear_irqs(priv);
	writel(UNIPHIER_FI2C_RST_TBRST | UNIPHIER_FI2C_RST_RBRST,
	       priv->membase + UNIPHIER_FI2C_RST);	/* reset TX/RX FIFO */

	if (is_read)
		uniphier_fi2c_rx_init(priv, msg->addr);
	else
		uniphier_fi2c_tx_init(priv, msg->addr);

	uniphier_fi2c_set_irqs(priv);

	dev_dbg(&adap->dev, "start condition\n");
	writel(UNIPHIER_FI2C_CR_MST | UNIPHIER_FI2C_CR_STA,
	       priv->membase + UNIPHIER_FI2C_CR);

	time_left = wait_for_completion_timeout(&priv->comp, adap->timeout);
	if (!time_left) {
		dev_err(&adap->dev, "transaction timeout.\n");
		uniphier_fi2c_recover(priv);
		return -ETIMEDOUT;
	}
	dev_dbg(&adap->dev, "complete\n");

	if (unlikely(priv->flags & UNIPHIER_FI2C_DEFER_STOP_COMP)) {
		u32 status;
		int ret;

		/* Poll (1us interval, 20us budget) for STOP done and
		 * the bus no longer busy. */
		ret = readl_poll_timeout(priv->membase + UNIPHIER_FI2C_SR,
					 status,
					 (status & UNIPHIER_FI2C_SR_STS) &&
					 !(status & UNIPHIER_FI2C_SR_BB),
					 1, 20);
		if (ret) {
			dev_err(&adap->dev,
				"stop condition was not completed.\n");
			uniphier_fi2c_recover(priv);
			return ret;
		}
	}

	return priv->error;
}
/*
 * pm8xxx_read_channel_rsv() - read one ADC channel with explicit
 * ratiometric scale (RSV) control.
 * @adc:               ADC state container
 * @ch:                channel descriptor to sample
 * @rsv:               RSV selector, or 0xff to use the channel default
 * @adc_code:          out: raw 16-bit conversion result (msb:lsb)
 * @force_ratiometric: force ratiometric mode (used for calibration)
 *
 * Muxes in the channel, programs the RSV/decimation registers, arms the
 * arbiter, fires a conversion, waits for the end-of-conversion
 * interrupt, reads the two data bytes and shuts the arbiter down again.
 * Serialized by adc->lock.
 *
 * Return: 0 on success, -ETIMEDOUT if no interrupt arrived, or a
 * regmap error code.
 */
static int pm8xxx_read_channel_rsv(struct pm8xxx_xoadc *adc,
				   const struct pm8xxx_chan_info *ch,
				   u8 rsv, u16 *adc_code,
				   bool force_ratiometric)
{
	int ret;
	unsigned int val;
	u8 rsvmask, rsvval;
	u8 lsb, msb;

	dev_dbg(adc->dev, "read channel \"%s\", amux %d, prescale/mux: %d, rsv %d\n",
		ch->name, ch->hwchan->amux_channel,
		ch->hwchan->pre_scale_mux, rsv);

	mutex_lock(&adc->lock);

	/* Mux in this channel */
	val = ch->hwchan->amux_channel << ADC_AMUX_SEL_SHIFT;
	val |= ch->hwchan->pre_scale_mux << ADC_AMUX_PREMUX_SHIFT;
	ret = regmap_write(adc->map, ADC_ARB_USRP_AMUX_CNTRL, val);
	if (ret)
		goto unlock;

	/* Set up ratiometric scale value, mask off all bits except these */
	rsvmask = (ADC_ARB_USRP_RSV_RST | ADC_ARB_USRP_RSV_DTEST0 |
		   ADC_ARB_USRP_RSV_DTEST1 | ADC_ARB_USRP_RSV_OP);
	if (adc->variant->broken_ratiometric && !force_ratiometric) {
		/*
		 * Apparently the PM8058 has some kind of bug which is
		 * reflected in the vendor tree drivers/misc/pmix8058-xoadc.c
		 * which just hardcodes the RSV selector to SEL1 (0x20) for
		 * most cases and SEL0 (0x10) for the MUXOFF channel only.
		 * If we force ratiometric (currently only done when attempting
		 * to do ratiometric calibration) this doesn't seem to work
		 * very well and I suspect ratiometric conversion is simply
		 * broken or not supported on the PM8058.
		 *
		 * Maybe IO_SEL2 doesn't exist on PM8058 and bits 4 & 5 select
		 * the mode alone.
		 *
		 * Some PM8058 register documentation would be nice to get
		 * this right.
		 */
		if (ch->hwchan->amux_channel == PM8XXX_CHANNEL_MUXOFF)
			rsvval = ADC_ARB_USRP_RSV_IP_SEL0;
		else
			rsvval = ADC_ARB_USRP_RSV_IP_SEL1;
	} else {
		if (rsv == 0xff)
			rsvval = (ch->amux_ip_rsv << ADC_RSV_IP_SEL_SHIFT) |
				ADC_ARB_USRP_RSV_TRM;
		else
			rsvval = (rsv << ADC_RSV_IP_SEL_SHIFT) |
				ADC_ARB_USRP_RSV_TRM;
	}

	ret = regmap_update_bits(adc->map, ADC_ARB_USRP_RSV,
				 ~rsvmask, rsvval);
	if (ret)
		goto unlock;

	ret = regmap_write(adc->map, ADC_ARB_USRP_ANA_PARAM,
			   ADC_ARB_USRP_ANA_PARAM_DIS);
	if (ret)
		goto unlock;

	/* Decimation factor */
	ret = regmap_write(adc->map, ADC_ARB_USRP_DIG_PARAM,
			   ADC_ARB_USRP_DIG_PARAM_SEL_SHIFT0 |
			   ADC_ARB_USRP_DIG_PARAM_SEL_SHIFT1 |
			   ch->decimation << ADC_DIG_PARAM_DEC_SHIFT);
	if (ret)
		goto unlock;

	ret = regmap_write(adc->map, ADC_ARB_USRP_ANA_PARAM,
			   ADC_ARB_USRP_ANA_PARAM_EN);
	if (ret)
		goto unlock;

	/* Enable the arbiter, the Qualcomm code does it twice like this */
	ret = regmap_write(adc->map, ADC_ARB_USRP_CNTRL,
			   ADC_ARB_USRP_CNTRL_EN_ARB);
	if (ret)
		goto unlock;
	ret = regmap_write(adc->map, ADC_ARB_USRP_CNTRL,
			   ADC_ARB_USRP_CNTRL_EN_ARB);
	if (ret)
		goto unlock;

	/* Fire a request! */
	reinit_completion(&adc->complete);
	ret = regmap_write(adc->map, ADC_ARB_USRP_CNTRL,
			   ADC_ARB_USRP_CNTRL_EN_ARB |
			   ADC_ARB_USRP_CNTRL_REQ);
	if (ret)
		goto unlock;

	/*
	 * Next the interrupt occurs. Fix: VADC_CONV_TIME_MAX_US is a time
	 * in microseconds; the original passed it directly as a jiffies
	 * count, making the timeout HZ-dependent and much longer than
	 * intended. Convert it explicitly (usecs_to_jiffies rounds up).
	 */
	ret = wait_for_completion_timeout(&adc->complete,
				usecs_to_jiffies(VADC_CONV_TIME_MAX_US));
	if (!ret) {
		dev_err(adc->dev, "conversion timed out\n");
		ret = -ETIMEDOUT;
		goto unlock;
	}

	ret = regmap_read(adc->map, ADC_ARB_USRP_DATA0, &val);
	if (ret)
		goto unlock;
	lsb = val;
	ret = regmap_read(adc->map, ADC_ARB_USRP_DATA1, &val);
	if (ret)
		goto unlock;
	msb = val;
	*adc_code = (msb << 8) | lsb;

	/* Turn off the ADC by setting the arbiter to 0 twice */
	ret = regmap_write(adc->map, ADC_ARB_USRP_CNTRL, 0);
	if (ret)
		goto unlock;
	ret = regmap_write(adc->map, ADC_ARB_USRP_CNTRL, 0);
	if (ret)
		goto unlock;

unlock:
	mutex_unlock(&adc->lock);
	return ret;
}
static int nfcwilink_open(struct nci_dev *ndev) { struct nfcwilink *drv = nci_get_drvdata(ndev); unsigned long comp_ret; int rc; if (test_and_set_bit(NFCWILINK_RUNNING, &drv->flags)) { rc = -EBUSY; goto exit; } nfcwilink_proto.priv_data = drv; init_completion(&drv->completed); drv->st_register_cb_status = -EINPROGRESS; rc = st_register(&nfcwilink_proto); if (rc < 0) { if (rc == -EINPROGRESS) { comp_ret = wait_for_completion_timeout( &drv->completed, msecs_to_jiffies(NFCWILINK_REGISTER_TIMEOUT)); dev_dbg(&drv->pdev->dev, "wait_for_completion_timeout returned %ld\n", comp_ret); if (comp_ret == 0) { /* timeout */ rc = -ETIMEDOUT; goto clear_exit; } else if (drv->st_register_cb_status != 0) { rc = drv->st_register_cb_status; nfc_err(&drv->pdev->dev, "st_register_cb failed %d\n", rc); goto clear_exit; } } else { nfc_err(&drv->pdev->dev, "st_register failed %d\n", rc); goto clear_exit; } } /* st_register MUST fill the write callback */ BUG_ON(nfcwilink_proto.write == NULL); drv->st_write = nfcwilink_proto.write; if (nfcwilink_download_fw(drv)) { nfc_err(&drv->pdev->dev, "nfcwilink_download_fw failed %d\n", rc); /* open should succeed, even if the FW download failed */ } goto exit; clear_exit: clear_bit(NFCWILINK_RUNNING, &drv->flags); exit: return rc; }
/*
 * Connect an HTC service to a target endpoint.
 *
 * For the reserved pseudo control service the endpoint assignment is
 * fixed (EP 0) and no message exchange takes place; for every other
 * service a CONNECT_SERVICE message is sent and the target's response
 * (endpoint id, max message size, status) is validated and copied into
 * @conn_resp and the endpoint state.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -ETIMEDOUT if
 * the target never answers, -EPROTO on a malformed or failed response,
 * or the error from the underlying send/pipe-mapping calls.
 */
int ath10k_htc_connect_service(struct ath10k_htc *htc,
			       struct ath10k_htc_svc_conn_req *conn_req,
			       struct ath10k_htc_svc_conn_resp *conn_resp)
{
	struct ath10k_htc_msg *msg;
	struct ath10k_htc_conn_svc *req_msg;
	struct ath10k_htc_conn_svc_response resp_msg_dummy;
	struct ath10k_htc_conn_svc_response *resp_msg = &resp_msg_dummy;
	enum ath10k_htc_ep_id assigned_eid = ATH10K_HTC_EP_COUNT;
	struct ath10k_htc_ep *ep;
	struct sk_buff *skb;
	unsigned int max_msg_size = 0;
	int length, status;
	bool disable_credit_flow_ctrl = false;
	u16 message_id, service_id, flags = 0;
	u8 tx_alloc = 0;

	/* special case for HTC pseudo control service */
	if (conn_req->service_id == ATH10K_HTC_SVC_ID_RSVD_CTRL) {
		disable_credit_flow_ctrl = true;
		assigned_eid = ATH10K_HTC_EP_0;
		max_msg_size = ATH10K_HTC_MAX_CTRL_MSG_LEN;
		/* NOTE(review): resp_msg still points at this zeroed dummy
		 * after the goto, so conn_resp->max_msg_len and
		 * ep->max_ep_message_len below read 0 for the control
		 * endpoint, not max_msg_size — confirm this is intended. */
		memset(&resp_msg_dummy, 0, sizeof(resp_msg_dummy));
		goto setup;
	}

	tx_alloc = ath10k_htc_get_credit_allocation(htc,
						    conn_req->service_id);
	if (!tx_alloc)
		ath10k_dbg(ATH10K_DBG_BOOT,
			   "boot htc service %s does not allocate target credits\n",
			   htc_service_name(conn_req->service_id));

	skb = ath10k_htc_build_tx_ctrl_skb(htc->ar);
	if (!skb) {
		ath10k_err("Failed to allocate HTC packet\n");
		return -ENOMEM;
	}

	/* Build the CONNECT_SERVICE request in place in the skb. */
	length = sizeof(msg->hdr) + sizeof(msg->connect_service);
	skb_put(skb, length);
	memset(skb->data, 0, length);

	msg = (struct ath10k_htc_msg *)skb->data;
	msg->hdr.message_id =
		__cpu_to_le16(ATH10K_HTC_MSG_CONNECT_SERVICE_ID);

	flags |= SM(tx_alloc, ATH10K_HTC_CONN_FLAGS_RECV_ALLOC);

	/* Only enable credit flow control for WMI ctrl service */
	if (conn_req->service_id != ATH10K_HTC_SVC_ID_WMI_CONTROL) {
		flags |= ATH10K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
		disable_credit_flow_ctrl = true;
	}

	req_msg = &msg->connect_service;
	req_msg->flags = __cpu_to_le16(flags);
	req_msg->service_id = __cpu_to_le16(conn_req->service_id);

	/* Re-arm the completion before sending so the response cannot
	 * be missed between send and wait. */
	INIT_COMPLETION(htc->ctl_resp);

	status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb);
	if (status) {
		kfree_skb(skb);
		return status;
	}

	/* wait for response */
	status = wait_for_completion_timeout(&htc->ctl_resp,
					     ATH10K_HTC_CONN_SVC_TIMEOUT_HZ);
	if (status <= 0) {
		if (status == 0)
			status = -ETIMEDOUT;
		ath10k_err("Service connect timeout: %d\n", status);
		return status;
	}

	/* we controlled the buffer creation, it's aligned */
	msg = (struct ath10k_htc_msg *)htc->control_resp_buffer;
	resp_msg = &msg->connect_service_response;
	message_id = __le16_to_cpu(msg->hdr.message_id);
	service_id = __le16_to_cpu(resp_msg->service_id);

	/* Validate both the message type and that the response is long
	 * enough to contain the fields read below. */
	if ((message_id != ATH10K_HTC_MSG_CONNECT_SERVICE_RESP_ID) ||
	    (htc->control_resp_len < sizeof(msg->hdr) +
	     sizeof(msg->connect_service_response))) {
		ath10k_err("Invalid resp message ID 0x%x", message_id);
		return -EPROTO;
	}

	ath10k_dbg(ATH10K_DBG_HTC,
		   "HTC Service %s connect response: status: 0x%x, assigned ep: 0x%x\n",
		   htc_service_name(service_id),
		   resp_msg->status, resp_msg->eid);

	conn_resp->connect_resp_code = resp_msg->status;

	/* check response status */
	if (resp_msg->status != ATH10K_HTC_CONN_SVC_STATUS_SUCCESS) {
		ath10k_err("HTC Service %s connect request failed: 0x%x)\n",
			   htc_service_name(service_id),
			   resp_msg->status);
		return -EPROTO;
	}

	assigned_eid = (enum ath10k_htc_ep_id)resp_msg->eid;
	max_msg_size = __le16_to_cpu(resp_msg->max_msg_size);

setup:

	/* Sanity-check what the target (or the pseudo path) gave us. */
	if (assigned_eid >= ATH10K_HTC_EP_COUNT)
		return -EPROTO;

	if (max_msg_size == 0)
		return -EPROTO;

	ep = &htc->endpoint[assigned_eid];
	ep->eid = assigned_eid;

	/* Refuse to connect a service onto an endpoint already in use. */
	if (ep->service_id != ATH10K_HTC_SVC_ID_UNUSED)
		return -EPROTO;

	/* return assigned endpoint to caller */
	conn_resp->eid = assigned_eid;
	conn_resp->max_msg_len = __le16_to_cpu(resp_msg->max_msg_size);

	/* setup the endpoint */
	ep->service_id = conn_req->service_id;
	ep->max_tx_queue_depth = conn_req->max_send_queue_depth;
	ep->max_ep_message_len = __le16_to_cpu(resp_msg->max_msg_size);
	ep->tx_credits = tx_alloc;
	ep->tx_credit_size = htc->target_credit_size;
	/* Credits needed per max-size message, rounded up. */
	ep->tx_credits_per_max_message = ep->max_ep_message_len /
					 htc->target_credit_size;

	if (ep->max_ep_message_len % htc->target_credit_size)
		ep->tx_credits_per_max_message++;

	/* copy all the callbacks */
	ep->ep_ops = conn_req->ep_ops;

	status = ath10k_hif_map_service_to_pipe(htc->ar,
						ep->service_id,
						&ep->ul_pipe_id,
						&ep->dl_pipe_id,
						&ep->ul_is_polled,
						&ep->dl_is_polled);
	if (status)
		return status;

	ath10k_dbg(ATH10K_DBG_BOOT,
		   "boot htc service '%s' ul pipe %d dl pipe %d eid %d ready\n",
		   htc_service_name(ep->service_id), ep->ul_pipe_id,
		   ep->dl_pipe_id, ep->eid);

	ath10k_dbg(ATH10K_DBG_BOOT,
		   "boot htc ep %d ul polled %d dl polled %d\n",
		   ep->eid, ep->ul_is_polled, ep->dl_is_polled);

	if (disable_credit_flow_ctrl && ep->tx_credit_flow_enabled) {
		ep->tx_credit_flow_enabled = false;
		ath10k_dbg(ATH10K_DBG_BOOT,
			   "boot htc service '%s' eid %d TX flow control disabled\n",
			   htc_service_name(ep->service_id), assigned_eid);
	}

	return status;
}
/*
 * Start the HIF layer, wait for the target's HTC READY message,
 * record the advertised credit pool, and connect the pseudo HTC
 * control service on endpoint 0.
 *
 * Returns 0 on success; on any failure after HIF start the HIF layer
 * is stopped again and a negative errno is returned.
 */
int ath10k_htc_wait_target(struct ath10k_htc *htc)
{
	int status = 0;
	struct ath10k_htc_svc_conn_req conn_req;
	struct ath10k_htc_svc_conn_resp conn_resp;
	struct ath10k_htc_msg *msg;
	u16 message_id;
	u16 credit_count;
	u16 credit_size;

	/* Re-arm before starting HIF so the READY message cannot race
	 * past the wait below. */
	INIT_COMPLETION(htc->ctl_resp);

	status = ath10k_hif_start(htc->ar);
	if (status) {
		ath10k_err("could not start HIF (%d)\n", status);
		goto err_start;
	}

	/* NOTE(review): wait_for_completion_timeout() returns unsigned
	 * long (remaining jiffies); storing it in the int 'status' works
	 * for the 0 == timeout test but relies on the timeout fitting in
	 * an int. */
	status = wait_for_completion_timeout(&htc->ctl_resp,
					     ATH10K_HTC_WAIT_TIMEOUT_HZ);
	if (status <= 0) {
		if (status == 0)
			status = -ETIMEDOUT;
		ath10k_err("ctl_resp never came in (%d)\n", status);
		goto err_target;
	}

	/* The READY message must be at least header + ready payload. */
	if (htc->control_resp_len < sizeof(msg->hdr) + sizeof(msg->ready)) {
		ath10k_err("Invalid HTC ready msg len:%d\n",
			   htc->control_resp_len);
		status = -ECOMM;
		goto err_target;
	}

	msg = (struct ath10k_htc_msg *)htc->control_resp_buffer;
	message_id = __le16_to_cpu(msg->hdr.message_id);
	credit_count = __le16_to_cpu(msg->ready.credit_count);
	credit_size = __le16_to_cpu(msg->ready.credit_size);

	if (message_id != ATH10K_HTC_MSG_READY_ID) {
		ath10k_err("Invalid HTC ready msg: 0x%x\n", message_id);
		status = -ECOMM;
		goto err_target;
	}

	/* Cache the target's TX credit pool for later endpoint setup. */
	htc->total_transmit_credits = credit_count;
	htc->target_credit_size = credit_size;

	ath10k_dbg(ATH10K_DBG_HTC,
		   "Target ready! transmit resources: %d size:%d\n",
		   htc->total_transmit_credits,
		   htc->target_credit_size);

	/* A zero credit count or size would break all later credit math. */
	if ((htc->total_transmit_credits == 0) ||
	    (htc->target_credit_size == 0)) {
		status = -ECOMM;
		ath10k_err("Invalid credit size received\n");
		goto err_target;
	}

	ath10k_htc_setup_target_buffer_assignments(htc);

	/* setup our pseudo HTC control endpoint connection */
	memset(&conn_req, 0, sizeof(conn_req));
	memset(&conn_resp, 0, sizeof(conn_resp));
	conn_req.ep_ops.ep_tx_complete = ath10k_htc_control_tx_complete;
	conn_req.ep_ops.ep_rx_complete = ath10k_htc_control_rx_complete;
	conn_req.max_send_queue_depth = ATH10K_NUM_CONTROL_TX_BUFFERS;
	conn_req.service_id = ATH10K_HTC_SVC_ID_RSVD_CTRL;

	/* connect fake service */
	status = ath10k_htc_connect_service(htc, &conn_req, &conn_resp);
	if (status) {
		ath10k_err("could not connect to htc service (%d)\n",
			   status);
		goto err_target;
	}

	return 0;

err_target:
	ath10k_hif_stop(htc->ar);
err_start:
	return status;
}
int mddi_host_register_multiwrite(uint32 reg_addr, uint32 *value_list_ptr, uint32 value_count, boolean wait, mddi_llist_done_cb_type done_cb, mddi_host_type host) { mddi_linked_list_type *curr_llist_ptr; mddi_linked_list_type *curr_llist_dma_ptr; mddi_register_access_packet_type *regacc_pkt_ptr; uint16 curr_llist_idx; int ret = 0; if (!value_list_ptr || !value_count || value_count > MDDI_HOST_MAX_CLIENT_REG_IN_SAME_ADDR) { MDDI_MSG_ERR("\n Invalid value_list or value_count"); return -EINVAL; } if (in_interrupt()) MDDI_MSG_CRIT("Called from ISR context\n"); if (!mddi_host_powered) { MDDI_MSG_ERR("MDDI powered down!\n"); mddi_init(); } down(&mddi_host_mutex); curr_llist_idx = mddi_get_next_free_llist_item(host, TRUE); curr_llist_ptr = &llist_extern[host][curr_llist_idx]; curr_llist_dma_ptr = &llist_dma_extern[host][curr_llist_idx]; curr_llist_ptr->link_controller_flags = 1; curr_llist_ptr->packet_header_count = 14; curr_llist_ptr->packet_data_count = (uint16)(value_count * 4); curr_llist_ptr->next_packet_pointer = NULL; curr_llist_ptr->reserved = 0; regacc_pkt_ptr = &curr_llist_ptr->packet_header.register_pkt; regacc_pkt_ptr->packet_length = curr_llist_ptr->packet_header_count + curr_llist_ptr->packet_data_count; regacc_pkt_ptr->packet_type = 146; /* register access packet */ regacc_pkt_ptr->bClient_ID = 0; regacc_pkt_ptr->read_write_info = value_count; regacc_pkt_ptr->register_address = reg_addr; memcpy((void *)®acc_pkt_ptr->register_data_list[0], value_list_ptr, curr_llist_ptr->packet_data_count); curr_llist_ptr->packet_data_pointer = (void *)(®acc_pkt_ptr->register_data_list[0]); MDDI_MSG_DEBUG("Reg Access write reg=0x%x, value=0x%x\n", regacc_pkt_ptr->register_address, regacc_pkt_ptr->register_data_list[0]); /* now adjust pointers */ mddi_queue_forward_packets(curr_llist_idx, curr_llist_idx, wait, done_cb, host); up(&mddi_host_mutex); if (wait) { int wait_ret; mddi_linked_list_notify_type *llist_notify_ptr; llist_notify_ptr = 
&llist_extern_notify[host][curr_llist_idx]; wait_ret = wait_for_completion_timeout( &(llist_notify_ptr->done_comp), 5 * HZ); if (wait_ret <= 0) ret = -EBUSY; if (wait_ret < 0) printk(KERN_ERR "%s: failed to wait for completion!\n", __func__); else if (!wait_ret) printk(KERN_ERR "%s: Timed out waiting!\n", __func__); } return ret; }
static int carl9170_usb_load_firmware(struct ar9170 *ar) { const u8 *data; u8 *buf; unsigned int transfer; size_t len; u32 addr; int err = 0; buf = kmalloc(4096, GFP_KERNEL); if (!buf) { err = -ENOMEM; goto err_out; } data = ar->fw.fw->data; len = ar->fw.fw->size; addr = ar->fw.address; /* this removes the miniboot image */ data += ar->fw.offset; len -= ar->fw.offset; while (len) { transfer = min_t(unsigned int, len, 4096u); memcpy(buf, data, transfer); err = usb_control_msg(ar->udev, usb_sndctrlpipe(ar->udev, 0), 0x30 /* FW DL */, 0x40 | USB_DIR_OUT, addr >> 8, 0, buf, transfer, 100); if (err < 0) { kfree(buf); goto err_out; } len -= transfer; data += transfer; addr += transfer; } kfree(buf); err = usb_control_msg(ar->udev, usb_sndctrlpipe(ar->udev, 0), 0x31 /* FW DL COMPLETE */, 0x40 | USB_DIR_OUT, 0, 0, NULL, 0, 200); if (wait_for_completion_timeout(&ar->fw_boot_wait, HZ) == 0) { err = -ETIMEDOUT; goto err_out; } err = carl9170_echo_test(ar, 0x4a110123); if (err) goto err_out; /* now, start the command response counter */ ar->cmd_seq = -1; return 0; err_out: dev_err(&ar->udev->dev, "firmware upload failed (%d).\n", err); return err; }
int mddi_host_register_multiread(uint32 reg_addr, uint32 *value_list_ptr, uint32 value_count, boolean wait, mddi_host_type host) { mddi_linked_list_type *curr_llist_ptr; mddi_register_access_packet_type *regacc_pkt_ptr; uint16 curr_llist_idx; int ret = 0; if (!value_list_ptr || !value_count || value_count >= MDDI_HOST_MAX_CLIENT_REG_IN_SAME_ADDR) { MDDI_MSG_ERR("\n Invalid value_list or value_count"); return -EINVAL; } if (in_interrupt()) MDDI_MSG_CRIT("Called from ISR context\n"); if (!mddi_host_powered) { MDDI_MSG_ERR("MDDI powered down!\n"); mddi_init(); } down(&mddi_host_mutex); mddi_reg_read_value_ptr = value_list_ptr; curr_llist_idx = mddi_get_reg_read_llist_item(host, TRUE); if (curr_llist_idx == UNASSIGNED_INDEX) { up(&mddi_host_mutex); /* need to change this to some sort of wait */ MDDI_MSG_ERR("Attempting to queue up more than 1 reg read\n"); return -EINVAL; } curr_llist_ptr = &llist_extern[host][curr_llist_idx]; curr_llist_ptr->link_controller_flags = 0x11; curr_llist_ptr->packet_header_count = 14; curr_llist_ptr->packet_data_count = 0; curr_llist_ptr->next_packet_pointer = NULL; curr_llist_ptr->packet_data_pointer = NULL; curr_llist_ptr->reserved = 0; regacc_pkt_ptr = &curr_llist_ptr->packet_header.register_pkt; regacc_pkt_ptr->packet_length = curr_llist_ptr->packet_header_count; regacc_pkt_ptr->packet_type = 146; /* register access packet */ regacc_pkt_ptr->bClient_ID = 0; regacc_pkt_ptr->read_write_info = 0x8000 | value_count; regacc_pkt_ptr->register_address = reg_addr; /* now adjust pointers */ mddi_queue_forward_packets(curr_llist_idx, curr_llist_idx, wait, NULL, host); /* need to check if we can write the pointer or not */ up(&mddi_host_mutex); if (wait) { int wait_ret; mddi_linked_list_notify_type *llist_notify_ptr; llist_notify_ptr = &llist_extern_notify[host][curr_llist_idx]; wait_ret = wait_for_completion_timeout( &(llist_notify_ptr->done_comp), 5 * HZ); if (wait_ret <= 0) ret = -EBUSY; if (wait_ret < 0) printk(KERN_ERR "%s: failed to wait for 
completion!\n", __func__); else if (!wait_ret) printk(KERN_ERR "%s: Timed out waiting!\n", __func__); } MDDI_MSG_DEBUG("Reg Read value=0x%x\n", *value_list_ptr); return ret; }
/*
 * Report the current available from the USB host/charger to the power
 * supply framework: classify the charger type, apply vendor
 * (CONFIG_LGE_PM) current-limit overrides, and enable/disable charging
 * on the "usb"/"ac" power supplies accordingly.
 *
 * Returns 0 on success or when nothing needs to change, -ENXIO if a
 * power-supply property update fails.
 */
static int dwc3_otg_set_power(struct usb_phy *phy, unsigned mA)
{
	/* static: remembers the last classified supply type across calls */
	static int power_supply_type;
	struct dwc3_otg *dotg = container_of(phy->otg, struct dwc3_otg, otg);

	if (!dotg->psy || !dotg->charger) {
		dev_err(phy->dev, "no usb power supply/charger registered\n");
		return 0;
	}

	if (dotg->charger->charging_disabled)
		return 0;

	/* Map the detected charger type onto a power-supply type. */
#ifdef CONFIG_LGE_PM
	if (dotg->charger->chg_type == DWC3_SDP_CHARGER ||
	    dotg->charger->chg_type == DWC3_FLOATED_CHARGER)
#else
	if (dotg->charger->chg_type == DWC3_SDP_CHARGER)
#endif
		power_supply_type = POWER_SUPPLY_TYPE_USB;
	else if (dotg->charger->chg_type == DWC3_CDP_CHARGER)
		power_supply_type = POWER_SUPPLY_TYPE_USB_CDP;
	else if (dotg->charger->chg_type == DWC3_DCP_CHARGER ||
		 dotg->charger->chg_type == DWC3_PROPRIETARY_CHARGER)
		power_supply_type = POWER_SUPPLY_TYPE_USB_DCP;
	else
		power_supply_type = POWER_SUPPLY_TYPE_UNKNOWN;

#ifndef CONFIG_LGE_PM
	power_supply_set_supply_type(dotg->psy, power_supply_type);
#endif

	/* Notify touchscreen drivers of charger state (noise tuning). */
#if defined (CONFIG_TOUCHSCREEN_SYNAPTICS_G3) && defined (CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4)
	update_status(1, dotg->charger->chg_type);
#endif
#if defined (CONFIG_TOUCHSCREEN_ATMEL_2954) || defined(CONFIG_TOUCHSCREEN_ATMEL_mxT2954)
	pr_info("%s : chg_type is %d. previous_usb_status is %d.\n",
		__func__, dotg->charger->chg_type, previous_usb_status);
	if (previous_usb_status != dotg->charger->chg_type)
		trigger_usb_state_from_otg(dotg->charger->chg_type);
	else
		pr_info("%s : previous_usb_status and current_usb_status is same.\n",
			__func__);
	previous_usb_status = dotg->charger->chg_type;
#endif

#ifdef CONFIG_LGE_PM
	/* Vendor current-limit overrides based on detected cable type. */
	if (mA > 2 && lge_pm_get_cable_type() != NO_INIT_CABLE) {
		if (dotg->charger->chg_type == DWC3_SDP_CHARGER) {
			if (dotg->dwc->gadget.speed == USB_SPEED_SUPER) {
				if (dotg->charger->max_power > 2)
					dotg->charger->max_power = 0;
				mA = DWC3_USB30_CHG_CURRENT;
			} else {
				mA = lge_pm_get_usb_current();
			}
#ifdef CONFIG_QPNP_CHARGER
			/* For MST, boost current up over 900mA in spite of USB */
			if (pseudo_batt_info.mode && mA == 500)
				mA = DWC3_USB30_CHG_CURRENT;
#endif
		} else if (dotg->charger->chg_type == DWC3_DCP_CHARGER) {
			mA = lge_pm_get_ta_current();
		} else if (dotg->charger->chg_type == DWC3_FLOATED_CHARGER) {
			mA = lge_pm_get_usb_current();
		}
	}
#endif

	if (dotg->charger->chg_type == DWC3_CDP_CHARGER)
		mA = DWC3_IDEV_CHG_MAX;

	/* Nothing changed since last call — avoid redundant psy updates. */
	if (dotg->charger->max_power == mA)
		return 0;

	dev_info(phy->dev, "Avail curr from USB = %u\n", mA);

	/* [email protected] make psy getter and move it above power_supply_type setter. 2014-02-06 */
#ifdef CONFIG_LGE_PM
#ifndef CONFIG_USB_DWC3_LGE_SINGLE_PSY
	if (dwc3_otg_get_psy(phy) < 0)
		goto psy_error;
#else
	if (strcmp(dotg->psy->name, "usb")) {
		pr_info("%s psy name is %s, so change psy to usb.\n",
			__func__, dotg->psy->name);
		dotg->psy = power_supply_get_by_name("usb");
		if (!dotg->psy)
			goto psy_error;
	}
#endif
	power_supply_set_supply_type(dotg->psy, power_supply_type);
#endif

#ifdef CONFIG_LGE_PM
	if (mA > 2) {
#else
	if (dotg->charger->max_power <= 2 && mA > 2) {
#endif
		/* Enable charging */
		if (power_supply_set_online(dotg->psy, true))
			goto psy_error;
		if (power_supply_set_current_limit(dotg->psy, 1000*mA))
			goto psy_error;
#ifdef CONFIG_QPNP_CHARGER
		/* If the active psy is "ac", mirror the settings onto the
		 * "usb" supply as well, then switch back. */
		if (!strncmp(dotg->psy->name, "ac", 2)) {
			dotg->psy = power_supply_get_by_name("usb");
			if (!dotg->psy)
				goto psy_error;
			if (power_supply_set_online(dotg->psy, true))
				goto psy_error;
			if (power_supply_set_supply_type(dotg->psy,
							 power_supply_type))
				goto psy_error;
			if (power_supply_set_current_limit(dotg->psy,
							   1000*mA))
				goto psy_error;
			dotg->psy = power_supply_get_by_name("ac");
			if (!dotg->psy)
				goto psy_error;
		}
#endif
	} else if (dotg->charger->max_power > 0 && (mA == 0 || mA == 2)) {
		/* Disable charging */
		if (power_supply_set_online(dotg->psy, false))
			goto psy_error;
		/* Set max current limit */
		if (power_supply_set_current_limit(dotg->psy, 0))
			goto psy_error;
#ifdef CONFIG_QPNP_CHARGER
		if (!strncmp(dotg->psy->name, "ac", 2)) {
			dotg->psy = power_supply_get_by_name("usb");
			if (!dotg->psy)
				goto psy_error;
			if (power_supply_set_online(dotg->psy, false))
				goto psy_error;
			if (power_supply_set_supply_type(dotg->psy,
							 power_supply_type))
				goto psy_error;
			if (power_supply_set_current_limit(dotg->psy, 0))
				goto psy_error;
			dotg->psy = power_supply_get_by_name("ac");
			if (!dotg->psy)
				goto psy_error;
		}
#endif
#ifndef CONFIG_USB_DWC3_LGE_SINGLE_PSY
		dotg->charger->chg_type = DWC3_INVALID_CHARGER;
#endif
	}

	power_supply_changed(dotg->psy);
	dotg->charger->max_power = mA;

#if defined (CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4) || defined(CONFIG_TOUCHSCREEN_ATMEL_S540)
#if defined (CONFIG_TOUCHSCREEN_SYNAPTICS_G2) || defined (CONFIG_MACH_MSM8974_TIGERS) || defined(CONFIG_MACH_MSM8974_B1_KR) || defined(CONFIG_MACH_MSM8974_B1W)
	queue_work(touch_otg_wq, &dotg->touch_work);
#endif
#endif
	return 0;

psy_error:
	dev_dbg(phy->dev, "power supply error when setting property\n");
	return -ENXIO;
}

/* IRQs which OTG driver is interested in handling */
#define DWC3_OEVT_MASK		(DWC3_OEVTEN_OTGCONIDSTSCHNGEVNT | \
				 DWC3_OEVTEN_OTGBDEVVBUSCHNGEVNT)

/**
 * dwc3_otg_interrupt - interrupt handler for dwc3 otg events.
 * @_dotg: Pointer to our controller context structure
 *
 * Reads OEVT/OSTS, updates the ID and B_SESS_VLD input bits, queues the
 * state-machine work, and acknowledges only the events it handled.
 *
 * Returns IRQ_HANDLED on success otherwise IRQ_NONE.
 */
static irqreturn_t dwc3_otg_interrupt(int irq, void *_dotg)
{
	struct dwc3_otg *dotg = (struct dwc3_otg *)_dotg;
	u32 osts, oevt_reg;
	int ret = IRQ_NONE;
	int handled_irqs = 0;
	struct usb_phy *phy = dotg->otg.phy;

	oevt_reg = dwc3_readl(dotg->regs, DWC3_OEVT);

	/* Not one of ours — let other handlers on the line run. */
	if (!(oevt_reg & DWC3_OEVT_MASK))
		return IRQ_NONE;

	osts = dwc3_readl(dotg->regs, DWC3_OSTS);

	if ((oevt_reg & DWC3_OEVTEN_OTGCONIDSTSCHNGEVNT) ||
	    (oevt_reg & DWC3_OEVTEN_OTGBDEVVBUSCHNGEVNT)) {
		/*
		 * ID sts has changed, set inputs later, in the workqueue
		 * function, switch from A to B or from B to A.
		 */
		if (oevt_reg & DWC3_OEVTEN_OTGCONIDSTSCHNGEVNT) {
			if (osts & DWC3_OTG_OSTS_CONIDSTS) {
				dev_dbg(phy->dev, "ID set\n");
				set_bit(ID, &dotg->inputs);
			} else {
				dev_dbg(phy->dev, "ID clear\n");
				clear_bit(ID, &dotg->inputs);
			}
			handled_irqs |= DWC3_OEVTEN_OTGCONIDSTSCHNGEVNT;
		}

		if (oevt_reg & DWC3_OEVTEN_OTGBDEVVBUSCHNGEVNT) {
			if (osts & DWC3_OTG_OSTS_BSESVALID) {
				dev_dbg(phy->dev, "BSV set\n");
				set_bit(B_SESS_VLD, &dotg->inputs);
			} else {
				dev_dbg(phy->dev, "BSV clear\n");
				clear_bit(B_SESS_VLD, &dotg->inputs);
			}
			handled_irqs |= DWC3_OEVTEN_OTGBDEVVBUSCHNGEVNT;
		}

		queue_delayed_work(system_nrt_wq, &dotg->sm_work, 0);

		ret = IRQ_HANDLED;

		/* Clear the interrupts we handled */
		dwc3_writel(dotg->regs, DWC3_OEVT, handled_irqs);
	}

	return ret;
}

/**
 * dwc3_otg_init_sm - initialize OTG statemachine input
 * @dotg: Pointer to the dwc3_otg structure
 *
 * Waits (up to 5 s) for the PMIC driver to report the initial VBUS
 * state, resets the OTG block, and seeds the ID/B_SESS_VLD inputs from
 * OSTS when no external transceiver handles OTG.
 */
void dwc3_otg_init_sm(struct dwc3_otg *dotg)
{
	u32 osts = dwc3_readl(dotg->regs, DWC3_OSTS);
	struct usb_phy *phy = dotg->otg.phy;
	struct dwc3_ext_xceiv *ext_xceiv;
	/* NOTE(review): wait_for_completion_timeout() returns unsigned
	 * long; storing it in an int works for the 0 == timeout test. */
	int ret;

	dev_dbg(phy->dev, "Initialize OTG inputs, osts: 0x%x\n", osts);

	/*
	 * VBUS initial state is reported after PMIC
	 * driver initialization. Wait for it.
	 */
	ret = wait_for_completion_timeout(&dotg->dwc3_xcvr_vbus_init, HZ * 5);
	if (!ret) {
		dev_err(phy->dev, "%s: completion timeout\n", __func__);
		/* We can safely assume no cable connected */
		set_bit(ID, &dotg->inputs);
	}

	ext_xceiv = dotg->ext_xceiv;
	dwc3_otg_reset(dotg);
	if (ext_xceiv && !ext_xceiv->otg_capability) {
		if (osts & DWC3_OTG_OSTS_CONIDSTS)
			set_bit(ID, &dotg->inputs);
		else
			clear_bit(ID, &dotg->inputs);

		if (osts & DWC3_OTG_OSTS_BSESVALID)
			set_bit(B_SESS_VLD, &dotg->inputs);
		else
			clear_bit(B_SESS_VLD, &dotg->inputs);
	}
}
/*
 * PIO write of one I2C message: prime the TX FIFO, enable the relevant
 * interrupts, and wait for the transfer-complete interrupt.
 *
 * @buf:      bytes to write
 * @len:      number of bytes
 * @end_type: whether the message ends with a STOP condition
 *
 * Returns 0 on success, -ETIMEDOUT if the completion never fires,
 * -ENXIO/-EIO on a transmit abort, or -EAGAIN when the abort reason
 * asks the core to resend. The controller is reset on any error.
 */
static inline int xfer_write(struct i2c_jz *i2c, unsigned char *buf,
			     int len, enum msg_end_type end_type)
{
	int ret = 0;
	long timeout = TIMEOUT;
	unsigned short reg_tmp;
	unsigned int wait_complete_timeout_ms;

	/* Scale the wait with message length: ~9 bits/byte at i2c->rate,
	 * doubled for margin, plus a configured fixed slack. */
	wait_complete_timeout_ms =
		len * 1000 * 9 * 2 / i2c->rate + CONFIG_I2C_JZV10_WAIT_MS;

#ifdef CONFIG_I2C_DEBUG_INFO
	if (i2c->debug > DEBUG_WARN)
		dev_info(&(i2c->adap.dev),
			 "%s, Begin write msg, want to write length is %d\n",
			 __func__, len);
#endif

	i2c->wbuf = buf;
	i2c->len = len;

	i2c_writel(i2c, I2C_TXTL, TX_LEVEL);

	/* Reading these registers clears the latched conditions. */
	i2c_readl(i2c, I2C_CSTP);	/* clear STP bit */
	i2c_readl(i2c, I2C_CTXOF);	/* clear TXOF bit */
	i2c_readl(i2c, I2C_CTXABRT);	/* clear TXABRT bit */

	i2c->w_end_type = end_type;
	/* Fill the TX FIFO while it has room; the ISR continues from
	 * i2c->wbuf/i2c->len if the message doesn't fit. */
	while ((i2c_readl(i2c, I2C_STA) & I2C_STA_TFNF) && (i2c->len > 0)) {
		reg_tmp = *i2c->wbuf++;
		if (i2c->len == 1) {
			/* Tag the last byte with a STOP if requested. */
			if (end_type == MSG_END_STOP) {
				reg_tmp |= I2C_DC_STP;
			}
		}
		i2c_writel(i2c, I2C_DC, reg_tmp);
		i2c->len -= 1;
	}
	if (i2c->len == 0) {
		/* Whole message queued: interrupt when the FIFO drains. */
		i2c_writel(i2c, I2C_TXTL, 0);
	}

	/* Unmask TX-empty, TX-abort and TX-overflow (and STOP when the
	 * message ends with one). */
	reg_tmp = I2C_INTM_MTXEMP | I2C_INTM_MTXABT | I2C_INTM_MTXOF;
	if (end_type == MSG_END_STOP)
		reg_tmp |= I2C_INTM_MISTP;
	i2c_writel(i2c, I2C_INTM, reg_tmp);

	timeout = wait_for_completion_timeout(&i2c->complete,
					      msecs_to_jiffies(wait_complete_timeout_ms));
	if (!timeout) {
		dev_err(&(i2c->adap.dev), "--I2C pio write wait timeout\n");
#ifdef I2C_DEBUG
		i2c_jz_dump_regs(i2c);
#endif
		ret = -ETIMEDOUT;
	}

	/* Check for a transmit abort even if the wait succeeded. */
	reg_tmp = i2c_readl(i2c, I2C_TXABRT);
	if (reg_tmp) {
		txabrt(i2c, reg_tmp);
		/* presumably bits 1..3 indicate address NACK-class
		 * aborts (-ENXIO) vs. other causes (-EIO) — TODO confirm
		 * against the SoC manual */
		if (reg_tmp > 0x1 && reg_tmp < 0x10)
			ret = -ENXIO;
		else
			ret = -EIO;
		/* After an I2C_TXABRT_ABRT_XDATA_NOACK error the core is
		 * required to resend, so report -EAGAIN. */
		if (reg_tmp & 8) {
			ret = -EAGAIN;
		}
		/* Reading CTXABRT clears the abort status. */
		i2c_readl(i2c, I2C_CTXABRT);
	}
	if (ret < 0)
		i2c_jz_reset(i2c);

#ifdef CONFIG_I2C_DEBUG_INFO
	if (i2c->debug > DEBUG_WARN)
		dev_info(&(i2c->adap.dev), "%s, Write msg over\n", __func__);
#endif
	return ret;
}
/* Called from HCI core to initialize the device */ static int hci_st_open(struct hci_dev *hdev) { static struct st_proto_s hci_st_proto; unsigned long timeleft; int err; BTDRV_API_START(); err = 0; BT_DRV_DBG("%s %p", hdev->name, hdev); /* Already registered with ST ? */ if (test_bit(BT_ST_REGISTERED, &hst->flags)) { BT_DRV_ERR("Registered with ST already,open called again?"); BTDRV_API_EXIT(0); return 0; } /* Populate BT driver info required by ST */ memset(&hci_st_proto, 0, sizeof(hci_st_proto)); /* BT driver ID */ hci_st_proto.type = ST_BT; /* Receive function which called from ST */ hci_st_proto.recv = hci_st_receive; /* Packet match function may used in future */ hci_st_proto.match_packet = NULL; /* Callback to be called when registration is pending */ hci_st_proto.reg_complete_cb = hci_st_registration_completion_cb; /* This is write function pointer of ST. BT driver will make use of this * for sending any packets to chip. ST will assign and give to us, so * make it as NULL */ hci_st_proto.write = NULL; /* send in the hst to be received at registration complete callback * and during st's receive */ hci_st_proto.priv_data = hst; /* Register with ST layer */ err = st_register(&hci_st_proto); if (err == -EINPROGRESS) { /* Prepare wait-for-completion handler data structures. * Needed to syncronize this and st_registration_completion_cb() * functions. */ init_completion(&hst->wait_for_btdrv_reg_completion); /* Reset ST registration callback status flag , this value * will be updated in hci_st_registration_completion_cb() * function whenever it called from ST driver. */ hst->streg_cbdata = -EINPROGRESS; /* ST is busy with other protocol registration(may be busy with * firmware download).So,Wait till the registration callback * (passed as a argument to st_register() function) getting * called from ST. 
*/ BT_DRV_DBG(" %s waiting for reg completion signal from ST", __func__); timeleft = wait_for_completion_timeout (&hst->wait_for_btdrv_reg_completion, msecs_to_jiffies(BT_REGISTER_TIMEOUT)); if (!timeleft) { BT_DRV_ERR("Timeout(%ld sec),didn't get reg" "completion signal from ST", BT_REGISTER_TIMEOUT / 1000); BTDRV_API_EXIT(-ETIMEDOUT); return -ETIMEDOUT; } /* Is ST registration callback called with ERROR value? */ if (hst->streg_cbdata != 0) { BT_DRV_ERR("ST reg completion CB called with invalid" "status %d", hst->streg_cbdata); BTDRV_API_EXIT(-EAGAIN); return -EAGAIN; } err = 0; } else if (err == -1) { BT_DRV_ERR("st_register failed %d", err); BTDRV_API_EXIT(-EAGAIN); return -EAGAIN; } /* Do we have proper ST write function? */ if (hci_st_proto.write != NULL) { /* We need this pointer for sending any Bluetooth pkts */ hst->st_write = hci_st_proto.write; } else { BT_DRV_ERR("failed to get ST write func pointer"); /* Undo registration with ST */ err = st_unregister(ST_BT); if (err < 0) BT_DRV_ERR("st_unregister failed %d", err); hst->st_write = NULL; BTDRV_API_EXIT(-EAGAIN); return -EAGAIN; } /* Registration with ST layer is completed successfully, * now chip is ready to accept commands from HCI CORE. * Mark HCI Device flag as RUNNING */ set_bit(HCI_RUNNING, &hdev->flags); /* Registration with ST successful */ set_bit(BT_ST_REGISTERED, &hst->flags); BTDRV_API_EXIT(err); return err; }
/**
 * cds_get_reg_domain_from_country_code() - get the regulatory domain
 * @reg_domain_ptr: ptr to store regulatory domain
 * @country_code: two-character ISO country code to look up
 * @source: origin of the request (query / init / IE / user)
 *
 * Looks the country up in country_info_table; unmatched countries fall
 * back to REGDOMAIN_WORLD. For driver-initiated init the regulatory
 * core is hinted and the function waits (bounded) for it to settle;
 * for IE/user sources only a user hint is issued.
 *
 * Return: CDF_STATUS_SUCCESS on success
 *         CDF_STATUS_E_FAULT on error
 *         CDF_STATUS_E_EMPTY country table empty
 */
CDF_STATUS cds_get_reg_domain_from_country_code(v_REGDOMAIN_t *reg_domain_ptr,
						const country_code_t country_code,
						v_CountryInfoSource_t source)
{
	v_CONTEXT_t cds_context = NULL;
	hdd_context_t *hdd_ctx = NULL;
	struct wiphy *wiphy = NULL;
	int i;

	if (NULL == reg_domain_ptr) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
			  ("Invalid reg domain pointer"));
		return CDF_STATUS_E_FAULT;
	}

	/* Preset to "no domain" so early exits leave a defined value. */
	*reg_domain_ptr = REGDOMAIN_COUNT;

	/* NOTE(review): if country_code_t is an array typedef, this
	 * parameter decays to a pointer and the NULL check is valid, but
	 * confirm the typedef — an array-typed comparison would always
	 * be false. */
	if (NULL == country_code) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
			  ("Country code array is NULL"));
		return CDF_STATUS_E_FAULT;
	}

	if (0 == country_info_table.countryCount) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
			  ("Reg domain table is empty"));
		return CDF_STATUS_E_EMPTY;
	}

	cds_context = cds_get_global_context();

	if (NULL != cds_context)
		hdd_ctx = cds_get_context(CDF_MODULE_ID_HDD);
	else
		return CDF_STATUS_E_EXISTS;

	if (NULL == hdd_ctx) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
			  ("Invalid pHddCtx pointer"));
		return CDF_STATUS_E_FAULT;
	}

	wiphy = hdd_ctx->wiphy;

	/* During sub-system restart, serve the cached domain and bail. */
	if (cds_is_logp_in_progress()) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
			  "SSR in progress, return");
		*reg_domain_ptr = temp_reg_domain;
		return CDF_STATUS_SUCCESS;
	}

	temp_reg_domain = REGDOMAIN_COUNT;
	/* Linear search of the country table for a matching code. */
	for (i = 0; i < country_info_table.countryCount &&
	     REGDOMAIN_COUNT == temp_reg_domain; i++) {
		if (memcmp(country_code,
			   country_info_table.countryInfo[i].countryCode,
			   CDS_COUNTRY_CODE_LEN) == 0) {
			/* country code is found */
			temp_reg_domain =
				country_info_table.countryInfo[i].regDomain;
			break;
		}
	}

	/* Unknown countries fall back to the world domain. */
	if (REGDOMAIN_COUNT == temp_reg_domain) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
			  ("Country does not map to any Regulatory domain"));
		temp_reg_domain = REGDOMAIN_WORLD;
	}

	if (COUNTRY_QUERY == source) {
		*reg_domain_ptr = temp_reg_domain;
		return CDF_STATUS_SUCCESS;
	}

	if ((COUNTRY_INIT == source) && (false == init_by_reg_core)) {
		init_by_driver = true;
		/* "00" is the wildcard/world code — no hint needed. */
		if (('0' != country_code[0]) || ('0' != country_code[1])) {
			INIT_COMPLETION(hdd_ctx->reg_init);
			regulatory_hint(wiphy, country_code);
			/* Best-effort bounded wait; a timeout here is
			 * deliberately not treated as an error. */
			wait_for_completion_timeout(&hdd_ctx->reg_init,
						    msecs_to_jiffies(REG_WAIT_TIME));
		}
	} else if (COUNTRY_IE == source || COUNTRY_USER == source) {
		regulatory_hint_user(country_code,
				     NL80211_USER_REG_HINT_USER);
	}

	*reg_domain_ptr = temp_reg_domain;
	return CDF_STATUS_SUCCESS;
}
/*
 * debug_test_smsm() - unit tests for the SMSM state-change callback API
 * @buf: output buffer for the human-readable "Test N - PASS" lines
 * @max: size of @buf
 *
 * Exercises smsm_state_cb_register()/smsm_state_cb_deregister() and checks
 * that the registered callback fires (or stops firing) when
 * smsm_change_state() toggles the watched bits.  Each case runs inside a
 * do { ... } while (0) so a failing UT_* macro can break out early.
 *
 * Returns the number of bytes written to @buf.
 */
static int debug_test_smsm(char *buf, int max)
{
	int i = 0;
	int test_num = 0;
	int ret;

	/* Test case 1 - Register new callback for notification */
	do {
		test_num++;
		SMSM_CB_TEST_INIT();
		ret = smsm_state_cb_register(SMSM_APPS_STATE, SMSM_SMDINIT,
				smsm_state_cb, (void *)0x1234);
		UT_EQ_INT(ret, 0);

		/* de-assert SMSM_SMD_INIT to trigger state update */
		UT_EQ_INT(smsm_cb_data.cb_count, 0);
		INIT_COMPLETION(smsm_cb_completion);
		smsm_change_state(SMSM_APPS_STATE, SMSM_SMDINIT, 0x0);
		/* callback must fire within the 20 ms wait */
		UT_GT_INT((int)wait_for_completion_timeout(
					&smsm_cb_completion,
					msecs_to_jiffies(20)), 0);

		UT_EQ_INT(smsm_cb_data.cb_count, 1);
		UT_EQ_INT(smsm_cb_data.old_state & SMSM_SMDINIT, SMSM_SMDINIT);
		UT_EQ_INT(smsm_cb_data.new_state & SMSM_SMDINIT, 0x0);
		UT_EQ_INT((int)smsm_cb_data.data, 0x1234);

		/* re-assert SMSM_SMD_INIT to trigger state update */
		INIT_COMPLETION(smsm_cb_completion);
		smsm_change_state(SMSM_APPS_STATE, 0x0, SMSM_SMDINIT);
		UT_GT_INT((int)wait_for_completion_timeout(
					&smsm_cb_completion,
					msecs_to_jiffies(20)), 0);
		UT_EQ_INT(smsm_cb_data.cb_count, 2);
		UT_EQ_INT(smsm_cb_data.old_state & SMSM_SMDINIT, 0x0);
		UT_EQ_INT(smsm_cb_data.new_state & SMSM_SMDINIT, SMSM_SMDINIT);

		/* deregister callback */
		ret = smsm_state_cb_deregister(SMSM_APPS_STATE, SMSM_SMDINIT,
				smsm_state_cb, (void *)0x1234);
		UT_EQ_INT(ret, 2);

		/* make sure state change doesn't cause any more callbacks */
		INIT_COMPLETION(smsm_cb_completion);
		smsm_change_state(SMSM_APPS_STATE, SMSM_SMDINIT, 0x0);
		smsm_change_state(SMSM_APPS_STATE, 0x0, SMSM_SMDINIT);
		/* here a timeout (return 0) is the expected outcome */
		UT_EQ_INT((int)wait_for_completion_timeout(
					&smsm_cb_completion,
					msecs_to_jiffies(20)), 0);
		UT_EQ_INT(smsm_cb_data.cb_count, 2);

		i += scnprintf(buf + i, max - i, "Test %d - PASS\n", test_num);
	} while (0);

	/* Test case 2 - Update already registered callback */
	do {
		test_num++;
		SMSM_CB_TEST_INIT();
		ret = smsm_state_cb_register(SMSM_APPS_STATE, SMSM_SMDINIT,
				smsm_state_cb, (void *)0x1234);
		UT_EQ_INT(ret, 0);
		/* same cb/data pair: register returns 1 (existing updated) */
		ret = smsm_state_cb_register(SMSM_APPS_STATE, SMSM_INIT,
				smsm_state_cb, (void *)0x1234);
		UT_EQ_INT(ret, 1);

		/* verify both callback bits work */
		INIT_COMPLETION(smsm_cb_completion);
		UT_EQ_INT(smsm_cb_data.cb_count, 0);
		smsm_change_state(SMSM_APPS_STATE, SMSM_SMDINIT, 0x0);
		UT_GT_INT((int)wait_for_completion_timeout(
					&smsm_cb_completion,
					msecs_to_jiffies(20)), 0);
		UT_EQ_INT(smsm_cb_data.cb_count, 1);
		INIT_COMPLETION(smsm_cb_completion);
		smsm_change_state(SMSM_APPS_STATE, 0x0, SMSM_SMDINIT);
		UT_GT_INT((int)wait_for_completion_timeout(
					&smsm_cb_completion,
					msecs_to_jiffies(20)), 0);
		UT_EQ_INT(smsm_cb_data.cb_count, 2);

		INIT_COMPLETION(smsm_cb_completion);
		smsm_change_state(SMSM_APPS_STATE, SMSM_INIT, 0x0);
		UT_GT_INT((int)wait_for_completion_timeout(
					&smsm_cb_completion,
					msecs_to_jiffies(20)), 0);
		UT_EQ_INT(smsm_cb_data.cb_count, 3);
		INIT_COMPLETION(smsm_cb_completion);
		smsm_change_state(SMSM_APPS_STATE, 0x0, SMSM_INIT);
		UT_GT_INT((int)wait_for_completion_timeout(
					&smsm_cb_completion,
					msecs_to_jiffies(20)), 0);
		UT_EQ_INT(smsm_cb_data.cb_count, 4);

		/* deregister 1st callback */
		ret = smsm_state_cb_deregister(SMSM_APPS_STATE, SMSM_SMDINIT,
				smsm_state_cb, (void *)0x1234);
		UT_EQ_INT(ret, 1);
		/* SMDINIT transitions should no longer invoke the callback */
		INIT_COMPLETION(smsm_cb_completion);
		smsm_change_state(SMSM_APPS_STATE, SMSM_SMDINIT, 0x0);
		smsm_change_state(SMSM_APPS_STATE, 0x0, SMSM_SMDINIT);
		UT_EQ_INT((int)wait_for_completion_timeout(
					&smsm_cb_completion,
					msecs_to_jiffies(20)), 0);
		UT_EQ_INT(smsm_cb_data.cb_count, 4);

		/* ...but the SMSM_INIT registration still fires */
		INIT_COMPLETION(smsm_cb_completion);
		smsm_change_state(SMSM_APPS_STATE, SMSM_INIT, 0x0);
		UT_GT_INT((int)wait_for_completion_timeout(
					&smsm_cb_completion,
					msecs_to_jiffies(20)), 0);
		UT_EQ_INT(smsm_cb_data.cb_count, 5);
		INIT_COMPLETION(smsm_cb_completion);
		smsm_change_state(SMSM_APPS_STATE, 0x0, SMSM_INIT);
		UT_GT_INT((int)wait_for_completion_timeout(
					&smsm_cb_completion,
					msecs_to_jiffies(20)), 0);
		UT_EQ_INT(smsm_cb_data.cb_count, 6);

		/* deregister 2nd callback */
		ret = smsm_state_cb_deregister(SMSM_APPS_STATE, SMSM_INIT,
				smsm_state_cb, (void *)0x1234);
		UT_EQ_INT(ret, 2);

		/* make sure state change doesn't cause any more callbacks */
		INIT_COMPLETION(smsm_cb_completion);
		smsm_change_state(SMSM_APPS_STATE, SMSM_INIT, 0x0);
		smsm_change_state(SMSM_APPS_STATE, 0x0, SMSM_INIT);
		UT_EQ_INT((int)wait_for_completion_timeout(
					&smsm_cb_completion,
					msecs_to_jiffies(20)), 0);
		UT_EQ_INT(smsm_cb_data.cb_count, 6);

		i += scnprintf(buf + i, max - i, "Test %d - PASS\n", test_num);
	} while (0);

	/* Test case 3 - Two callback registrations with different data */
	do {
		test_num++;
		SMSM_CB_TEST_INIT();
		ret = smsm_state_cb_register(SMSM_APPS_STATE, SMSM_SMDINIT,
				smsm_state_cb, (void *)0x1234);
		UT_EQ_INT(ret, 0);
		ret = smsm_state_cb_register(SMSM_APPS_STATE, SMSM_INIT,
				smsm_state_cb, (void *)0x3456);
		UT_EQ_INT(ret, 0);

		/* verify both callbacks work */
		INIT_COMPLETION(smsm_cb_completion);
		UT_EQ_INT(smsm_cb_data.cb_count, 0);
		smsm_change_state(SMSM_APPS_STATE, SMSM_SMDINIT, 0x0);
		UT_GT_INT((int)wait_for_completion_timeout(
					&smsm_cb_completion,
					msecs_to_jiffies(20)), 0);
		UT_EQ_INT(smsm_cb_data.cb_count, 1);
		UT_EQ_INT((int)smsm_cb_data.data, 0x1234);

		INIT_COMPLETION(smsm_cb_completion);
		smsm_change_state(SMSM_APPS_STATE, SMSM_INIT, 0x0);
		UT_GT_INT((int)wait_for_completion_timeout(
					&smsm_cb_completion,
					msecs_to_jiffies(20)), 0);
		UT_EQ_INT(smsm_cb_data.cb_count, 2);
		UT_EQ_INT((int)smsm_cb_data.data, 0x3456);

		/* cleanup and unregister
		 * degregister in reverse to verify data field is
		 * being used
		 */
		smsm_change_state(SMSM_APPS_STATE, 0x0, SMSM_SMDINIT);
		smsm_change_state(SMSM_APPS_STATE, 0x0, SMSM_INIT);
		ret = smsm_state_cb_deregister(SMSM_APPS_STATE, SMSM_INIT,
				smsm_state_cb, (void *)0x3456);
		UT_EQ_INT(ret, 2);
		ret = smsm_state_cb_deregister(SMSM_APPS_STATE, SMSM_SMDINIT,
				smsm_state_cb, (void *)0x1234);
		UT_EQ_INT(ret, 2);

		i += scnprintf(buf + i, max - i, "Test %d - PASS\n", test_num);
	} while (0);

	return i;
}
/* Called from HCI core to initialize the device.
 *
 * Registers every BT channel (MAX_BT_CHNL_IDS entries of ti_st_proto[])
 * with the shared transport (ST) core.  st_register() may complete either
 * synchronously (returns 0) or asynchronously (-EINPROGRESS), in which
 * case we block -- bounded by BT_REGISTER_TIMEOUT -- until
 * st_reg_completion_cb() signals wait_reg_completion.
 */
static int ti_st_open(struct hci_dev *hdev)
{
	unsigned long timeleft;
	struct ti_st *hst;
	int err, i;

	BT_DBG("%s %p", hdev->name, hdev);

	/* HCI_RUNNING doubles as the single-open busy flag. */
	if (test_and_set_bit(HCI_RUNNING, &hdev->flags))
		return -EBUSY;

	/* provide contexts for callbacks from ST */
	hst = hci_get_drvdata(hdev);

	for (i = 0; i < MAX_BT_CHNL_IDS; i++) {
		ti_st_proto[i].priv_data = hst;
		ti_st_proto[i].max_frame_size = HCI_MAX_FRAME_SIZE;
		ti_st_proto[i].recv = st_receive;
		ti_st_proto[i].reg_complete_cb = st_reg_completion_cb;

		/* Prepare wait-for-completion handler */
		init_completion(&hst->wait_reg_completion);
		/* Reset ST registration callback status flag,
		 * this value will be updated in
		 * st_reg_completion_cb()
		 * function whenever it called from ST driver.
		 */
		hst->reg_status = -EINPROGRESS;

		err = st_register(&ti_st_proto[i]);
		if (!err)
			goto done;	/* registered synchronously */

		if (err != -EINPROGRESS) {
			clear_bit(HCI_RUNNING, &hdev->flags);
			BT_ERR("st_register failed %d", err);
			return err;
		}

		/* ST is busy with either protocol
		 * registration or firmware download.
		 */
		BT_DBG("waiting for registration "
				"completion signal from ST");
		timeleft = wait_for_completion_timeout
			(&hst->wait_reg_completion,
			 msecs_to_jiffies(BT_REGISTER_TIMEOUT));
		if (!timeleft) {
			clear_bit(HCI_RUNNING, &hdev->flags);
			BT_ERR("Timeout(%d sec),didn't get reg "
					"completion signal from ST",
					BT_REGISTER_TIMEOUT / 1000);
			return -ETIMEDOUT;
		}

		/* Is ST registration callback
		 * called with ERROR status?
		 */
		if (hst->reg_status != 0) {
			clear_bit(HCI_RUNNING, &hdev->flags);
			BT_ERR("ST registration completed with invalid "
					"status %d", hst->reg_status);
			return -EAGAIN;
		}

done:
		/*
		 * Cache the transport write hook ST handed back; without it
		 * we cannot send, so undo all registrations and bail out.
		 * (The inner loop reuses 'i', which is fine only because we
		 * return immediately afterwards.)
		 */
		hst->st_write = ti_st_proto[i].write;
		if (!hst->st_write) {
			BT_ERR("undefined ST write function");
			clear_bit(HCI_RUNNING, &hdev->flags);
			for (i = 0; i < MAX_BT_CHNL_IDS; i++) {
				/* Undo registration with ST */
				err = st_unregister(&ti_st_proto[i]);
				if (err)
					BT_ERR("st_unregister() failed with "
							"error %d", err);
				hst->st_write = NULL;
			}
			return -EIO;
		}
	}
	return 0;
}
/* * Low level master read/write transaction. */ static int omap_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, int stop) { struct omap_i2c_dev *dev = i2c_get_adapdata(adap); #ifdef OMAP_HACK u8 zero_byte = 0; #endif int r; u16 w; dev_dbg(dev->dev, "addr: 0x%04x, len: %d, flags: 0x%x, stop: %d\n", msg->addr, msg->len, msg->flags, stop); #ifndef OMAP_HACK if (msg->len == 0) return -EINVAL; omap_i2c_write_reg(dev, OMAP_I2C_SA_REG, msg->addr); /* REVISIT: Could the STB bit of I2C_CON be used with probing? */ dev->buf = msg->buf; dev->buf_len = msg->len; #else omap_i2c_write_reg(dev, OMAP_I2C_SA_REG, msg->addr); /* REVISIT: Remove this hack when we can get I2C chips from board-*.c * files * Sigh, seems we can't do zero length transactions. Thus, we * can't probe for devices w/o actually sending/receiving at least * a single byte. So we'll set count to 1 for the zero length * transaction case and hope we don't cause grief for some * arbitrary device due to random byte write/read during * probes. */ if (msg->len == 0) { dev->buf = &zero_byte; dev->buf_len = 1; } else { dev->buf = msg->buf; dev->buf_len = msg->len; } #endif omap_i2c_write_reg(dev, OMAP_I2C_CNT_REG, dev->buf_len); /* Clear the FIFO Buffers */ w = omap_i2c_read_reg(dev, OMAP_I2C_BUF_REG); w |= OMAP_I2C_BUF_RXFIF_CLR | OMAP_I2C_BUF_TXFIF_CLR; omap_i2c_write_reg(dev, OMAP_I2C_BUF_REG, w); init_completion(&dev->cmd_complete); dev->cmd_err = 0; w = OMAP_I2C_CON_EN | OMAP_I2C_CON_MST | OMAP_I2C_CON_STT; /* High speed configuration */ if (dev->speed > 400) w |= OMAP_I2C_CON_OPMODE_HS; if (msg->flags & I2C_M_TEN) w |= OMAP_I2C_CON_XA; if (!(msg->flags & I2C_M_RD)) w |= OMAP_I2C_CON_TRX; if (!dev->b_hw && stop) w |= OMAP_I2C_CON_STP; omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, w); if (dev->b_hw && stop) { /* H/w behavior: dont write stt and stp together.. 
*/ while (omap_i2c_read_reg(dev, OMAP_I2C_CON_REG) & OMAP_I2C_CON_STT) { /* Dont do anything - this will come in a couple of loops at max*/ } w |= OMAP_I2C_CON_STP; w &= ~OMAP_I2C_CON_STT; omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, w); } r = wait_for_completion_timeout(&dev->cmd_complete, OMAP_I2C_TIMEOUT); dev->buf_len = 0; if (r < 0) return r; if (r == 0) { dev_err(dev->dev, "controller timed out\n"); omap_i2c_init(dev); return -ETIMEDOUT; } if (likely(!dev->cmd_err)) return 0; /* We have an error */ if (dev->cmd_err & (OMAP_I2C_STAT_AL | OMAP_I2C_STAT_ROVR | OMAP_I2C_STAT_XUDF)) { omap_i2c_init(dev); return -EIO; } if (dev->cmd_err & OMAP_I2C_STAT_NACK) { if (msg->flags & I2C_M_IGNORE_NAK) return 0; if (stop) { w = omap_i2c_read_reg(dev, OMAP_I2C_CON_REG); w |= OMAP_I2C_CON_STP; omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, w); } return -EREMOTEIO; } return -EIO; }
static int batt_read_adc(int channel, int *mv_reading) { int ret; void *h; struct adc_chan_result adc_chan_result; struct completion conv_complete_evt; #ifdef CONFIG_LGE_PM int wait_ret; #endif pr_debug("%s: called for %d\n", __func__, channel); ret = adc_channel_open(channel, &h); if (ret) { pr_err("%s: couldnt open channel %d ret=%d\n", __func__, channel, ret); goto out; } init_completion(&conv_complete_evt); ret = adc_channel_request_conv(h, &conv_complete_evt); if (ret) { pr_err("%s: couldnt request conv channel %d ret=%d\n", __func__, channel, ret); goto out; } #ifdef CONFIG_LGE_PM wait_ret = wait_for_completion_timeout(&conv_complete_evt, msecs_to_jiffies(MSM_PMIC_ADC_READ_TIMEOUT)); if(wait_ret <= 0) { printk(KERN_ERR "===%s: failed to adc wait for completion!===\n",__func__); goto sanity_out; } #else wait_for_completion(&conv_complete_evt); #endif ret = adc_channel_read_result(h, &adc_chan_result); if (ret) { pr_err("%s: couldnt read result channel %d ret=%d\n", __func__, channel, ret); goto out; } ret = adc_channel_close(h); if (ret) { pr_err("%s: couldnt close channel %d ret=%d\n", __func__, channel, ret); } if (mv_reading) *mv_reading = adc_chan_result.measurement; pr_debug("%s: done for %d\n", __func__, channel); return adc_chan_result.physical; out: pr_debug("%s: done for %d\n", __func__, channel); return -EINVAL; #ifdef CONFIG_LGE_PM sanity_out: pm8058_xoadc_clear_recentQ(); ret = adc_channel_close(h); if (ret) { pr_err("%s: couldnt close channel %d ret=%d\n", __func__, channel, ret); } if(channel == CHANNEL_ADC_BATT_THERM) { printk(KERN_ERR "============== batt temp adc read fail so default temp ===============\n"); if (mv_reading) *mv_reading = MSM_CHARGER_GAUGE_MISSING_TEMP_ADC; return MSM_CHARGER_GAUGE_MISSING_TEMP; } else if(channel == CHANNEL_ADC_ACC) { printk(KERN_ERR "============== ACC adc read fail so default usb ===============\n"); return CHANNEL_ADC_ACC_MISSING; } else { printk(KERN_ERR "============== adc read fail ===============\n"); 
return -EINVAL; } #endif }
/**
 * phy_mdm6600_device_power_on() - power on mdm6600 device
 * @ddata: device driver data
 *
 * To get the integrated USB phy in MDM6600 takes some hoops. We must ensure
 * the shared USB bootmode GPIOs are configured, then request modem start-up,
 * reset and power-up.. And then we need to recycle the shared USB bootmode
 * GPIOs as they are also used for Out of Band (OOB) wake for the USB and
 * TS 27.010 serial mux.
 */
static int phy_mdm6600_device_power_on(struct phy_mdm6600 *ddata)
{
	struct gpio_desc *mode_gpio0, *mode_gpio1, *reset_gpio, *power_gpio;
	int error = 0, wakeirq;

	mode_gpio0 = ddata->mode_gpios->desc[PHY_MDM6600_MODE0];
	mode_gpio1 = ddata->mode_gpios->desc[PHY_MDM6600_MODE1];
	reset_gpio = ddata->ctrl_gpios[PHY_MDM6600_RESET];
	power_gpio = ddata->ctrl_gpios[PHY_MDM6600_POWER];

	/*
	 * Shared GPIOs must be low for normal USB mode. After booting
	 * they are used for OOB wake signaling. These can be also used
	 * to configure USB flashing mode later on based on a module
	 * parameter.
	 */
	gpiod_set_value_cansleep(mode_gpio0, 0);
	gpiod_set_value_cansleep(mode_gpio1, 0);

	/* Request start-up mode */
	phy_mdm6600_cmd(ddata, PHY_MDM6600_CMD_NO_BYPASS);

	/* Request a reset first */
	gpiod_set_value_cansleep(reset_gpio, 0);
	msleep(100);

	/* Toggle power GPIO to request mdm6600 to start */
	gpiod_set_value_cansleep(power_gpio, 1);
	msleep(100);
	gpiod_set_value_cansleep(power_gpio, 0);

	/*
	 * Looks like the USB PHY needs between 2.2 to 4 seconds.
	 * If we try to use it before that, we will get L3 errors
	 * from omap-usb-host trying to access the PHY. See also
	 * phy_mdm6600_init() for -EPROBE_DEFER.
	 */
	msleep(PHY_MDM6600_PHY_DELAY_MS);
	ddata->enabled = true;

	/* Booting up the rest of MDM6600 will take total about 8 seconds */
	dev_info(ddata->dev, "Waiting for power up request to complete..\n");
	if (wait_for_completion_timeout(&ddata->ack,
			msecs_to_jiffies(PHY_MDM6600_ENABLED_DELAY_MS))) {
		/* ack arrived; only log success for "normal" status values */
		if (ddata->status > PHY_MDM6600_STATUS_PANIC &&
		    ddata->status < PHY_MDM6600_STATUS_SHUTDOWN_ACK)
			dev_info(ddata->dev, "Powered up OK\n");
	} else {
		/* No ack in time: record failure but still set up OOB wake */
		ddata->enabled = false;
		error = -ETIMEDOUT;
		dev_err(ddata->dev, "Timed out powering up\n");
	}

	/* Reconfigure mode1 GPIO as input for OOB wake */
	gpiod_direction_input(mode_gpio1);

	wakeirq = gpiod_to_irq(mode_gpio1);
	/* NOTE(review): a 0 return is passed through as "success" here --
	 * confirm gpiod_to_irq() cannot return 0 on this platform. */
	if (wakeirq <= 0)
		return wakeirq;

	error = devm_request_threaded_irq(ddata->dev, wakeirq, NULL,
					  phy_mdm6600_wakeirq_thread,
					  IRQF_TRIGGER_RISING |
					  IRQF_TRIGGER_FALLING |
					  IRQF_ONESHOT,
					  "mdm6600-wake",
					  ddata);
	if (error)
		dev_warn(ddata->dev, "no modem wakeirq irq%i: %i\n",
			 wakeirq, error);

	ddata->running = true;

	return error;
}
/*
 * Read a batch of 16-bit registers over USB.
 *
 * Sends a READ_REGS request containing @count register addresses over the
 * bulk-out endpoint, then waits for the interrupt-endpoint handler to
 * deliver the results, which are copied into @values via get_results().
 * Returns 0 on success or a negative errno.
 */
int zd_usb_ioread16v(struct zd_usb *usb, u16 *values,
		     const zd_addr_t *addresses, unsigned int count)
{
	struct usb_req_read_regs *request;
	struct usb_device *usb_dev;
	unsigned long time_left;
	int request_len, transferred;
	int ret;
	int idx;

	/* Reject out-of-range counts up front. */
	if (count < 1) {
		dev_dbg_f(zd_usb_dev(usb), "error: count is zero\n");
		return -EINVAL;
	}
	if (count > USB_MAX_IOREAD16_COUNT) {
		dev_dbg_f(zd_usb_dev(usb),
			 "error: count %u exceeds possible max %u\n",
			 count, USB_MAX_IOREAD16_COUNT);
		return -EINVAL;
	}
	/* This function sleeps on USB I/O; atomic context is not allowed. */
	if (in_atomic()) {
		dev_dbg_f(zd_usb_dev(usb),
			"error: io in atomic context not supported\n");
		return -EWOULDBLOCK;
	}
	/* Results arrive via the interrupt endpoint; it must be enabled. */
	if (!usb_int_enabled(usb)) {
		dev_dbg_f(zd_usb_dev(usb),
			"error: usb interrupt not enabled\n");
		return -EWOULDBLOCK;
	}

	/* Build the request: id followed by the list of addresses. */
	request_len = sizeof(struct usb_req_read_regs) +
		      count * sizeof(__le16);
	request = kmalloc(request_len, GFP_KERNEL);
	if (!request)
		return -ENOMEM;

	request->id = cpu_to_le16(USB_REQ_READ_REGS);
	for (idx = 0; idx < count; idx++)
		request->addr[idx] = cpu_to_le16((u16)addresses[idx]);

	usb_dev = zd_usb_to_usbdev(usb);
	prepare_read_regs_int(usb);
	ret = usb_bulk_msg(usb_dev, usb_sndbulkpipe(usb_dev, EP_REGS_OUT),
			   request, request_len, &transferred,
			   1000 /* ms */);
	if (ret) {
		dev_dbg_f(zd_usb_dev(usb),
			"error in usb_bulk_msg(). Error number %d\n", ret);
		goto error;
	}
	if (transferred != request_len) {
		dev_dbg_f(zd_usb_dev(usb),
			"error in usb_bulk_msg()\n"
			" req_len %d != actual_req_len %d\n",
			request_len, transferred);
		ret = -EIO;
		goto error;
	}

	/* Wait (bounded) for the interrupt handler to post the results. */
	time_left = wait_for_completion_timeout(
		&usb->intr.read_regs.completion, msecs_to_jiffies(1000));
	if (!time_left) {
		disable_read_regs_int(usb);
		dev_dbg_f(zd_usb_dev(usb), "read timed out\n");
		ret = -ETIMEDOUT;
		goto error;
	}

	ret = get_results(usb, values, request, count);
error:
	kfree(request);
	return ret;
}
int mddi_host_register_write(uint32 reg_addr, uint32 reg_val, enum mddi_data_packet_size_type packet_size, boolean wait, mddi_llist_done_cb_type done_cb, mddi_host_type host) { mddi_linked_list_type *curr_llist_ptr; mddi_linked_list_type *curr_llist_dma_ptr; mddi_register_access_packet_type *regacc_pkt_ptr; uint16 curr_llist_idx; int ret = 0; if (in_interrupt()) MDDI_MSG_CRIT("Called from ISR context\n"); if (!mddi_host_powered) { MDDI_MSG_ERR("MDDI powered down!\n"); mddi_init(); } down(&mddi_host_mutex); curr_llist_idx = mddi_get_next_free_llist_item(host, TRUE); curr_llist_ptr = &llist_extern[host][curr_llist_idx]; curr_llist_dma_ptr = &llist_dma_extern[host][curr_llist_idx]; curr_llist_ptr->link_controller_flags = 1; curr_llist_ptr->packet_header_count = 14; curr_llist_ptr->packet_data_count = 4; curr_llist_ptr->next_packet_pointer = NULL; curr_llist_ptr->reserved = 0; regacc_pkt_ptr = &curr_llist_ptr->packet_header.register_pkt; regacc_pkt_ptr->packet_length = curr_llist_ptr->packet_header_count + (uint16)packet_size; regacc_pkt_ptr->packet_type = 146; /* register access packet */ regacc_pkt_ptr->bClient_ID = 0; regacc_pkt_ptr->read_write_info = 0x0001; regacc_pkt_ptr->register_address = reg_addr; regacc_pkt_ptr->register_data_list[0] = reg_val; MDDI_MSG_DEBUG("Reg Access write reg=0x%x, value=0x%x\n", regacc_pkt_ptr->register_address, regacc_pkt_ptr->register_data_list[0]); regacc_pkt_ptr = &curr_llist_dma_ptr->packet_header.register_pkt; curr_llist_ptr->packet_data_pointer = (void *)(®acc_pkt_ptr->register_data_list[0]); /* now adjust pointers */ mddi_queue_forward_packets(curr_llist_idx, curr_llist_idx, wait, done_cb, host); up(&mddi_host_mutex); if (wait) { int wait_ret; mddi_linked_list_notify_type *llist_notify_ptr; llist_notify_ptr = &llist_extern_notify[host][curr_llist_idx]; wait_ret = wait_for_completion_timeout( &(llist_notify_ptr->done_comp), HZ/5); if (wait_ret <= 0) ret = -EBUSY; if (wait_ret < 0) printk(KERN_ERR "%s: failed to wait for 
completion!\n", __func__); else if (!wait_ret) printk(KERN_ERR "%s: Timed out waiting!\n", __func__); } return ret; } /* mddi_host_register_write */
/*
 * Low level master read/write transaction.
 *
 * Programs the slave address, transfer length and control bits, starts the
 * transfer and waits (bounded by OMAP_I2C_TIMEOUT) for the ISR to signal
 * dev->cmd_complete.  Returns 0 on success or a negative errno.
 */
static int omap_i2c_xfer_msg(struct i2c_adapter *adap,
			     struct i2c_msg *msg, int stop)
{
	struct omap_i2c_dev *dev = i2c_get_adapdata(adap);
	int r;
	u16 w;

	dev_dbg(dev->dev, "addr: 0x%04x, len: %d, flags: 0x%x, stop: %d\n",
		msg->addr, msg->len, msg->flags, stop);

	/* Zero-length transactions are not supported by this controller. */
	if (msg->len == 0)
		return -EINVAL;

	omap_i2c_write_reg(dev, OMAP_I2C_SA_REG, msg->addr);

	/* REVISIT: Could the STB bit of I2C_CON be used with probing? */
	dev->buf = msg->buf;
	dev->buf_len = msg->len;

	omap_i2c_write_reg(dev, OMAP_I2C_CNT_REG, dev->buf_len);

	/* Clear the FIFO Buffers */
	w = omap_i2c_read_reg(dev, OMAP_I2C_BUF_REG);
	w |= OMAP_I2C_BUF_RXFIF_CLR | OMAP_I2C_BUF_TXFIF_CLR;
	omap_i2c_write_reg(dev, OMAP_I2C_BUF_REG, w);

	init_completion(&dev->cmd_complete);
	dev->cmd_err = 0;

	w = OMAP_I2C_CON_EN | OMAP_I2C_CON_MST | OMAP_I2C_CON_STT;

	/* High speed configuration */
	if (dev->speed > 400)
		w |= OMAP_I2C_CON_OPMODE_HS;

	if (msg->flags & I2C_M_TEN)
		w |= OMAP_I2C_CON_XA;
	if (!(msg->flags & I2C_M_RD))
		w |= OMAP_I2C_CON_TRX;
	if (!dev->b_hw && stop)
		w |= OMAP_I2C_CON_STP;

	omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, w);

	/*
	 * Don't write stt and stp together on some hardware.
	 */
	if (dev->b_hw && stop) {
		unsigned long delay = jiffies + OMAP_I2C_TIMEOUT;
		u16 con = omap_i2c_read_reg(dev, OMAP_I2C_CON_REG);
		while (con & OMAP_I2C_CON_STT) {
			con = omap_i2c_read_reg(dev, OMAP_I2C_CON_REG);

			/* Let the user know if i2c is in a bad state */
			if (time_after(jiffies, delay)) {
				dev_err(dev->dev, "controller timed out "
				"waiting for start condition to finish\n");
				return -ETIMEDOUT;
			}
			cpu_relax();
		}

		w |= OMAP_I2C_CON_STP;
		w &= ~OMAP_I2C_CON_STT;
		omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, w);
	}

	/*
	 * REVISIT: We should abort the transfer on signals, but the bus goes
	 * into arbitration and we're currently unable to recover from it.
	 */
	r = wait_for_completion_timeout(&dev->cmd_complete, OMAP_I2C_TIMEOUT);
	dev->buf_len = 0;
	/* NOTE(review): wait_for_completion_timeout() never returns a
	 * negative value, so the r < 0 branch looks unreachable -- likely a
	 * leftover from an interruptible variant; confirm before removing. */
	if (r < 0)
		return r;
	if (r == 0) {
		dev_err(dev->dev, "controller timed out\n");
		omap_i2c_init(dev);
		return -ETIMEDOUT;
	}

	if (likely(!dev->cmd_err))
		return 0;

	/* We have an error */
	if (dev->cmd_err & (OMAP_I2C_STAT_AL | OMAP_I2C_STAT_ROVR |
			    OMAP_I2C_STAT_XUDF)) {
		/* Arbitration lost / FIFO over- or underflow: reset block */
		omap_i2c_init(dev);
		return -EIO;
	}

	if (dev->cmd_err & OMAP_I2C_STAT_NACK) {
		if (msg->flags & I2C_M_IGNORE_NAK)
			return 0;
		if (stop) {
			/* Issue a stop so the bus is released after the NAK */
			w = omap_i2c_read_reg(dev, OMAP_I2C_CON_REG);
			w |= OMAP_I2C_CON_STP;
			omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, w);
		}
		return -EREMOTEIO;
	}
	return -EIO;
}
/*
 * sun4i_spi_transfer_one() - execute a single PIO SPI transfer
 * @master: SPI master this transfer runs on
 * @spi:    target SPI device (supplies mode bits and max_speed_hz)
 * @tfr:    the transfer to perform (at most SUN4I_FIFO_DEPTH bytes)
 *
 * Configures polarity, clocking and counters, fills the TX FIFO, starts
 * the exchange and waits (up to 1 s) for the transfer-complete interrupt
 * to signal sspi->done.  Returns 0 on success or a negative errno.
 */
static int sun4i_spi_transfer_one(struct spi_master *master,
				  struct spi_device *spi,
				  struct spi_transfer *tfr)
{
	struct sun4i_spi *sspi = spi_master_get_devdata(master);
	unsigned int mclk_rate, div, timeout;
	unsigned int tx_len = 0;
	int ret = 0;
	u32 reg;

	/* We don't support transfer larger than the FIFO */
	if (tfr->len > SUN4I_FIFO_DEPTH)
		return -EINVAL;

	reinit_completion(&sspi->done);
	sspi->tx_buf = tfr->tx_buf;
	sspi->rx_buf = tfr->rx_buf;
	sspi->len = tfr->len;

	/* Clear pending interrupts */
	sun4i_spi_write(sspi, SUN4I_INT_STA_REG, ~0);

	reg = sun4i_spi_read(sspi, SUN4I_CTL_REG);

	/* Reset FIFOs */
	sun4i_spi_write(sspi, SUN4I_CTL_REG,
			reg | SUN4I_CTL_RF_RST | SUN4I_CTL_TF_RST);

	/*
	 * Setup the transfer control register: Chip Select,
	 * polarities, etc.
	 */
	if (spi->mode & SPI_CPOL)
		reg |= SUN4I_CTL_CPOL;
	else
		reg &= ~SUN4I_CTL_CPOL;

	if (spi->mode & SPI_CPHA)
		reg |= SUN4I_CTL_CPHA;
	else
		reg &= ~SUN4I_CTL_CPHA;

	if (spi->mode & SPI_LSB_FIRST)
		reg |= SUN4I_CTL_LMTF;
	else
		reg &= ~SUN4I_CTL_LMTF;

	/*
	 * If it's a TX only transfer, we don't want to fill the RX
	 * FIFO with bogus data
	 */
	if (sspi->rx_buf)
		reg &= ~SUN4I_CTL_DHB;
	else
		reg |= SUN4I_CTL_DHB;

	/* We want to control the chip select manually */
	reg |= SUN4I_CTL_CS_MANUAL;

	sun4i_spi_write(sspi, SUN4I_CTL_REG, reg);

	/* Ensure that we have a parent clock fast enough */
	mclk_rate = clk_get_rate(sspi->mclk);
	if (mclk_rate < (2 * spi->max_speed_hz)) {
		clk_set_rate(sspi->mclk, 2 * spi->max_speed_hz);
		mclk_rate = clk_get_rate(sspi->mclk);
	}

	/*
	 * Setup clock divider.
	 *
	 * We have two choices there. Either we can use the clock
	 * divide rate 1, which is calculated thanks to this formula:
	 * SPI_CLK = MOD_CLK / (2 ^ (cdr + 1))
	 * Or we can use CDR2, which is calculated with the formula:
	 * SPI_CLK = MOD_CLK / (2 * (cdr + 1))
	 * Whether we use the former or the latter is set through the
	 * DRS bit.
	 *
	 * First try CDR2, and if we can't reach the expected
	 * frequency, fall back to CDR1.
	 */
	div = mclk_rate / (2 * spi->max_speed_hz);
	if (div <= (SUN4I_CLK_CTL_CDR2_MASK + 1)) {
		if (div > 0)
			div--;

		reg = SUN4I_CLK_CTL_CDR2(div) | SUN4I_CLK_CTL_DRS;
	} else {
		div = ilog2(mclk_rate) - ilog2(spi->max_speed_hz);
		reg = SUN4I_CLK_CTL_CDR1(div);
	}

	sun4i_spi_write(sspi, SUN4I_CLK_CTL_REG, reg);

	/* Setup the transfer now... */
	if (sspi->tx_buf)
		tx_len = tfr->len;

	/* Setup the counters */
	sun4i_spi_write(sspi, SUN4I_BURST_CNT_REG, SUN4I_BURST_CNT(tfr->len));
	sun4i_spi_write(sspi, SUN4I_XMIT_CNT_REG, SUN4I_XMIT_CNT(tx_len));

	/* Fill the TX FIFO */
	sun4i_spi_fill_fifo(sspi, SUN4I_FIFO_DEPTH);

	/* Enable the interrupts */
	sun4i_spi_write(sspi, SUN4I_INT_CTL_REG, SUN4I_INT_CTL_TC);

	/* Start the transfer */
	reg = sun4i_spi_read(sspi, SUN4I_CTL_REG);
	sun4i_spi_write(sspi, SUN4I_CTL_REG, reg | SUN4I_CTL_XCH);

	/* Wait (bounded) for the transfer-complete IRQ to fire. */
	timeout = wait_for_completion_timeout(&sspi->done,
					      msecs_to_jiffies(1000));
	if (!timeout) {
		ret = -ETIMEDOUT;
		goto out;
	}

	sun4i_spi_drain_fifo(sspi, SUN4I_FIFO_DEPTH);

out:
	/* Always disable interrupts again before returning. */
	sun4i_spi_write(sspi, SUN4I_INT_CTL_REG, 0);

	return ret;
}