int gap_spi_async(struct spi_message * message, int dev_id) { // Save the device id (which CS line to use) in the state // variable of the message. message->state = (void*) dev_id; return spi_async(gapspi_spi_device, message); }
/*
 * This interrupt is called when pen is down and coordinates are
 * available. That is indicated by a either:
 * a) a rising edge on PINTDAV or (PENDAV mode)
 * b) a falling edge on DAV line (DAV mode)
 * depending on the setting of the IRQ bits in the CFR2 setting above.
 */
static irqreturn_t tsc2005_ts_irq_handler(int irq, void *dev_id)
{
	struct tsc2005 *ts = dev_id;

	/* Touchscreen administratively disabled - drop the event. */
	if (ts->disable_depth)
		goto out;

	/* Only start a new coordinate read when none is in flight;
	 * otherwise just record that more interrupts arrived. */
	if (!ts->spi_pending) {
		if (spi_async(ts->spi, &ts->read_msg)) {
			dev_err(&ts->spi->dev, "ts: spi_async() failed");
			goto out;
		}
	}

	/* By shifting in 1s we can never wrap */
	ts->spi_pending = (ts->spi_pending<<1)+1;

	/* Kick pen up timer only if it's not been started yet. Strictly,
	 * it isn't even necessary to start it at all here, but doing so
	 * keeps an equivalence between pen state and timer state.
	 * The SPI read loop will keep pushing it into the future.
	 * If it times out with an SPI pending, it's ignored anyway.
	 */
	if (!timer_pending(&ts->penup_timer)) {
		unsigned long pu = msecs_to_jiffies(TSC2005_TS_PENUP_TIME);

		ts->penup_timer.expires = jiffies + pu;
		add_timer(&ts->penup_timer);
	}
out:
	return IRQ_HANDLED;
}
/*
 * Hard IRQ handler: mask the line and asynchronously read
 * RG_IRQ_STATUS to identify/ack the interrupt source.  A fresh
 * state-change context is allocated per interrupt; the completion
 * chain started by at86rf230_irq_status() takes over on success
 * (including re-enabling the IRQ line).
 */
static irqreturn_t at86rf230_isr(int irq, void *data)
{
	struct at86rf230_local *lp = data;
	struct at86rf230_state_change *ctx;
	int rc;

	disable_irq_nosync(irq);

	/* Atomic context - no sleeping allocation allowed here. */
	ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
	if (!ctx) {
		enable_irq(irq);
		return IRQ_NONE;
	}

	at86rf230_setup_spi_messages(lp, ctx);
	/* tell on error handling to free ctx */
	ctx->free = true;

	ctx->buf[0] = (RG_IRQ_STATUS & CMD_REG_MASK) | CMD_REG;
	ctx->msg.complete = at86rf230_irq_status;
	rc = spi_async(lp->spi, &ctx->msg);
	if (rc) {
		/* Submission failed: report (frees ctx via ctx->free)
		 * and unmask the interrupt line again. */
		at86rf230_async_error(lp, ctx, rc);
		enable_irq(irq);
		return IRQ_NONE;
	}

	return IRQ_HANDLED;
}
static void tsc210x_submit_async(struct tsc210x_spi_req *spi) { int ret; ret = spi_async(spi->dev, &spi->message); if (ret) dev_dbg(&spi->dev->dev, "%s: error %i in SPI request\n", __FUNCTION__, ret); }
/*
 * Read @bytes register bytes from the DIT4192 starting at @reg.
 *
 * Builds a two-transfer message (2-byte command header, then the read
 * payload), submits it asynchronously and blocks on an on-stack
 * completion signalled by dit4192_spi_completion_cb().
 *
 * Returns the requested byte count on success, -EFAULT if the SPI
 * message failed, or the spi_async() error code.  (The original
 * version clobbered every successful read to -EFAULT and left a
 * failed transfer reporting 0.)
 */
static int dit4192_spi_read_device(struct dit4192 *dit4192, u8 reg, int bytes, u8 *buf)
{
	int ret;
	unsigned char header[2];
	struct spi_transfer spi_transfer_w;
	struct spi_transfer spi_transfer_r;
	struct spi_message spi_message;
	DECLARE_COMPLETION_ONSTACK(context);

	memset(&spi_transfer_w, 0, sizeof(struct spi_transfer));
	memset(&spi_transfer_r, 0, sizeof(struct spi_transfer));
	memset(&spi_message, 0, sizeof(struct spi_message));

	spi_setup(dit4192->spi);
	spi_message_init(&spi_message);

	/* Command header: read opcode + address auto-increment + register. */
	header[DIT4192_HEADER_0] = DIT4192_CMD_R | DIT4192_IO_STEP_1 | reg; //0x80
	header[DIT4192_HEADER_1] = 0;

	spi_transfer_w.tx_buf = header;
	spi_transfer_w.len = 2;
	spi_message_add_tail(&spi_transfer_w, &spi_message);

	spi_transfer_r.rx_buf = buf;
	spi_transfer_r.len = bytes;
	spi_message_add_tail(&spi_transfer_r, &spi_message);

	spi_message.complete = dit4192_spi_completion_cb;
	spi_message.context = &context;

	/* must use spi_async in a context that may sleep */
	ret = spi_async(dit4192->spi, &spi_message);
	if (ret == 0) {
		wait_for_completion(&context);
		if (spi_message.status == 0) {
			/* spi_message.actual_length should contain the number
			 * of bytes actually read, but since our driver doesn't
			 * support this, assume all count bytes were read. */
			ret = bytes;
		} else {
			/* Transfer failed - report it instead of silently
			 * returning 0 (or turning success into -EFAULT). */
			ret = -EFAULT;
		}
	} else {
		pr_err("%s: Error calling spi_async, ret = %d\n", __func__, ret);
	}
	return ret;
}
/**********************************************************
 *Message format:
 *	write cmd | ADDR_H |ADDR_L | data stream |
 *	   1B     |   1B   |  1B   |   length    |
 *
 * read buffer length should be 1 + 1 + 1 + data_length
 ***********************************************************/
/*
 * Write @data_len payload bytes to fingerprint-sensor address @addr.
 * The caller provides tx_buf with 3 spare leading bytes for the
 * command header; the payload follows at tx_buf[3..].
 * Returns 0 on success or a negative errno.
 */
int gf_spi_write_bytes(struct gf_dev *gf_dev, u16 addr, u32 data_len, u8 *tx_buf)
{
#ifdef SPI_ASYNC
	DECLARE_COMPLETION_ONSTACK(read_done);
#endif
	struct spi_message msg;
	struct spi_transfer *xfer = NULL;
	int ret = 0;

	xfer = kzalloc(sizeof(*xfer), GFP_KERNEL);
	if (xfer == NULL) {
		GF_LOG_ERROR("failed to kzalloc for command\n");
		return -ENOMEM;
	}

	/* send gf command to device: write opcode + 16-bit address */
	spi_message_init(&msg);
	tx_buf[0] = GF_W;
	tx_buf[1] = (u8)((addr >> 8) & 0xFF);
	tx_buf[2] = (u8)(addr & 0xFF);

	xfer[0].tx_buf = tx_buf;
	xfer[0].len = data_len + 3;
	xfer[0].delay_usecs = 5;
	spi_message_add_tail(xfer, &msg);

#ifdef SPI_ASYNC
	msg.complete = gf_spi_complete;
	msg.context = &read_done;

	/* spi_lock serializes submission against device teardown. */
	spin_lock_irq(&gf_dev->spi_lock);
	ret = spi_async(gf_dev->spi, &msg);
	spin_unlock_irq(&gf_dev->spi_lock);
	if (ret == 0)
		wait_for_completion(&read_done);
	else
		GF_LOG_ERROR("failed to spi write = %d\n", ret);
#else
	ret = spi_sync(gf_dev->spi, &msg);
#endif

	/* kfree(NULL) is a no-op, so no guard is needed (and xfer is
	 * always non-NULL here anyway). */
	kfree(xfer);
	return ret;
}
/*
 * Asynchronously write one AT86RF230 register.
 *
 * Places the two-byte "register write" command (opcode + value) in the
 * context buffer, installs @complete as the SPI completion callback
 * and queues the prepared message; submission failures are funnelled
 * into at86rf230_async_error().
 */
static void
at86rf230_async_write_reg(struct at86rf230_local *lp, u8 reg, u8 val,
			  struct at86rf230_state_change *ctx,
			  void (*complete)(void *context))
{
	int status;

	ctx->buf[0] = CMD_REG | CMD_WRITE | (reg & CMD_REG_MASK);
	ctx->buf[1] = val;
	ctx->msg.complete = complete;

	status = spi_async(lp->spi, &ctx->msg);
	if (status)
		at86rf230_async_error(lp, ctx, status);
}
/*
 * Write a header byte plus @bytes payload bytes to the DIT4192.
 *
 * The payload is staged behind the two-byte header in an on-stack
 * buffer, sent as a single transfer, and completion is awaited via an
 * on-stack completion signalled by dit4192_spi_completion_cb().
 *
 * Returns the number of bytes clocked out (payload + header) on
 * success, the failing message status or spi_async() error code on
 * failure, or -1 for an oversized payload.  (The original version
 * computed this status and then unconditionally returned 0.)
 */
static int dit4192_spi_write_device(struct dit4192 *dit4192, u8 header0, u8 *data, u32 bytes)
{
	int ret;
	u8 spi_data[DIT4192_MAX_DATA_SIZE];
	struct spi_transfer spi_transfer;
	struct spi_message spi_message;
	DECLARE_COMPLETION_ONSTACK(context);

	memset(&spi_transfer, 0, sizeof(struct spi_transfer));
	memset(&spi_message, 0, sizeof(struct spi_message));

	spi_data[DIT4192_HEADER_0] = header0;
	spi_data[DIT4192_HEADER_1] = 0;
	if (bytes > 0) {
		if (bytes <= (DIT4192_MAX_DATA_SIZE-2)) {
			memcpy(&spi_data[2], data, bytes);
		} else {
			/* This should never happen. */
			pr_err("%s: SPI transfer error. Bad data size\n", __func__);
			return -1;
		}
	}

	spi_transfer.tx_buf = spi_data;
	spi_transfer.len = bytes+2;

	spi_setup(dit4192->spi);
	spi_message_init(&spi_message);
	spi_message_add_tail(&spi_transfer, &spi_message);
	spi_message.complete = dit4192_spi_completion_cb;
	spi_message.context = &context;

	/* must use spi_async in a context that may sleep */
	ret = spi_async(dit4192->spi, &spi_message);
	if (ret == 0) {
		wait_for_completion(&context);
		if (spi_message.status == 0) {
			/* update ret to contain the number of bytes actually written */
			ret = spi_transfer.len;
		} else {
			pr_err("%s: SPI transfer error, spi_message.status = %d\n",
			       __func__, spi_message.status);
			ret = spi_message.status;
		}
	} else {
		pr_err("%s: Error calling spi_async, ret = %d\n", __func__, ret);
	}
	return ret;
}
/* Generic function to get some register value in async mode.
 * Loads the "register read" command into the context buffer, sets the
 * completion handler and queues the message; errors are reported via
 * at86rf230_async_error().
 */
static void
at86rf230_async_read_reg(struct at86rf230_local *lp, u8 reg,
			 struct at86rf230_state_change *ctx,
			 void (*complete)(void *context))
{
	u8 *cmd = ctx->buf;
	int status;

	cmd[0] = CMD_REG | (reg & CMD_REG_MASK);
	ctx->msg.complete = complete;

	status = spi_async(lp->spi, &ctx->msg);
	if (status)
		at86rf230_async_error(lp, ctx, status);
}
/*
 * Start an asynchronous frame-buffer read using the preallocated IRQ
 * context.  On submission failure the interrupt line is re-enabled
 * before the error is reported.
 */
static void at86rf230_rx_read_frame(struct at86rf230_local *lp)
{
	u8 *cmd = lp->irq.buf;
	int status;

	cmd[0] = CMD_FB;
	lp->irq.trx.len = AT86RF2XX_MAX_BUF;
	lp->irq.msg.complete = at86rf230_rx_read_frame_complete;

	status = spi_async(lp->spi, &lp->irq.msg);
	if (status) {
		enable_irq(lp->spi->irq);
		at86rf230_async_error(lp, &lp->irq, status);
	}
}
static void at86rf230_write_frame_complete(void *context) { struct at86rf230_state_change *ctx = context; struct at86rf230_local *lp = ctx->lp; u8 *buf = ctx->buf; int rc; buf[0] = (RG_TRX_STATE & CMD_REG_MASK) | CMD_REG | CMD_WRITE; buf[1] = STATE_BUSY_TX; ctx->trx.len = 2; ctx->msg.complete = NULL; rc = spi_async(lp->spi, &ctx->msg); if (rc) at86rf230_async_error(lp, ctx, rc); }
/*
 * Copy @cmdlen command bytes into the driver's TX buffer and queue
 * them as a single-transfer asynchronous message; completion is
 * handled by olpc_xo175_ec_complete() with @priv as context.
 */
static void olpc_xo175_ec_send_command(struct olpc_xo175_ec *priv, void *cmd,
				       size_t cmdlen)
{
	int status;

	memcpy(&priv->tx_buf, cmd, cmdlen);
	priv->xfer.len = cmdlen;

	spi_message_init_with_transfers(&priv->msg, &priv->xfer, 1);
	priv->msg.complete = olpc_xo175_ec_complete;
	priv->msg.context = priv;

	status = spi_async(priv->spi, &priv->msg);
	if (status)
		dev_err(&priv->spi->dev, "spi_async() failed %d\n", status);
}
/*
 * Completion-callback variant of the frame-buffer read: reuse the
 * passed state-change context for the full-buffer transfer.  On
 * failure the transfer length is restored to the 2-byte command size
 * and the interrupt line is re-enabled before reporting the error.
 */
static void at86rf230_rx_read_frame(void *context)
{
	struct at86rf230_state_change *ctx = context;
	struct at86rf230_local *lp = ctx->lp;
	u8 *cmd = ctx->buf;
	int status;

	cmd[0] = CMD_FB;
	ctx->trx.len = AT86RF2XX_MAX_BUF;
	ctx->msg.complete = at86rf230_rx_read_frame_complete;

	status = spi_async(lp->spi, &ctx->msg);
	if (status) {
		ctx->trx.len = 2;
		enable_irq(ctx->irq);
		at86rf230_async_error(lp, ctx, status);
	}
}
/*
 * First step of an asynchronous TRX state change.  On entry buf[1]
 * holds the just-read TRX_STATUS register; decide whether to poll
 * again, finish early (already in the target state), or issue the
 * actual state-change register write.
 */
static void at86rf230_async_state_change_start(void *context)
{
	struct at86rf230_state_change *ctx = context;
	struct at86rf230_local *lp = ctx->lp;
	u8 *buf = ctx->buf;
	const u8 trx_state = buf[1] & 0x1f;
	int rc;

	/* Check for "possible" STATE_TRANSITION_IN_PROGRESS */
	if (trx_state == STATE_TRANSITION_IN_PROGRESS) {
		udelay(1);
		/* Re-read TRX_STATUS and re-enter this function. */
		at86rf230_async_read_reg(lp, RG_TRX_STATUS, ctx,
					 at86rf230_async_state_change_start,
					 ctx->irq_enable);
		return;
	}

	/* Check if we already are in the state which we change in */
	if (trx_state == ctx->to_state) {
		if (ctx->complete)
			ctx->complete(context);
		return;
	}

	/* Set current state to the context of state change */
	ctx->from_state = trx_state;

	/* Going into the next step for a state change which do a timing
	 * relevant delay.
	 */
	buf[0] = (RG_TRX_STATE & CMD_REG_MASK) | CMD_REG | CMD_WRITE;
	buf[1] = ctx->to_state;
	ctx->trx.len = 2;
	ctx->msg.complete = at86rf230_async_state_delay;
	rc = spi_async(lp->spi, &ctx->msg);
	if (rc) {
		/* Restore the interrupt line before reporting the error. */
		if (ctx->irq_enable)
			enable_irq(lp->spi->irq);

		at86rf230_async_error(lp, ctx, rc);
	}
}
/*
 * Hard IRQ handler using the preallocated lp->irq context: mask the
 * line, then asynchronously read RG_IRQ_STATUS to identify/ack the
 * interrupt source.  at86rf230_irq_status() continues the handling
 * chain and is responsible for re-enabling the IRQ on success.
 */
static irqreturn_t at86rf230_isr(int irq, void *data)
{
	struct at86rf230_local *lp = data;
	struct at86rf230_state_change *ctx = &lp->irq;
	u8 *buf = ctx->buf;
	int rc;

	disable_irq_nosync(irq);

	buf[0] = (RG_IRQ_STATUS & CMD_REG_MASK) | CMD_REG;
	ctx->msg.complete = at86rf230_irq_status;
	rc = spi_async(lp->spi, &ctx->msg);
	if (rc) {
		/* Submission failed - unmask the line and report. */
		enable_irq(irq);
		at86rf230_async_error(lp, ctx, rc);
		return IRQ_NONE;
	}

	return IRQ_HANDLED;
}
/* Generic function to get some register value in async mode */ static void at86rf230_async_read_reg(struct at86rf230_local *lp, const u8 reg, struct at86rf230_state_change *ctx, void (*complete)(void *context), const bool irq_enable) { int rc; u8 *tx_buf = ctx->buf; tx_buf[0] = (reg & CMD_REG_MASK) | CMD_REG; ctx->msg.complete = complete; ctx->irq_enable = irq_enable; rc = spi_async(lp->spi, &ctx->msg); if (rc) { if (irq_enable) enable_irq(ctx->irq); at86rf230_async_error(lp, ctx, rc); } }
/*
 * Upload the frame held in lp->tx_skb to the radio's frame buffer.
 * Marks the device as transmitting (under lp->lock) and queues the
 * frame-buffer write; at86rf230_write_frame_complete() continues the
 * TX sequence.
 */
static void at86rf230_write_frame(void *context)
{
	struct at86rf230_state_change *ctx = context;
	struct at86rf230_local *lp = ctx->lp;
	struct sk_buff *skb = lp->tx_skb;
	u8 *buf = lp->tx.buf;
	int rc;

	spin_lock(&lp->lock);
	lp->is_tx = 1;
	spin_unlock(&lp->lock);

	/* Layout: frame-buffer write command, length byte, payload. */
	buf[0] = CMD_FB | CMD_WRITE;
	buf[1] = skb->len + 2;
	memcpy(buf + 2, skb->data, skb->len);
	lp->tx.trx.len = skb->len + 2;
	lp->tx.msg.complete = at86rf230_write_frame_complete;
	rc = spi_async(lp->spi, &lp->tx.msg);
	if (rc)
		at86rf230_async_error(lp, ctx, rc);
}
static int spi_slave_time_submit(struct spi_slave_time_priv *priv) { u32 rem_us; int ret; u64 ts; ts = local_clock(); rem_us = do_div(ts, 1000000000) / 1000; priv->buf[0] = cpu_to_be32(ts); priv->buf[1] = cpu_to_be32(rem_us); spi_message_init_with_transfers(&priv->msg, &priv->xfer, 1); priv->msg.complete = spi_slave_time_complete; priv->msg.context = priv; ret = spi_async(priv->spi, &priv->msg); if (ret) dev_err(&priv->spi->dev, "spi_async() failed %d\n", ret); return ret; }
/*
 * Completion handler after reading TRX_STATE on frame reception:
 * optionally account the TRAC result for debugfs statistics, then
 * start the asynchronous frame-buffer read.
 */
static void at86rf230_rx_trac_check(void *context)
{
	struct at86rf230_state_change *ctx = context;
	struct at86rf230_local *lp = ctx->lp;
	u8 *buf = ctx->buf;
	int rc;

	if (IS_ENABLED(CONFIG_IEEE802154_AT86RF230_DEBUGFS)) {
		u8 trac = TRAC_MASK(buf[1]);

		switch (trac) {
		case TRAC_SUCCESS:
			lp->trac.success++;
			break;
		case TRAC_SUCCESS_WAIT_FOR_ACK:
			lp->trac.success_wait_for_ack++;
			break;
		case TRAC_INVALID:
			lp->trac.invalid++;
			break;
		default:
			WARN_ONCE(1, "received rx trac status %d\n", trac);
			break;
		}
	}

	buf[0] = CMD_FB;
	ctx->trx.len = AT86RF2XX_MAX_BUF;
	ctx->msg.complete = at86rf230_rx_read_frame_complete;
	rc = spi_async(lp->spi, &ctx->msg);
	if (rc) {
		/* Restore the command-sized transfer length on failure. */
		ctx->trx.len = 2;
		at86rf230_async_error(lp, ctx, rc);
	}
}
/*
 * Submit @message on spidev's SPI bus and wait for it to finish.
 *
 * The spi pointer is sampled under spi_lock so a concurrent driver
 * unbind (which clears spidev->spi) cannot race with the submission.
 * Returns the transferred byte count on success, or a negative errno
 * (-ESHUTDOWN if the device is already gone).
 */
static ssize_t
spidev_sync(struct spidev_data *spidev, struct spi_message *message)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int status;

	message->complete = spidev_complete;
	message->context = &done;

	spin_lock_irq(&spidev->spi_lock);
	if (spidev->spi == NULL)
		status = -ESHUTDOWN;
	else
		status = spi_async(spidev->spi, message);
	spin_unlock_irq(&spidev->spi_lock);

	if (status == 0) {
		/* spidev_complete() signals this completion. */
		wait_for_completion(&done);
		status = message->status;
		if (status == 0)
			status = message->actual_length;
	}

	return status;
}
static ssize_t adg739_write(struct file *filp, const char __user *buf, size_t count, loff_t *fpos) { char buf_term[NUM_MULTIPLEXER]; //буфер, куда копируются сообщения из пользовательского пространства, и где они проходят предварительное форматирование int status = 0; int i =0; struct spi_transfer t = { //формируется передача .tx_buf = adg739_status->buffer, .len = NUM_MULTIPLEXER * 2, }; struct spi_message m; // сообщение DECLARE_COMPLETION_ONSTACK(done); //объявляется и инициализуется условная переменная //проверка на достоверность переданного буфера if (count > NUM_MULTIPLEXER) return (-EMSGSIZE); if (copy_from_user(buf_term, buf, count)) return (-EFAULT); for (i=0; i<count; i++) { switch(buf_term[i]) { case 's': buf_term[i] = 0x11; break; case 'v': buf_term[i] = 0x82; break; case 'g': buf_term[i] = 0x88; break; default: return (-EINVAL); } } //передача сообщения драйверу контроллера mutex_lock(&device_lockk); for (i=0; i<count; i++) { adg739_status->buffer[i]= buf_term[i]; adg739_status->buffer[i+4]= buf_term[i]; } spi_message_init(&m); //инициализация сообщения spi_message_add_tail(&t, &m); //постановка передачи в очередь сообщения m.complete = adg739_complete; m.context = &done; if (adg739_status->spi == NULL) status = -ESHUTDOWN; else { status = spi_async(adg739_status->spi, &m); //передача сообщения printk(KERN_INFO "Status function spi_async = %d\n", status); } if (status == 0) { wait_for_completion(&done); //ожидание обработки сообщения контроллером spi status = m.status; printk(KERN_INFO "Status message = %d\n", status); if (status == 0) status = m.actual_length/2; } mutex_unlock(&device_lockk); return (status); } //ФУНКЦИИ СТРУКТУРЫ SPI_DRIVER static int __devinit adg739_probe(struct spi_device *spi) { int status, dev; //регистрация устройства dev =device_create(devclass, &spi->dev, dev_adg739, NULL, MULTIPLEXER_NAME); //создание устройства status = IS_ERR(dev) ? 
PTR_ERR(dev) : 0; if(status != 0) { printk(KERN_ERR "The device_create function failed\n"); return (status); } //инициализация членов структуры состояния драйвера mutex_lock(&device_lockk); adg739_status->users = 0; adg739_status->spi = spi; spi->bits_per_word = 16; spi->max_speed_hz = 700000; spin_lock_init(&adg739_status->spi_lock); memset(adg739_status->buffer, 0, sizeof(adg739_status->buffer)); spi_set_drvdata(spi, adg739_status); //присваевает указателю spi->dev->driver_data значение adg739_status mutex_unlock(&device_lockk); return (0); }
/*************************************************************
 *First message:
 *	write cmd | ADDR_H |ADDR_L |
 *	  1B      |   1B   |  1B   |
 *Second message:
 *	read cmd  | data stream  |
 *	  1B      |    length    |
 *
 * read buffer length should be 1 + 1 + 1 + 1 + data_length
 **************************************************************/
/*
 * Read @data_len bytes from fingerprint-sensor address @addr into
 * rx_buf (payload lands at rx_buf[5..]; rx_buf also stages the
 * command bytes).  Returns 0 on success or a negative errno.
 */
int gf_spi_read_bytes(struct gf_dev *gf_dev, u16 addr, u32 data_len, u8 *rx_buf)
{
#ifdef SPI_ASYNC
	DECLARE_COMPLETION_ONSTACK(write_done);
#endif
	struct spi_message msg;
	struct spi_transfer *xfer = NULL;
	int ret = 0;

	xfer = kzalloc(sizeof(*xfer) << 1, GFP_KERNEL);/* two messages */
	if (xfer == NULL) {
		GF_LOG_ERROR("failed to kzalloc for command\n");
		return -ENOMEM;
	}

	/* send gf command to device: write opcode + 16-bit address */
	spi_message_init(&msg);
	rx_buf[0] = GF_W;
	rx_buf[1] = (u8)((addr >> 8) & 0xFF);
	rx_buf[2] = (u8)(addr & 0xFF);
	xfer[0].tx_buf = rx_buf;
	xfer[0].len = 3;
	xfer[0].delay_usecs = 5;
	spi_message_add_tail(&xfer[0], &msg);

	/*if wanted to read data from gf.
	 *Should write Read command to device
	 *before read any data from device.
	 */
	ret = spi_sync(gf_dev->spi, &msg);
	if (ret) {
		GF_LOG_ERROR("failed to spi_sync = %d\n", ret);
		goto err_gf_w;
	}

	/* Second message: read opcode, then full-duplex data phase. */
	spi_message_init(&msg);
	rx_buf[4] = GF_R;
	xfer[1].tx_buf = &rx_buf[4];
	xfer[1].rx_buf = &rx_buf[4];
	xfer[1].len = data_len + 1;
	xfer[1].delay_usecs = 5;
	spi_message_add_tail(&xfer[1], &msg);

#ifdef SPI_ASYNC
	msg.complete = gf_spi_complete;
	msg.context = &write_done;

	/* spi_lock serializes submission against device teardown. */
	spin_lock_irq(&gf_dev->spi_lock);
	ret = spi_async(gf_dev->spi, &msg);
	spin_unlock_irq(&gf_dev->spi_lock);
	if (ret == 0)
		wait_for_completion(&write_done);
	else
		GF_LOG_ERROR("ret = %d\n", ret);
#else
	ret = spi_sync(gf_dev->spi, &msg);
#endif

err_gf_w:
	/* kfree(NULL) is a no-op; the NULL guard was redundant. */
	kfree(xfer);
	return ret;
}
/*
 * Synchronously transmit @len bytes from @addr.
 * With CHANGE_ENDIAN defined, the first 8 bytes are byte-swapped per
 * 32-bit word into a local buffer before sending.
 */
int ssp_spi_write_sync(struct spi_device *spi, const u8 *addr, const int len)
{
	int ret;
#if defined(CHANGE_ENDIAN)
	u8 buf[8] = {0};
#endif
	struct spi_message msg;
	struct spi_transfer xfer = {
		.len = len,
#if !defined(CHANGE_ENDIAN)
		.tx_buf = addr,
		/*QCTK ALRAN QUP_CONFIG 0-4 bits BIG ENDIAN*/
		.bits_per_word = 8,
#else
		.tx_buf = buf,
#endif
	};

#if defined(CHANGE_ENDIAN)
	/* Swap byte order within each 32-bit word.
	 * NOTE(review): only 8 bytes are swapped - confirm callers never
	 * pass len > 8 in CHANGE_ENDIAN builds. */
	buf[0] = addr[3];
	buf[1] = addr[2];
	buf[2] = addr[1];
	buf[3] = addr[0];

	buf[4] = addr[7];
	buf[5] = addr[6];
	buf[6] = addr[5];
	buf[7] = addr[4];
#endif
	spi_message_init(&msg);
	spi_message_add_tail(&xfer, &msg);
	ret = spi_sync(spi, &msg);

	if (ret < 0)
		ssp_log("error %d\n", ret);

	return ret;
}

/*
 * Synchronous full-duplex read of @len bytes into @in_buf.
 * NOTE(review): tx_buf points at a 2-byte stack array while the
 * transfer length is @len - confirm callers never pass len > 2 or
 * that the controller tolerates the short TX buffer.
 */
int ssp_spi_read_sync(struct spi_device *spi, u8 *in_buf, size_t len)
{
	int ret;
	u8 read_out_buf[2];
	struct spi_message msg;
	struct spi_transfer xfer = {
		.tx_buf = read_out_buf,
		.rx_buf = in_buf,
		.len = len,
		.cs_change = 0,
	};

	spi_message_init(&msg);
	spi_message_add_tail(&xfer, &msg);
	ret = spi_sync(spi, &msg);

	if (ret < 0)
		ssp_log("%s - error %d\n", __func__, ret);

	return ret;
}

/*
 * Synchronous full-duplex transfer: clock out @out_len bytes from
 * @out_buf while receiving the same number into @in_buf.
 */
int ssp_spi_sync(struct spi_device *spi, u8 *out_buf, size_t out_len, u8 *in_buf)
{
	int ret;
	struct spi_message msg;
	struct spi_transfer xfer = {
		.tx_buf = out_buf,
		.rx_buf = in_buf,
		.len = out_len,
		.cs_change = 0,
	};

	spi_message_init(&msg);
	spi_message_add_tail(&xfer, &msg);
	ret = spi_sync(spi, &msg);

	ssp_log("%s - received %d\n", __func__, xfer.len);

	if (ret < 0)
		ssp_log("%s - error %d\n", __func__, ret);

	return ret;
}

/* Set by ssp_spi_async_complete() when an async transfer finishes.
 * NOTE(review): plain non-atomic global polled across contexts, with
 * no memory barrier - confirm the callers' synchronization. */
unsigned int g_flag_spirecv;

/* Completion callback for ssp_spi_async(). */
void ssp_spi_async_complete(void *context)
{
	g_flag_spirecv = 1;
}

/*
 * Queue an asynchronous full-duplex transfer and return immediately
 * with the spi_async() status; completion is reported solely through
 * g_flag_spirecv.
 * NOTE(review): msg and xfer live on this function's stack but the
 * transfer completes after return - this is a use-after-return hazard
 * unless the controller finishes the message within spi_async().
 */
int ssp_spi_async(struct spi_device *spi, u8 *out_buf, size_t out_len, u8 *in_buf)
{
	int ret;
	struct spi_message msg;
	struct spi_transfer xfer = {
		.tx_buf = out_buf,
		.rx_buf = in_buf,
		.len = out_len,
		.cs_change = 0,
	};

	spi_message_init(&msg);
	spi_message_add_tail(&xfer, &msg);
	msg.complete = ssp_spi_async_complete;
	ret = spi_async(spi, &msg);

	if (ret < 0)
		ssp_log("%s - error %d\n", __func__, ret);

	return ret;
}

/*
 * Read @len bytes in @rxSize chunks, then byte-swap each 32-bit word
 * of the result in place.  Returns 0 on success, -EINVAL on any chunk
 * failure.
 * NOTE(review): the swap-loop bound (k < len-3) underflows when
 * len < 3 because len is size_t - confirm callers always pass
 * len >= 4.
 */
int
ssp_spi_read(struct spi_device *spi, u8 *buf, size_t len, const int rxSize)
{
	int k;
	int ret = 0;
	u8 temp_buf[4] = {0};
	u32 count = len/rxSize;
	u32 extra = len%rxSize;

	for (k = 0; k < count; k++) {
		ret = ssp_spi_read_sync(spi, &buf[rxSize*k], rxSize);
		if (ret < 0) {
			ssp_log("%s - error %d\n", __func__, ret);
			return -EINVAL;
		}
	}
	if (extra != 0) {
		ret = ssp_spi_read_sync(spi, &buf[rxSize*k], extra);
		if (ret < 0) {
			ssp_log("%s - error %d\n", __func__, ret);
			return -EINVAL;
		}
	}

	/* Reverse byte order within each 4-byte word. */
	for (k = 0; k < len-3; k += 4) {
		memcpy(temp_buf, (char *)&buf[k], sizeof(temp_buf));
		buf[k] = temp_buf[3];
		buf[k+1] = temp_buf[2];
		buf[k+2] = temp_buf[1];
		buf[k+3] = temp_buf[0];
	}

	return 0;
}
/*
 * Queue @numTransfers transfers as one SPI message on the selected
 * bus/slave.  Returns SPI_STATUS_OK or SPI_STATUS_ERR.
 *
 * Despite the name, spi_async() is used deliberately: on this
 * controller the transfer is complete when spi_async() returns (see
 * comment below), so the call is effectively synchronous.
 */
int BcmSpiSyncMultTrans(struct spi_transfer *pSpiTransfer, int numTransfers, int busNum, int slaveId)
{
	struct spi_message message;
	int status;
	struct spi_device *pSpiDevice;
	int i;

	/* Validate slave id and bus number. */
	if ( (slaveId > 7) || (busNum > 1) )
	{
		printk(KERN_ERR "ERROR BcmSpiSyncTrans: invalid slave id (%d) or busNum (%d)\n", slaveId, busNum);
		return SPI_STATUS_ERR;
	}

	/* Resolve the spi_device for the requested bus; each bus is only
	 * available when its support is compiled in. */
	if ( LEG_SPI_BUS_NUM == busNum )
	{
#ifndef SPI
		return( SPI_STATUS_ERR );
#else
		if ( NULL == bcmLegSpiDevices[slaveId] )
		{
			printk(KERN_ERR "ERROR BcmSpiSyncTrans: device not registered\n");
			return SPI_STATUS_ERR;
		}
		pSpiDevice = bcmLegSpiDevices[slaveId];
#endif
	}
	else if ( HS_SPI_BUS_NUM == busNum )
	{
#ifndef HS_SPI
		return( SPI_STATUS_ERR );
#else
		if ( NULL == bcmHSSpiDevices[slaveId] )
		{
			printk(KERN_ERR "ERROR BcmSpiSyncTrans: device not registered\n");
			return SPI_STATUS_ERR;
		}
		pSpiDevice = bcmHSSpiDevices[slaveId];
#endif
	}
	else
		return( SPI_STATUS_ERR );

	spi_message_init(&message);
	for ( i = 0; i < numTransfers; i++ )
	{
		spi_message_add_tail(&pSpiTransfer[i], &message);
	}

	/* the controller does not support asynchronous transfer
	   when spi_async returns the transfer will be complete
	   don't use spi_sync to avoid the call to schedule */
	status = spi_async(pSpiDevice, &message);
	if (status >= 0)
	{
		status = SPI_STATUS_OK;
	}
	else
	{
		status = SPI_STATUS_ERR;
	}

	return( status );
}
/*
 * Run a single prepend+data transfer on the selected bus/slave and
 * return SPI_STATUS_OK or SPI_STATUS_ERR.
 *
 * As in BcmSpiSyncMultTrans(), spi_async() is used deliberately: the
 * controller completes the transfer before spi_async() returns.
 * Note busNum is not range-checked here; unrecognised values fall
 * through to the final else and fail.
 */
int BcmSpiSyncTrans(unsigned char *txBuf, unsigned char *rxBuf, int prependcnt, int nbytes, int busNum, int slaveId)
{
	struct spi_message message;
	struct spi_transfer xfer;
	int status;
	struct spi_device *pSpiDevice;

	if ( slaveId > 7 )
	{
		printk(KERN_ERR "ERROR BcmSpiSyncTrans: invalid slave id %d\n", slaveId);
		return SPI_STATUS_ERR;
	}

	/* Resolve the spi_device for the requested bus; each bus is only
	 * available when its support is compiled in. */
	if ( LEG_SPI_BUS_NUM == busNum )
	{
#ifndef SPI
		return( SPI_STATUS_ERR );
#else
		if ( NULL == bcmLegSpiDevices[slaveId] )
		{
			printk(KERN_ERR "ERROR BcmSpiSyncTrans: device not registered\n");
			return SPI_STATUS_ERR;
		}
		pSpiDevice = bcmLegSpiDevices[slaveId];
#endif
	}
	else if ( HS_SPI_BUS_NUM == busNum )
	{
#ifndef HS_SPI
		return( SPI_STATUS_ERR );
#else
		if ( NULL == bcmHSSpiDevices[slaveId] )
		{
			printk(KERN_ERR "ERROR BcmSpiSyncTrans: device not registered\n");
			return SPI_STATUS_ERR;
		}
		pSpiDevice = bcmHSSpiDevices[slaveId];
#endif
	}
	else
		return( SPI_STATUS_ERR );

	spi_message_init(&message);
	memset(&xfer, 0, (sizeof xfer));
	xfer.prepend_cnt = prependcnt;
	xfer.len = nbytes;
	xfer.speed_hz = pSpiDevice->max_speed_hz;
	xfer.rx_buf = rxBuf;
	xfer.tx_buf = txBuf;

	spi_message_add_tail(&xfer, &message);

	/* the controller does not support asynchronous transfer
	   when spi_async returns the transfer will be complete
	   don't use spi_sync to avoid the call to schedule */
	status = spi_async(pSpiDevice, &message);
	if (status >= 0)
	{
		status = SPI_STATUS_OK;
	}
	else
	{
		status = SPI_STATUS_ERR;
	}

	return( status );
}
/*
 * Stream a DFW firmware image to the WM0010 DSP over SPI.
 *
 * Parses the leading INFO record, then queues one asynchronous SPI
 * transfer per remaining record.  Transfers are tracked on xfer_list
 * so both buffers of every (possibly partially built) entry can be
 * freed on the cleanup path.  The last scheduled transfer signals
 * @done, which this function waits on before returning.
 * Returns 0 on success or a negative errno.
 */
static int wm0010_firmware_load(const char *name, struct snd_soc_codec *codec)
{
	struct spi_device *spi = to_spi_device(codec->dev);
	struct wm0010_priv *wm0010 = snd_soc_codec_get_drvdata(codec);
	struct list_head xfer_list;
	struct wm0010_boot_xfer *xfer;
	int ret;
	struct completion done;
	const struct firmware *fw;
	const struct dfw_binrec *rec;
	const struct dfw_inforec *inforec;
	u64 *img;
	u8 *out, dsp;
	u32 len, offset;

	INIT_LIST_HEAD(&xfer_list);

	ret = request_firmware(&fw, name, codec->dev);
	if (ret != 0) {
		dev_err(codec->dev, "Failed to request application(%s): %d\n",
			name, ret);
		return ret;
	}

	rec = (const struct dfw_binrec *)fw->data;
	inforec = (const struct dfw_inforec *)rec->data;
	offset = 0;
	dsp = inforec->dsp_target;
	wm0010->boot_failed = false;
	/* NOTE(review): this early return skips release_firmware() and
	 * would leak fw - though a freshly initialised list can never
	 * be non-empty here. */
	if (WARN_ON(!list_empty(&xfer_list)))
		return -EINVAL;
	init_completion(&done);

	/* First record should be INFO */
	if (rec->command != DFW_CMD_INFO) {
		dev_err(codec->dev, "First record not INFO\r\n");
		ret = -EINVAL;
		goto abort;
	}

	if (inforec->info_version != INFO_VERSION) {
		dev_err(codec->dev,
			"Unsupported version (%02d) of INFO record\r\n",
			inforec->info_version);
		ret = -EINVAL;
		goto abort;
	}

	dev_dbg(codec->dev, "Version v%02d INFO record found\r\n",
		inforec->info_version);

	/* Check it's a DSP file */
	if (dsp != DEVICE_ID_WM0010) {
		dev_err(codec->dev, "Not a WM0010 firmware file.\r\n");
		ret = -EINVAL;
		goto abort;
	}

	/* Skip the info record as we don't need to send it */
	offset += ((rec->length) + 8);
	rec = (void *)&rec->data[rec->length];

	while (offset < fw->size) {
		dev_dbg(codec->dev,
			"Packet: command %d, data length = 0x%x\r\n",
			rec->command, rec->length);
		len = rec->length + 8;

		xfer = kzalloc(sizeof(*xfer), GFP_KERNEL);
		if (!xfer) {
			ret = -ENOMEM;
			goto abort;
		}

		xfer->codec = codec;
		/* Added to the list before the buffers are allocated so
		 * the abort1 path frees partially built entries too. */
		list_add_tail(&xfer->list, &xfer_list);

		out = kzalloc(len, GFP_KERNEL | GFP_DMA);
		if (!out) {
			ret = -ENOMEM;
			goto abort1;
		}
		xfer->t.rx_buf = out;

		img = kzalloc(len, GFP_KERNEL | GFP_DMA);
		if (!img) {
			ret = -ENOMEM;
			goto abort1;
		}
		xfer->t.tx_buf = img;

		byte_swap_64((u64 *)&rec->command, img, len);

		spi_message_init(&xfer->m);
		xfer->m.complete = wm0010_boot_xfer_complete;
		xfer->m.context = xfer;
		xfer->t.len = len;
		xfer->t.bits_per_word = 8;

		if (!wm0010->pll_running) {
			xfer->t.speed_hz = wm0010->sysclk / 6;
		} else {
			xfer->t.speed_hz = wm0010->max_spi_freq;

			if (wm0010->board_max_spi_speed &&
			    (wm0010->board_max_spi_speed < wm0010->max_spi_freq))
				xfer->t.speed_hz = wm0010->board_max_spi_speed;
		}

		/* Store max usable spi frequency for later use */
		wm0010->max_spi_freq = xfer->t.speed_hz;

		spi_message_add_tail(&xfer->t, &xfer->m);

		offset += ((rec->length) + 8);
		rec = (void *)&rec->data[rec->length];

		if (offset >= fw->size) {
			dev_dbg(codec->dev, "All transfers scheduled\n");
			xfer->done = &done;
		}

		ret = spi_async(spi, &xfer->m);
		if (ret != 0) {
			dev_err(codec->dev, "Write failed: %d\n", ret);
			goto abort1;
		}

		if (wm0010->boot_failed) {
			dev_dbg(codec->dev, "Boot fail!\n");
			ret = -EINVAL;
			goto abort1;
		}
	}

	/* The last scheduled transfer signals @done on completion. */
	wait_for_completion(&done);

	ret = 0;

abort1:
	while (!list_empty(&xfer_list)) {
		xfer = list_first_entry(&xfer_list, struct wm0010_boot_xfer,
					list);
		kfree(xfer->t.rx_buf);
		kfree(xfer->t.tx_buf);
		list_del(&xfer->list);
		kfree(xfer);
	}

abort:
	release_firmware(fw);
	return ret;
}
/*
 * This function is called by the SPI framework after the coordinates
 * have been read from TSC2005
 */
static void tsc2005_ts_rx(void *arg)
{
	struct tsc2005 *ts = arg;
	unsigned long flags;
	int inside_rect, pressure_limit;
	int x, y, z1, z2, pressure;

	spin_lock_irqsave(&ts->lock, flags);

	/* Touchscreen disabled while the read was in flight. */
	if (ts->disable_depth) {
		ts->spi_pending = 0;
		goto out;
	}

	x = ts->data[0];
	y = ts->data[1];
	z1 = ts->data[2];
	z2 = ts->data[3];

	/* validate pressure and position */
	if (x > MAX_12BIT || y > MAX_12BIT)
		goto out;

	/* skip coords if the pressure-components are out of range */
	if (z1 < 100 || z2 > MAX_12BIT || z1 >= z2)
		goto out;

	/* skip point if this is a pen down with the exact same values as
	 * the value before pen-up - that implies SPI fed us stale data
	 */
	if (!ts->pen_down &&
	    ts->in_x == x && ts->in_y == y &&
	    ts->in_z1 == z1 && ts->in_z2 == z2)
		goto out;

	/* At this point we are happy we have a valid and useful reading.
	 * Remember it for later comparisons. We may now begin downsampling
	 */
	ts->in_x = x;
	ts->in_y = y;
	ts->in_z1 = z1;
	ts->in_z2 = z2;

	/* don't run average on the "pen down" event */
	if (ts->sample_sent) {
		ts->avg_x += x;
		ts->avg_y += y;
		ts->avg_z1 += z1;
		ts->avg_z2 += z2;

		if (++ts->sample_cnt < TS_SAMPLES)
			goto out;

		x = ts->avg_x / TS_SAMPLES;
		y = ts->avg_y / TS_SAMPLES;
		z1 = ts->avg_z1 / TS_SAMPLES;
		z2 = ts->avg_z2 / TS_SAMPLES;
	}

	/* Reset averaging state for the next window. */
	ts->sample_cnt = 0;
	ts->avg_x = 0;
	ts->avg_y = 0;
	ts->avg_z1 = 0;
	ts->avg_z2 = 0;

	/* Resistive-panel pressure estimate from the two Z readings,
	 * scaled by the X-plate resistance. */
	pressure = x * (z2 - z1) / z1;
	pressure = pressure * ts->x_plate_ohm / 4096;

	pressure_limit = ts->sample_sent? ts->p_max: ts->touch_pressure;
	if (pressure > pressure_limit)
		goto out;

	/* Discard the event if it still is within the previous rect -
	 * unless the pressure is clearly harder, but then use previous
	 * x,y position. If any coordinate deviates enough, fudging
	 * of all three will still take place in the input layer.
	 */
	inside_rect = (ts->sample_sent &&
		x > (int)ts->out_x - ts->fudge_x &&
		x < (int)ts->out_x + ts->fudge_x &&
		y > (int)ts->out_y - ts->fudge_y &&
		y < (int)ts->out_y + ts->fudge_y);
	if (inside_rect)
		x = ts->out_x, y = ts->out_y;

	if (!inside_rect || pressure < (ts->out_p - ts->fudge_p)) {
		tsc2005_ts_update_pen_state(ts, x, y, pressure);
		ts->sample_sent = 1;
		ts->out_x = x;
		ts->out_y = y;
		ts->out_p = pressure;
	}
out:
	if (ts->spi_pending > 1) {
		/* One or more interrupts (sometimes several dozens)
		 * occured while waiting for the SPI read - get
		 * another read going.
		 */
		ts->spi_pending = 1;
		if (spi_async(ts->spi, &ts->read_msg)) {
			dev_err(&ts->spi->dev, "ts: spi_async() failed");
			ts->spi_pending = 0;
		}
	} else
		ts->spi_pending = 0;

	/* kick pen up timer - to make sure it expires again(!) */
	if (ts->sample_sent) {
		mod_timer(&ts->penup_timer,
			  jiffies + msecs_to_jiffies(TSC2005_TS_PENUP_TIME));
		/* Also kick the watchdog, as we still think we're alive */
		if (ts->esd_timeout && ts->disable_depth == 0) {
			unsigned long wdj = msecs_to_jiffies(ts->esd_timeout);

			mod_timer(&ts->esd_timer, round_jiffies(jiffies+wdj));
		}
	}
	spin_unlock_irqrestore(&ts->lock, flags);
}