int mh1_fetch_shading_tbl(struct spich_data *spich, uint8_t *table, int size)
{
	int error = 0;
	struct spi_message msg;

	printk("%s : Enter!! \n", __func__);

	if (!error) {

		struct spi_transfer cmd2 = {	//send firmware
			.cs_change = 1,
			.delay_usecs = 0,
			.speed_hz = (u32)spich->spi_freq_mhz,
			.tx_buf = table,
			.rx_buf = NULL,
			.len	= (int)size,
			.tx_dma = 0,
			.rx_dma = 0,
			.bits_per_word = 0,
		};

		mutex_lock(&spich->buf_lock);

		mh1_spi_write_set(spich);

		spi_message_init(&msg);
		spi_message_add_tail(&cmd2,  &msg);
		error = spi_sync(spich->spi, &msg);
		if (error)
			dev_err(&spich->spi->dev, "spi_sync failed.\n");

		mutex_unlock(&spich->buf_lock);

	}

	printk("%s done\n", __func__);

	return error;

}

int mh1_fetch_image_from_sd(struct spich_data *spich)
{
	int error = 0;
	struct spi_message msg;
	struct file *fp = NULL;

	mm_segment_t old_fs = get_fs();

	long fsize, nread;
	uint8_t *buf_mh1 = NULL;

	pr_err("%s : Enter!! \n", __func__);

	set_fs(KERNEL_DS);

	fp = filp_open("/system/media/RS_MH1.BIN", O_RDONLY, 0);
	if (IS_ERR(fp)) {
		pr_err("failed to open %s, err %ld. load from kernel/firmware\n",
			"/system/media/RS_MH1.BIN", PTR_ERR(fp));

		error = mh1_fetch_image(mh1_spich);
		if (error)
			pr_err("%s : load failed!!\n", __func__);
		else
			pr_err("%s : load success from kernel/firmware!!\n", __func__);
		goto out;	/* fall through so old_fs is restored */
	}

	fsize = fp->f_path.dentry->d_inode->i_size;

	buf_mh1 = kmalloc(fsize, GFP_KERNEL);
	if (!buf_mh1) {
		pr_err("failed to allocate memory\n");
		error = -ENOMEM;
		goto out;
	}

	nread = vfs_read(fp, (char __user *)buf_mh1, fsize, &fp->f_pos);
	if (nread != fsize) {
		pr_err("failed to read firmware file, %ld Bytes\n", nread);
		error = -EIO;
		goto out;
	}

	filp_close(fp, current->files);
	fp = NULL;	/* closed here; skip the close in the exit path */

	if (!error) {

		struct spi_transfer cmd2 = {	//send firmware
			.cs_change = 1,
			.delay_usecs = 0,
			.speed_hz = (u32)spich->spi_freq_mhz,
			.tx_buf = buf_mh1,
			.rx_buf = NULL,
			.len	= fsize,
			.tx_dma = 0,
			.rx_dma = 0,
			.bits_per_word = 0,
		};

		mutex_lock(&spich->buf_lock);

		mh1_spi_write_set(spich);

		spi_message_init(&msg);
		spi_message_add_tail(&cmd2,  &msg);
		error = spi_sync(spich->spi, &msg);
		if (error)
			dev_err(&spich->spi->dev, "spi_sync failed.\n");

		mutex_unlock(&spich->buf_lock);

	}

	pr_err("%s:mh1_fetch_image done\n", __func__);

out:
	kfree(buf_mh1);

	if (fp && !IS_ERR(fp))
		filp_close(fp, current->files);

	set_fs(old_fs);

	pr_err("X");

	return error;

}

int mh1_fetch_image(struct spich_data *spich)
{
	int error = 0;
	struct spi_message msg;
	const struct firmware *fw_entry=NULL;

//	char tx_buffer[8] = {0x4, 0x7, 0x74, 0xe0, 0x1, 0x0, 0x0, 0x0}; // send firmware
//	char rx_buffer[1] = {0};

	if (strlen(mh1_inbuilt_fw_name_list) == 0) {
		printk("%s: no inbuilt firmware name configured\n", __func__);
		return -EINVAL;
	}

	error = request_firmware(&fw_entry,
			mh1_inbuilt_fw_name_list,
			&spich->spi->dev);
	if (error != 0) {
		printk("%s: Firmware image %s not available\n", __func__,
				mh1_inbuilt_fw_name_list);
		return error;
	}

	printk("MH1 Firmware image size = %zu\n", fw_entry->size);

	if (!error) {
/*
		struct spi_transfer cmd1 = {	//send firmware
			.cs_change = 1,
			.delay_usecs = 0,
			.speed_hz = (u32)spich->spi_freq_mhz,
			.tx_buf = tx_buffer,
			.rx_buf = NULL,
			.len    = 8,
			.tx_dma = 0,
			.rx_dma = 0,
			.bits_per_word = 0,
		};
*/
		struct spi_transfer cmd2 = {	//send firmware
			.cs_change = 1,
			.delay_usecs = 0,
			.speed_hz = (u32)spich->spi_freq_mhz,
			.tx_buf = (u8 *)fw_entry->data,
			.rx_buf = NULL,
			.len	= fw_entry->size,
			.tx_dma = 0,
			.rx_dma = 0,
			.bits_per_word = 0,
		};

/*
		struct spi_transfer data = {
			.cs_change = 1,
			.delay_usecs = 0,
			.speed_hz = (u32)spich->spi_freq_mhz,
			.tx_buf = NULL,
			.rx_buf = rx_buffer,
			.len    = 1,
			.tx_dma = 0,
			.rx_dma = 0,
			.bits_per_word = 0,
		};
*/
		mutex_lock(&spich->buf_lock);

//Send Firmware
/*
		mh1_spi_write_set(spich);

		spi_message_init(&msg);
		spi_message_add_tail(&cmd1,  &msg);

		error = spi_sync(spich->spi, &msg);
		if (error)
			dev_err(&spich->spi->dev, "spi_sync failed.\n");

		mh1_spi_read_set(spich);

		spi_message_init(&msg);
		spi_message_add_tail(&data,  &msg);
		error = spi_sync(spich->spi, &msg);
		if (error)
			dev_err(&spich->spi->dev, "spi_sync failed.\n");

		printk("MH1 rx_buffer = %d\n", rx_buffer[0]);
*/
// Send Firmware
		mh1_spi_write_set(spich);

		spi_message_init(&msg);
		spi_message_add_tail(&cmd2,  &msg);
		error = spi_sync(spich->spi, &msg);
		if (error)
			dev_err(&spich->spi->dev, "spi_sync failed.\n");

		mutex_unlock(&spich->buf_lock);

	}

	pr_err("%s:mh1_fetch_image done\n", __func__);

	if (fw_entry)
		release_firmware(fw_entry);

	return error;
}
#endif

void Spi_Cs_Configuration(int number)
{
	if(number==1){ //cs3, TDMB
		gpio_set_value(spich_p->gpio_array[GPIO_IDX_CS0].gpio,0);
	#if defined (CONFIG_MACH_MSM8992_PPLUS_KR)
		gpio_set_value(spich_p->gpio_array[GPIO_IDX_CS3].gpio,0);
	#endif
		gpio_set_value(spich_p->gpio_array[GPIO_IDX_CS2].gpio,1);
	}
	else if(number==2){ //cs2, STM
		gpio_set_value(spich_p->gpio_array[GPIO_IDX_CS0].gpio,0);
		gpio_set_value(spich_p->gpio_array[GPIO_IDX_CS2].gpio,0);
	#if defined (CONFIG_MACH_MSM8992_PPLUS_KR)
		gpio_set_value(spich_p->gpio_array[GPIO_IDX_CS3].gpio,1);
	#endif
	}
	else if(number==3){ //cs0, MH1
		gpio_set_value(spich_p->gpio_array[GPIO_IDX_CS0].gpio,1);
		gpio_set_value(spich_p->gpio_array[GPIO_IDX_CS2].gpio,1);
	#if defined (CONFIG_MACH_MSM8992_PPLUS_KR)
		gpio_set_value(spich_p->gpio_array[GPIO_IDX_CS3].gpio,1);
	#endif
	}
	else if(number==4){ //default state
		gpio_set_value(spich_p->gpio_array[GPIO_IDX_CS0].gpio,0);
		gpio_set_value(spich_p->gpio_array[GPIO_IDX_CS2].gpio,1);
	#if defined (CONFIG_MACH_MSM8992_PPLUS_KR)
		gpio_set_value(spich_p->gpio_array[GPIO_IDX_CS3].gpio,1);
	#endif
	}
}

EXPORT_SYMBOL(Spi_Cs_Configuration);




struct spich_data *spich_p;
const char *stm_inbuilt_fw_name_list;

void spi_transfer(u8 *tx_buf,u8 *rx_buf,int size)
{

	int error = 0;
	struct spi_message msg;
	struct spi_transfer cmd2 = {	/* generic full-duplex transfer */
		.cs_change = 1,
		.delay_usecs = 10,
		.speed_hz = 5*1024*1024,
		.tx_buf = tx_buf,
		.rx_buf = rx_buf,
		.len	= size,
		.tx_dma = 0,
		.rx_dma = 0,
		.bits_per_word = 8,
	};
//	mutex_lock(&spich_p->buf_lock);
	spi_message_init(&msg);
	spi_message_add_tail(&cmd2,  &msg);
	error = spi_sync(spich_p->spi, &msg);
	if (error)
		dev_err(&spich_p->spi->dev, "spi_sync failed.\n");

//	mutex_unlock(&spich_p->buf_lock);

}
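
/*
 * Usage sketch (assumption, not from the original driver): route the CS
 * GPIOs to the MH1 device, run one full-duplex transfer, then restore the
 * default CS state. The command bytes below are hypothetical placeholders.
 */
static void __maybe_unused example_mh1_raw_transfer(void)
{
	u8 tx[4] = { 0x01, 0x02, 0x03, 0x04 };	/* hypothetical command bytes */
	u8 rx[4] = { 0 };

	Spi_Cs_Configuration(3);	/* 3 routes the CS lines to MH1 (see above) */
	spi_transfer(tx, rx, sizeof(tx));
	Spi_Cs_Configuration(4);	/* 4 restores the default CS state */
}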


void StmResetHub(uint8_t tmp)
{
	int gpio_state[5] = {0,};

	printk("STM %s START status : %d\n", __func__, tmp);

	if (gpio_state[0] == 0) {
		gpio_set_value(spich_p->gpio_array[GPIO_IDX_LDOEN].gpio, 1);
		mdelay(spich_p->pre_reset_delay);
	}

	if (tmp == STM_SYSTEM) {
		gpio_set_value(spich_p->gpio_array[GPIO_IDX_BOOT0].gpio, 1);
	} else if (tmp == STM_RESET) {
		gpio_set_value(spich_p->gpio_array[GPIO_IDX_BOOT0].gpio, 0);
	} else if (tmp == STM_SHUTDOWN) {
		if (spich_p->pre_reset_delay == 0)
			gpio_set_value(spich_p->gpio_array[GPIO_IDX_LDOEN].gpio, 0);
		return;
	}

	mdelay(spich_p->pre_reset_delay * 50);	/* if under rev.A reset_delay is 20ms */
	gpio_set_value(spich_p->gpio_array[GPIO_IDX_NRST].gpio, 0);
	mdelay(spich_p->pre_reset_delay + 3);
	gpio_set_value(spich_p->gpio_array[GPIO_IDX_NRST].gpio, 1);

	printk("STM %s END status : %d\n", __func__, tmp);
}
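
/*
 * Call-order sketch (assumption, inferred from the GPIO handling above):
 * StmResetHub(STM_SYSTEM) pulls BOOT0 high before the NRST pulse so the hub
 * restarts into its system bootloader, StmResetHub(STM_RESET) pulls BOOT0
 * low for a normal application boot, and StmResetHub(STM_SHUTDOWN) only
 * drops the LDO enable (when pre_reset_delay is 0) and returns without
 * touching NRST.
 */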
/* Example #2 */
static int ads7846_read12_ser(struct device *dev, unsigned command)
{
	struct spi_device	*spi = to_spi_device(dev);
	struct ads7846		*ts = dev_get_drvdata(dev);
	struct ser_req		*req = kzalloc(sizeof *req, GFP_KERNEL);
	int			status;
	int			sample;
	int			use_internal;

	if (!req)
		return -ENOMEM;

	spi_message_init(&req->msg);

	/* FIXME boards with ads7846 might use external vref instead ... */
	use_internal = (ts->model == 7846);

	/* maybe turn on internal vREF, and let it settle */
	if (use_internal) {
		req->ref_on = REF_ON;
		req->xfer[0].tx_buf = &req->ref_on;
		req->xfer[0].len = 1;
		spi_message_add_tail(&req->xfer[0], &req->msg);

		req->xfer[1].rx_buf = &req->scratch;
		req->xfer[1].len = 2;

		/* for 1uF, settle for 800 usec; no cap, 100 usec.  */
		req->xfer[1].delay_usecs = ts->vref_delay_usecs;
		spi_message_add_tail(&req->xfer[1], &req->msg);
	}

	/* take sample */
	req->command = (u8) command;
	req->xfer[2].tx_buf = &req->command;
	req->xfer[2].len = 1;
	spi_message_add_tail(&req->xfer[2], &req->msg);

	req->xfer[3].rx_buf = &req->sample;
	req->xfer[3].len = 2;
	spi_message_add_tail(&req->xfer[3], &req->msg);

	/* REVISIT:  take a few more samples, and compare ... */

	/* converter in low power mode & enable PENIRQ */
	req->ref_off = PWRDOWN;
	req->xfer[4].tx_buf = &req->ref_off;
	req->xfer[4].len = 1;
	spi_message_add_tail(&req->xfer[4], &req->msg);

	req->xfer[5].rx_buf = &req->scratch;
	req->xfer[5].len = 2;
	CS_CHANGE(req->xfer[5]);
	spi_message_add_tail(&req->xfer[5], &req->msg);

	ts->irq_disabled = 1;
	disable_irq(spi->irq);
	status = spi_sync(spi, &req->msg);
	ts->irq_disabled = 0;
	enable_irq(spi->irq);

	if (req->msg.status)
		status = req->msg.status;

	/* on-wire is a must-ignore bit, a BE12 value, then padding */
	sample = be16_to_cpu(req->sample);
	sample = sample >> 3;
	sample &= 0x0fff;

	kfree(req);
	return status ? status : sample;
}
/* Example #3 */
static int __devinit ad9834_probe(struct spi_device *spi)
{
	struct ad9834_platform_data *pdata = spi->dev.platform_data;
	struct ad9834_state *st;
	struct iio_dev *indio_dev;
	struct regulator *reg;
	int ret;

	if (!pdata) {
		dev_dbg(&spi->dev, "no platform data?\n");
		return -ENODEV;
	}

	reg = regulator_get(&spi->dev, "vcc");
	if (!IS_ERR(reg)) {
		ret = regulator_enable(reg);
		if (ret)
			goto error_put_reg;
	}

	indio_dev = iio_allocate_device(sizeof(*st));
	if (indio_dev == NULL) {
		ret = -ENOMEM;
		goto error_disable_reg;
	}
	spi_set_drvdata(spi, indio_dev);
	st = iio_priv(indio_dev);
	st->mclk = pdata->mclk;
	st->spi = spi;
	st->devid = spi_get_device_id(spi)->driver_data;
	st->reg = reg;
	indio_dev->dev.parent = &spi->dev;
	indio_dev->name = spi_get_device_id(spi)->name;
	indio_dev->info = &ad9834_info;
	indio_dev->modes = INDIO_DIRECT_MODE;

	/* Setup default messages */

	st->xfer.tx_buf = &st->data;
	st->xfer.len = 2;

	spi_message_init(&st->msg);
	spi_message_add_tail(&st->xfer, &st->msg);

	st->freq_xfer[0].tx_buf = &st->freq_data[0];
	st->freq_xfer[0].len = 2;
	st->freq_xfer[0].cs_change = 1;
	st->freq_xfer[1].tx_buf = &st->freq_data[1];
	st->freq_xfer[1].len = 2;

	spi_message_init(&st->freq_msg);
	spi_message_add_tail(&st->freq_xfer[0], &st->freq_msg);
	spi_message_add_tail(&st->freq_xfer[1], &st->freq_msg);

	st->control = AD9834_B28 | AD9834_RESET;

	if (!pdata->en_div2)
		st->control |= AD9834_DIV2;

	if (!pdata->en_signbit_msb_out && (st->devid == ID_AD9834))
		st->control |= AD9834_SIGN_PIB;

	st->data = cpu_to_be16(AD9834_REG_CMD | st->control);
	ret = spi_sync(st->spi, &st->msg);
	if (ret) {
		dev_err(&spi->dev, "device init failed\n");
		goto error_free_device;
	}

	ret = ad9834_write_frequency(st, AD9834_REG_FREQ0, pdata->freq0);
	if (ret)
		goto error_free_device;

	ret = ad9834_write_frequency(st, AD9834_REG_FREQ1, pdata->freq1);
	if (ret)
		goto error_free_device;

	ret = ad9834_write_phase(st, AD9834_REG_PHASE0, pdata->phase0);
	if (ret)
		goto error_free_device;

	ret = ad9834_write_phase(st, AD9834_REG_PHASE1, pdata->phase1);
	if (ret)
		goto error_free_device;

	ret = iio_device_register(indio_dev);
	if (ret)
		goto error_free_device;

	return 0;

error_free_device:
	iio_free_device(indio_dev);
error_disable_reg:
	if (!IS_ERR(reg))
		regulator_disable(reg);
error_put_reg:
	if (!IS_ERR(reg))
		regulator_put(reg);
	return ret;
}
static int stm32fwu_spi_write(struct spi_device *spi,
	const u8 *buffer, ssize_t len)
{
	int ret;
	u8 rx_buf[STM_MAX_BUFFER_SIZE] = {0,};
	struct spi_message m;
#if BYTETOBYTE_USED
	int i;
	struct spi_transfer t[STM_MAX_BUFFER_SIZE];

	memset(t, 0, STM_MAX_BUFFER_SIZE * sizeof(struct spi_transfer));
#else
	struct spi_transfer	t = {
		.tx_buf		= buffer,
		.rx_buf		= rx_buf,
		.len		= len,
		.bits_per_word = 8,
	};
#endif
	spi_message_init(&m);
#if BYTETOBYTE_USED
	for (i = 0; i < len; i++) {
		t[i].tx_buf = &buffer[i];
		t[i].rx_buf = &rx_buf[i];
		t[i].len = 1;
		t[i].bits_per_word = 8;
		t[i].delay_usecs = BYTE_DELAY_WRITE;
		spi_message_add_tail(&t[i], &m);
	}
#else
	spi_message_add_tail(&t, &m);
#endif
	ret = spi_sync(spi, &m);

	if (ret < 0) {
		pr_err("[SSP] Error in %d spi_write()\n", ret);
		return ret;
	}

	return len;
}

static int send_addr(struct spi_device *spi, u32 fw_addr, int send_short)
{
	int res;
	int i = send_short;
	int len = SEND_ADDR_LEN - send_short;
	u8 header[SEND_ADDR_LEN];
	struct stm32fwu_spi_cmd dummy_cmd;
	dummy_cmd.timeout = DEF_ACKROOF_NUMBER;


	header[0] = (u8)((fw_addr >> 24) & 0xFF);
	header[1] = (u8)((fw_addr >> 16) & 0xFF);
	header[2] = (u8)((fw_addr >> 8) & 0xFF);
	header[3] = (u8)(fw_addr & 0xFF);
	header[4] = header[0] ^ header[1] ^ header[2] ^ header[3];

	res = stm32fwu_spi_write(spi, &header[i], len);

	if (res <  len) {
		pr_err("[SSP] Error in sending address. Res  %d\n", res);
		return ((res > 0) ? -EIO : res);
	}

	res = stm32fwu_spi_wait_for_ack(spi, &dummy_cmd, BL_ACK);
	if (res != BL_ACK) {
		pr_err("[SSP] send_addr(): rcv_ack returned 0x%x\n",
			res);
		return res;
	}
	return 0;
}
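
/*
 * Worked example (illustrative only): for fw_addr = 0x08000000 and
 * send_short == 0, the frame built by send_addr() is
 *   header[0..3] = 0x08 0x00 0x00 0x00                 (address, MSB first)
 *   header[4]    = 0x08 ^ 0x00 ^ 0x00 ^ 0x00 = 0x08    (XOR checksum)
 * and the bootloader is expected to answer the SEND_ADDR_LEN-byte write
 * with BL_ACK.
 */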
/* Example #5 */
static int ade7758_spi_write_reg_16(struct device *dev,
		u8 reg_address,
		u16 value)
{
	int ret;
	struct spi_message msg;
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct ade7758_state *st = iio_priv(indio_dev);
	struct spi_transfer xfers[] = {
		{
			.tx_buf = st->tx,
			.bits_per_word = 8,
			.len = 3,
		}
	};

	mutex_lock(&st->buf_lock);
	st->tx[0] = ADE7758_WRITE_REG(reg_address);
	st->tx[1] = (value >> 8) & 0xFF;
	st->tx[2] = value & 0xFF;

	spi_message_init(&msg);
	spi_message_add_tail(xfers, &msg);
	ret = spi_sync(st->us, &msg);
	mutex_unlock(&st->buf_lock);

	return ret;
}

static int ade7758_spi_write_reg_24(struct device *dev,
		u8 reg_address,
		u32 value)
{
	int ret;
	struct spi_message msg;
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct ade7758_state *st = iio_priv(indio_dev);
	struct spi_transfer xfers[] = {
		{
/* Example #6 */
static int ad5504_spi_read(struct spi_device *spi, u8 addr, u16 *val)
{
	u16 tmp = cpu_to_be16(AD5504_CMD_READ | AD5504_ADDR(addr));
	int ret;
	struct spi_transfer	t = {
			.tx_buf		= &tmp,
			.rx_buf		= val,
			.len		= 2,
		};
	struct spi_message	m;

	spi_message_init(&m);
	spi_message_add_tail(&t, &m);
	ret = spi_sync(spi, &m);

	*val = be16_to_cpu(*val) & AD5504_RES_MASK;

	return ret;
}

static ssize_t ad5504_write_dac(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t len)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct ad5504_state *st = iio_priv(indio_dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	long readin;
	int ret;

	ret = strict_strtol(buf, 10, &readin);
	if (ret)
		return ret;

	ret = ad5504_spi_write(st->spi, this_attr->address, readin);
	return ret ? ret : len;
}

static ssize_t ad5504_read_dac(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct ad5504_state *st = iio_priv(indio_dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int ret;
	u16 val;

	ret = ad5504_spi_read(st->spi, this_attr->address, &val);
	if (ret)
		return ret;

	return sprintf(buf, "%d\n", val);
}

static ssize_t ad5504_read_powerdown_mode(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct ad5504_state *st = iio_priv(indio_dev);

	const char mode[][14] = {"20kohm_to_gnd", "three_state"};

	return sprintf(buf, "%s\n", mode[st->pwr_down_mode]);
}

static ssize_t ad5504_write_powerdown_mode(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t len)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct ad5504_state *st = iio_priv(indio_dev);
	int ret = 0;

	if (sysfs_streq(buf, "20kohm_to_gnd"))
		st->pwr_down_mode = AD5504_DAC_PWRDN_20K;
	else if (sysfs_streq(buf, "three_state"))
		st->pwr_down_mode = AD5504_DAC_PWRDN_3STATE;
	else
		ret = -EINVAL;

	return ret ? ret : len;
}

static ssize_t ad5504_read_dac_powerdown(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct ad5504_state *st = iio_priv(indio_dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

	return sprintf(buf, "%d\n",
			!(st->pwr_down_mask & (1 << this_attr->address)));
}

static ssize_t ad5504_write_dac_powerdown(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf, size_t len)
{
	long readin;
	int ret;
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct ad5504_state *st = iio_priv(indio_dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

	ret = strict_strtol(buf, 10, &readin);
	if (ret)
		return ret;

	if (readin == 0)
		st->pwr_down_mask |= (1 << this_attr->address);
	else if (readin == 1)
		st->pwr_down_mask &= ~(1 << this_attr->address);
	else
		return -EINVAL;

	ret = ad5504_spi_write(st->spi, AD5504_ADDR_CTRL,
				AD5504_DAC_PWRDWN_MODE(st->pwr_down_mode) |
				AD5504_DAC_PWR(st->pwr_down_mask));

	/* writes to the CTRL register must be followed by a NOOP */
	ad5504_spi_write(st->spi, AD5504_ADDR_NOOP, 0);

	return ret ? ret : len;
}
/* Example #7 */
/**
 * cros_ec_command_spi_xfer - Transfer a message over SPI and receive the reply
 *
 * @ec_dev: ChromeOS EC device
 * @ec_msg: Message to transfer
 */
static int cros_ec_command_spi_xfer(struct cros_ec_device *ec_dev,
				    struct cros_ec_msg *ec_msg)
{
	struct cros_ec_spi *ec_spi = ec_dev->priv;
	struct spi_transfer trans;
	struct spi_message msg;
	int i, len;
	u8 *ptr;
	int sum;
	int ret = 0, final_ret;
	struct timespec ts;

	/*
	 * We have the shared ec_dev buffer plus we do lots of separate spi_sync
	 * calls, so we need to make sure only one person is using this at a
	 * time.
	 */
	mutex_lock(&ec_spi->lock);

	len = cros_ec_prepare_tx(ec_dev, ec_msg);
	dev_dbg(ec_dev->dev, "prepared, len=%d\n", len);

	/* If it's too soon to do another transaction, wait */
	if (ec_spi->last_transfer_ns) {
		struct timespec ts;
		unsigned long delay;	/* The delay completed so far */

		ktime_get_ts(&ts);
		delay = timespec_to_ns(&ts) - ec_spi->last_transfer_ns;
		if (delay < EC_SPI_RECOVERY_TIME_NS)
			ndelay(EC_SPI_RECOVERY_TIME_NS - delay);
	}

	/* Transmit phase - send our message */
	debug_packet(ec_dev->dev, "out", ec_dev->dout, len);
	memset(&trans, 0, sizeof(trans));
	trans.tx_buf = ec_dev->dout;
	trans.len = len;
	trans.cs_change = 1;
	spi_message_init(&msg);
	spi_message_add_tail(&trans, &msg);
	ret = spi_sync(ec_spi->spi, &msg);

	/* Get the response */
	if (!ret) {
		ret = cros_ec_spi_receive_response(ec_dev,
				ec_msg->in_len + EC_MSG_TX_PROTO_BYTES);
	} else {
		dev_err(ec_dev->dev, "spi transfer failed: %d\n", ret);
	}

	/* turn off CS */
	spi_message_init(&msg);

	if (ec_spi->end_of_msg_delay) {
		/*
		 * Add delay for last transaction, to ensure the rising edge
		 * doesn't come too soon after the end of the data.
		 */
		memset(&trans, 0, sizeof(trans));
		trans.delay_usecs = ec_spi->end_of_msg_delay;
		spi_message_add_tail(&trans, &msg);
	}

	final_ret = spi_sync(ec_spi->spi, &msg);
	ktime_get_ts(&ts);
	ec_spi->last_transfer_ns = timespec_to_ns(&ts);
	if (!ret)
		ret = final_ret;
	if (ret < 0) {
		dev_err(ec_dev->dev, "spi transfer failed: %d\n", ret);
		goto exit;
	}

	/* check response error code */
	ptr = ec_dev->din;
	if (ptr[0]) {
		dev_warn(ec_dev->dev, "command 0x%02x returned an error %d\n",
			 ec_msg->cmd, ptr[0]);
		debug_packet(ec_dev->dev, "in_err", ptr, len);
		ret = -EINVAL;
		goto exit;
	}
	len = ptr[1];
	sum = ptr[0] + ptr[1];
	if (len > ec_msg->in_len) {
		dev_err(ec_dev->dev, "packet too long (%d bytes, expected %d)",
			len, ec_msg->in_len);
		ret = -ENOSPC;
		goto exit;
	}

	/* copy response packet payload and compute checksum */
	for (i = 0; i < len; i++) {
		sum += ptr[i + 2];
		if (ec_msg->in_len)
			ec_msg->in_buf[i] = ptr[i + 2];
	}
	sum &= 0xff;

	debug_packet(ec_dev->dev, "in", ptr, len + 3);

	if (sum != ptr[len + 2]) {
		dev_err(ec_dev->dev,
			"bad packet checksum, expected %02x, got %02x\n",
			sum, ptr[len + 2]);
		ret = -EBADMSG;
		goto exit;
	}

	ret = 0;
exit:
	mutex_unlock(&ec_spi->lock);
	return ret;
}
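
/*
 * Checksum sketch (assumption: a standalone restatement of the verification
 * loop above, not a helper from the cros_ec driver). The expected trailing
 * byte is the low 8 bits of the sum of the status byte, the length byte and
 * every payload byte.
 */
static u8 __maybe_unused example_cros_ec_csum(const u8 *pkt, int payload_len)
{
	int i;
	int sum = pkt[0] + pkt[1];		/* status + length */

	for (i = 0; i < payload_len; i++)
		sum += pkt[i + 2];		/* payload bytes */

	return sum & 0xff;			/* compare with pkt[payload_len + 2] */
}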
int tdmb_fc8050_spi_write_read(uint8* tx_data, int tx_length, uint8 *rx_data, int rx_length)
{
	int rc;

	struct spi_transfer	t = {
			.tx_buf		= tx_data,
			.rx_buf		= rx_data,
			.len		= tx_length+rx_length,
		};

	struct spi_message	m;	

	if (fc8050_ctrl_info.spi_ptr == NULL)
	{
		printk("tdmb_fc8050_spi_write_read error txdata=0x%x, length=%d\n", (unsigned int)tx_data, tx_length+rx_length);
		return FALSE;
	}

	mutex_lock(&fc8050_ctrl_info.mutex);

	spi_message_init(&m);
	spi_message_add_tail(&t, &m);
	rc = spi_sync(fc8050_ctrl_info.spi_ptr, &m);
	if ( rc < 0 )
	{
		printk("tdmb_fc8050_spi_read_burst result(%d), actual_len=%d\n",rc, m.actual_length);
	}

	mutex_unlock(&fc8050_ctrl_info.mutex);

	return TRUE;
}

#ifdef FEATURE_DMB_USE_WORKQUEUE
static irqreturn_t broadcast_tdmb_spi_isr(int irq, void *handle)
{
	struct tdmb_fc8050_ctrl_blk* fc8050_info_p;
	unsigned long flag;

	fc8050_info_p = (struct tdmb_fc8050_ctrl_blk *)handle;	
	if ( fc8050_info_p && fc8050_info_p->TdmbPowerOnState )
	{
		if (fc8050_info_p->spi_irq_status)
		{			
			printk("######### spi read function is so late skip #########\n");			
			return IRQ_HANDLED;
		}		
//		printk("***** broadcast_tdmb_spi_isr coming *******\n");
		spin_lock_irqsave(&fc8050_info_p->spin_lock, flag);
		queue_work(fc8050_info_p->spi_wq, &fc8050_info_p->spi_work);
		spin_unlock_irqrestore(&fc8050_info_p->spin_lock, flag);    
	}
	else
	{
		printk("broadcast_tdmb_spi_isr is called, but device is off state\n");
	}

	return IRQ_HANDLED; 
}

static void broacast_tdmb_spi_work(struct work_struct *tdmb_work)
{
	struct tdmb_fc8050_ctrl_blk *pTdmbWorkData;

	pTdmbWorkData = container_of(tdmb_work, struct tdmb_fc8050_ctrl_blk, spi_work);
	if ( pTdmbWorkData )
	{
//		printk("broadcast_tdmb_spi_work START\n");
		fc8050_isr_control(0);
		pTdmbWorkData->spi_irq_status = TRUE;
		broadcast_drv_if_isr();
		pTdmbWorkData->spi_irq_status = FALSE;
		fc8050_isr_control(1);
//		printk("broadcast_tdmb_spi_work END\n");
//		printk("broacast_tdmb_spi_work is called handle=0x%x\n", (unsigned int)pTdmbWorkData);
	}
	else
	{
		printk("~~~~~~~broadcast_tdmb_spi_work call but pTdmbworkData is NULL ~~~~~~~\n");
	}
}
#else
static irqreturn_t broadcast_tdmb_spi_event_handler(int irq, void *handle)
{
	struct tdmb_fc8050_ctrl_blk *fc8050_info_p;

	fc8050_info_p = (struct tdmb_fc8050_ctrl_blk *)handle;
	if ( fc8050_info_p && fc8050_info_p->TdmbPowerOnState )
	{
		if (fc8050_info_p->spi_irq_status)
		{
			printk("######### spi read function is so late skip ignore #########\n");
			return IRQ_HANDLED;
		}

		fc8050_isr_control(0);
		fc8050_info_p->spi_irq_status = TRUE;
		broadcast_drv_if_isr();
		fc8050_info_p->spi_irq_status = FALSE;
		fc8050_isr_control(1);

	}
	else
	{
		printk("broadcast_tdmb_spi_isr is called, but device is off state\n");
	}
	return IRQ_HANDLED;
}
#endif

static int broadcast_tdmb_fc8050_probe(struct spi_device *spi)
{
	int rc;
	
#ifdef ANTENNA_SWITCHING
	struct pm_gpio GPIO11_CFG = {
				.direction      = PM_GPIO_DIR_OUT,
				.pull           = PM_GPIO_PULL_NO,
				.function       = PM_GPIO_FUNC_NORMAL,
				.vin_sel        = 2,	/* for ESD TEST in I-pjt */
				.inv_int_pol    = 0,	
				};
	struct pm_gpio GPIO12_CFG = {
				.direction      = PM_GPIO_DIR_OUT,
				.pull           = PM_GPIO_PULL_NO,
				.function       = PM_GPIO_FUNC_NORMAL,
				.vin_sel        = 2,	/* for ESD TEST in I-pjt */
				.inv_int_pol    = 0,			
				};	
#endif  /* ANTENNA_SWITCHING */        

	fc8050_ctrl_info.TdmbPowerOnState = FALSE;
	
	fc8050_ctrl_info.spi_ptr 				= spi;
	fc8050_ctrl_info.spi_ptr->mode 			= SPI_MODE_0;
	fc8050_ctrl_info.spi_ptr->bits_per_word 	= 8;
	fc8050_ctrl_info.spi_ptr->max_speed_hz 	= ( 24000*1000 );
	rc = spi_setup(spi);
	printk("broadcast_tdmb_fc8050_probe spi_setup=%d\n", rc);
	BBM_HOSTIF_SELECT(NULL, 1);
	
#ifdef FEATURE_DMB_USE_WORKQUEUE
	INIT_WORK(&fc8050_ctrl_info.spi_work, broacast_tdmb_spi_work);
	fc8050_ctrl_info.spi_wq = create_singlethread_workqueue("tdmb_spi_wq");
	if(fc8050_ctrl_info.spi_wq == NULL){
		printk("Failed to setup tdmb spi workqueue \n");
		return -ENOMEM;
	}
#endif
#ifdef FEATURE_DMB_USE_WORKQUEUE
	rc = request_irq(spi->irq, broadcast_tdmb_spi_isr, IRQF_DISABLED | IRQF_TRIGGER_FALLING, spi->dev.driver->name, &fc8050_ctrl_info);
#else
	rc = request_threaded_irq(spi->irq, NULL, broadcast_tdmb_spi_event_handler, IRQF_DISABLED | IRQF_TRIGGER_FALLING,
		spi->dev.driver->name, &fc8050_ctrl_info);
#endif
	printk("broadcast_tdmb_fc8050_probe request_irq=%d\n", rc);

	gpio_request(101, "DMB_RESET_N");
	gpio_request(102, "DMB_EN");
	gpio_request(107, "DMB_INT_N");
	gpio_direction_output(DMB_RESET_N, false);      
	gpio_direction_output(DMB_EN, false);               
	gpio_direction_output(DMB_INT_N, false);           

#ifdef ANTENNA_SWITCHING
	pm8xxx_gpio_config(DMB_ANT_SEL_P, &GPIO11_CFG);
	pm8xxx_gpio_config(DMB_ANT_SEL_N, &GPIO12_CFG);
	gpio_set_value_cansleep(DMB_ANT_SEL_P, 1);	/* for ESD TEST in I-pjt */
	gpio_set_value_cansleep(DMB_ANT_SEL_N, 0);	/* for ESD TEST in I-pjt */
#endif  /* ANTENNA_SWITCHING */
	tdmb_fc8050_interrupt_lock();

	mutex_init(&fc8050_ctrl_info.mutex);

	wake_lock_init(&fc8050_ctrl_info.wake_lock,  WAKE_LOCK_SUSPEND, dev_name(&spi->dev));		
	spin_lock_init(&fc8050_ctrl_info.spin_lock);

#ifdef PM_QOS
	pm_qos_add_request(&fc8050_ctrl_info.pm_req_list, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
#endif  /* PM_QOS */

	printk("broadcast_fc8050_probe End\n");

	return rc;
}

static int broadcast_tdmb_fc8050_remove(struct spi_device *spi)
{
	printk("broadcast_tdmb_fc8050_remove \n");
#ifdef FEATURE_DMB_USE_WORKQUEUE

	if (fc8050_ctrl_info.spi_wq)
	{
		flush_workqueue(fc8050_ctrl_info.spi_wq);
		destroy_workqueue(fc8050_ctrl_info.spi_wq);
	}
#endif
	free_irq(spi->irq, &fc8050_ctrl_info);

	mutex_destroy(&fc8050_ctrl_info.mutex);

	wake_lock_destroy(&fc8050_ctrl_info.wake_lock);

#ifdef PM_QOS
	pm_qos_remove_request(&fc8050_ctrl_info.pm_req_list);
#endif  /* PM_QOS */
	memset((unsigned char*)&fc8050_ctrl_info, 0x0, sizeof(struct tdmb_fc8050_ctrl_blk));
	return 0;
}

static int broadcast_tdmb_fc8050_suspend(struct spi_device *spi, pm_message_t mesg)
{
	printk("broadcast_tdmb_fc8050_suspend \n");
	return 0;
}
/* Example #9 */
static void wl1271_spi_raw_read(struct wl1271 *wl, int addr, void *buf,
				size_t len, bool fixed)
{
	struct spi_transfer t[2];
	struct spi_message m;
	u32 *busy_buf;
	u32 *cmd;
	u32 chunk_len;

	while (len > 0) {
		chunk_len = min((size_t)WSPI_MAX_CHUNK_SIZE, len);

		cmd = &wl->buffer_cmd;
		busy_buf = wl->buffer_busyword;

		*cmd = 0;
		*cmd |= WSPI_CMD_READ;
		*cmd |= (chunk_len << WSPI_CMD_BYTE_LENGTH_OFFSET) &
			WSPI_CMD_BYTE_LENGTH;
		*cmd |= addr & WSPI_CMD_BYTE_ADDR;

		if (fixed)
			*cmd |= WSPI_CMD_FIXED;

		spi_message_init(&m);
		memset(t, 0, sizeof(t));

		t[0].tx_buf = cmd;
		t[0].len = 4;
		t[0].cs_change = true;
		spi_message_add_tail(&t[0], &m);

		/* Busy and non busy words read */
		t[1].rx_buf = busy_buf;
		t[1].len = WL1271_BUSY_WORD_LEN;
		t[1].cs_change = true;
		spi_message_add_tail(&t[1], &m);

		spi_sync(wl_to_spi(wl), &m);

		if (!(busy_buf[WL1271_BUSY_WORD_CNT - 1] & 0x1) &&
		    wl1271_spi_read_busy(wl)) {
			memset(buf, 0, chunk_len);
			return;
		}

		spi_message_init(&m);
		memset(t, 0, sizeof(t));

		t[0].rx_buf = buf;
		t[0].len = chunk_len;
		t[0].cs_change = true;
		spi_message_add_tail(&t[0], &m);

		spi_sync(wl_to_spi(wl), &m);

		wl1271_dump(DEBUG_SPI, "spi_read cmd -> ", cmd, sizeof(*cmd));
		wl1271_dump(DEBUG_SPI, "spi_read buf <- ", buf, chunk_len);

		if (!fixed)
			addr += chunk_len;
		buf += chunk_len;
		len -= chunk_len;
	}
}
static int32_t spi_write_table_parsepage(CODEC_SPI_CMD *cmds, int num)
{
	int i;
	int bulk_counter;
	int status = 0;
	struct spi_message	m;
	struct spi_transfer	tx_addr;
	bool is_page_zero = false;
	unsigned char page_select = 0x00;
	unsigned char book_select = 0x7F;
	unsigned int reg_long1, reg_long2;

	if (codec_spi_dev == NULL) {
		status = -ESHUTDOWN;
		return status;
	}

	i = 0;

	while (i < num - 1) {
		if (cmds[i].reg == book_select && is_page_zero) {
			/* select book */
			codec_spi_write(cmds[i].reg, cmds[i].data, false);
			i++;
		} else if (cmds[i].reg == page_select) {
			/* select page */
			if (cmds[i].data == 0x00) is_page_zero = true;
			else is_page_zero = false;
			codec_spi_write(cmds[i].reg, cmds[i].data, false);
			i++;
		} else {
			spi_message_init(&m);
			memset(bulk_tx, 0, MINIDSP_COL_MAX * 2 * \
				sizeof(uint8_t));
			memset(&tx_addr, 0, sizeof(struct spi_transfer));

			bulk_counter = 0;
			bulk_tx[bulk_counter] = cmds[i].reg << 1;
			bulk_tx[bulk_counter + 1] = cmds[i].data;
			bulk_counter += 2;

			do {
				reg_long1 = (unsigned int)cmds[i].reg;
				reg_long2 = (unsigned int)cmds[i+1].reg;
				if (reg_long2 == (reg_long1+1)) {
					bulk_tx[bulk_counter] = cmds[i+1].data;
					bulk_counter++;
				}
				i++;
			} while (reg_long2 == (reg_long1+1) && i < num-1);

			/*int j = 0;
			AUD_DBG("bulk_write : start reg: 0x%02X\n", bulk_tx[j] >> 1);
			for (j = 1; j < bulk_counter; j++)
				AUD_DBG("bulk_write : data: 0x%02X\n", bulk_tx[j]);
			AUD_DBG("bulk_counter = %d, i = %d\n", bulk_counter, i);*/

			tx_addr.tx_buf = bulk_tx;
			tx_addr.len = (bulk_counter);
			tx_addr.cs_change = 1;
			tx_addr.bits_per_word = 8;
			spi_message_add_tail(&tx_addr, &m);
			status = spi_sync(codec_spi_dev, &m);
		}
	}

	return status;
}
/* Example #11 */
static ssize_t ad9852_set_parameter(struct device *dev,
					struct device_attribute *attr,
					const char *buf,
					size_t len)
{
	struct spi_message msg;
	struct spi_transfer xfer;
	int ret;
	struct ad9852_config *config = (struct ad9852_config *)buf;
	struct iio_dev *idev = dev_get_drvdata(dev);
	struct ad9852_state *st = idev->dev_data;

	xfer.len = 3;
	xfer.tx_buf = &config->phajst0[0];
	mutex_lock(&st->lock);

	spi_message_init(&msg);
	spi_message_add_tail(&xfer, &msg);
	ret = spi_sync(st->sdev, &msg);
	if (ret)
		goto error_ret;

	xfer.len = 3;
	xfer.tx_buf = &config->phajst1[0];

	spi_message_init(&msg);
	spi_message_add_tail(&xfer, &msg);
	ret = spi_sync(st->sdev, &msg);
	if (ret)
		goto error_ret;

	xfer.len = 6;
	xfer.tx_buf = &config->fretun1[0];

	spi_message_init(&msg);
	spi_message_add_tail(&xfer, &msg);
	ret = spi_sync(st->sdev, &msg);
	if (ret)
		goto error_ret;

	xfer.len = 6;
	xfer.tx_buf = &config->fretun2[0];

	spi_message_init(&msg);
	spi_message_add_tail(&xfer, &msg);
	ret = spi_sync(st->sdev, &msg);
	if (ret)
		goto error_ret;

	xfer.len = 6;
	xfer.tx_buf = &config->dltafre[0];

	spi_message_init(&msg);
	spi_message_add_tail(&xfer, &msg);
	ret = spi_sync(st->sdev, &msg);
	if (ret)
		goto error_ret;

	xfer.len = 5;
	xfer.tx_buf = &config->updtclk[0];

	spi_message_init(&msg);
	spi_message_add_tail(&xfer, &msg);
	ret = spi_sync(st->sdev, &msg);
	if (ret)
		goto error_ret;

	xfer.len = 4;
	xfer.tx_buf = &config->ramprat[0];

	spi_message_init(&msg);
	spi_message_add_tail(&xfer, &msg);
	ret = spi_sync(st->sdev, &msg);
	if (ret)
		goto error_ret;

	xfer.len = 5;
	xfer.tx_buf = &config->control[0];

	spi_message_init(&msg);
	spi_message_add_tail(&xfer, &msg);
	ret = spi_sync(st->sdev, &msg);
	if (ret)
		goto error_ret;

	xfer.len = 3;
	xfer.tx_buf = &config->outpskm[0];

	spi_message_init(&msg);
	spi_message_add_tail(&xfer, &msg);
	ret = spi_sync(st->sdev, &msg);
	if (ret)
		goto error_ret;

	xfer.len = 2;
	xfer.tx_buf = &config->outpskr[0];

	spi_message_init(&msg);
	spi_message_add_tail(&xfer, &msg);
	ret = spi_sync(st->sdev, &msg);
	if (ret)
		goto error_ret;

	xfer.len = 3;
	xfer.tx_buf = &config->daccntl[0];

	spi_message_init(&msg);
	spi_message_add_tail(&xfer, &msg);
	ret = spi_sync(st->sdev, &msg);
	if (ret)
		goto error_ret;
error_ret:
	mutex_unlock(&st->lock);

	return ret ? ret : len;
}
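
/*
 * Design sketch (assumption, not part of the original driver): the repeated
 * transfer/sync blocks in ad9852_set_parameter() could be table-driven. The
 * field names and lengths below are taken from the code above; everything
 * else is illustrative, and the caller is assumed to hold st->lock as in the
 * function above.
 */
static int __maybe_unused example_ad9852_send_config(struct ad9852_state *st,
						      struct ad9852_config *config)
{
	static const struct { size_t off; int len; } regs[] = {
		{ offsetof(struct ad9852_config, phajst0), 3 },
		{ offsetof(struct ad9852_config, phajst1), 3 },
		{ offsetof(struct ad9852_config, fretun1), 6 },
		{ offsetof(struct ad9852_config, fretun2), 6 },
		{ offsetof(struct ad9852_config, dltafre), 6 },
		{ offsetof(struct ad9852_config, updtclk), 5 },
		{ offsetof(struct ad9852_config, ramprat), 4 },
		{ offsetof(struct ad9852_config, control), 5 },
		{ offsetof(struct ad9852_config, outpskm), 3 },
		{ offsetof(struct ad9852_config, outpskr), 2 },
		{ offsetof(struct ad9852_config, daccntl), 3 },
	};
	struct spi_message msg;
	struct spi_transfer xfer;
	int i, ret = 0;

	for (i = 0; i < ARRAY_SIZE(regs); i++) {
		memset(&xfer, 0, sizeof(xfer));
		xfer.tx_buf = (u8 *)config + regs[i].off;
		xfer.len = regs[i].len;

		spi_message_init(&msg);
		spi_message_add_tail(&xfer, &msg);
		ret = spi_sync(st->sdev, &msg);
		if (ret)
			break;
	}

	return ret;
}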
int etspi_mass_read(struct etspi_data *etspi, u8 addr, u8 *buf, int read_len)
{
#ifdef ENABLE_SENSORS_FPRINT_SECURE
	return 0;
#else
	int status;
	struct spi_device *spi;
	struct spi_message m;
	u8 *write_addr = NULL, *read_data = NULL;
	/* Write and read data in one data query section */
	struct spi_transfer t_set_addr = {
		.tx_buf = NULL,
		.len = 2,
	};
	struct spi_transfer t_read_data = {
		.tx_buf = NULL,
		.rx_buf = NULL,
		.len = read_len + 3,
	};
	/* Set start address */

	read_data = kzalloc(read_len + 3, GFP_KERNEL);

	if (read_data == NULL)
		return -ENOMEM;

	write_addr = kzalloc(2, GFP_KERNEL);
	if (write_addr == NULL) {
		kfree(read_data);
		return -ENOMEM;
	}
	write_addr[0] = ET310_WRITE_ADDRESS;
	write_addr[1] = addr;

	t_set_addr.tx_buf = write_addr;
	t_read_data.tx_buf = t_read_data.rx_buf = read_data;

	pr_info("%s read_len = %d\n", __func__, read_len);

	read_data[0] = ET310_READ_DATA;

	spi = etspi->spi;

	spi_message_init(&m);
	spi_message_add_tail(&t_set_addr, &m);
	status = spi_sync(spi, &m);
	spi_message_init(&m);
	spi_message_add_tail(&t_read_data, &m);
	status = spi_sync(spi, &m);

	kfree(write_addr);

	if (status == 0)
		memcpy(buf, read_data + 3, read_len);
	else
		pr_err("%s read data error status = %d\n", __func__, status);
	kfree(read_data);

	return status;
#endif
}

/* Read io register */
int etspi_io_read_register(struct etspi_data *etspi, u8 *addr, u8 *buf)
{
#ifdef ENABLE_SENSORS_FPRINT_SECURE
	return 0;
#else
	int status = 0;
	struct spi_device *spi;
	struct spi_message m;
	int read_len = 1;

	u8 write_addr[] = {ET310_WRITE_ADDRESS, 0x00};
	u8 read_value[] = {ET310_READ_DATA, 0x00};
	u8 val, addrval;
	u8 result[] = {0xFF, 0xFF};

	struct spi_transfer t_set_addr = {
		.tx_buf = write_addr,
		.len = 2,
	};
	struct spi_transfer t = {
		.tx_buf = read_value,
		.rx_buf = result,
		.len = 2,
	};

	if (copy_from_user(&addrval, (const u8 __user *) (uintptr_t) addr
		, read_len)) {
		pr_err("%s buffer copy_from_user fail", __func__);
		status = -EFAULT;
		return status;
	}

	DEBUG_PRINT("%s read_len = %d", __func__, read_len);

	spi = etspi->spi;

	write_addr[1] = addrval;

	spi_message_init(&m);
	spi_message_add_tail(&t_set_addr, &m);
	status = spi_sync(spi, &m);
	spi_message_init(&m);
	spi_message_add_tail(&t, &m);
	status = spi_sync(spi, &m);
	if (status < 0) {
		pr_err("%s read data error status = %d\n", __func__, status);
		return status;
	}

	val = result[1];


#ifdef ET310_SPI_DEBUG
	DEBUG_PRINT("%s address = %x buf = %x", __func__, addr, val);
#endif
	if (copy_to_user((u8 __user *) (uintptr_t) buf, &val, read_len)) {
		pr_err("%s buffer copy_to_user fail status", __func__);
		status = -EFAULT;
		return status;
	}

	return status;
#endif
}

/* Write data to register */
int etspi_io_write_register(struct etspi_data *etspi, u8 *buf)
{
#ifdef ENABLE_SENSORS_FPRINT_SECURE
	return 0;
#else
	int status = 0;
	struct spi_device *spi;
	int write_len = 2;
	struct spi_message m;

	u8 write_addr[] = {ET310_WRITE_ADDRESS, 0x00};
	u8 write_value[] = {ET310_WRITE_DATA, 0x00};
	u8 val[2];

	struct spi_transfer t1 = {
		.tx_buf = write_addr,
		.len = 2,
	};
	struct spi_transfer t2 = {
		.tx_buf = write_value,
		.len = 2,
	};

	if (copy_from_user(val, (const u8 __user *) (uintptr_t) buf
		, write_len)) {
		pr_err("%s buffer copy_from_user fail", __func__);
		status = -EFAULT;
		return status;
	}

	DEBUG_PRINT("%s write_len = %d", __func__, write_len);

#ifdef ET310_SPI_DEBUG
	DEBUG_PRINT("%s address = %x data = %x", __func__, val[0], val[1]);
#endif

	spi = etspi->spi;

	write_addr[1] = val[0];
	write_value[1] = val[1];

	spi_message_init(&m);
	spi_message_add_tail(&t1, &m);
	status = spi_sync(spi, &m);
	spi_message_init(&m);
	spi_message_add_tail(&t2, &m);
	status = spi_sync(spi, &m);
	if (status < 0) {
		pr_err("%s read data error status = %d", __func__, status);
		return status;
	}

	return status;
#endif
}

int etspi_read_register(struct etspi_data *etspi, u8 addr, u8 *buf)
{
	int status;
	struct spi_device *spi;
	struct spi_message m;

	u8 write_addr[] = {ET310_WRITE_ADDRESS, addr};
	u8 read_value[] = {ET310_READ_DATA, 0x00};
	u8 result[] = {0xFF, 0xFF};

	struct spi_transfer t1 = {
		.tx_buf = write_addr,
		.len = 2,
	};
	struct spi_transfer t2 = {
		.tx_buf = read_value,
		.rx_buf	= result,
		.len = 2,
	};

	spi = etspi->spi;
	spi_message_init(&m);
	spi_message_add_tail(&t1, &m);
	status = spi_sync(spi, &m);
	spi_message_init(&m);
	spi_message_add_tail(&t2, &m);
	status = spi_sync(spi, &m);

	if (status == 0) {
		*buf = result[1];
		DEBUG_PRINT("et310_read_register address = %x result = %x %x\n"
					, addr, result[0], result[1]);
	} else
		pr_err("%s read data error status = %d\n", __func__, status);

	return status;
}

int etspi_io_get_one_image(struct etspi_data *etspi, u8 *buf, u8 *image_buf)
{
#ifdef ENABLE_SENSORS_FPRINT_SECURE
	return 0;
#else
	uint8_t read_val,
		*tx_buf = (uint8_t *)buf,
		*work_buf = NULL,
		*val = kzalloc(6, GFP_KERNEL);
	int status;
	uint32_t frame_not_ready_count = 0, read_count;

	pr_debug("%s\n", __func__);

	if (val == NULL)
		return -ENOMEM;

	if (copy_from_user(val, (const u8 __user *) (uintptr_t) tx_buf, 6)) {
		pr_err("%s buffer copy_from_user fail", __func__);
		status = -EFAULT;
		goto end;
	}
	read_count = val[0] * val[1];          /* total pixel , width * hight */

	while (1) {
		status = etspi_read_register
				(etspi, FSTATUS_ET310_ADDR, &read_val);
		if (status < 0)
			goto end;

		if (read_val & FRAME_READY_MASK)
			break;

		if (frame_not_ready_count >= 250) {
			pr_err("frame_not_ready_count = %d", frame_not_ready_count);
			status = -ETIME;
			goto end;
		}
		frame_not_ready_count++;
	}

	work_buf = kzalloc(read_count, GFP_KERNEL);
	if (work_buf == NULL) {
		status = -ENOMEM;
		goto end;
	}
	status = etspi_mass_read(etspi, FDATA_ET310_ADDR, work_buf, read_count);
	if (status < 0) {
		pr_err("%s call et310_mass_read error status = %d", __func__, status);
		goto end;
	}

	if (copy_to_user((u8 __user *) (uintptr_t) image_buf,
		work_buf, read_count)) {
		pr_err("buffer copy_to_user fail status = %d", status);
		status = -EFAULT;
	}
end:
	kfree(val);
	kfree(work_buf);
	return status;
#endif
}
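
/*
 * Flow sketch (illustrative, restating the function above): val[0] and
 * val[1] carry the frame width and height from user space, so read_count is
 * width * height; the loop polls FSTATUS_ET310_ADDR until FRAME_READY_MASK
 * is set (giving up after ~250 tries), then the whole frame is pulled from
 * FDATA_ET310_ADDR with a single etspi_mass_read() and copied back to user
 * space.
 */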
/*----------------------- EEPROM ------------------------*/

int etspi_eeprom_wren(struct etspi_data *etspi)
{
#ifdef ENABLE_SENSORS_FPRINT_SECURE
	return 0;
#else
	int status = 0;
	struct spi_device *spi;
	struct spi_message m;

	u8 write_data[] = {FP_EEPROM_WREN_OP};
	struct spi_transfer t = {
		.tx_buf = write_data,
		.len = 1,
	};

	DEBUG_PRINT("%s opcode = %x\n", __func__, write_data[0]);

	spi = etspi->spi;

	spi_message_init(&m);
	spi_message_add_tail(&t, &m);
	status = spi_sync(spi, &m);
	if (status < 0) {
		pr_err("%s spi_sync error status = %d",
				__func__, status);
		return status;
	}

	return status;
#endif
}

int etspi_eeprom_wrdi(struct etspi_data *etspi)
{
#ifdef ENABLE_SENSORS_FPRINT_SECURE
	return 0;
#else
	int status = 0;
	struct spi_device *spi;
	struct spi_message m;

	u8 write_data[] = {FP_EEPROM_WRDI_OP};
	struct spi_transfer t = {
		.tx_buf = write_data,
		.len = 1,
	};

	DEBUG_PRINT("%s opcode = %x\n", __func__, write_data[0]);

	spi = etspi->spi;

	spi_message_init(&m);
	spi_message_add_tail(&t, &m);
	status = spi_sync(spi, &m);
	if (status < 0) {
		pr_err("%s spi_sync error status = %d",
				__func__, status);
		return status;
	}

	return status;
#endif
}

int etspi_eeprom_rdsr(struct etspi_data *etspi, u8 *buf)
{
#ifdef ENABLE_SENSORS_FPRINT_SECURE
	return 0;
#else
	int status;
	struct spi_device *spi;
	struct spi_message m;
	u8 val,
	   read_value[] = {FP_EEPROM_RDSR_OP, 0x00},
	   result[] = {0xFF, 0xFF};

	struct spi_transfer t = {
		.tx_buf = read_value,
		.rx_buf = result,
		.len = 2,
	};

	spi = etspi->spi;
	spi_message_init(&m);
	spi_message_add_tail(&t, &m);
	status = spi_sync(spi, &m);
	if (status < 0) {
		pr_err("%s spi_sync error status = %d",
				__func__, status);
		return status;
	}

	val = result[1];

	DEBUG_PRINT("%s address = %x buf = %x", __func__,
			FP_EEPROM_RDSR_OP, val);

	if (copy_to_user((u8 __user *) (uintptr_t) buf, &val, 1)) {
		pr_err("%s buffer copy_to_user fail status", __func__);
		status = -EFAULT;
		return status;
	}

	return status;
#endif
}

int etspi_eeprom_wrsr(struct etspi_data *etspi, u8 *buf)
{
#ifdef ENABLE_SENSORS_FPRINT_SECURE
	return 0;
#else
	int status;
	struct spi_device *spi;
	struct spi_message m;
	u8 val;

	u8 write_data[] = {FP_EEPROM_WRSR_OP, 0x00};

	struct spi_transfer t = {
		.tx_buf = write_data,
		.len = 2,
	};

	if (copy_from_user(&val, (const u8 __user *) (uintptr_t) buf
		, 1)) {
		pr_err("%s buffer copy_from_user fail", __func__);
		status = -EFAULT;
		return status;
	}

	DEBUG_PRINT("%s data = %x", __func__, val);

	spi = etspi->spi;

	write_data[1] = val;

	spi_message_init(&m);
	spi_message_add_tail(&t, &m);
	status = spi_sync(spi, &m);
	if (status < 0) {
		pr_err("%s spi_sync error status = %d",
				__func__, status);
		return status;
	}

	return status;
#endif
}

int etspi_eeprom_read(struct etspi_data *etspi, u8 *addr, u8 *buf, int read_len)
{
#ifdef ENABLE_SENSORS_FPRINT_SECURE
	return 0;
#else
	int status = 0;
	struct spi_device *spi;
	struct spi_message m;
	u8 addrval, *read_value = kzalloc(read_len + 2, GFP_KERNEL);

	struct spi_transfer t = {
		.tx_buf = NULL,
		.rx_buf = NULL,
		.len = read_len + 2,
	};

	if (read_value == NULL)
		return -ENOMEM;

	if (copy_from_user(&addrval, (const u8 __user *) (uintptr_t) addr
		, 1)) {
		pr_err("%s buffer copy_from_user fail", __func__);
		status = -EFAULT;
		goto exit;
	}

	DEBUG_PRINT("%s read_len = %d", __func__, read_len);
	DEBUG_PRINT("%s addrval = %x", __func__, addrval);

	spi = etspi->spi;

	read_value[0] = FP_EEPROM_READ_OP;
	read_value[1] = addrval;

	t.tx_buf = t.rx_buf = read_value;

	spi_message_init(&m);
	spi_message_add_tail(&t, &m);
	status = spi_sync(spi, &m);
	if (status < 0) {
		pr_err("%s spi_sync error status = %d\n"
				, __func__, status);
		goto exit;
	}

	if (copy_to_user((u8 __user *) (uintptr_t) buf,
				read_value + 2, read_len)) {
		pr_err("%s buffer copy_to_user fail status", __func__);
		status = -EFAULT;
		goto exit;
	}

exit:
	kfree(read_value);

	return status;
#endif
}

/*
 * buf - the data written to the sensor, with the address in the first byte
 * write_len - the length of the data written to memory, excluding the address
 */
int etspi_eeprom_write(struct etspi_data *etspi, u8 *buf, int write_len)
{
#ifdef ENABLE_SENSORS_FPRINT_SECURE
	return 0;
#else
	int status = 0;
	struct spi_device *spi;
	struct spi_message m;

	u8 *write_value = kzalloc(write_len + 2, GFP_KERNEL);

	struct spi_transfer t = {
		.tx_buf = NULL,
		.len = write_len + 2,
	};

	if (write_value == NULL)
		return -ENOMEM;

	write_value[0] = FP_EEPROM_WRITE_OP;

	if (copy_from_user(write_value + 1, (const u8 __user *) (uintptr_t) buf
		, write_len + 1)) {
		pr_err("%s buffer copy_from_user fail", __func__);
		status = -EFAULT;
		goto exit;
	}

	DEBUG_PRINT("%s write_len = %d\n", __func__, write_len);
	DEBUG_PRINT("%s address = %x\n", __func__, write_value[1]);

	spi = etspi->spi;

	t.tx_buf = write_value;

	spi_message_init(&m);
	spi_message_add_tail(&t, &m);
	status = spi_sync(spi, &m);
	if (status < 0) {
		pr_err("%s read data error status = %d",
				__func__, status);
		goto exit;
	}

exit:
	kfree(write_value);

	return status;
#endif
}
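
/*
 * Layout sketch (illustrative): for a 3-byte EEPROM write to address 0x10,
 * the caller's user-space buffer is { 0x10, 0xAA, 0xBB, 0xCC } and write_len
 * is 3. etspi_eeprom_write() prepends FP_EEPROM_WRITE_OP, copies
 * write_len + 1 bytes (address plus data) after the opcode, and clocks out
 * write_len + 2 bytes in total.
 */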
static int cyttsp4_spi_xfer(u8 op, struct cyttsp4_spi *ts,
			u8 reg, u8 *buf, int length)
{
	struct device *dev = &ts->client->dev;
	struct spi_message msg;
	struct spi_transfer xfer[2];
	u8 wr_hdr_buf[CY_SPI_MAX_HEADER_BYTES];
	u8 rd_hdr_buf[CY_SPI_MAX_HEADER_BYTES];
	int rc;

	memset(wr_hdr_buf, 0, CY_SPI_MAX_HEADER_BYTES);
	memset(rd_hdr_buf, 0, CY_SPI_MAX_HEADER_BYTES);
	memset(xfer, 0, sizeof(xfer));

	spi_message_init(&msg);

	/* Header buffer */
	xfer[0].tx_buf = wr_hdr_buf;
	xfer[0].rx_buf = rd_hdr_buf;

	switch (op) {
	case CY_SPI_WR_OP:
		if (length + CY_SPI_WR_HEADER_BYTES > CY_SPI_DATA_SIZE) {
			dev_vdbg(dev,
				"%s: length+%d=%d is greater than SPI max=%d\n",
				__func__, CY_SPI_WR_HEADER_BYTES,
				length + CY_SPI_WR_HEADER_BYTES,
				CY_SPI_DATA_SIZE);
			rc = -EINVAL;
			goto cyttsp4_spi_xfer_exit;
		}

		/* Header byte 0 */
		if (reg > 255)
			wr_hdr_buf[0] = CY_SPI_WR_OP + CY_SPI_A8_BIT;
		else
			wr_hdr_buf[0] = CY_SPI_WR_OP;

		/* Header byte 1 */
		wr_hdr_buf[1] = reg % 256;

		xfer[0].len = CY_SPI_WR_HEADER_BYTES;

		spi_message_add_tail(&xfer[0], &msg);

		/* Data buffer */
		if (buf) {
			xfer[1].tx_buf = buf;
			xfer[1].len = length;

			spi_message_add_tail(&xfer[1], &msg);
		}
		break;

	case CY_SPI_RD_OP:
		if (!buf) {
			dev_err(dev, "%s: No read buffer\n", __func__);
			rc = -EINVAL;
			goto cyttsp4_spi_xfer_exit;
		}

		if ((length + CY_SPI_RD_HEADER_BYTES) > CY_SPI_DATA_SIZE) {
			dev_vdbg(dev,
				"%s: length+%d=%d is greater than SPI max=%d\n",
				__func__, CY_SPI_RD_HEADER_BYTES,
				length + CY_SPI_RD_HEADER_BYTES,
				CY_SPI_DATA_SIZE);
			rc = -EINVAL;
			goto cyttsp4_spi_xfer_exit;
		}

		/* Header byte 0 */
		wr_hdr_buf[0] = CY_SPI_RD_OP;

		xfer[0].len = CY_SPI_RD_HEADER_BYTES;

		spi_message_add_tail(&xfer[0], &msg);

		/* Data buffer */
		xfer[1].rx_buf = buf;
		xfer[1].len = length;

		spi_message_add_tail(&xfer[1], &msg);
		break;

	default:
		dev_dbg(dev, "%s: bad op code=%d\n", __func__, op);
		rc = -EINVAL;
		goto cyttsp4_spi_xfer_exit;
	}

	rc = spi_sync(ts->client, &msg);
	if (rc < 0) {
		dev_vdbg(dev, "%s: spi_sync() error %d, len=%d, op=%d\n",
			__func__, rc, xfer[0].len, op);
		/*
		 * do not return here since probably a bad ACK sequence
		 * let the following ACK check handle any errors and
		 * allow silent retries
		 */
	}

#if 0
	/* DEBUG */
	switch (op) {
	case CY_SPI_WR_OP:
		_cyttsp4_spi_pr_buf(ts, wr_hdr_buf, CY_SPI_WR_HEADER_BYTES,
			"spi_wr_buf HEADER");
		if (buf)
			_cyttsp4_spi_pr_buf(ts, buf, length,
				"spi_wr_buf DATA");
		break;

	case CY_SPI_RD_OP:
		_cyttsp4_spi_pr_buf(ts, rd_hdr_buf, CY_SPI_RD_HEADER_BYTES,
			"spi_rd_buf HEADER");
		_cyttsp4_spi_pr_buf(ts, buf, length, "spi_rd_buf DATA");
		break;
	}
#endif

	if (rd_hdr_buf[CY_SPI_SYNC_BYTE] != CY_SPI_SYNC_ACK) {
		/* signal ACK error so silent retry */
		rc = 1;

		switch (op) {
		case CY_SPI_WR_OP:
			_cyttsp4_spi_pr_buf(ts, wr_hdr_buf,
				CY_SPI_WR_HEADER_BYTES,
				"spi_wr_buf HEAD");
			if (buf)
				_cyttsp4_spi_pr_buf(ts, buf,
					length, "spi_wr_buf DATA");
			break;

		case CY_SPI_RD_OP:
			_cyttsp4_spi_pr_buf(ts, rd_hdr_buf,
				CY_SPI_RD_HEADER_BYTES, "spi_rd_buf HEAD");
			_cyttsp4_spi_pr_buf(ts, buf, length,
				"spi_rd_buf DATA");
			break;

		default:
			/*
			 * should not get here due to error check
			 * in first switch
			 */
			break;
		}
	} else
		rc = 0;

cyttsp4_spi_xfer_exit:
	return rc;
}
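
/*
 * Header sketch (illustrative, based on the write path above): for a write
 * to register 0x23 the two header bytes are
 *   wr_hdr_buf[0] = CY_SPI_WR_OP     (the A8 bit is only set when reg > 255)
 *   wr_hdr_buf[1] = 0x23             (reg % 256)
 * and the payload, if any, follows in the second spi_transfer of the message.
 */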
/* Example #14 */
static void spi_tty_write_worker(struct work_struct *work)
{
	int c;
	int crc;
	unsigned long flags;
	unsigned long start_t = 0;
	spi_msg_header header;
	struct spi_tty_s *spi_tty =
		container_of(work, struct spi_tty_s, write_work);

	start_t = jiffies;
	SPI_IPC_INFO("%s\n", __func__);

	mutex_lock(&(spi_tty->work_lock));
	spin_lock_irqsave(&(spi_tty->port_lock), flags);

	while(((c = spi_tty_buf_data_avail(spi_tty->write_buf))
		|| (spi_tty->tx_null))
		&& (!spi_tty->throttle)
		&& (!(spi_tty_dev && spi_tty_dev->peer_is_dead)))
	{
		SPI_IPC_INFO("%s: %d outgoing bytes\n", __func__, c);
		if (spi_tty->tx_null)
			spi_tty->tx_null = 0;

		// initiate spi_big_trans
		memset((char*)spi_big_trans.tx_buf, 0x0,
			SPI_TRANSACTION_LEN * 2);
		spi_big_msg.actual_length = 0;

		c = MIN(c, SPI_MTU);
		spi_tty_buf_get(spi_tty->write_buf,
			(char*)spi_big_trans.tx_buf + SPI_MSG_HEADER_LEN,
			c);

		if (spi_tty->tty && spi_tty->open_count)
			tty_wakeup(spi_tty->tty);
		spin_unlock_irqrestore(&(spi_tty->port_lock), flags);

		header.type = 1;
		header.len = c;
		header.dtr = spi_tty->dtr;
		crc = spi_msg_cal_crc(&header);
		header.fcs = crc;
		spi_msg_set_header((u8*)spi_big_trans.tx_buf, &header);
#if SPI_TTY_FORCE_FULL_TRANSACTION
		spi_big_trans.len = SPI_TRANSACTION_LEN;
#else
		spi_big_trans.len = c + SPI_MSG_HEADER_LEN;
#endif
		spi_big_trans.speed_hz = SPI_SPEED_HZ;
		spi_big_trans.bits_per_word = 32;
		spi_ipc_buf_dump("tx header: ", spi_big_trans.tx_buf, SPI_MSG_HEADER_LEN);
		spi_ipc_buf_dump_ascii("tx data: ", spi_big_trans.tx_buf + SPI_MSG_HEADER_LEN, (c>16?16:c));
		if (spi_tty_dev)
			spi_sync(spi_tty_dev->spi, &spi_big_msg);
		else
			pr_warning("%s: dropping data: no spi device "
				   "registered", __func__);

		if (spi_big_msg.actual_length == SPI_TRANSACTION_LEN) {
			tx_count++;
			tx_size += spi_big_trans.len - SPI_MSG_HEADER_LEN;
			spi_tty_handle_data(spi_tty, spi_big_trans.rx_buf, SPI_TRANSACTION_LEN);
		}else {
			pr_err("%s: spi data transfer failed\n", __func__);
		}

		// wake up writes wait on queue
		wake_up_interruptible(&spi_tty->write_wait);

		spin_lock_irqsave(&(spi_tty->port_lock), flags);
	}

	spin_unlock_irqrestore(&(spi_tty->port_lock), flags);
	mutex_unlock(&(spi_tty->work_lock));
	SPI_IPC_INFO("%s: done\n", __func__);
	tx_time += jiffies_to_msecs(jiffies - start_t);
}
/* Example #15 */
static int __must_check wl12xx_spi_raw_read(struct device *child, int addr,
					    void *buf, size_t len, bool fixed)
{
	struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent);
	struct wl1271 *wl = dev_get_drvdata(child);
	struct spi_transfer t[2];
	struct spi_message m;
	u32 *busy_buf;
	u32 *cmd;
	u32 chunk_len;

	while (len > 0) {
		chunk_len = min_t(size_t, WSPI_MAX_CHUNK_SIZE, len);

		cmd = &wl->buffer_cmd;
		busy_buf = wl->buffer_busyword;

		*cmd = 0;
		*cmd |= WSPI_CMD_READ;
		*cmd |= (chunk_len << WSPI_CMD_BYTE_LENGTH_OFFSET) &
			WSPI_CMD_BYTE_LENGTH;
		*cmd |= addr & WSPI_CMD_BYTE_ADDR;

		if (fixed)
			*cmd |= WSPI_CMD_FIXED;

		spi_message_init(&m);
		memset(t, 0, sizeof(t));

		t[0].tx_buf = cmd;
		t[0].len = 4;
		t[0].cs_change = true;
		spi_message_add_tail(&t[0], &m);

		/* Busy and non busy words read */
		t[1].rx_buf = busy_buf;
		t[1].len = WL1271_BUSY_WORD_LEN;
		t[1].cs_change = true;
		spi_message_add_tail(&t[1], &m);

		spi_sync(to_spi_device(glue->dev), &m);

		if (!(busy_buf[WL1271_BUSY_WORD_CNT - 1] & 0x1) &&
		    wl12xx_spi_read_busy(child)) {
			memset(buf, 0, chunk_len);
			return 0;
		}

		spi_message_init(&m);
		memset(t, 0, sizeof(t));

		t[0].rx_buf = buf;
		t[0].len = chunk_len;
		t[0].cs_change = true;
		spi_message_add_tail(&t[0], &m);

		spi_sync(to_spi_device(glue->dev), &m);

		if (!fixed)
			addr += chunk_len;
		buf += chunk_len;
		len -= chunk_len;
	}

	return 0;
}
/* Example #16 */
static irqreturn_t tsc2005_irq_thread(int irq, void *_ts)
{
	struct tsc2005 *ts = _ts;
	unsigned int pressure, pressure_limit, inside_rect;
	u32 x;
	u32 y;
	u32 z1;
	u32 z2;

	mutex_lock(&ts->mutex);

	if (unlikely(ts->disable_depth))
		goto out;

	/* read the coordinates */
	spi_sync(ts->spi, &ts->spi_read_msg);
	x = ts->spi_x.spi_rx;
	y = ts->spi_y.spi_rx;
	z1 = ts->spi_z1.spi_rx;
	z2 = ts->spi_z2.spi_rx;

	/* validate position */
	if (unlikely(x > MAX_12BIT || y > MAX_12BIT))
		goto out;

	/* skip coords if the pressure components are out of range */
	if (unlikely(z1 == 0 || z2 > MAX_12BIT || z1 >= z2))
		goto out;

	/* skip point if this is a pen down with the exact same values as
	 * the value before pen-up - that implies SPI fed us stale data
	 */
	if (!ts->pen_down &&
	ts->in_x == x &&
	ts->in_y == y &&
	ts->in_z1 == z1 &&
	ts->in_z2 == z2)
		goto out;

	/* At this point we are happy we have a valid and useful reading.
	* Remember it for later comparisons. We may now begin downsampling
	*/
	ts->in_x = x;
	ts->in_y = y;
	ts->in_z1 = z1;
	ts->in_z2 = z2;

	/* don't run average on the "pen down" event */
	if (ts->sample_sent) {
		ts->avg_x += x;
		ts->avg_y += y;
		ts->avg_z1 += z1;
		ts->avg_z2 += z2;

		if (++ts->sample_cnt < TS_SAMPLES)
			goto out;

		x = ts->avg_x / TS_SAMPLES;
		y = ts->avg_y / TS_SAMPLES;
		z1 = ts->avg_z1 / TS_SAMPLES;
		z2 = ts->avg_z2 / TS_SAMPLES;
	}
	ts->sample_cnt = 0;
	ts->avg_x = 0;
	ts->avg_y = 0;
	ts->avg_z1 = 0;
	ts->avg_z2 = 0;

	/* compute touch pressure resistance using equation #1 */
	pressure = x * (z2 - z1) / z1;
	pressure = pressure * ts->x_plate_ohm / 4096;
	pressure_limit = ts->sample_sent ? ts->p_max : ts->ts_pressure;
	if (unlikely(pressure > pressure_limit)) {
		/* printk(KERN_ERR "skipping ts event, pressure(%u) > pressure_limit(%u)\n", pressure, pressure_limit); */
		goto out;
	}
	/* Discard the event if it still is within the previous rect -
	 * unless the pressure is clearly harder, but then use previous
	 * x,y position. If any coordinate deviates enough, fudging
	 * of all three will still take place in the input layer.
	 */
	inside_rect = (ts->sample_sent &&
		x > (int)ts->out_x - ts->fudge_x &&
		x < (int)ts->out_x + ts->fudge_x &&
		y > (int)ts->out_y - ts->fudge_y &&
		y < (int)ts->out_y + ts->fudge_y);
	if (inside_rect)
		x = ts->out_x, y = ts->out_y;

	if (!inside_rect || pressure < (ts->out_p - ts->fudge_p)) {
		tsc2005_update_pen_state(ts, x, y, pressure);
		ts->sample_sent = 1;
		ts->out_x = x;
		ts->out_y = y;
		ts->out_p = pressure;
	}
	if (ts->sample_sent) {
		/* set the penup timer */
		mod_timer(&ts->penup_timer,
			  jiffies + msecs_to_jiffies(TSC2005_PENUP_TIME_MS));

		if (ts->esd_timeout) {
			/* update the watchdog timer */
			mod_timer(&ts->esd_timer,
				  round_jiffies(jiffies + msecs_to_jiffies(ts->esd_timeout)));
		}
	}
out:
	mutex_unlock(&ts->mutex);
	return IRQ_HANDLED;
}
/* Example #17 */
static int r61505w_spi_write_driver(unsigned short addr, unsigned short data)
{
	u32 buf[1];
	struct spi_message msg;

	struct spi_transfer xfer = {
		.len	= 4,
		.tx_buf	= buf,
	};

	buf[0] = (addr << 16) | data;

	spi_message_init(&msg);
	spi_message_add_tail(&xfer, &msg);
	return spi_sync(g_spi, &msg);
}
static void r61505w_spi_read(unsigned short address, unsigned short *rxbuf)
{
	u32 buf[1];
	int			status;
	struct spi_message msg;

	struct spi_transfer xfer = {
		.len	= 2,
		.rx_buf	= buf,
	};
	spi_message_init(&msg);
	spi_message_add_tail(&xfer, &msg);

	r61505w_spi_write_driver(0x70, address);
	status = spi_sync(g_spi, &msg);	
	if (status == 0) {
		memcpy(rxbuf, buf, sizeof(buf));
		status = msg.status;
	}
	
}

static void r61505w_spi_write(unsigned short address, unsigned short command)
{
	if (address == COMMAND_ONLY)
	{
		r61505w_spi_write_driver(0x70, address);
		r61505w_spi_write_driver(0x70, command);
	}
	else if(address == 0x0022)
	{
		r61505w_spi_write_driver(0x70, address);
	}
	else
	{
		r61505w_spi_write_driver(0x70, address);
		r61505w_spi_write_driver(0x72, command);
	}
}

static void r61505w_panel_send_sequence(const unsigned short *wbuf)
{
	int i = 0;

	while ((wbuf[i] & DEFMASK) != ENDDEF) {
		if ((wbuf[i] & DEFMASK) != SLEEPMSEC)
			r61505w_spi_write(wbuf[i], wbuf[i+1]);
		else
			msleep(wbuf[i+1]);
			//mdelay(wbuf[i+1]);
		i += 2;
	}
}
/* Example #18 */
/* This may want to move to mili g to allow for non integer ranges */
static ssize_t kxsd9_read_scale(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	int ret;
	ssize_t len = 0;
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct kxsd9_state *st = iio_priv(indio_dev);
	struct spi_transfer xfer = {
		.bits_per_word = 8,
		.len = 2,
		.cs_change = 1,
		.tx_buf = st->tx,
		.rx_buf = st->rx,
	};
	struct spi_message msg;

	mutex_lock(&st->buf_lock);
	st->tx[0] = KXSD9_READ(KXSD9_REG_CTRL_C);
	st->tx[1] = 0;
	spi_message_init(&msg);
	spi_message_add_tail(&xfer, &msg);
	ret = spi_sync(st->us, &msg);
	if (ret)
		goto error_ret;

	switch (st->rx[1] & KXSD9_FS_MASK) {
	case KXSD9_FS_8:
		len += sprintf(buf, "%s\n", KXSD9_SCALE_8G);
		break;
	case KXSD9_FS_6:
		len += sprintf(buf, "%s\n", KXSD9_SCALE_6G);
		break;
	case KXSD9_FS_4:
		len += sprintf(buf, "%s\n", KXSD9_SCALE_4G);
		break;
	case KXSD9_FS_2:
		len += sprintf(buf, "%s\n", KXSD9_SCALE_2G);
		break;
	}

error_ret:
	mutex_unlock(&st->buf_lock);

	return ret ? ret : len;
}
static ssize_t kxsd9_write_scale(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t len)
{

	struct spi_message msg;
	int ret;
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct kxsd9_state *st = iio_priv(indio_dev);
	u8 val;
	struct spi_transfer xfers[] = {
		{
			.bits_per_word = 8,
			.len = 2,
			.cs_change = 1,
			.tx_buf = st->tx,
			.rx_buf = st->rx,
		}, {
			.bits_per_word = 8,
			.len = 2,
			.cs_change = 1,
			.tx_buf = st->tx,
		},
	};
/* Example #19 */
/**
 * cros_ec_spi_receive_response - Receive a response from the EC.
 *
 * This function has two phases: reading the preamble bytes (since if we read
 * data from the EC before it is ready to send, we just get preamble) and
 * reading the actual message.
 *
 * The received data is placed into ec_dev->din.
 *
 * @ec_dev: ChromeOS EC device
 * @need_len: Number of message bytes we need to read
 */
static int cros_ec_spi_receive_response(struct cros_ec_device *ec_dev,
					int need_len)
{
	struct cros_ec_spi *ec_spi = ec_dev->priv;
	struct spi_transfer trans;
	struct spi_message msg;
	u8 *ptr, *end;
	int ret;
	unsigned long deadline;
	int todo;

	/* Receive data until we see the header byte */
	deadline = jiffies + msecs_to_jiffies(EC_MSG_DEADLINE_MS);
	while (true) {
		unsigned long start_jiffies = jiffies;

		memset(&trans, 0, sizeof(trans));
		trans.cs_change = 1;
		trans.rx_buf = ptr = ec_dev->din;
		trans.len = EC_MSG_PREAMBLE_COUNT;

		spi_message_init(&msg);
		spi_message_add_tail(&trans, &msg);
		ret = spi_sync(ec_spi->spi, &msg);
		if (ret < 0) {
			dev_err(ec_dev->dev, "spi transfer failed: %d\n", ret);
			return ret;
		}

		for (end = ptr + EC_MSG_PREAMBLE_COUNT; ptr != end; ptr++) {
			if (*ptr == EC_MSG_HEADER) {
				dev_dbg(ec_dev->dev, "msg found at %zd\n",
					ptr - ec_dev->din);
				break;
			}
		}
		if (ptr != end)
			break;

		/*
		 * Use the time at the start of the loop as a timeout.  This
		 * gives us one last shot at getting the transfer and is useful
		 * in case we got context switched out for a while.
		 */
		if (time_after(start_jiffies, deadline)) {
			dev_warn(ec_dev->dev, "EC failed to respond in time\n");
			return -ETIMEDOUT;
		}
	}

	/*
	 * ptr now points to the header byte. Copy any valid data to the
	 * start of our buffer
	 */
	todo = end - ++ptr;
	BUG_ON(todo < 0 || todo > ec_dev->din_size);
	todo = min(todo, need_len);
	memmove(ec_dev->din, ptr, todo);
	ptr = ec_dev->din + todo;
	dev_dbg(ec_dev->dev, "need %d, got %d bytes from preamble\n",
		 need_len, todo);
	need_len -= todo;

	/* Receive data until we have it all */
	while (need_len > 0) {
		/*
		 * We can't support transfers larger than the SPI FIFO size
		 * unless we have DMA. We don't have DMA on the ISP SPI ports
		 * for Exynos. We need a way of asking SPI driver for
		 * maximum-supported transfer size.
		 */
		todo = min(need_len, 256);
		dev_dbg(ec_dev->dev, "loop, todo=%d, need_len=%d, ptr=%zd\n",
			todo, need_len, ptr - ec_dev->din);

		memset(&trans, 0, sizeof(trans));
		trans.cs_change = 1;
		trans.rx_buf = ptr;
		trans.len = todo;
		spi_message_init(&msg);
		spi_message_add_tail(&trans, &msg);

		/* send command to EC and read answer */
		BUG_ON((u8 *)trans.rx_buf - ec_dev->din + todo >
				ec_dev->din_size);
		ret = spi_sync(ec_spi->spi, &msg);
		if (ret < 0) {
			dev_err(ec_dev->dev, "spi transfer failed: %d\n", ret);
			return ret;
		}

		debug_packet(ec_dev->dev, "interim", ptr, todo);
		ptr += todo;
		need_len -= todo;
	}

	dev_dbg(ec_dev->dev, "loop done, ptr=%zd\n", ptr - ec_dev->din);

	return 0;
}
Example #20
/*
 * Note about handling of error return of mcp251x_spi_trans: accessing
 * registers via SPI is not really different conceptually than using
 * normal I/O assembler instructions, although it's much more
 * complicated from a practical POV. So it's not advisable to always
 * check the return value of this function. Imagine that every
 * read{b,l}, write{b,l} and friends would be bracketed in "if ( < 0)
 * error();", it would be a great mess (well there are some situation
 * when exception handling C++ like could be useful after all). So we
 * just check that transfers are OK at the beginning of our
 * conversation with the chip and to avoid doing really nasty things
 * (like injecting bogus packets in the network stack).
 */
static int mcp251x_spi_trans(struct spi_device *spi, int len)
{
	struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
	struct spi_transfer t = {
		.tx_buf = priv->spi_tx_buf,
		.rx_buf = priv->spi_rx_buf,
		.len = len,
		.cs_change = 0,
	};
	struct spi_message m;
	int ret;

	spi_message_init(&m);

	if (mcp251x_enable_dma) {
		t.tx_dma = priv->spi_tx_dma;
		t.rx_dma = priv->spi_rx_dma;
		m.is_dma_mapped = 1;
	}

	spi_message_add_tail(&t, &m);

	ret = spi_sync(spi, &m);
	if (ret)
		dev_err(&spi->dev, "spi transfer failed: ret = %d\n", ret);
	return ret;
}

static u8 mcp251x_read_reg(struct spi_device *spi, uint8_t reg)
{
	struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
	u8 val = 0;

	priv->spi_tx_buf[0] = INSTRUCTION_READ;
	priv->spi_tx_buf[1] = reg;

	mcp251x_spi_trans(spi, 3);
	val = priv->spi_rx_buf[2];

	return val;
}

static void mcp251x_write_reg(struct spi_device *spi, u8 reg, uint8_t val)
{
	struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);

	priv->spi_tx_buf[0] = INSTRUCTION_WRITE;
	priv->spi_tx_buf[1] = reg;
	priv->spi_tx_buf[2] = val;

	mcp251x_spi_trans(spi, 3);
}
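
The comment above argues for verifying the SPI link once, at the start of the conversation, instead of checking every register access. Below is a hedged sketch of such a one-time check, built only from the read/write helpers shown here; the register address and test pattern are assumptions, not part of the original driver.

/* Sketch: write a pattern to an assumed plain read/write register and
 * read it back; failure suggests the SPI link or the chip is not
 * responding. Register 0x00 and the 0x55 pattern are example values.
 */
static int mcp251x_spi_sanity_check(struct spi_device *spi)
{
	const u8 reg = 0x00;	/* assumed: RXF0SIDH, ordinary storage register */
	const u8 pattern = 0x55;

	mcp251x_write_reg(spi, reg, pattern);
	if (mcp251x_read_reg(spi, reg) != pattern)
		return -EIO;

	return 0;
}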

static void mcp251x_write_bits(struct spi_device *spi, u8 reg,
			       u8 mask, uint8_t val)
{
	struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);

	priv->spi_tx_buf[0] = INSTRUCTION_BIT_MODIFY;
	priv->spi_tx_buf[1] = reg;
	priv->spi_tx_buf[2] = mask;
	priv->spi_tx_buf[3] = val;

	mcp251x_spi_trans(spi, 4);
}

static void mcp251x_hw_tx_frame(struct spi_device *spi, u8 *buf,
				int len, int tx_buf_idx)
{
	struct mcp251x_platform_data *pdata = spi->dev.platform_data;
	struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);

	if (pdata->model == CAN_MCP251X_MCP2510) {
		int i;

		for (i = 1; i < TXBDAT_OFF + len; i++)
			mcp251x_write_reg(spi, TXBCTRL(tx_buf_idx) + i,
					  buf[i]);
	} else {
		memcpy(priv->spi_tx_buf, buf, TXBDAT_OFF + len);
		mcp251x_spi_trans(spi, TXBDAT_OFF + len);
	}
}

static void mcp251x_hw_tx(struct spi_device *spi, struct can_frame *frame,
			  int tx_buf_idx)
{
	u32 sid, eid, exide, rtr;
	u8 buf[SPI_TRANSFER_BUF_LEN];

	exide = (frame->can_id & CAN_EFF_FLAG) ? 1 : 0; /* Extended ID Enable */
	if (exide)
		sid = (frame->can_id & CAN_EFF_MASK) >> 18;
	else
static int stm32fwu_spi_wait_for_ack(struct spi_device *spi,
				     struct stm32fwu_spi_cmd *cmd, u8 dummy_bytes)
{
	static int check_spi_wait_cnt = 1;

	struct spi_message m;
	char tx_buf = 0x0;
	char rx_buf = 0x0;
	struct spi_transfer	t = {
		.tx_buf		= &tx_buf,
		.rx_buf		= &rx_buf,
		.len		= 1,
		.bits_per_word = 8,
	};
	int i = 0;
	int ret;

#if SSP_STM_DEBUG
	pr_info("[SSP] %s: dummy byte = 0x%02hhx\n",
		__func__, dummy_bytes);
#endif
	while (i < cmd->timeout) {
		tx_buf = dummy_bytes;
		spi_message_init(&m);
		spi_message_add_tail(&t, &m);

		ret = spi_sync(spi, &m);
		if (ret < 0) {
			dev_err(&spi->dev, "%s: spi error %d\n", __func__, ret);
			return ret;
		} else if ((rx_buf == BL_ACK) || (rx_buf == BL_ACK2)) {
			cmd->ack_loops = i;
			return BL_ACK;
		} else if (rx_buf == BL_NACK) {
			return (int)rx_buf;
		}
		if (check_spi_wait_cnt % 20 == 0)
			msleep(1);
		else
			usleep_range(1000, 1100);
		i++;
		check_spi_wait_cnt++;
	}
#if SSP_STM_DEBUG
	dev_err(&spi->dev, "%s: Timeout after %d loops\n", __func__, cmd->timeout);
#endif
	return -EIO;
}

static int stm32fwu_spi_send_cmd(struct spi_device *spi,
				 struct stm32fwu_spi_cmd *cmd)
{
	u8 tx_buf[3] = {0,};
	u8 rx_buf[3] = {0,};
	u8 dummy_byte = 0;
	struct spi_message m;
	int ret;
#if BYTETOBYTE_USED
	int i;
	struct spi_transfer t[STM_MAX_BUFFER_SIZE];
	memset(t, 0, STM_MAX_BUFFER_SIZE * sizeof(struct spi_transfer));
#else
	struct spi_transfer	t = {
		.tx_buf		= tx_buf,
		.rx_buf		= rx_buf,
		.len		= 3,
		.bits_per_word = 8,
	};
#endif
	spi_message_init(&m);
	tx_buf[0] = BL_SPI_SOF;
	tx_buf[1] = cmd->cmd;
	tx_buf[2] = cmd->xor_cmd;

#if BYTETOBYTE_USED
	for (i = 0; i < 3; i++) {
		t[i].tx_buf = &tx_buf[i];
		t[i].rx_buf = &rx_buf[i];
		t[i].len = 1;
		t[i].bits_per_word = 8;
		t[i].delay_usecs = BYTE_DELAY_WRITE;
		spi_message_add_tail(&t[i], &m);
	}
#else
	spi_message_add_tail(&t, &m);
#endif

	ret = spi_sync(spi, &m);
	if (ret < 0) {
		dev_err(&spi->dev, "%s: spi error %d\n", __func__, ret);
		return ret;
	}

	dummy_byte = cmd->ack_pad;

	/* check for ack/nack and loop until found */
	ret = stm32fwu_spi_wait_for_ack(spi, cmd, dummy_byte);
	cmd->status = ret;

	if (ret != BL_ACK) {
		pr_err("[SSP] %s: Got NAK or Error %d\n", __func__, ret);
		return ret;
	}

	return ret;
}
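
A usage sketch for the command sender above. The STM32 SPI bootloader protocol pairs each command byte with its XOR complement, which is what the cmd/xor_cmd fields carry; the opcode, timeout, and dummy byte below are assumed example values, not taken from the original source.

static int stm32fwu_example_get_version(struct spi_device *spi)
{
	struct stm32fwu_spi_cmd cmd = {
		.cmd = 0x01,		/* assumed GET_VERSION opcode */
		.xor_cmd = 0xFE,	/* XOR complement of the opcode byte */
		.timeout = 100,		/* assumed number of ACK polls */
		.ack_pad = 0x00,	/* dummy byte clocked while waiting for ACK */
	};

	return stm32fwu_spi_send_cmd(spi, &cmd);
}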
#if STM_SHOULD_BE_IMPLEMENT
static int stm32fwu_spi_read(struct spi_device *spi,
	u8 *buffer, ssize_t len)
{
	int ret;
	int i;
	u8 tx_buf[STM_MAX_BUFFER_SIZE] = {0,};
	struct spi_message m;
	struct spi_transfer	t[STM_MAX_BUFFER_SIZE];

	memset(t, 0, STM_MAX_BUFFER_SIZE * sizeof(struct spi_transfer));

	spi_message_init(&m);

	for (i = 0; i < len; i++) {
		t[i].tx_buf = tx_buf;
		t[i].rx_buf = &buffer[i];
		t[i].len = 1;
		t[i].bits_per_word = 8;
		t[i].delay_usecs = BYTE_DELAY_READ;
		spi_message_add_tail(&t[i], &m);
	}

	ret = spi_sync(spi, &m);

	if (ret < 0) {
		pr_err("[SSP] Error in %d spi_read()\n", ret);
		return ret;
	}

	return len;
}
Example #22
static int ws2401_spi_write_byte(struct ws2401_dpi *lcd, int addr, int data)
{
    u16 buf;
    struct spi_message msg;

    struct spi_transfer xfer = {
        .len		= 2,
        .tx_buf		= &buf,
    };

    buf = (addr << 8) | data;

    spi_message_init(&msg);
    spi_message_add_tail(&xfer, &msg);

    return spi_sync(lcd->spi, &msg);
}

static int ws2401_spi_read_byte(struct ws2401_dpi *lcd, int command, u8 *data)
{
    u16 buf[2];
    u16 rbuf[2];
    int ret;
    struct spi_message msg;
    struct spi_transfer xfer = {
        .len		= 4,
        .tx_buf		= buf,
        .rx_buf		= rbuf,
    };

    buf[0] = command;
    buf[1] = 0x100;

    spi_message_init(&msg);
    spi_message_add_tail(&xfer, &msg);

    ret = spi_sync(lcd->spi, &msg);
    if (ret)
        return ret;

    *data = (rbuf[1] & 0x1FF) >> 1;

    return ret;
}


static int ws2401_write_dcs_sequence(struct ws2401_dpi *lcd, const u8 *p_seq)
{
    int ret = 0;
    int num_params;
    int param_count;

    mutex_lock(&lcd->lock);

    while ((p_seq[0] != DCS_CMD_SEQ_END) && !ret) {
        if (p_seq[0] == DCS_CMD_SEQ_DELAY_MS) {
            msleep(p_seq[1]);
            p_seq += 2;
        } else {
            ret = ws2401_spi_write_byte(lcd,
                                        SPI_COMMAND, p_seq[1]);

            num_params = p_seq[0] - 1;
            param_count = 0;

            while ((param_count < num_params) && !ret) {
                ret = ws2401_spi_write_byte(lcd,
                                            SPI_DATA, p_seq[param_count + 2]);
                param_count++;
            }

            p_seq += p_seq[0] + 1;
        }
    }

    mutex_unlock(&lcd->lock);

    if (ret != 0)
        dev_err(&lcd->mdd->dev, "failed to send DCS sequence.\n");

    return ret;
}
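
An illustrative sequence table for the walker above. Each entry is a length byte (1 + parameter count), the DCS command, then its parameters, with DCS_CMD_SEQ_DELAY_MS for waits and DCS_CMD_SEQ_END as terminator, exactly as the loop expects. The 0x11/0x29 opcodes are the standard DCS exit-sleep and display-on commands, used here as assumed example values.

static const u8 ws2401_example_seq[] = {
	1, 0x11,			/* exit sleep mode, no parameters */
	DCS_CMD_SEQ_DELAY_MS, 120,	/* wait 120 ms for the panel to wake */
	1, 0x29,			/* display on, no parameters */
	DCS_CMD_SEQ_END
};

/* ws2401_write_dcs_sequence(lcd, ws2401_example_seq); */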
static inline int spi_xmit(struct spi_device *spi, const u8 *addr, const int len)
{
	int ret;
	struct spi_message msg;

	struct spi_transfer xfer = {
		.len = len,
		.tx_buf = addr,
		.bits_per_word = 8,
	};

	spi_message_init(&msg);
	spi_message_add_tail(&xfer, &msg);

	ret = spi_sync(spi, &msg);

	if (ret < 0)
		dev_err(&spi->dev, "error %d\n", ret);

	return ret;
}

static inline int spi_xmit_rx(struct spi_device *spi, u8 *in_buf, size_t len)
{
	int ret;
	u8 read_out_buf[2];

	struct spi_message msg;
	struct spi_transfer xfer = {
		.tx_buf = read_out_buf,
		.rx_buf = in_buf,
		.len	= len,
		.cs_change = 0,
	};

	spi_message_init(&msg);

	spi_message_add_tail(&xfer, &msg);

	ret = spi_sync(spi, &msg);

	if (ret < 0)
		dev_err(&spi->dev, "%s - error %d\n", __func__, ret);

	return ret;
}

/* TDB: Not tested */
int m10mo_spi_read(struct spi_device *spi, u8 *buf, size_t len,
		   const int rxSize)
{
	int k;
	int ret = 0;
	u8 temp_buf[4] = {0};
	u32 count = len / rxSize;
	u32 extra = len % rxSize;

	for (k = 0; k < count; k++) {
		ret = spi_xmit_rx(spi, &buf[rxSize * k], rxSize);
		if (ret < 0) {
			dev_err(&spi->dev, "%s - error %d\n", __func__, ret);
			return -EINVAL;
		}
	}

	if (extra != 0) {
		ret = spi_xmit_rx(spi, &buf[rxSize * k], extra);
		if (ret < 0) {
			dev_err(&spi->dev, "%s - error %d\n", __func__, ret);
			return -EINVAL;
		}
	}

	for (k = 0; k < len - 3; k += 4) {
		memcpy(temp_buf, (char *)&buf[k], sizeof(temp_buf));
		buf[k] = temp_buf[3];
		buf[k+1] = temp_buf[2];
		buf[k+2] = temp_buf[1];
		buf[k+3] = temp_buf[0];
	}
	return 0;
}
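
The final loop above reverses each 32-bit group of the received buffer in place. For clarity, here is an equivalent sketch using the kernel's swab32() helper; the helper and headers are standard kernel API, but the function itself is illustrative and not part of the original driver.

#include <linux/string.h>
#include <linux/swab.h>

static void m10mo_swap_words(u8 *buf, size_t len)
{
	size_t k;
	u32 word;

	for (k = 0; k + 3 < len; k += 4) {
		memcpy(&word, &buf[k], sizeof(word));	/* avoid unaligned access */
		word = swab32(word);			/* reverse the four bytes */
		memcpy(&buf[k], &word, sizeof(word));
	}
}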
int tdmb_fc8080_spi_write_read(uint8* tx_data, int tx_length, uint8 *rx_data, int rx_length)
{
    int rc;

    struct spi_transfer    t = {
            .tx_buf        = tx_data,
            .rx_buf        = rx_data,
            .len        = tx_length+rx_length,
        };

    struct spi_message    m;

    if (fc8080_ctrl_info.spi_ptr == NULL)
    {
        printk("tdmb_fc8080_spi_write_read error txdata=0x%x, length=%d\n", (unsigned int)tx_data, tx_length+rx_length);
        return FALSE;
    }

    mutex_lock(&fc8080_ctrl_info.mutex);

    spi_message_init(&m);
    spi_message_add_tail(&t, &m);
    rc = spi_sync(fc8080_ctrl_info.spi_ptr, &m);

    mutex_unlock(&fc8080_ctrl_info.mutex);

    if ( rc < 0 )
    {
        printk("tdmb_fc8080_spi_write_read result(%d), actual_len=%d\n", rc, m.actual_length);
        return FALSE;
    }

    return TRUE;
}

#ifdef FEATURE_DMB_USE_WORKQUEUE
static irqreturn_t broadcast_tdmb_spi_isr(int irq, void *handle)
{
    struct tdmb_fc8080_ctrl_blk* fc8080_info_p;

    fc8080_info_p = (struct tdmb_fc8080_ctrl_blk *)handle;
    if ( fc8080_info_p && fc8080_info_p->TdmbPowerOnState )
    {
        unsigned long flag;
        if (fc8080_info_p->spi_irq_status)
        {
            printk("######### spi read function is so late skip #########\n");
            return IRQ_HANDLED;
        }
//        printk("***** broadcast_tdmb_spi_isr coming *******\n");
        spin_lock_irqsave(&fc8080_info_p->spin_lock, flag);
        queue_work(fc8080_info_p->spi_wq, &fc8080_info_p->spi_work);
        spin_unlock_irqrestore(&fc8080_info_p->spin_lock, flag);
    }
    else
    {
        printk("broadcast_tdmb_spi_isr is called, but device is off state\n");
    }

    return IRQ_HANDLED;
}

static void broacast_tdmb_spi_work(struct work_struct *tdmb_work)
{
    struct tdmb_fc8080_ctrl_blk *pTdmbWorkData;

    pTdmbWorkData = container_of(tdmb_work, struct tdmb_fc8080_ctrl_blk, spi_work);
    if ( pTdmbWorkData )
    {
        tunerbb_drv_fc8080_isr_control(0);
        pTdmbWorkData->spi_irq_status = TRUE;
        broadcast_drv_if_isr();
        pTdmbWorkData->spi_irq_status = FALSE;
        tunerbb_drv_fc8080_isr_control(1);
    }
    else
    {
        printk("~~~~~~~broadcast_tdmb_spi_work call but pTdmbworkData is NULL ~~~~~~~\n");
    }
}
#else
static irqreturn_t broadcast_tdmb_spi_event_handler(int irq, void *handle)
{
    struct tdmb_fc8080_ctrl_blk* fc8080_info_p;

    fc8080_info_p = (struct tdmb_fc8080_ctrl_blk *)handle;
    if ( fc8080_info_p && fc8080_info_p->TdmbPowerOnState )
    {
        if (fc8080_info_p->spi_irq_status)
        {
            printk("######### spi read function is so late skip ignore #########\n");
            return IRQ_HANDLED;
        }

        tunerbb_drv_fc8080_isr_control(0);
        fc8080_info_p->spi_irq_status = TRUE;
        broadcast_drv_if_isr();
        fc8080_info_p->spi_irq_status = FALSE;
        tunerbb_drv_fc8080_isr_control(1);
    }
    else
    {
        printk("broadcast_tdmb_spi_isr is called, but device is off state\n");
    }

    return IRQ_HANDLED;
}
#endif

static int tdmb_configure_gpios(void)
{
    int rc = OK;
    int err_count = 0;

    fc8080_ctrl_info.dmb_en = of_get_named_gpio(fc8080_ctrl_info.pdev->dev.of_node,"tdmb-fc8080,en-gpio",0);

    rc = gpio_request(fc8080_ctrl_info.dmb_en, "DMB_EN");
    if (rc < 0) {
        err_count++;
        printk("%s:Failed GPIO DMB_EN request!!!\n",__func__);
    }

    fc8080_ctrl_info.dmb_irq = of_get_named_gpio(fc8080_ctrl_info.pdev->dev.of_node,"tdmb-fc8080,irq-gpio",0);

    rc = gpio_request(fc8080_ctrl_info.dmb_irq, "DMB_INT_N");
    if (rc < 0) {
        err_count++;
        printk("%s:Failed GPIO DMB_INT_N request!!!\n",__func__);
    }

    gpio_direction_output(fc8080_ctrl_info.dmb_en, 0);
    gpio_direction_input(fc8080_ctrl_info.dmb_irq);

    if(err_count > 0) rc = -EINVAL;

    return rc;
}
static int stm32fwu_spi_wait_for_ack(struct spi_device *spi,
				struct stm32fwu_spi_cmd *cmd, u8 dummy_bytes)
{
	static int check_spi_wait_cnt = 1;

	struct spi_message m;
	char tx_buf = 0x0;
	char rx_buf = 0x0;
	struct spi_transfer	t = {
		.tx_buf		= &tx_buf,
		.rx_buf		= &rx_buf,
		.len		= 1,
		.bits_per_word = 8,
	};
	int i = 0;
	int ret;
	dummy_bytes = BL_DUMMY;
#if SSP_STM_DEBUG
	ssp_infof("dummy byte = 0x%02hhx", dummy_bytes);
#endif
	while (i < cmd->timeout) {
		tx_buf = dummy_bytes;
		spi_message_init(&m);
		spi_message_add_tail(&t, &m);

		ret = spi_sync(spi, &m);

		if (ret < 0) {
			dev_err(&spi->dev, "%s: spi error %d\n", __func__, ret);
			return ret;
		} else if ((rx_buf == BL_ACK) || (rx_buf == BL_NACK)) {
			/* ACK cmd set */
			stm32fwu_spi_send_ack(spi, BL_ACK);
			return (int)rx_buf;
		} else {
			/* Cross cmd set */
			tx_buf = rx_buf;
		}
		usleep_range(1000, 1100);
		i++;
		check_spi_wait_cnt++;
	}
#if SSP_STM_DEBUG
	dev_err(&spi->dev, "%s: Timeout after %d loops\n",
			__func__, cmd->timeout);
#endif
	return -EIO;
}

static int stm32fwu_spi_send_cmd(struct spi_device *spi,
				 struct stm32fwu_spi_cmd *cmd)
{
	u8 tx_buf[3] = {0,};
	u8 rx_buf[3] = {0,};
	u8 dummy_byte = 0;
	struct spi_message m;
	int ret;
#if BYTETOBYTE_USED
	int i;
	struct spi_transfer t[STM_MAX_BUFFER_SIZE];
	memset(t, 0, STM_MAX_BUFFER_SIZE * sizeof(struct spi_transfer));
#else
	struct spi_transfer	t = {
		.tx_buf		= tx_buf,
		.rx_buf		= rx_buf,
		.len		= 3,
		.bits_per_word = 8,
	};
#endif
	ssp_dbgf();

	spi_message_init(&m);
	tx_buf[0] = BL_SPI_SOF;
	tx_buf[1] = cmd->cmd;
	tx_buf[2] = cmd->xor_cmd;

#if BYTETOBYTE_USED
	for (i = 0; i < 3; i++) {
		t[i].tx_buf = &tx_buf[i];
		t[i].rx_buf = &rx_buf[i];
		t[i].len = 1;
		t[i].bits_per_word = 8;
		t[i].delay_usecs = BYTE_DELAY_WRITE;
		spi_message_add_tail(&t[i], &m);
	}
#else
	spi_message_add_tail(&t, &m);
#endif

	ret = spi_sync(spi, &m);
	if (ret < 0) {
		dev_err(&spi->dev, "%s: spi error %d\n", __func__, ret);
		return ret;
	}

	dummy_byte = cmd->ack_pad;

	/* check for ack/nack and loop until found */
	ret = stm32fwu_spi_wait_for_ack(spi, cmd, dummy_byte);
	cmd->status = ret;

	if (ret != BL_ACK) {
		ssp_errf("Got NAK or Error %d", ret);
		return ret;
	}

	return ret;
}

static int stm32fwu_spi_write(struct spi_device *spi,
	const u8 *buffer, ssize_t len)
{
	int ret;
	u8 rx_buf[STM_MAX_BUFFER_SIZE] = {0,};
	struct spi_message m;
#if BYTETOBYTE_USED
	int i;
	struct spi_transfer t[STM_MAX_BUFFER_SIZE];
	memset(t, 0, STM_MAX_BUFFER_SIZE * sizeof(struct spi_transfer));
#else
	struct spi_transfer	t = {
		.tx_buf		= buffer,
		.rx_buf		= rx_buf,
		.len		= (unsigned int)len,
		.bits_per_word = 8,
	};
#endif
	spi_message_init(&m);
#if BYTETOBYTE_USED
	for (i = 0; i < len; i++) {
		t[i].tx_buf = &buffer[i];
		t[i].rx_buf = &rx_buf[i];
		t[i].len = 1;
		t[i].bits_per_word = 8;
		t[i].delay_usecs = BYTE_DELAY_WRITE;
		spi_message_add_tail(&t[i], &m);
	}
#else
	spi_message_add_tail(&t, &m);
#endif
	ret = spi_sync(spi, &m);

	if (ret < 0) {
		ssp_err("Error in %d spi_write()", ret);
		return ret;
	}

	return len;
}

static int send_addr(struct spi_device *spi, u32 fw_addr, int send_short)
{
	int res;
	int i = send_short;
	int len = SEND_ADDR_LEN - send_short;
	u8 header[SEND_ADDR_LEN];
	struct stm32fwu_spi_cmd dummy_cmd;
	dummy_cmd.timeout = DEF_ACKROOF_NUMBER;
	ssp_dbgf();

	header[0] = (u8)((fw_addr >> 24) & 0xFF);
	header[1] = (u8)((fw_addr >> 16) & 0xFF);
	header[2] = (u8)((fw_addr >> 8) & 0xFF);
	header[3] = (u8)(fw_addr & 0xFF);
	header[4] = header[0] ^ header[1] ^ header[2] ^ header[3];

	res = stm32fwu_spi_write(spi, &header[i], len);

	if (res < len) {
		ssp_err("Error in sending address. Res %d", res);
		return (res > 0) ? -EIO : res;
	}

	res = stm32fwu_spi_wait_for_ack(spi, &dummy_cmd, BL_ACK);
	if (res != BL_ACK) {
		ssp_err("send_addr(): rcv_ack returned 0x%x", res);
		return res;
	}
	return 0;
}
Example #26
/*
 * Erase pages of flash.
 */
static int dataflash_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct dataflash	*priv = mtd->priv;
	struct spi_device	*spi = priv->spi;
	struct spi_transfer	x = { .tx_dma = 0, };
	struct spi_message	msg;
	unsigned		blocksize = priv->page_size << 3;
	uint8_t			*command;
	uint32_t		rem;

	pr_debug("%s: erase addr=0x%llx len 0x%llx\n",
	      dev_name(&spi->dev), (long long)instr->addr,
	      (long long)instr->len);

	/* Sanity checks */
	if (instr->addr + instr->len > mtd->size)
		return -EINVAL;
	div_u64_rem(instr->len, priv->page_size, &rem);
	if (rem)
		return -EINVAL;
	div_u64_rem(instr->addr, priv->page_size, &rem);
	if (rem)
		return -EINVAL;

	spi_message_init(&msg);

	x.tx_buf = command = priv->command;
	x.len = 4;
	spi_message_add_tail(&x, &msg);

	mutex_lock(&priv->lock);
	while (instr->len > 0) {
		unsigned int	pageaddr;
		int		status;
		int		do_block;

		/* Calculate flash page address; use block erase (for speed) if
		 * we're at a block boundary and need to erase the whole block.
		 */
		pageaddr = div_u64(instr->addr, priv->page_size);
		do_block = (pageaddr & 0x7) == 0 && instr->len >= blocksize;
		pageaddr = pageaddr << priv->page_offset;

		command[0] = do_block ? OP_ERASE_BLOCK : OP_ERASE_PAGE;
		command[1] = (uint8_t)(pageaddr >> 16);
		command[2] = (uint8_t)(pageaddr >> 8);
		command[3] = 0;

		pr_debug("ERASE %s: (%x) %x %x %x [%i]\n",
			do_block ? "block" : "page",
			command[0], command[1], command[2], command[3],
			pageaddr);

		status = spi_sync(spi, &msg);
		(void) dataflash_waitready(spi);

		if (status < 0) {
			printk(KERN_ERR "%s: erase %x, err %d\n",
				dev_name(&spi->dev), pageaddr, status);
			/* REVISIT:  can retry instr->retries times; or
			 * giveup and instr->fail_addr = instr->addr;
			 */
			continue;
		}

		if (do_block) {
			instr->addr += blocksize;
			instr->len -= blocksize;
		} else {
			instr->addr += priv->page_size;
			instr->len -= priv->page_size;
		}
	}
	mutex_unlock(&priv->lock);

	/* Inform MTD subsystem that erase is complete */
	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}

/*
 * Read from the DataFlash device.
 *   from   : Start offset in flash device
 *   len    : Amount to read
 *   retlen : About of data actually read
 *   buf    : Buffer containing the data
 */
static int dataflash_read(struct mtd_info *mtd, loff_t from, size_t len,
			       size_t *retlen, u_char *buf)
{
	struct dataflash	*priv = mtd->priv;
	struct spi_transfer	x[2] = { { .tx_dma = 0, }, };
	struct spi_message	msg;
	unsigned int		addr;
	uint8_t			*command;
	int			status;

	pr_debug("%s: read 0x%x..0x%x\n", dev_name(&priv->spi->dev),
			(unsigned)from, (unsigned)(from + len));

	*retlen = 0;

	/* Sanity checks */
	if (!len)
		return 0;
	if (from + len > mtd->size)
		return -EINVAL;

	/* Calculate flash page/byte address */
	addr = (((unsigned)from / priv->page_size) << priv->page_offset)
		+ ((unsigned)from % priv->page_size);

	command = priv->command;

	pr_debug("READ: (%x) %x %x %x\n",
		command[0], command[1], command[2], command[3]);

	spi_message_init(&msg);

	x[0].tx_buf = command;
	x[0].len = 8;
	spi_message_add_tail(&x[0], &msg);

	x[1].rx_buf = buf;
	x[1].len = len;
	spi_message_add_tail(&x[1], &msg);

	mutex_lock(&priv->lock);

	/* Continuous read, max clock = f(car) which may be less than
	 * the peak rate available.  Some chips support commands with
	 * fewer "don't care" bytes.  Both buffers stay unchanged.
	 */
	command[0] = OP_READ_CONTINUOUS;
	command[1] = (uint8_t)(addr >> 16);
	command[2] = (uint8_t)(addr >> 8);
	command[3] = (uint8_t)(addr >> 0);
	/* plus 4 "don't care" bytes */

	status = spi_sync(priv->spi, &msg);
	mutex_unlock(&priv->lock);

	if (status >= 0) {
		*retlen = msg.actual_length - 8;
		status = 0;
	} else
		pr_debug("%s: read %x..%x --> %d\n",
			dev_name(&priv->spi->dev),
			(unsigned)from, (unsigned)(from + len),
			status);
	return status;
}
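
The address computation in the read and erase paths above maps a linear byte offset onto DataFlash's page/column addressing. A small worked sketch of that math, with assumed example geometry (528-byte pages addressed with page_offset = 10); the helper is illustrative, not part of the original driver.

static unsigned int dataflash_example_addr(unsigned int from,
			unsigned int page_size, unsigned int page_offset)
{
	/* e.g. from = 1000, page_size = 528, page_offset = 10:
	 * page 1, byte 472 -> (1 << 10) + 472 = 0x5D8
	 */
	return ((from / page_size) << page_offset) + (from % page_size);
}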
static int aic3254_config_ex(CODEC_SPI_CMD *cmds, int size)
{
	int i = 0;
	int ret = -EINVAL;
	struct spi_transfer *spi_t_cmds = NULL;
	struct spi_message m;
	unsigned char *buffer = NULL;
	unsigned char *ptr = NULL;

	if (!codec_dev) {
		pr_aud_err("%s: no spi device\n", __func__);
		return -EFAULT;
	}

	if (cmds == NULL || size == 0) {
		pr_aud_err("%s: invalid spi parameters\n", __func__);
		return -EINVAL;
	} else {
		/* pr_info("%s: size = %d", __func__, size); */
	}

	spi_t_cmds = (struct spi_transfer *) kmalloc(size*sizeof(struct spi_transfer), GFP_KERNEL);
	if (spi_t_cmds == NULL) {
		pr_aud_err("%s: kmalloc spi transfer struct fail\n", __func__);
		goto error;
	} else
		memset(spi_t_cmds, 0, size*sizeof(struct spi_transfer));

	buffer = (unsigned char *) kmalloc(size * 2 * sizeof(unsigned char), GFP_KERNEL);
	if (buffer == NULL) {
		pr_aud_err("%s: kmalloc buffer fail\n", __func__);
		goto error;
	} else
		memset(buffer, 0, size * 2 * sizeof(unsigned char));

	if (ctl_ops->spibus_enable)
		ctl_ops->spibus_enable(1);

	spi_message_init(&m);
	for (i = 0, ptr = buffer; i < size; i++, ptr += 2) {
		ptr[0] = cmds[i].reg << 1;
		ptr[1] = cmds[i].data;

		spi_t_cmds[i].tx_buf = ptr;
		spi_t_cmds[i].len = 2;
		spi_message_add_tail(&spi_t_cmds[i], &m);
	}
	codec_dev->bits_per_word = 16;
	ret = spi_sync(codec_dev, &m);

	if (ctl_ops->spibus_enable)
		ctl_ops->spibus_enable(0);

error:
	if (buffer)
		kfree(buffer);

	if (spi_t_cmds)
		kfree(spi_t_cmds);
	return ret;
}
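
A minimal usage sketch for the batch writer above. CODEC_SPI_CMD is assumed to carry at least the .reg and .data members used in the loop; the register values here are placeholders, not real AIC3254 settings.

static CODEC_SPI_CMD aic3254_example_cmds[] = {
	{ .reg = 0x00, .data = 0x00 },	/* assumed: select register page 0 */
	{ .reg = 0x01, .data = 0x01 },	/* assumed: software reset */
};

/* aic3254_config_ex(aic3254_example_cmds, ARRAY_SIZE(aic3254_example_cmds)); */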
Example #28
int ssp_spi_write_sync(struct spi_device *spi, const u8 *addr, const int len)
{
	int ret;
#if defined(CHANGE_ENDIAN)
	u8 buf[8] = {0};
#endif

	struct spi_message msg;

	struct spi_transfer xfer = {
		.len = len,
#if !defined(CHANGE_ENDIAN)
		.tx_buf = addr,
		/*QCTK ALRAN QUP_CONFIG 0-4 bits BIG ENDIAN*/
		.bits_per_word = 8,
#else
		.tx_buf = buf,
#endif
	};

#if defined(CHANGE_ENDIAN)
	buf[0] = addr[3];
	buf[1] = addr[2];
	buf[2] = addr[1];
	buf[3] = addr[0];
	buf[4] = addr[7];
	buf[5] = addr[6];
	buf[6] = addr[5];
	buf[7] = addr[4];
#endif

	spi_message_init(&msg);
	spi_message_add_tail(&xfer, &msg);

	ret = spi_sync(spi, &msg);

	if (ret < 0)
		ssp_log("error %d\n", ret);

	return ret;
}


int ssp_spi_read_sync(struct spi_device *spi, u8 *in_buf, size_t len)
{
	int ret;
	u8 read_out_buf[2];

	struct spi_message msg;
	struct spi_transfer xfer = {
		.tx_buf = read_out_buf,
		.rx_buf = in_buf,
		.len	= len,
		.cs_change = 0,
	};

	spi_message_init(&msg);

	spi_message_add_tail(&xfer, &msg);

	ret = spi_sync(spi, &msg);

	if (ret < 0)
		ssp_log("%s - error %d\n",
				__func__, ret);

	return ret;
}


int ssp_spi_sync(struct spi_device *spi, u8 *out_buf,
	size_t out_len, u8 *in_buf)
{
	int ret;

	struct spi_message msg;
	struct spi_transfer xfer = {
		.tx_buf = out_buf,
		.rx_buf = in_buf,
		.len	= out_len,
		.cs_change = 0,
	};

	spi_message_init(&msg);

	spi_message_add_tail(&xfer, &msg);

	ret = spi_sync(spi, &msg);
	ssp_log("%s - received %d\n", __func__, xfer.len);

	if (ret < 0)
		ssp_log("%s - error %d\n",
				__func__, ret);

	return ret;
}

unsigned int g_flag_spirecv;
void ssp_spi_async_complete(void *context)
{
	g_flag_spirecv = 1;
}

int ssp_spi_async(struct spi_device *spi,  u8 *out_buf,
	size_t out_len, u8 *in_buf)
{
	int ret;

	struct spi_message msg;
	struct spi_transfer xfer = {
		.tx_buf = out_buf,
		.rx_buf = in_buf,
		.len	= out_len,
		.cs_change = 0,
	};


	spi_message_init(&msg);

	spi_message_add_tail(&xfer, &msg);
	msg.complete = ssp_spi_async_complete;

	ret = spi_async(spi, &msg);

	if (ret < 0)
		ssp_log("%s - error %d\n",
				__func__, ret);

	return ret;
}
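
ssp_spi_async() above returns as soon as the transfer is queued and signals completion by setting g_flag_spirecv from the callback, so a caller has to clear the flag first and then wait for it. A hedged waiting sketch follows; the poll budget and sleep range are assumptions.

static int ssp_spi_async_wait_example(struct spi_device *spi,
	u8 *out_buf, size_t out_len, u8 *in_buf)
{
	int retry = 1000;	/* assumed poll budget (~100-200 ms total) */
	int ret;

	g_flag_spirecv = 0;
	ret = ssp_spi_async(spi, out_buf, out_len, in_buf);
	if (ret < 0)
		return ret;

	while (!g_flag_spirecv && --retry)
		usleep_range(100, 200);

	return retry ? 0 : -ETIMEDOUT;
}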




int ssp_spi_read(struct spi_device *spi, u8 *buf, size_t len, const int rxSize)
{
	int k;
	int ret = 0;
	u8 temp_buf[4] = {0};
	u32 count = len/rxSize;
	u32 extra = len%rxSize;

	for (k = 0; k < count; k++) {
		ret = ssp_spi_read_sync(spi, &buf[rxSize*k], rxSize);
		if (ret < 0) {
			ssp_log("%s - error %d\n",
				__func__, ret);
			return -EINVAL;
		}
	}

	if (extra != 0) {
		ret = ssp_spi_read_sync(spi, &buf[rxSize*k], extra);
		if (ret < 0) {
			ssp_log("%s - error %d\n",
				__func__, ret);
			return -EINVAL;
		}
	}

	for (k = 0; k < len-3; k += 4) {
		memcpy(temp_buf, (char *)&buf[k], sizeof(temp_buf));
		buf[k] = temp_buf[3];
		buf[k+1] = temp_buf[2];
		buf[k+2] = temp_buf[1];
		buf[k+3] = temp_buf[0];
	}

	return 0;
}
Example #29
static ssize_t ad9834_write(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t len)
{
	struct iio_dev *dev_info = dev_get_drvdata(dev);
	struct ad9834_state *st = iio_priv(dev_info);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int ret;
	long val;

	ret = strict_strtoul(buf, 10, &val);
	if (ret)
		goto error_ret;

	mutex_lock(&dev_info->mlock);
	switch (this_attr->address) {
	case AD9834_REG_FREQ0:
	case AD9834_REG_FREQ1:
		ret = ad9834_write_frequency(st, this_attr->address, val);
		break;
	case AD9834_REG_PHASE0:
	case AD9834_REG_PHASE1:
		ret = ad9834_write_phase(st, this_attr->address, val);
		break;
	case AD9834_OPBITEN:
		if (st->control & AD9834_MODE) {
			ret = -EINVAL;  /* AD9834 reserved mode */
			break;
		}

		if (val)
			st->control |= AD9834_OPBITEN;
		else
			st->control &= ~AD9834_OPBITEN;

		st->data = cpu_to_be16(AD9834_REG_CMD | st->control);
		ret = spi_sync(st->spi, &st->msg);
		break;
	case AD9834_PIN_SW:
		if (val)
			st->control |= AD9834_PIN_SW;
		else
			st->control &= ~AD9834_PIN_SW;
		st->data = cpu_to_be16(AD9834_REG_CMD | st->control);
		ret = spi_sync(st->spi, &st->msg);
		break;
	case AD9834_FSEL:
	case AD9834_PSEL:
		if (val == 0)
			st->control &= ~(this_attr->address | AD9834_PIN_SW);
		else if (val == 1) {
			st->control |= this_attr->address;
			st->control &= ~AD9834_PIN_SW;
		} else {
			ret = -EINVAL;
			break;
		}
		st->data = cpu_to_be16(AD9834_REG_CMD | st->control);
		ret = spi_sync(st->spi, &st->msg);
		break;
	case AD9834_RESET:
		if (val)
			st->control &= ~AD9834_RESET;
		else
			st->control |= AD9834_RESET;

		st->data = cpu_to_be16(AD9834_REG_CMD | st->control);
		ret = spi_sync(st->spi, &st->msg);
		break;
	default:
		ret = -ENODEV;
	}
	mutex_unlock(&dev_info->mlock);

error_ret:
	return ret ? ret : len;
}
unsigned char mtv222_spi_read(unsigned char page, unsigned char reg)
{
	int ret;
	u8 out_buf[4], in_buf[4];
	struct spi_message msg;
	struct spi_transfer msg_xfer = {
		.tx_buf = out_buf,
		.rx_buf = in_buf,
		.len = 4,
		.cs_change = 0,
		.delay_usecs = 0
	};

	out_buf[0] = 0x90 | page;
	out_buf[1] = reg;
	out_buf[2] = 1; /* Read size */

	spi_message_init(&msg);
	spi_message_add_tail(&msg_xfer, &msg);

	ret = spi_sync(mtv_spi, &msg);
	if (ret) {
		DMBERR("error: %d\n", ret);
		return 0xFF;
	}

#if 0
	DMBMSG("0x%02X 0x%02X 0x%02X 0x%02X\n",
			in_buf[0], in_buf[1], in_buf[2], in_buf[3]);
#endif

	return in_buf[SPI_CMD_SIZE];
}

void mtv222_spi_read_burst(unsigned char page, unsigned char reg,
			unsigned char *buf, int size)
{
	int ret;
	u8 out_buf[SPI_CMD_SIZE];
	struct spi_message msg;
	struct spi_transfer msg_xfer0 = {
		.tx_buf = out_buf,
		.rx_buf = buf,
		.len = SPI_CMD_SIZE,
		.cs_change = 0,
		.delay_usecs = 0
	};

	struct spi_transfer msg_xfer1 = {
		.tx_buf = buf,
		.rx_buf = buf,
		.len = size,
		.cs_change = 0,
		.delay_usecs = 0
	};

	if (page > 15) { /* 0 ~ 15: not SPI memory */
		out_buf[0] = 0xA0; /* Memory read */
		out_buf[1] = 0x00;
		out_buf[2] = 188; /* Fix */
	} else {
		out_buf[0] = 0x90 | page; /* Register read */
		out_buf[1] = reg;
		out_buf[2] = size;
	}

	spi_message_init(&msg);
	spi_message_add_tail(&msg_xfer0, &msg);
	spi_message_add_tail(&msg_xfer1, &msg);

	ret = spi_sync(mtv_spi, &msg);
	if (ret)
		DMBERR("1 error: %d\n", ret);	
}

void mtv222_spi_write(unsigned char page, unsigned char reg, unsigned char val)
{
	u8 out_buf[4];
	u8 in_buf[4];
	struct spi_message msg;
	struct spi_transfer msg_xfer = {
		.tx_buf = out_buf,
		.rx_buf = in_buf,
		.len = 4,
		.cs_change = 0,
		.delay_usecs = 0
	};
	int ret;

	out_buf[0] = 0x80 | page;
	out_buf[1] = reg;
	out_buf[2] = 1; /* size */
	out_buf[3] = val;

	spi_message_init(&msg);
	spi_message_add_tail(&msg_xfer, &msg);

	ret = spi_sync(mtv_spi, &msg);
	if (ret)
		DMBERR("error: %d\n", ret);
}

void mtv222_spi_recover(unsigned char *buf, unsigned int size)
{
	int ret;
	struct spi_message msg;
	struct spi_transfer msg_xfer = {
		.tx_buf = buf,
		.rx_buf = buf,
		.len = size,
		.cs_change = 0,
		.delay_usecs = 0,
	};

	memset(buf, 0xFF, size);

	spi_message_init(&msg);
	spi_message_add_tail(&msg_xfer, &msg);

	ret = spi_sync(mtv_spi, &msg);
	if (ret)
		DMBERR("error: %d\n", ret);
}
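
A hedged usage sketch for the recovery helper above, which clocks out 0xFF filler to flush the device after a failed transfer. The buffer size is an assumption; 188 bytes matches the fixed TS-packet length the burst-read path above uses.

static void mtv222_example_recover(void)
{
	static u8 flush_buf[188];	/* one assumed TS packet worth of 0xFF filler */

	mtv222_spi_recover(flush_buf, sizeof(flush_buf));
}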