Example #1
/*
 * hv_ringbuffer_write()
 *
 * Write to the ring buffer.
 */
int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
		    struct scatterlist *sglist, u32 sgcount)
{
	int i = 0;
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;
	u32 totalbytes_towrite = 0;

	struct scatterlist *sg;
	u32 next_write_location;
	u64 prev_indices = 0;
	unsigned long flags;

	for_each_sg(sglist, sg, sgcount, i)
	{
		totalbytes_towrite += sg->length;
	}

	totalbytes_towrite += sizeof(u64);

	spin_lock_irqsave(&outring_info->ring_lock, flags);

	hv_get_ringbuffer_availbytes(outring_info,
				&bytes_avail_toread,
				&bytes_avail_towrite);


	/*
	 * If there is only room for the packet, assume it is full.
	 * Otherwise, the next time around, we would think the ring
	 * buffer is empty since the read index == write index.
	 */
	if (bytes_avail_towrite <= totalbytes_towrite) {
		spin_unlock_irqrestore(&outring_info->ring_lock, flags);
		return -1;
	}

	/* Write to the ring buffer */
	next_write_location = hv_get_next_write_location(outring_info);

	for_each_sg(sglist, sg, sgcount, i)
	{
		next_write_location = hv_copyto_ringbuffer(outring_info,
						     next_write_location,
						     sg_virt(sg),
						     sg->length);
	}

	/* Set previous packet start */
	prev_indices = hv_get_ring_bufferindices(outring_info);

	next_write_location = hv_copyto_ringbuffer(outring_info,
					     next_write_location,
					     &prev_indices,
					     sizeof(u64));

	/* Issue a full memory barrier before updating the write index */
	mb();

	/* Now, update the write location */
	hv_set_next_write_location(outring_info, next_write_location);

	spin_unlock_irqrestore(&outring_info->ring_lock, flags);

	return 0;
}
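The availability check above encodes the classic ring-buffer convention: never let the writer fill the buffer completely, so that read index == write index always means "empty" rather than "full". A minimal stand-alone sketch of that invariant, in plain C (the helper name and index layout here are illustrative assumptions, not part of the driver):

#include <stdint.h>

/* Illustrative model: indices wrap at `size`; the occupied span is the
 * distance from the read index to the write index.
 */
static uint32_t ring_bytes_avail_towrite(uint32_t read_idx,
					 uint32_t write_idx,
					 uint32_t size)
{
	uint32_t used = (write_idx >= read_idx)
			? (write_idx - read_idx)
			: (size - read_idx + write_idx);

	return size - used;
}

A write of n bytes is then allowed only when n is strictly smaller than the value returned here; that is exactly why hv_ringbuffer_write() fails on bytes_avail_towrite <= totalbytes_towrite rather than on <.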
Example #2
/*---------------------------------------------------------------------------*/
static void process_request(struct xio_msg *msg)
{
	static int cnt;

	if (!msg) {
		cnt = 0;
		return;
	}
	if (++cnt == PRINT_COUNTER) {
		struct scatterlist *sgl = msg->in.data_tbl.sgl;

		pr_info("**** message [%llu] %s - %s\n",
			(msg->sn + 1),
			(char *)msg->in.header.iov_base,
			(char *)sg_virt(sgl));
		cnt = 0;
	}
}
void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
	enum dma_data_direction direction)
{
	unsigned long addr;
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nhwentries; i++, sg++) {
		if (!plat_device_is_coherent(dev) &&
		    direction != DMA_TO_DEVICE) {
			addr = (unsigned long) sg_virt(sg);
			if (addr)
				__dma_sync(addr, sg->length, direction);
		}
		plat_unmap_dma_mem(dev, sg->dma_address, sg->length, direction);
	}
}
Example #4
static void sh_mmcif_multi_read(struct sh_mmcif_host *host,
				struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;

	if (!data->sg_len || !data->sg->length)
		return;

	host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
		BLOCK_SIZE_MASK;

	host->wait_for = MMCIF_WAIT_FOR_MREAD;
	host->sg_idx = 0;
	host->sg_blkidx = 0;
	host->pio_ptr = sg_virt(data->sg);
	schedule_delayed_work(&host->timeout_work, host->timeout);
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
}
static void sh_mmcif_multi_write(struct sh_mmcif_host *host,
				struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;

	if (!data->sg_len || !data->sg->length)
		return;

	host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
		BLOCK_SIZE_MASK;

	host->wait_for = MMCIF_WAIT_FOR_MWRITE;
	host->sg_idx = 0;
	host->sg_blkidx = 0;
	host->pio_ptr = sg_virt(data->sg);

	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
}
Example #6
int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	   enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nents; i++, sg++) {
		sg->dma_address = (dma_addr_t) sg_virt(sg);

		invalidate_dcache_range(sg_dma_address(sg),
					sg_dma_address(sg) +
					sg_dma_len(sg));
	}

	return nents;
}
static void pmpmci_receive_pio(struct pmpmci_host *host)
{
	struct mmc_data *data;
	struct sd_data_s *sd_data = host->platdata;
	int max, count, sg_len = 0;
	unsigned int *sg_ptr = NULL;
	struct scatterlist *sg;

	data = host->mrq->data;
	if (!(host->flags & HOST_F_RECV))
		return;

	max = host->pio.len;

	if (host->pio.index < host->dma.sgmap_len) {
		sg = &data->sg[host->pio.index];
		sg_ptr = sg_virt(sg) + host->pio.offset;

		sg_len = sg_dma_len(&data->sg[host->pio.index]) - host->pio.offset;

		if (sg_len < max)
			max = sg_len;
	}

	count = sd_data->ops->transData(&sd_data->info,
					SD_DATA_FIFO | SD_DATA_READ,
					(void *)sg_ptr, max);
	host->pio.len -= count;
	host->pio.offset += count;
	if (sg_len && count == sg_len) {
		host->pio.index++;
		host->pio.offset = 0;
	}

	if (host->pio.len == 0) {
		sd_data->ops->intrpt_enable(&sd_data->info,
					    SD_MMC_INT_DATABUFFULL, 0);
		host->flags &= ~(HOST_F_XMIT | HOST_F_RECV);
		pmpmci_data_complete(host, sd_data->ops->getStatus(&sd_data->info));
	}
}
static void pmpmci_send_pio(struct pmpmci_host *host)
{
	struct mmc_data *data;
	struct sd_data_s *sd_data = host->platdata;
	int sg_len, max, count;
	unsigned int *sg_ptr;
	struct scatterlist *sg;

	data = host->mrq->data;

	if (!(host->flags & HOST_F_XMIT))
		return;

	sg = &data->sg[host->pio.index];
	sg_ptr = sg_virt(sg) + host->pio.offset;

	sg_len = data->sg[host->pio.index].length - host->pio.offset;

	max = (sg_len > host->pio.len) ? host->pio.len : sg_len;

	count = sd_data->ops->transData(&sd_data->info,
					SD_DATA_FIFO | SD_DATA_WRITE,
					(void *)sg_ptr, max);

	host->pio.len -= count;
	host->pio.offset += count;
	if (count == sg_len) {
		host->pio.index++;
		host->pio.offset = 0;
	}

	if (host->pio.len == 0) {
		sd_data->ops->intrpt_enable(&sd_data->info,
					    SD_MMC_INT_DATABUFEMPTY, 0);
		host->flags &= ~(HOST_F_XMIT | HOST_F_RECV);
		pmpmci_data_complete(host, sd_data->ops->getStatus(&sd_data->info));
	}
}
Example #9
/*---------------------------------------------------------------------------*/
static void process_request(struct xio_msg *req)
{
	struct sg_table *sgt = &req->in.data_tbl;
	struct scatterlist	*sg;
	char			*str;
	int			len, i;
	char			tmp;

	/* Note: all the data is packed together, so to print each part
	 * on its own, a NUL character is temporarily stuffed in before
	 * the print and the original character is restored afterwards.
	 */
	if (++g_server_data->cnt == PRINT_COUNTER) {
		str = (char *)req->in.header.iov_base;
		len = req->in.header.iov_len;
		if (str) {
			if (((unsigned)len) > 64)
				len = 64;
			tmp = str[len];
			str[len] = '\0';
			pr_info("message header : [%llu] - %s\n",
				(req->sn + 1), str);
			str[len] = tmp;
		}
		sg = sgt->sgl;
		for (i = 0; i < sgt->nents; i++) {
			str = (char *)sg_virt(sg);
			len = sg->length;
			if (str) {
				if (((unsigned)len) > 64)
					len = 64;
				tmp = str[len];
				str[len] = '\0';
				pr_info("message data: [%llu][%d][%d] - %s\n",
					(req->sn + 1), i, len, str);
				str[len] = tmp;
			}
			sg = sg_next(sg);
		}
		g_server_data->cnt = 0;
	}
}
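The save/poke/restore trick described in the comment above is generic: to print a length-delimited slice of a packed buffer through a %s format, terminate the slice temporarily. A stand-alone userspace sketch (the function name is hypothetical):

#include <stdio.h>

/* Print at most `len` bytes of `buf` as a string by temporarily
 * NUL-terminating the slice and restoring the clobbered byte after.
 * `buf` must be writable and at least `len` + 1 bytes long.
 */
static void print_slice(char *buf, int len)
{
	char tmp = buf[len];	/* save the byte right after the slice */

	buf[len] = '\0';
	printf("%s\n", buf);
	buf[len] = tmp;		/* restore it */
}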
Example #10
static int __alloc_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl,
		       struct scatterlist *sghead, u32 pages, u32 pg_size)
{
	struct scatterlist *sg;
	bool is_umem = false;
	int i;

	/* page ptr arrays */
	pbl->pg_arr = kcalloc(pages, sizeof(void *), GFP_KERNEL);
	if (!pbl->pg_arr)
		return -ENOMEM;

	pbl->pg_map_arr = kcalloc(pages, sizeof(dma_addr_t), GFP_KERNEL);
	if (!pbl->pg_map_arr) {
		kfree(pbl->pg_arr);
		pbl->pg_arr = NULL;
		return -ENOMEM;
	}
	pbl->pg_count = 0;
	pbl->pg_size = pg_size;

	if (!sghead) {
		for (i = 0; i < pages; i++) {
			pbl->pg_arr[i] = dma_zalloc_coherent(&pdev->dev,
							     pbl->pg_size,
							     &pbl->pg_map_arr[i],
							     GFP_KERNEL);
			if (!pbl->pg_arr[i])
				goto fail;
			pbl->pg_count++;
		}
	} else {
		i = 0;
		is_umem = true;
		for_each_sg(sghead, sg, pages, i) {
			pbl->pg_map_arr[i] = sg_dma_address(sg);
			pbl->pg_arr[i] = sg_virt(sg);
			if (!pbl->pg_arr[i])
				goto fail;

			pbl->pg_count++;
		}
	}
	return 0;

fail:
	__free_pbl(pdev, pbl, is_umem);
	return -ENOMEM;
}
Example #11
static int mt76x02u_mcu_wait_resp(struct mt76_dev *dev, u8 seq)
{
	struct mt76_usb *usb = &dev->usb;
	struct mt76u_buf *buf = &usb->mcu.res;
	struct urb *urb = buf->urb;
	int i, ret;
	u32 rxfce;
	u8 *data;

	for (i = 0; i < 5; i++) {
		if (!wait_for_completion_timeout(&usb->mcu.cmpl,
						 msecs_to_jiffies(300)))
			continue;

		if (urb->status)
			return -EIO;

		data = sg_virt(&urb->sg[0]);
		if (usb->mcu.rp)
			mt76x02u_multiple_mcu_reads(dev, data + 4,
						    urb->actual_length - 8);

		rxfce = get_unaligned_le32(data);
		ret = mt76u_submit_buf(dev, USB_DIR_IN,
				       MT_EP_IN_CMD_RESP,
				       buf, GFP_KERNEL,
				       mt76u_mcu_complete_urb,
				       &usb->mcu.cmpl);
		if (ret)
			return ret;

		if (seq == FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, rxfce) &&
		    FIELD_GET(MT_RX_FCE_INFO_EVT_TYPE, rxfce) == EVT_CMD_DONE)
			return 0;

		dev_err(dev->dev, "error: MCU resp evt:%lx seq:%hhx-%lx\n",
			FIELD_GET(MT_RX_FCE_INFO_EVT_TYPE, rxfce),
			seq, FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, rxfce));
	}

	dev_err(dev->dev, "error: %s timed out\n", __func__);
	return -ETIMEDOUT;
}
int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nents; i++, sg++) {
		unsigned long addr;

		addr = (unsigned long) sg_virt(sg);
		if (!plat_device_is_coherent(dev) && addr)
			__dma_sync(addr, sg->length, direction);
		sg->dma_address = plat_map_dma_mem(dev,
						   (void *)addr, sg->length);
	}

	return nents;
}
static bool sh_mmcif_next_block(struct sh_mmcif_host *host, u32 *p)
{
	struct mmc_data *data = host->mrq->data;

	host->sg_blkidx += host->blocksize;

	/* data->sg->length must be a multiple of host->blocksize? */
	BUG_ON(host->sg_blkidx > data->sg->length);

	if (host->sg_blkidx == data->sg->length) {
		host->sg_blkidx = 0;
		if (++host->sg_idx < data->sg_len)
			host->pio_ptr = sg_virt(++data->sg);
	} else {
		host->pio_ptr = p;
	}

	return host->sg_idx != data->sg_len;
}
Example #14
static int
mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	u8 *data = sg_virt(&urb->sg[0]);
	int data_len, len, nsgs = 1;
	struct sk_buff *skb;

	if (!test_bit(MT76_STATE_INITIALIZED, &dev->state))
		return 0;

	len = mt76u_get_rx_entry_len(data, urb->actual_length);
	if (len < 0)
		return 0;

	skb = build_skb(data, q->buf_size);
	if (!skb)
		return 0;

	data_len = min_t(int, len, urb->sg[0].length - MT_DMA_HDR_LEN);
	skb_reserve(skb, MT_DMA_HDR_LEN);
	if (skb->tail + data_len > skb->end) {
		dev_kfree_skb(skb);
		return 1;
	}

	__skb_put(skb, data_len);
	len -= data_len;

	while (len > 0) {
		data_len = min_t(int, len, urb->sg[nsgs].length);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				sg_page(&urb->sg[nsgs]),
				urb->sg[nsgs].offset,
				data_len, q->buf_size);
		len -= data_len;
		nsgs++;
	}
	dev->drv->rx_skb(dev, MT_RXQ_MAIN, skb);

	return nsgs;
}
Example #15
static void
mts_build_transfer_context(struct scsi_cmnd *srb, struct mts_desc* desc)
{
	int pipe;
	struct scatterlist *sg;

	MTS_DEBUG_GOT_HERE();

	desc->context.instance = desc;
	desc->context.srb = srb;
	desc->context.fragment = 0;

	if (!scsi_bufflen(srb)) {
		desc->context.data = NULL;
		desc->context.data_length = 0;
		return;
	} else {
		sg = scsi_sglist(srb);
		desc->context.data = sg_virt(&sg[0]);
		desc->context.data_length = sg[0].length;
	}


	/* can't rely on srb->sc_data_direction */

	/* Brutally ripped from usb-storage */

	if (!memcmp(srb->cmnd, mts_read_image_sig, mts_read_image_sig_len)) {
		pipe = usb_rcvbulkpipe(desc->usb_dev, desc->ep_image);
		MTS_DEBUG("transferring from desc->ep_image == %d\n",
			  (int)desc->ep_image);
	} else if (MTS_DIRECTION_IS_IN(srb->cmnd[0])) {
		pipe = usb_rcvbulkpipe(desc->usb_dev, desc->ep_response);
		MTS_DEBUG("transferring from desc->ep_response == %d\n",
			  (int)desc->ep_response);
	} else {
		MTS_DEBUG("transferring to desc->ep_out == %d\n",
			  (int)desc->ep_out);
		pipe = usb_sndbulkpipe(desc->usb_dev, desc->ep_out);
	}
	desc->context.data_pipe = pipe;
}
Example #16
static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
			       struct scatterlist *sgl,
			       struct scatterlist *sglout,
			       struct qat_crypto_request *qat_req)
{
	struct device *dev = &GET_DEV(inst->accel_dev);
	int i, sg_nctr = 0;
	int n = sg_nents(sgl);
	struct qat_alg_buf_list *bufl;
	struct qat_alg_buf_list *buflout = NULL;
	dma_addr_t blp;
	dma_addr_t bloutp = 0;
	struct scatterlist *sg;
	size_t sz_out, sz = sizeof(struct qat_alg_buf_list) +
			((1 + n) * sizeof(struct qat_alg_buf));

	if (unlikely(!n))
		return -EINVAL;

	bufl = kzalloc_node(sz, GFP_ATOMIC,
			    dev_to_node(&GET_DEV(inst->accel_dev)));
	if (unlikely(!bufl))
		return -ENOMEM;

	blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, blp)))
		goto err_in;

	for_each_sg(sgl, sg, n, i) {
		int y = sg_nctr;

		if (!sg->length)
			continue;

		bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
						      sg->length,
						      DMA_BIDIRECTIONAL);
		bufl->bufers[y].len = sg->length;
		if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
			goto err_in;
		sg_nctr++;
	}
Example #18
static bool sh_mmcif_write_block(struct sh_mmcif_host *host)
{
	struct mmc_data *data = host->mrq->data;
	u32 *p = sg_virt(data->sg);
	int i;

	if (host->sd_error) {
		data->error = sh_mmcif_error_manage(host);
		return false;
	}

	for (i = 0; i < host->blocksize / 4; i++)
		sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++);

	/* buffer write end */
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MDTRANE);
	host->wait_for = MMCIF_WAIT_FOR_WRITE_END;

	return true;
}
Example #19
static bool sh_mmcif_read_block(struct sh_mmcif_host *host)
{
	struct mmc_data *data = host->mrq->data;
	u32 *p = sg_virt(data->sg);
	int i;

	if (host->sd_error) {
		data->error = sh_mmcif_error_manage(host);
		return false;
	}

	for (i = 0; i < host->blocksize / 4; i++)
		*p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA);

	/* buffer read end */
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFRE);
	host->wait_for = MMCIF_WAIT_FOR_READ_END;

	return true;
}
/*
 *	PIO data transfer routine using the scatter gather table.
 */
static void ide_scsi_io_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
				unsigned int bcount, int write)
{
	ide_hwif_t *hwif = drive->hwif;
	const struct ide_tp_ops *tp_ops = hwif->tp_ops;
	xfer_func_t *xf = write ? tp_ops->output_data : tp_ops->input_data;
	char *buf;
	int count;

	while (bcount) {
		count = min(pc->sg->length - pc->b_count, bcount);
		if (PageHighMem(sg_page(pc->sg))) {
			unsigned long flags;

			local_irq_save(flags);
			buf = kmap_atomic(sg_page(pc->sg), KM_IRQ0) +
					  pc->sg->offset;
			xf(drive, NULL, buf + pc->b_count, count);
			kunmap_atomic(buf - pc->sg->offset, KM_IRQ0);
			local_irq_restore(flags);
		} else {
			buf = sg_virt(pc->sg);
			xf(drive, NULL, buf + pc->b_count, count);
		}
		bcount -= count; pc->b_count += count;
		if (pc->b_count == pc->sg->length) {
			if (!--pc->sg_cnt)
				break;
			pc->sg = sg_next(pc->sg);
			pc->b_count = 0;
		}
	}

	if (bcount) {
		printk(KERN_ERR "%s: scatter gather table too small, %s\n",
				drive->name, write ? "padding with zeros"
						   : "discarding data");
		ide_pad_transfer(drive, write, bcount);
	}
}
static int dx_map_sg(struct device *dev, struct scatterlist *sg,
		     unsigned int nbytes, int direction,
		     uint32_t *nents, uint32_t max_sg_nents,
		     int *lbytes)
{
	if (sg_is_last(sg)) {
		/* One entry only case - set to DLLI */
		if (unlikely(dma_map_sg(dev, sg, 1, direction) != 1)) {
			DX_LOG_ERR("dma_map_sg() single buffer failed %s\n",
				   get_dir_type(direction));
			return -ENOMEM;
		}
		DX_LOG_DEBUG("Mapped sg: dma_address=0x%08lX "
			     "page_link=0x%08lX addr=0x%08lX offset=%u "
			     "length=%u\n",
			     (unsigned long)sg_dma_address(sg),
			     sg->page_link,
			     (unsigned long)sg_virt(sg),
			     sg->offset, sg->length);
		*lbytes = nbytes;
		*nents = 1;
	} else { /* !sg_is_last */

		*nents = sg_count_ents(sg, nbytes, lbytes);
		if (*nents > max_sg_nents) {
			DX_LOG_ERR("Too many fragments. current %d max %d\n",
				   *nents, max_sg_nents);
			return -ENOMEM;
		}
		/* TODO - verify num of entries */
		if (unlikely(dma_map_sg(dev, sg, *nents, direction) !=
			     *nents)) {
			DX_LOG_ERR("dma_map_sg() sg buffer failed - %s\n",
				   get_dir_type(direction));
			return -ENOMEM;
		}
	}

	return 0;
}
Example #22
static void jz_mmc_send_pio(struct jz_mmc_host *host)
{

	struct mmc_data *data = NULL;
	int sg_len, max, count = 0;
	u32 *wbuf = NULL;
	struct scatterlist *sg;
	unsigned int nob;

	data = host->mrq->data;
	nob = data->blocks;

	REG_MSC_NOB = nob;
	REG_MSC_BLKLEN = data->blksz;

	/* This is the pointer to the data buffer */
	sg = &data->sg[host->pio.index];
	wbuf = sg_virt(sg) + host->pio.offset;

	/* This is the space left inside the buffer */
	sg_len = data->sg[host->pio.index].length - host->pio.offset;

	/* Check whether we need less than the size of the sg buffer */
	max = (sg_len > host->pio.len) ? host->pio.len : sg_len;
	max = max / 4;
	for (count = 0; count < max; count++) {
		while (REG_MSC_STAT & MSC_STAT_DATA_FIFO_FULL)
			;
		REG_MSC_TXFIFO = *wbuf++;
	}

	host->pio.len -= count;
	host->pio.offset += count;

	if (count == sg_len) {
		host->pio.index++;
		host->pio.offset = 0;
	}
}
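Both jz_mmc PIO paths (this one and the receive path in Example #26, like the pmpmci pair in Example #6) advance through the scatterlist with the same (index, offset, len) bookkeeping. A minimal userspace model of that bookkeeping (the struct and function names are hypothetical) makes the invariant explicit: offset resets to zero exactly when the current buffer is fully consumed.

#include <stddef.h>

/* Hypothetical model of the host->pio state used above. */
struct pio_state {
	size_t index;	/* which sg buffer we are in        */
	size_t offset;	/* bytes already consumed inside it */
	size_t len;	/* bytes left in the whole transfer */
};

/* Consume up to `chunk` bytes from the current buffer of length
 * `buf_len`; returns how many bytes were actually consumed.
 */
static size_t pio_advance(struct pio_state *pio, size_t buf_len, size_t chunk)
{
	size_t space = buf_len - pio->offset;
	size_t count = chunk < space ? chunk : space;

	pio->len -= count;
	pio->offset += count;
	if (pio->offset == buf_len) {	/* buffer fully consumed */
		pio->index++;
		pio->offset = 0;
	}
	return count;
}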
Example #23
static void
goldfish_mmc_xfer_done(struct goldfish_mmc_host *host, struct mmc_data *data)
{
	if (host->dma_in_use) {
		enum dma_data_direction dma_data_dir;

		if (data->flags & MMC_DATA_WRITE)
			dma_data_dir = DMA_TO_DEVICE;
		else
			dma_data_dir = DMA_FROM_DEVICE;

		if (dma_data_dir == DMA_FROM_DEVICE) {
			/*
			 * We don't really have DMA, so we need to copy
			 * from our platform driver buffer.
			 */
			uint8_t *dest = (uint8_t *)sg_virt(data->sg);

			memcpy(dest, host->virt_base, data->sg->length);
		}

		host->data->bytes_xfered += data->sg->length;

		dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->sg_len,
			     dma_data_dir);
	}

	host->data = NULL;
	host->sg_len = 0;

	/* NOTE:  MMC layer will sometimes poll-wait CMD13 next, issuing
	 * dozens of requests until the card finishes writing data.
	 * It'd be cheaper to just wait till an EOFB interrupt arrives...
	 */

	if (!data->stop) {
		host->mrq = NULL;
		mmc_request_done(host->mmc, data->mrq);
		return;
	}

	goldfish_mmc_start_command(host, data->stop);
}
/**
 * zfcp_qdio_sbals_from_sg - fill SBALs from scatter-gather list
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: pointer to struct zfcp_qdio_req
 * @sg: scatter-gather list
 * Returns: zero or -EINVAL on error
 */
int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
			    struct scatterlist *sg)
{
	struct qdio_buffer_element *sbale;

	/* set storage-block type for this request */
	sbale = zfcp_qdio_sbale_req(qdio, q_req);
	sbale->sflags |= q_req->sbtype;

	for (; sg; sg = sg_next(sg)) {
		sbale = zfcp_qdio_sbale_next(qdio, q_req);
		if (!sbale) {
			atomic_inc(&qdio->req_q_full);
			zfcp_qdio_zero_sbals(qdio->req_q, q_req->sbal_first,
					     q_req->sbal_number);
			return -EINVAL;
		}
		sbale->addr = sg_virt(sg);
		sbale->length = sg->length;
	}
	return 0;
}
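zfcp_qdio_sbals_from_sg() is the canonical shape of an "sg list to hardware descriptor" walk: one descriptor per scatterlist entry, and an explicit rollback once descriptors run out. A stripped-down model of that walk (all types and names here are hypothetical):

#include <stddef.h>

struct hw_desc {		/* hypothetical hardware descriptor */
	void *addr;
	size_t length;
};

struct seg {			/* hypothetical sg-entry stand-in */
	void *virt;
	size_t length;
	struct seg *next;
};

/* Fill descriptors from the segment chain; returns the number used,
 * or -1 when the chain does not fit (the caller must then undo any
 * partial setup, as zfcp does with zfcp_qdio_zero_sbals()).
 */
static int fill_descs(struct hw_desc *descs, size_t ndesc, struct seg *sg)
{
	size_t i = 0;

	for (; sg; sg = sg->next) {
		if (i == ndesc)
			return -1;
		descs[i].addr = sg->virt;
		descs[i].length = sg->length;
		i++;
	}
	return (int)i;
}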
static int fd_do_writev(struct se_task *task)
{
	struct fd_request *req = FILE_REQ(task);
	struct se_device *se_dev = req->fd_task.task_se_cmd->se_dev;
	struct fd_dev *dev = se_dev->dev_ptr;
	struct file *fd = dev->fd_file;
	struct scatterlist *sg = task->task_sg;
	struct iovec *iov;
	mm_segment_t old_fs;
	loff_t pos = (task->task_lba *
		      se_dev->se_sub_dev->se_dev_attrib.block_size);
	int ret, i = 0;

	iov = kzalloc(sizeof(struct iovec) * task->task_sg_nents, GFP_KERNEL);
	if (!iov) {
		pr_err("Unable to allocate fd_do_writev iov[]\n");
		return -ENOMEM;
	}

	for (i = 0; i < task->task_sg_nents; i++) {
		iov[i].iov_len = sg[i].length;
		iov[i].iov_base = sg_virt(&sg[i]);
	}

	old_fs = get_fs();
	set_fs(get_ds());
	ret = vfs_writev(fd, &iov[0], task->task_sg_nents, &pos);
	set_fs(old_fs);

	kfree(iov);

	if (ret < 0 || ret != task->task_size) {
		pr_err("vfs_writev() returned %d\n", ret);
		return (ret < 0 ? ret : -EINVAL);
	}

	return 1;
}
Example #26
static void jz_mmc_receive_pio(struct jz_mmc_host *host)
{

	struct mmc_data *data = NULL;
	int sg_len = 0, max = 0, count = 0;
	u32 *buf = NULL;
	struct scatterlist *sg;
	unsigned int nob;

	data = host->mrq->data;
	nob = data->blocks;
	REG_MSC_NOB = nob;
	REG_MSC_BLKLEN = data->blksz;

	max = host->pio.len;
	if (host->pio.index < host->dma.len) {
		sg = &data->sg[host->pio.index];
		buf = sg_virt(sg) + host->pio.offset;

		/* This is the space left inside the buffer */
		sg_len = sg_dma_len(&data->sg[host->pio.index]) - host->pio.offset;
		/* Check whether we need less than the size of the sg buffer */
		if (sg_len < max)
			max = sg_len;
	}
	max = max / 4;
	for (count = 0; count < max; count++) {
		while (REG_MSC_STAT & MSC_STAT_DATA_FIFO_EMPTY)
			;
		*buf++ = REG_MSC_RXFIFO;
	}
	host->pio.len -= count;
	host->pio.offset += count;

	if (sg_len && count == sg_len) {
		host->pio.index++;
		host->pio.offset = 0;
	}
}
Example #27
static int
mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76u_buf *buf,
		 int nsgs, int len, int sglen)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	struct urb *urb = buf->urb;
	int i;

	spin_lock_bh(&q->rx_page_lock);
	for (i = 0; i < nsgs; i++) {
		struct page *page;
		void *data;
		int offset;

		data = page_frag_alloc(&q->rx_page, len, GFP_ATOMIC);
		if (!data)
			break;

		page = virt_to_head_page(data);
		offset = data - page_address(page);
		sg_set_page(&urb->sg[i], page, sglen, offset);
	}
	spin_unlock_bh(&q->rx_page_lock);

	if (i < nsgs) {
		int j;

		for (j = nsgs; j < urb->num_sgs; j++)
			skb_free_frag(sg_virt(&urb->sg[j]));
		urb->num_sgs = i;
	}

	urb->num_sgs = max_t(int, i, urb->num_sgs);
	buf->len = urb->num_sgs * sglen;
	sg_init_marker(urb->sg, urb->num_sgs);

	return i ? : -ENOMEM;
}
Example #28
static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
			       struct scatterlist *assoc,
			       struct scatterlist *sgl,
			       struct scatterlist *sglout, uint8_t *iv,
			       uint8_t ivlen,
			       struct qat_crypto_request *qat_req)
{
	struct device *dev = &GET_DEV(inst->accel_dev);
	int i, bufs = 0, n = sg_nents(sgl), assoc_n = sg_nents(assoc);
	struct qat_alg_buf_list *bufl;
	struct qat_alg_buf_list *buflout = NULL;
	dma_addr_t blp;
	dma_addr_t bloutp = 0;
	struct scatterlist *sg;
	size_t sz = sizeof(struct qat_alg_buf_list) +
			((1 + n + assoc_n) * sizeof(struct qat_alg_buf));

	if (unlikely(!n))
		return -EINVAL;

	bufl = kmalloc_node(sz, GFP_ATOMIC, inst->accel_dev->numa_node);
	if (unlikely(!bufl))
		return -ENOMEM;

	blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, blp)))
		goto err;

	for_each_sg(assoc, sg, assoc_n, i) {
		bufl->bufers[bufs].addr = dma_map_single(dev,
							 sg_virt(sg),
							 sg->length,
							 DMA_BIDIRECTIONAL);
		bufl->bufers[bufs].len = sg->length;
		if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr)))
			goto err;
		bufs++;
	}
Example #29
static void mts_do_sg (struct urb* transfer)
{
	struct scatterlist * sg;
	int status = transfer->status;
	MTS_INT_INIT();

	MTS_DEBUG("Processing fragment %d of %d\n", context->fragment,
	                                          scsi_sg_count(context->srb));

	if (unlikely(status)) {
		context->srb->result = (status == -ENOENT ? DID_ABORT : DID_ERROR) << 16;
		mts_transfer_cleanup(transfer);
		/* the transfer has been cleaned up - don't submit another URB */
		return;
	}

	sg = scsi_sglist(context->srb);
	context->fragment++;
	mts_int_submit_urb(transfer,
			   context->data_pipe,
			   sg_virt(&sg[context->fragment]),
			   sg[context->fragment].length,
			   context->fragment + 1 == scsi_sg_count(context->srb) ?
			   mts_data_done : mts_do_sg);
}
Example #30
static int fd_do_readv(struct se_cmd *cmd, struct scatterlist *sgl,
		u32 sgl_nents)
{
	struct se_device *se_dev = cmd->se_dev;
	struct fd_dev *dev = se_dev->dev_ptr;
	struct file *fd = dev->fd_file;
	struct scatterlist *sg;
	struct iovec *iov;
	mm_segment_t old_fs;
	loff_t pos = (cmd->t_task_lba *
		      se_dev->se_sub_dev->se_dev_attrib.block_size);
	int ret = 0, i;

	iov = kzalloc(sizeof(struct iovec) * sgl_nents, GFP_KERNEL);
	if (!iov) {
		pr_err("Unable to allocate fd_do_readv iov[]\n");
		return -ENOMEM;
	}

	for_each_sg(sgl, sg, sgl_nents, i) {
		iov[i].iov_len = sg->length;
		iov[i].iov_base = sg_virt(sg);
	}