Ejemplo n.º 1
0
/**
 * crystalhd_unmap_dio - Release mapped resources
 * @adp: Adapter instance
 * @dio: DIO request instance
 *
 * Return:
 *	Status.
 *
 * Unmaps the user buffer pages previously pinned for DMA: marks pages
 * dirty when data was transferred from the device, drops the page
 * references, tears down the scatter/gather mapping, and returns the
 * DIO request to the adapter's free pool.
 */
BC_STATUS crystalhd_unmap_dio(struct crystalhd_adp *adp, struct crystalhd_dio_req *dio)
{
	struct page *page = NULL;
	int j = 0;

	if (!adp || !dio) {
		printk(KERN_ERR "%s: Invalid arg\n", __func__);
		return BC_STS_INV_ARG;
	}

	/* Release pinned user pages, unless the request was never locked. */
	if ((dio->page_cnt > 0) && (dio->sig != crystalhd_dio_inv)) {
		for (j = 0; j < dio->page_cnt; j++) {
			page = dio->pages[j];
			if (page) {
				/* Device wrote into this page; tell the VM. */
				if (!PageReserved(page) &&
				    (dio->direction == DMA_FROM_DEVICE))
					SetPageDirty(page);

/* page_cache_release() was renamed to put_page() in kernel 4.6 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0)
				put_page(page);
#else
				page_cache_release(page);
#endif
			}
		}
	}
	/* Undo pci_map_sg() only if the S/G list was actually mapped. */
	if (dio->sig == crystalhd_dio_sg_mapped)
		pci_unmap_sg(adp->pdev, dio->sg, dio->page_cnt, dio->direction);

	crystalhd_free_dio(adp, dio);

	return BC_STS_SUCCESS;
}
Ejemplo n.º 2
0
/*
 * Tear down the user-space DMA mapping: unmap the scatterlist (if one
 * was mapped), sync the buffer for CPU access, then drop every pinned
 * user page. Calling with no pages mapped is a no-op.
 */
void ivtv_udma_unmap(struct ivtv *itv)
{
	struct ivtv_user_dma *udma = &itv->udma;
	int idx;

	IVTV_DEBUG_INFO("ivtv_unmap_user_dma\n");

	if (udma->page_count == 0)
		return;

	if (udma->SG_length) {
		pci_unmap_sg(itv->pdev, udma->SGlist, udma->page_count,
			     PCI_DMA_TODEVICE);
		udma->SG_length = 0;
	}

	/* make DMA-written data visible to the CPU */
	ivtv_udma_sync_for_cpu(itv);

	for (idx = 0; idx < udma->page_count; idx++)
		put_page(udma->map[idx]);
	udma->page_count = 0;
}
Ejemplo n.º 3
0
/*
 * Undo saa7146_vmalloc_build_pgtable(): unmap the scatterlist from the
 * PCI bus, release the hardware page table, free the sg array, and
 * finally free the vmalloc'ed buffer itself.
 */
void saa7146_vfree_destroy_pgtable(struct pci_dev *pci, void *mem, struct saa7146_pgtable *pt)
{
    pci_unmap_sg(pci, pt->slist, pt->nents, PCI_DMA_FROMDEVICE);

    kfree(pt->slist);
    pt->slist = NULL;
    saa7146_pgtable_free(pci, pt);

    vfree(mem);
}
Ejemplo n.º 4
0
/**
 * vme_dma_setup() - Setup a DMA transfer
 * @desc: DMA channel to setup
 * @to_user:	1 if the transfer is to/from a user-space buffer.
 *		0 if it is to/from a kernel buffer.
 *
 *  Setup a DMA transfer.
 *
 *  Returns 0 on success, or a standard kernel error code on failure.
 */
static int vme_dma_setup(struct dma_channel *channel, int to_user)
{
	int rc = 0;
	struct vme_dma *desc = &channel->desc;
	unsigned int length = desc->length;
	unsigned int uaddr;
	int nr_pages;

	/* Create the scatter gather list */
	uaddr = (desc->dir == VME_DMA_TO_DEVICE) ?
		desc->src.addrl : desc->dst.addrl;

	/* Check for overflow */
	if ((uaddr + length) < uaddr)
		return -EINVAL;

	nr_pages = ((uaddr & ~PAGE_MASK) + length + ~PAGE_MASK) >> PAGE_SHIFT;

	if ((channel->sgl = kmalloc(nr_pages * sizeof(struct scatterlist),
				    GFP_KERNEL)) == NULL)
		return -ENOMEM;

	/* Map the user pages into the scatter gather list */
	channel->sg_pages = sgl_map_user_pages(channel->sgl, nr_pages, uaddr,
					       length,
					       (desc->dir==VME_DMA_FROM_DEVICE),
					       to_user);

	if (channel->sg_pages <= 0) {
		rc = channel->sg_pages;
		goto out_free_sgl;
	}

	/* Map the sg list entries onto the PCI bus */
	channel->sg_mapped = pci_map_sg(vme_bridge->pdev, channel->sgl,
					channel->sg_pages, desc->dir);

	rc = tsi148_dma_setup(channel);


	if (rc)
		goto out_unmap_sgl;

	return 0;

out_unmap_sgl:
	pci_unmap_sg(vme_bridge->pdev, channel->sgl, channel->sg_mapped,
		     desc->dir);

	sgl_unmap_user_pages(channel->sgl, channel->sg_pages, 0, to_user);

out_free_sgl:
	kfree(channel->sgl);

	return rc;
}
Ejemplo n.º 5
0
/*
 * Generic scatterlist unmap: dispatch to the bus-specific
 * implementation. Any other bus type is a programming error.
 */
void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
		enum dma_data_direction direction)
{
	if (dev->bus == &pci_bus_type) {
		pci_unmap_sg(to_pci_dev(dev), sg, nhwentries, (int)direction);
		return;
	}
	if (dev->bus == &vio_bus_type) {
		vio_unmap_sg(to_vio_dev(dev), sg, nhwentries, direction);
		return;
	}
	BUG();
}
Ejemplo n.º 6
0
/*
 * Release the PCI scatter/gather mapping (skipped when the buffer was
 * set up from a fixed bus address) and free the scatterlist.
 * Returns 0; calling with nothing mapped is a no-op.
 */
int videobuf_dma_pci_unmap(struct pci_dev *dev, struct videobuf_dmabuf *dma)
{
	if (!dma->sglen)
		return 0;

	/* a non-zero bus_addr means no sg mapping was ever created */
	if (!dma->bus_addr)
		pci_unmap_sg(dev, dma->sglist, dma->nr_pages, dma->direction);

	kfree(dma->sglist);
	dma->sglist = NULL;
	dma->sglen = 0;

	return 0;
}
Ejemplo n.º 7
0
/**
 * ixgbe_fcoe_ddp_put - free the ddp context for a given xid
 * @netdev: the corresponding net_device
 * @xid: the xid that corresponding ddp will be freed
 *
 * This is the implementation of net_device_ops.ndo_fcoe_ddp_done
 * and it is expected to be called by ULD, i.e., FCP layer of libfc
 * to release the corresponding ddp context when the I/O is done.
 *
 * Returns : data length already ddp-ed in bytes
 */
int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid)
{
	int len = 0;
	struct ixgbe_fcoe *fcoe;
	struct ixgbe_adapter *adapter;
	struct ixgbe_fcoe_ddp *ddp;
	u32 fcbuff;

	if (!netdev)
		goto out_ddp_put;

	/* xid indexes the fixed-size ddp context table */
	if (xid >= IXGBE_FCOE_DDP_MAX)
		goto out_ddp_put;

	adapter = netdev_priv(netdev);
	fcoe = &adapter->fcoe;
	ddp = &fcoe->ddp[xid];
	if (!ddp->udl)		/* no user descriptor list: nothing to free */
		goto out_ddp_put;

	len = ddp->len;
	/* if there an error, force to invalidate ddp context */
	if (ddp->err) {
		spin_lock_bh(&fcoe->lock);
		/* zero the filter/buffer context, then latch each write
		 * into the per-xid context with the WE commands */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCFLT, 0);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCFLTRW,
				(xid | IXGBE_FCFLTRW_WE));
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCBUFF, 0);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCDMARW,
				(xid | IXGBE_FCDMARW_WE));

		/* guaranteed to be invalidated after 100us */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCDMARW,
				(xid | IXGBE_FCDMARW_RE));
		fcbuff = IXGBE_READ_REG(&adapter->hw, IXGBE_FCBUFF);
		spin_unlock_bh(&fcoe->lock);
		/* still marked valid: wait out the invalidation window */
		if (fcbuff & IXGBE_FCBUFF_VALID)
			udelay(100);
	}
	if (ddp->sgl)
		pci_unmap_sg(adapter->pdev, ddp->sgl, ddp->sgc,
			     DMA_FROM_DEVICE);
	if (ddp->pool) {
		/* return the user descriptor list to its DMA pool */
		pci_pool_free(ddp->pool, ddp->udl, ddp->udp);
		ddp->pool = NULL;
	}

	ixgbe_fcoe_clear_ddp(ddp);

out_ddp_put:
	return len;
}
Ejemplo n.º 8
0
/*
 * Post-I/O cleanup for a request's scatter/gather list: re-link the
 * last descriptor so the list can be reused unchanged by the next
 * request, then drop the PCI DMA mapping.
 */
static void skd_postop_sg_list(struct skd_device *skdev,
			       struct skd_request_context *skreq)
{
	int pci_dir;

	/*
	 * restore the next ptr for next IO request so we
	 * don't have to set it every time.
	 */
	skreq->sksg_list[skreq->n_sg - 1].next_desc_ptr =
		skreq->sksg_dma_address +
		skreq->n_sg * sizeof(struct fit_sg_descriptor);

	pci_dir = (skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD) ?
		  PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;
	pci_unmap_sg(skdev->pdev, &skreq->sg[0], skreq->n_sg, pci_dir);
}
Ejemplo n.º 9
0
/*
 * Free a DMA region: if it is currently mapped, drop the PCI mapping
 * first, then release the scatterlist and the backing kernel buffer.
 */
void dma_region_free(struct dma_region *dma)
{
	if (dma->n_dma_pages) {
		pci_unmap_sg(dma->dev, dma->sglist, dma->n_pages,
			     dma->direction);
		dma->n_dma_pages = 0;
		dma->dev = NULL;
	}

	/* vfree(NULL) is a no-op, so no guards are needed here */
	vfree(dma->sglist);
	dma->sglist = NULL;
	vfree(dma->kvirt);
	dma->kvirt = NULL;
	dma->n_pages = 0;
}
Ejemplo n.º 10
0
/*
 * Unmap a videobuf DMA buffer and release its scatterlist.
 * Returns 0; calling with nothing mapped is a no-op.
 */
int videobuf_dma_unmap(struct videobuf_queue* q,struct videobuf_dmabuf *dma)
{
	void *dev = q->dev;

	MAGIC_CHECK(dma->magic, MAGIC_DMABUF);

	if (!dma->sglen)
		return 0;

	pci_unmap_sg(dev, dma->sglist, dma->nr_pages, dma->direction);

	kfree(dma->sglist);
	dma->sglist = NULL;
	dma->sglen = 0;

	return 0;
}
Ejemplo n.º 11
0
/* Release the DMA resources attached to a completed SAS task. */
static inline void asd_unmap_scatterlist(struct asd_ascb *ascb)
{
	struct asd_ha_struct *asd_ha = ascb->ha;
	struct sas_task *task = ascb->uldd_task;

	if (task->data_dir == PCI_DMA_NONE)
		return;

	if (task->num_scatter == 0) {
		/* single-buffer transfer: the bus address was stashed in
		 * the first sg element of the SCB */
		dma_addr_t dma_handle = (dma_addr_t)
		       le64_to_cpu(ascb->scb->ssp_task.sg_element[0].bus_addr);

		pci_unmap_single(ascb->ha->pcidev, dma_handle,
				 task->total_xfer_len, task->data_dir);
		return;
	}

	asd_free_coherent(asd_ha, ascb->sg_arr);
	pci_unmap_sg(asd_ha->pcidev, task->scatter, task->num_scatter,
		     task->data_dir);
}
Ejemplo n.º 12
0
/*
 * Final teardown of the user-DMA state: unmap the SG-array descriptor
 * and the scatterlist (each only if it was mapped), then free the
 * bounce pages.
 */
void ivtv_udma_free(struct ivtv *itv)
{
	int idx;

	if (itv->udma.SG_handle)
		pci_unmap_single(itv->pdev, itv->udma.SG_handle,
			 sizeof(itv->udma.SGarray), PCI_DMA_TODEVICE);

	if (itv->udma.SG_length)
		pci_unmap_sg(itv->pdev, itv->udma.SGlist,
			     itv->udma.page_count, PCI_DMA_TODEVICE);

	for (idx = 0; idx < IVTV_DMA_SG_OSD_ENT; idx++) {
		if (itv->udma.bouncemap[idx])
			__free_page(itv->udma.bouncemap[idx]);
	}
}
Ejemplo n.º 13
0
/*
 * Allocate a DMA-capable buffer of @length bytes and build a SAA7146
 * page table for it: vmalloc 32-bit-addressable memory, create a
 * scatterlist for its pages, map the list for PCI DMA, and fill the
 * hardware page table.
 *
 * Returns the buffer address on success, NULL on failure; all
 * partially acquired resources are released via the goto chain.
 */
void *saa7146_vmalloc_build_pgtable(struct pci_dev *pci, long length, struct saa7146_pgtable *pt)
{
    int pages = (length+PAGE_SIZE-1)/PAGE_SIZE;  /* round up to whole pages */
    void *mem = vmalloc_32(length);
    int slen = 0;

    if (NULL == mem)
        goto err_null;

    if (!(pt->slist = vmalloc_to_sg(mem, pages)))
        goto err_free_mem;

    if (saa7146_pgtable_alloc(pci, pt))
        goto err_free_slist;

    pt->nents = pages;
    slen = pci_map_sg(pci,pt->slist,pt->nents,PCI_DMA_FROMDEVICE);
    if (0 == slen)
        goto err_free_pgtable;

    if (0 != saa7146_pgtable_build_single(pci, pt, pt->slist, slen))
        goto err_unmap_sg;

    return mem;

    /* error unwind, in reverse order of acquisition */
err_unmap_sg:
    pci_unmap_sg(pci, pt->slist, pt->nents, PCI_DMA_FROMDEVICE);
err_free_pgtable:
    saa7146_pgtable_free(pci, pt);
err_free_slist:
    kfree(pt->slist);
    pt->slist = NULL;
err_free_mem:
    vfree(mem);
err_null:
    return NULL;
}
Ejemplo n.º 14
0
/**
 * crystalhd_dioq_fetch_wait - Fetch element from Head.
 * @hw: HW instance; elements are fetched from hw->rx_rdyq.
 * @to_secs: Wait timeout in seconds..
 * @sig_pend: Out parameter, set to 1 if the wait was signal-interrupted.
 *
 * Return:
 *	element from the head..
 *
 * Return element from head if Q is not empty. Wait for new element
 * if Q is empty for Timeout seconds. Pictures whose Y-checksum matches
 * one of the last two delivered are treated as repeats: their packets
 * are recycled to the free queue and the wait continues.
 */
void *crystalhd_dioq_fetch_wait(struct crystalhd_hw *hw, uint32_t to_secs, uint32_t *sig_pend)
{
    struct device *dev = chddev();
    unsigned long flags = 0;
    int rc = 0;

    crystalhd_rx_dma_pkt *r_pkt = NULL;
    crystalhd_dioq_t *ioq = hw->rx_rdyq;
    uint32_t picYcomp = 0;

    unsigned long fetchTimeout = jiffies + msecs_to_jiffies(to_secs * 1000);

    if (!ioq || (ioq->sig != BC_LINK_DIOQ_SIG) || !to_secs || !sig_pend) {
        dev_err(dev, "%s: Invalid arg\n", __func__);
        return r_pkt;
    }

    spin_lock_irqsave(&ioq->lock, flags);
#ifndef __APPLE__
    while (!time_after_eq(jiffies, fetchTimeout)) {
#else
    while (fetchTimeout >= jiffies) {
#endif
        if(ioq->count == 0) {
            /* Queue empty: drop the lock and block (up to 250 ms per
             * iteration) waiting for data; rc records the wait result. */
            spin_unlock_irqrestore(&ioq->lock, flags);
            crystalhd_wait_on_event(&ioq->event, (ioq->count > 0),
                                    250, rc, false);
        }
        else
            spin_unlock_irqrestore(&ioq->lock, flags);
        /* NOTE(review): when the queue was non-empty, rc keeps its value
         * from a previous iteration (initially 0) — confirm intentional. */
        if (rc == 0) {
            // Found a packet. Check if it is a repeated picture or not
            // Drop the picture if it is a repeated picture
            // Lock against checks from get status calls
            if(down_interruptible(&hw->fetch_sem))
                goto sem_error;
#ifndef __APPLE__
            r_pkt = crystalhd_dioq_fetch(ioq);
#else
            r_pkt = (crystalhd_rx_dma_pkt*)crystalhd_dioq_fetch(ioq);
#endif
            // If format change packet, then return with out checking anything
            if (r_pkt->flags & (COMP_FLAG_PIB_VALID | COMP_FLAG_FMT_CHANGE))
                goto sem_rel_return;
            if (hw->adp->pdev->device == BC_PCI_DEVID_LINK) {
                picYcomp = link_GetRptDropParam(hw, hw->PICHeight, hw->PICWidth, (void *)r_pkt);
            }
            else {
                // For Flea, we don't have the width and height handy since they
                // come in the PIB in the picture, so this function will also
                // populate the width and height
                picYcomp = flea_GetRptDropParam(hw, (void *)r_pkt);
                // For flea it is the above function that indicated format change
                if(r_pkt->flags & (COMP_FLAG_PIB_VALID | COMP_FLAG_FMT_CHANGE))
                    goto sem_rel_return;
            }
            /* Repeat detection: same checksum as either of the last two
             * delivered pictures (or no checksum at all) -> drop it. */
            if(!picYcomp || (picYcomp == hw->LastPicNo) ||
                    (picYcomp == hw->LastTwoPicNo)) {
                //Discard picture
                if(picYcomp != 0) {
                    hw->LastTwoPicNo = hw->LastPicNo;
                    hw->LastPicNo = picYcomp;
                }
                crystalhd_dioq_add(hw->rx_freeq, r_pkt, false, r_pkt->pkt_tag);
                r_pkt = NULL;
                up(&hw->fetch_sem);
            } else {
                if(hw->adp->pdev->device == BC_PCI_DEVID_LINK) {
                    if((picYcomp - hw->LastPicNo) > 1) {
                        dev_info(dev, "MISSING %u PICTURES\n", (picYcomp - hw->LastPicNo));
                    }
                }
                hw->LastTwoPicNo = hw->LastPicNo;
                hw->LastPicNo = picYcomp;
                goto sem_rel_return;
            }
        } else if (rc == -EINTR) {
            /* Interrupted by a signal: report it to the caller. */
            *sig_pend = 1;
            return NULL;
        }
        /* Re-take the lock for the loop-condition check / next pass. */
        spin_lock_irqsave(&ioq->lock, flags);
    }
    dev_info(dev, "FETCH TIMEOUT\n");
    spin_unlock_irqrestore(&ioq->lock, flags);
    return r_pkt;
sem_error:
    return NULL;
sem_rel_return:
    up(&hw->fetch_sem);
    return r_pkt;
}

#ifdef __APPLE__
/*
 * IODMACommand segment callback (OS X / IOKit path, compiled as C++).
 * Copies one generated 64-bit DMA segment into the scatterlist-style
 * array at @segmentIndex. Truncating the address to 32 bits matches the
 * numAddressBits=32 specification used when the command is created.
 * Always returns true so segment generation continues.
 */
static bool CustomSegmentFunction(IODMACommand *target, IODMACommand::Segment64 segment, void *sglMem, UInt32 segmentIndex)
{
    struct scatterlist *sg = (scatterlist*)sglMem;
    sg[segmentIndex].dma_address = (uint32_t)segment.fIOVMAddr;
    sg[segmentIndex].dma_length = (unsigned int)segment.fLength;
    //MPCLOG(MPCLOG_DBG,"CustomSegmentFunction: 0x%X/%d/%d\n",(unsigned int)segment.fIOVMAddr,(unsigned int)segment.fLength, (unsigned int)segmentIndex);
    return true;
}
#endif
/**
 * crystalhd_map_dio - Map user address for DMA
 * @adp:	Adapter instance
 * @ubuff:	User buffer to map.
 * @ubuff_sz:	User buffer size.
 * @uv_offset:	UV buffer offset.
 * @en_422mode: TRUE:422 FALSE:420 Capture mode.
 * @dir_tx:	TRUE for Tx (To device from host)
 * @dio_hnd:	Handle to mapped DIO request.
 *
 * Return:
 *	Status.
 *
 * This routine maps user address and lock pages for DMA.
 * The trailing 0-3 bytes of a non-4-byte-aligned buffer are copied
 * into a small dedicated "fill byte" DMA page instead of the S/G list.
 *
 */
BC_STATUS crystalhd_map_dio(struct crystalhd_adp *adp, void *ubuff,
                            uint32_t ubuff_sz, uint32_t uv_offset,
                            bool en_422mode, bool dir_tx,
                            crystalhd_dio_req **dio_hnd)
{
    struct device *dev;
    crystalhd_dio_req	*dio;
    uint32_t start = 0, end = 0, count = 0;
#ifndef __APPLE__
    uint32_t spsz = 0;
    unsigned long uaddr = 0, uv_start = 0;
    int i = 0, rw = 0, res = 0, nr_pages = 0, skip_fb_sg = 0;
#else
    unsigned long uaddr = 0, uv_start = 0;
    int rw = 0;
    uint32_t nr_pages = 0;
#endif

    if (!adp || !ubuff || !ubuff_sz || !dio_hnd) {
        printk(KERN_ERR "%s: Invalid arg\n", __func__);
        return BC_STS_INV_ARG;
    }

    dev = &adp->pdev->dev;

    /* Compute pages */
    uaddr = (unsigned long)ubuff;
    count = ubuff_sz;
    end = (uaddr + count + PAGE_SIZE - 1) >> PAGE_SHIFT;
    start = uaddr >> PAGE_SHIFT;
    nr_pages = end - start;

    if (!count || ((uaddr + count) < uaddr)) {
        dev_err(dev, "User addr overflow!!\n");
        return BC_STS_INV_ARG;
    }

    dio = crystalhd_alloc_dio(adp);
    if (!dio) {
        dev_err(dev, "dio pool empty..\n");
        return BC_STS_INSUFF_RES;
    }

    if (dir_tx) {
        rw = WRITE;
        dio->direction = DMA_TO_DEVICE;
    } else {
        rw = READ;
        dio->direction = DMA_FROM_DEVICE;
    }

    if (nr_pages > dio->max_pages) {
        dev_err(dev, "max_pages(%d) exceeded(%d)!!\n",
                dio->max_pages, nr_pages);
        /* unmap_dio also returns the dio to the free pool */
        crystalhd_unmap_dio(adp, dio);
        return BC_STS_INSUFF_RES;
    }

#ifndef __APPLE__
    /* Record where the UV plane starts within the S/G list. */
    if (uv_offset) {
        uv_start = (uaddr + uv_offset)  >> PAGE_SHIFT;
        dio->uinfo.uv_sg_ix = uv_start - start;
        dio->uinfo.uv_sg_off = ((uaddr + uv_offset) & ~PAGE_MASK);
    }

    /* Trailing bytes beyond the last 4-byte boundary go through the
     * fill-byte page, copied here from user space. */
    dio->fb_size = ubuff_sz & 0x03;
    if (dio->fb_size) {
        res = copy_from_user(dio->fb_va,
                             (void *)(uaddr + count - dio->fb_size),
                             dio->fb_size);
        if (res) {
            dev_err(dev, "failed %d to copy %u fill bytes from %p\n",
                    res, dio->fb_size,
                    (void *)(uaddr + count-dio->fb_size));
            crystalhd_unmap_dio(adp, dio);
            return BC_STS_INSUFF_RES;
        }
    }

    /* Pin the user pages for DMA. */
    down_read(&current->mm->mmap_sem);
    res = get_user_pages(current, current->mm, uaddr, nr_pages, rw == READ,
                         0, dio->pages, NULL);
    up_read(&current->mm->mmap_sem);

    /* Save for release..*/
    dio->sig = crystalhd_dio_locked;
    if (res < nr_pages) {
        dev_err(dev, "get pages failed: %d-%d\n", nr_pages, res);
        /* record how many pages were pinned so unmap releases them */
        dio->page_cnt = res;
        crystalhd_unmap_dio(adp, dio);
        return BC_STS_ERROR;
    }

    dio->page_cnt = nr_pages;
    /* Get scatter/gather */
    crystalhd_init_sg(dio->sg, dio->page_cnt);
    /* first entry starts at the sub-page offset of the user address */
    crystalhd_set_sg(&dio->sg[0], dio->pages[0], 0, uaddr & ~PAGE_MASK);
    if (nr_pages > 1) {
        dio->sg[0].length = PAGE_SIZE - dio->sg[0].offset;

#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 23)
#ifdef CONFIG_X86_64
        dio->sg[0].dma_length = dio->sg[0].length;
#endif
#endif
        count -= dio->sg[0].length;
        for (i = 1; i < nr_pages; i++) {
            if (count < 4) {
                /* remainder is handled by the fill-byte page */
                spsz = count;
                skip_fb_sg = 1;
            } else {
                /* keep each entry a multiple of 4 bytes */
                spsz = (count < PAGE_SIZE) ?
                       (count & ~0x03) : PAGE_SIZE;
            }
            crystalhd_set_sg(&dio->sg[i], dio->pages[i], spsz, 0);
            count -= spsz;
        }
    } else {
        if (count < 4) {
            dio->sg[0].length = count;
            skip_fb_sg = 1;
        } else {
            dio->sg[0].length = count - dio->fb_size;
        }
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 23)
#ifdef CONFIG_X86_64
        dio->sg[0].dma_length = dio->sg[0].length;
#endif
#endif
    }
    dio->sg_cnt = pci_map_sg(adp->pdev, dio->sg,
                             dio->page_cnt, dio->direction);
    if (dio->sg_cnt <= 0) {
        dev_err(dev, "sg map %d-%d\n", dio->sg_cnt, dio->page_cnt);
        crystalhd_unmap_dio(adp, dio);
        return BC_STS_ERROR;
    }
    if (dio->sg_cnt && skip_fb_sg)
        dio->sg_cnt -= 1;
#else
    IODMACommand          *dma_command;
    IOMemoryDescriptor    *mem_desc;
    IOReturn              result;

    if (uv_offset) {
        uv_start = (uaddr + uv_offset)  >> PAGE_SHIFT;
        dio->uinfo.uv_sg_ix = uv_start - start;
        /* NOTE(review): the Linux path above uses `& ~PAGE_MASK` here;
         * `& PAGE_MASK` yields the page base, not the intra-page offset.
         * Looks like an operator slip — confirm before changing. */
        dio->uinfo.uv_sg_off = ((uaddr + uv_offset) & PAGE_MASK);
    }

    dio->fb_size = ubuff_sz & 0x03;

    // map user memory into kernel memory
    mem_desc = IOMemoryDescriptor::withAddress(uaddr, count, dir_tx ? kIODirectionIn : kIODirectionOut,
               current_task() );
    if (mem_desc) {
        result = mem_desc->prepare();
        //IOLog("bc_link_map_dio:mem_desc=0x%X, prepare result 0x%X \n", (unsigned int)mem_desc, (int)result);
        dio->io_class = (void*)mem_desc;
    } else {
        dev_err(&adp->pdev->dev, "bc_link_map_dio:IOMemoryDescriptor::withAddress failed\n");
        crystalhd_free_dio(adp,dio);
        return BC_STS_INSUFF_RES;
    }

    // Save for release..
    dio->sig = crystalhd_dio_locked;

    // check transfer count, counts less than four are handled using only the fill byte page
    if (count > 3) {
        do {
            // 32 bit physical address generation using IODMACommand
            // any memory above 4Gb in the memory descriptor will be buffered
            // to memory below the 4G line, on machines without remapping HW support
            dma_command = IODMACommand::withSpecification(
                              // custom segment function
                              (IODMACommand::SegmentFunction)CustomSegmentFunction,
                              // numAddressBits
                              32,
                              // maxSegmentSize
                              PAGE_SIZE,
                              // mappingOptions - kMapped for DMA addresses
                              IODMACommand::kMapped,
                              // maxTransferSize - no restriction
                              0,
                              // alignment - no restriction
                              1 );
            if (!dma_command) {
                dev_err(&adp->pdev->dev, "IODMACommand::withSpecification failed\n");
                break;
            }

            //IOLog("bc_link_map_dio:dma_command=0x%X \n", (unsigned int)dma_command);
            // point IODMACommand at the memory descriptor, don't use auto prepare option
            result = dma_command->setMemoryDescriptor(mem_desc, false);
            if (kIOReturnSuccess != result) {
                dev_err(&adp->pdev->dev, "setMemoryDescriptor failed (0x%x)\n", result);
                break;
            }
            dio->io_class = (void*)dma_command;
            result = dma_command->prepare(0, count, true);
            //IOLog("bc_link_map_dio:dma_command->prepare() result 0x%X \n",(int)result);

            // generate scatter/gather list using custom segment function. This routine will make
            // sure that the first s/g entry will have the correct address and length for user
            // addresses that are not page aligned.
            UInt64 offset = 0;
            result = dma_command->gen32IOVMSegments(&offset, (IODMACommand::Segment32*)dio->sg, (UInt32*)&nr_pages);

            //IOLog("bc_link_map_dio:gen32IOVMSegments nr_pages %d, result %d\n", (int)nr_pages, (int)result);
            // if ending page is not end 4 byte aligned, decrease last page transfer length
            //  as those bytes will be handled using the fill byte page.
            if(dio->fb_size) {
                dio->sg[nr_pages-1].dma_length -= dio->fb_size;
                // check for last page == same size as dio->fb_size
                if (dio->sg[nr_pages-1].dma_length == 0) {
                    nr_pages--;
                }
            }
            // If need a fill byte page
            if(dio->fb_size) {
                UInt64          byte_count;
                UInt64          length;

                // manually copy those bytes into the fill byte page
                offset = count - dio->fb_size;
                length = dio->fb_size;
                byte_count = mem_desc->readBytes(offset, dio->fb_va, length);
            }
            dio->sg_cnt = nr_pages;
        } while(false);

        if (dio->sg_cnt <= 0) {
            dev_err(&adp->pdev->dev, "sg map %d \n",dio->sg_cnt);
            crystalhd_unmap_dio(adp,dio);
            return BC_STS_ERROR;
        }
    } else {
        // three bytes or less, handle this transfer using only the fill_byte page.
        UInt64          byte_count;
        UInt64          offset;
        UInt64          length;

        offset = 0;
        length = dio->fb_size;
        byte_count = mem_desc->readBytes(offset, dio->fb_va, length);
        dio->sg_cnt = 0;
        dio->sg[0].dma_length = count;
    }
#endif
    dio->sig = crystalhd_dio_sg_mapped;
    /* Fill in User info.. */
    dio->uinfo.xfr_len   = ubuff_sz;
#ifndef __APPLE__
    dio->uinfo.xfr_buff  = ubuff;
#else
    dio->uinfo.xfr_buff  = (uint8_t*)ubuff;
#endif
    dio->uinfo.uv_offset = uv_offset;
    dio->uinfo.b422mode  = en_422mode;
    dio->uinfo.dir_tx    = dir_tx;

    *dio_hnd = dio;

    return BC_STS_SUCCESS;
}

/**
 * crystalhd_unmap_dio - Release mapped resources
 * @adp: Adapter instance
 * @dio: DIO request instance
 *
 * Return:
 *	Status.
 *
 * Unmaps the user buffer pages: on Linux, dirties and releases the
 * pinned pages and undoes the S/G mapping; on OS X, completes and
 * releases the IODMACommand / IOMemoryDescriptor pair. The DIO request
 * is then returned to the adapter's free pool.
 */
BC_STATUS crystalhd_unmap_dio(struct crystalhd_adp *adp, crystalhd_dio_req *dio)
{
#ifndef __APPLE__
    struct page *page = NULL;
    int j = 0;

    if (!adp || !dio) {
        printk(KERN_ERR "%s: Invalid arg\n", __func__);
        return BC_STS_INV_ARG;
    }

    /* Release pinned user pages, unless the request was never locked. */
    if ((dio->page_cnt > 0) && (dio->sig != crystalhd_dio_inv)) {
        for (j = 0; j < dio->page_cnt; j++) {
            page = dio->pages[j];
            if (page) {
                /* Device wrote into this page; tell the VM. */
                if (!PageReserved(page) &&
                        (dio->direction == DMA_FROM_DEVICE))
                    SetPageDirty(page);
                page_cache_release(page);
            }
        }
    }
    /* Undo pci_map_sg() only if the S/G list was actually mapped. */
    if (dio->sig == crystalhd_dio_sg_mapped)
        pci_unmap_sg(adp->pdev, dio->sg, dio->page_cnt, dio->direction);
#else
    IODMACommand		*dma_command;
    IOMemoryDescriptor	*mem_desc;

    if(!adp || !dio ) {
        dev_err(chddev(), "bc_link_unmap_dio:Invalid arg \n");
        return BC_STS_INV_ARG;
    }
    dma_command = OSDynamicCast(IODMACommand, (OSMetaClassBase*)dio->io_class);
    //MPCLOG(MPCLOG_DBG, "bc_link_unmap_dio:dma_command=0x%X \n", (unsigned int)dma_command);
    if (dma_command) {
        // fetch current IOMemoryDescriptor before dma_command->clearMemoryDescriptor;
        mem_desc = (IOMemoryDescriptor*)dma_command->getMemoryDescriptor();
        dma_command->complete();
        dma_command->clearMemoryDescriptor();
        SAFE_RELEASE(dma_command);
        mem_desc->complete();
        SAFE_RELEASE(mem_desc);
        dio->io_class = NULL;
    }
#endif

    crystalhd_free_dio(adp, dio);

    return BC_STS_SUCCESS;
}

/**
 * crystalhd_create_dio_pool - Allocate mem pool for DIO management.
 * @adp: Adapter instance
 * @max_pages: Max pages for size calculation.
 *
 * Return:
 *	system error.
 *
 * This routine creates a memory pool to hold dio context for
 * HW Direct IO operation. Each element is one allocation carved into
 * the dio struct, the page-pointer array (Linux only) and the
 * scatterlist array. On failure the caller is expected to tear down
 * via crystalhd_destroy_dio_pool().
 */
int crystalhd_create_dio_pool(struct crystalhd_adp *adp, uint32_t max_pages)
{
    struct device *dev;
    uint32_t asz = 0, i = 0;
    uint8_t	*temp;
    crystalhd_dio_req *dio;

    if (!adp || !max_pages) {
        printk(KERN_ERR "%s: Invalid arg\n", __func__);
        return -EINVAL;
    }

    dev = &adp->pdev->dev;

    /* Get dma memory for fill byte handling..*/
    adp->fill_byte_pool = pci_pool_create("crystalhd_fbyte",
                                          adp->pdev, 8, 8, 0);
    if (!adp->fill_byte_pool) {
        dev_err(dev, "failed to create fill byte pool\n");
        return -ENOMEM;
    }

#ifndef __APPLE__
    /* Get the max size from user based on 420/422 modes */
    asz =  (sizeof(*dio->pages) * max_pages) +
           (sizeof(*dio->sg) * max_pages) + sizeof(*dio);
#else
    asz = (sizeof(*dio->sg) * max_pages) + sizeof(*dio);
#endif

    dev_dbg(dev, "Initializing Dio pool %d %d %x %p\n",
            BC_LINK_SG_POOL_SZ, max_pages, asz, adp->fill_byte_pool);

    for (i = 0; i < BC_LINK_SG_POOL_SZ; i++) {
        temp = (uint8_t *)kzalloc(asz, GFP_KERNEL);
        if (temp == NULL) {
            dev_err(dev, "Failed to alloc %d mem\n", asz);
            return -ENOMEM;
        }

        /* Carve the single allocation into its sub-arrays. */
        dio = (crystalhd_dio_req *)temp;
        temp += sizeof(*dio);
#ifndef __APPLE__
        dio->pages = (struct page **)temp;
        temp += (sizeof(*dio->pages) * max_pages);
#endif
        /*
         * Fix: the previous __APPLE__ branch advanced temp by
         * sizeof(*dio) a second time here, which placed the sg array
         * past the end of the asz-sized allocation computed above
         * (a heap overflow of sizeof(*dio) bytes).
         */
        dio->sg = (struct scatterlist *)temp;
        dio->max_pages = max_pages;
        dio->fb_va = pci_pool_alloc(adp->fill_byte_pool, GFP_KERNEL,
                                    &dio->fb_pa);
        if (!dio->fb_va) {
            dev_err(dev, "fill byte alloc failed.\n");
            kfree(dio);	/* Fix: don't leak the element just allocated */
            return -ENOMEM;
        }

        /* Seed the free pool with this element. */
        crystalhd_free_dio(adp, dio);
    }

    return 0;
}

/**
 * crystalhd_destroy_dio_pool - Release DIO mem pool.
 * @adp: Adapter instance
 *
 * Return:
 *	none.
 *
 * Drains the DIO free list, returning each element's fill-byte DMA
 * memory to the pci pool before freeing the element, then destroys
 * the fill-byte pool itself.
 */
void crystalhd_destroy_dio_pool(struct crystalhd_adp *adp)
{
    crystalhd_dio_req *dio;
    int freed = 0;

    if (!adp) {
        printk(KERN_ERR "%s: Invalid arg\n", __func__);
        return;
    }

    for (dio = crystalhd_alloc_dio(adp); dio != NULL;
         dio = crystalhd_alloc_dio(adp)) {
        if (dio->fb_va)
            pci_pool_free(adp->fill_byte_pool, dio->fb_va, dio->fb_pa);
        freed++;
        kfree(dio);
    }

    if (adp->fill_byte_pool) {
        pci_pool_destroy(adp->fill_byte_pool);
        adp->fill_byte_pool = NULL;
    }

    dev_dbg(&adp->pdev->dev, "Released dio pool %d\n", freed);
}
Ejemplo n.º 15
0
/**
 * qla24xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_start_scsi(srb_t *sp)
{
    int		ret;
    unsigned long   flags;
    scsi_qla_host_t	*ha;
    struct scsi_cmnd *cmd;
    uint32_t	*clr_ptr;
    uint32_t        index;
    uint32_t	handle;
    struct cmd_type_7 *cmd_pkt;
    struct scatterlist *sg;
    uint16_t	cnt;
    uint16_t	req_cnt;
    uint16_t	tot_dsds;
    struct device_reg_24xx __iomem *reg;

    /* Setup device pointers. */
    ret = 0;
    ha = sp->ha;
    reg = &ha->iobase->isp24;
    cmd = sp->cmd;
    /* So we know we haven't pci_map'ed anything yet */
    tot_dsds = 0;

    /* Send marker if required */
    if (ha->marker_needed != 0) {
        if (qla2x00_marker(ha, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
            return QLA_FUNCTION_FAILED;
        }
        ha->marker_needed = 0;
    }

    /* Acquire ring specific lock */
    spin_lock_irqsave(&ha->hardware_lock, flags);

    /* Check for room in outstanding command list. */
    handle = ha->current_outstanding_cmd;
    for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
        handle++;
        /* handle 0 is never used; wrap to 1 */
        if (handle == MAX_OUTSTANDING_COMMANDS)
            handle = 1;
        if (ha->outstanding_cmds[handle] == 0)
            break;
    }
    if (index == MAX_OUTSTANDING_COMMANDS)
        goto queuing_error;

    /* Map the sg table so we have an accurate count of sg entries needed */
    if (cmd->use_sg) {
        sg = (struct scatterlist *) cmd->request_buffer;
        tot_dsds = pci_map_sg(ha->pdev, sg, cmd->use_sg,
                              cmd->sc_data_direction);
        if (tot_dsds == 0)
            goto queuing_error;
    } else if (cmd->request_bufflen) {
        dma_addr_t      req_dma;

        req_dma = pci_map_single(ha->pdev, cmd->request_buffer,
                                 cmd->request_bufflen, cmd->sc_data_direction);
        if (dma_mapping_error(req_dma))
            goto queuing_error;

        sp->dma_handle = req_dma;
        tot_dsds = 1;
    }

    /* Ensure the request ring has room for all IOCBs of this command. */
    req_cnt = qla24xx_calc_iocbs(tot_dsds);
    if (ha->req_q_cnt < (req_cnt + 2)) {
        /* re-read the hardware out-pointer to refresh free count */
        cnt = (uint16_t)RD_REG_DWORD_RELAXED(&reg->req_q_out);
        if (ha->req_ring_index < cnt)
            ha->req_q_cnt = cnt - ha->req_ring_index;
        else
            ha->req_q_cnt = ha->request_q_length -
                            (ha->req_ring_index - cnt);
    }
    if (ha->req_q_cnt < (req_cnt + 2))
        goto queuing_error;

    /* Build command packet. */
    ha->current_outstanding_cmd = handle;
    ha->outstanding_cmds[handle] = sp;
    sp->ha = ha;
    sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
    ha->req_q_cnt -= req_cnt;

    cmd_pkt = (struct cmd_type_7 *)ha->request_ring_ptr;
    cmd_pkt->handle = handle;

    /* Zero out remaining portion of packet. */
    /*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
    clr_ptr = (uint32_t *)cmd_pkt + 2;	/* skip the 8-byte header just set */
    memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
    cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

    /* Set NPORT-ID and LUN number*/
    cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
    cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
    cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
    cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;

    int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
    host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

    /* Load SCSI command packet. */
    memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
    host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

    cmd_pkt->byte_count = cpu_to_le32((uint32_t)cmd->request_bufflen);

    /* Build IOCB segments */
    qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);

    /* Set total data segment count. */
    cmd_pkt->entry_count = (uint8_t)req_cnt;
    wmb();	/* packet must be fully visible before ringing doorbell */

    /* Adjust ring index. */
    ha->req_ring_index++;
    if (ha->req_ring_index == ha->request_q_length) {
        ha->req_ring_index = 0;
        ha->request_ring_ptr = ha->request_ring;
    } else
        ha->request_ring_ptr++;

    sp->flags |= SRB_DMA_VALID;
    sp->state = SRB_ACTIVE_STATE;

    /* Set chip new ring index. */
    WRT_REG_DWORD(&reg->req_q_in, ha->req_ring_index);
    RD_REG_DWORD_RELAXED(&reg->req_q_in);		/* PCI Posting. */

    /* Manage unprocessed RIO/ZIO commands in response queue. */
    if (ha->flags.process_response_queue &&
            ha->response_ring_ptr->signature != RESPONSE_PROCESSED)
        qla24xx_process_response_queue(ha);

    spin_unlock_irqrestore(&ha->hardware_lock, flags);
    return QLA_SUCCESS;

queuing_error:
    /* undo whichever DMA mapping (sg or single) was established above */
    if (cmd->use_sg && tot_dsds) {
        sg = (struct scatterlist *) cmd->request_buffer;
        pci_unmap_sg(ha->pdev, sg, cmd->use_sg,
                     cmd->sc_data_direction);
    } else if (tot_dsds) {
        pci_unmap_single(ha->pdev, sp->dma_handle,
                         cmd->request_bufflen, cmd->sc_data_direction);
    }
    spin_unlock_irqrestore(&ha->hardware_lock, flags);

    return QLA_FUNCTION_FAILED;
}
Ejemplo n.º 16
0
/*
 * Perform a complete VME DMA transfer on an available channel.
 *
 * @desc:	transfer descriptor (direction, addresses, length);
 *		desc->status is updated with the bridge status on return.
 * @to_user:	1 - the transfer is to/from a user-space buffer
 *		0 - the transfer is to/from a kernel buffer
 *
 * Validates the descriptor, acquires a channel, sets up and starts the
 * DMA, then waits interruptibly for completion (aborting the transfer
 * if a signal arrives) and releases all resources.
 * Returns 0 on success or a negative errno.
 */
static int __vme_do_dma(struct vme_dma *desc, int to_user)
{
	int rc = 0;
	struct dma_channel *channel;

	/* First check the transfer length */
	if (!desc->length) {
		printk(KERN_ERR PFX "%s: Wrong length %d\n",
		       __func__, desc->length);
		return -EINVAL;
	}

	/* Check the transfer direction validity */
	if ((desc->dir != VME_DMA_FROM_DEVICE) &&
	    (desc->dir != VME_DMA_TO_DEVICE)) {
		printk(KERN_ERR PFX "%s: Wrong direction %d\n",
		       __func__, desc->dir);
		return -EINVAL;
	}

	/* Check we're within a 32-bit address space */
	if (desc->src.addru || desc->dst.addru) {
		printk(KERN_ERR PFX "%s: Addresses are not 32-bit\n", __func__);
		return -EINVAL;
	}

	/* Acquire an available channel */
	channel = vme_dma_channel_acquire();
	if (IS_ERR(channel))
		return PTR_ERR(channel);

	memcpy(&channel->desc, desc, sizeof(struct vme_dma));

	/* Setup the DMA transfer */
	rc = vme_dma_setup(channel, to_user);
	if (rc)
		goto out_release_channel;

	/* Start the DMA transfer */
	vme_dma_start(channel);

	/* Wait for DMA completion */
	rc = wait_event_interruptible(channel->wait,
				 !tsi148_dma_busy(channel));

	/* React to user-space signals by aborting the ongoing DMA transfer */
	if (rc) {
		tsi148_dma_abort(channel);
		/* leave some time for the bridge to clear the DMA channel */
		udelay(10);
	}

	desc->status = tsi148_dma_get_status(channel);

	/* Now do some cleanup and we're done */
	tsi148_dma_release(channel);

	/* Undo vme_dma_setup(): bus mapping, pinned pages, sg list. */
	pci_unmap_sg(vme_bridge->pdev, channel->sgl, channel->sg_mapped,
		     desc->dir);

	sgl_unmap_user_pages(channel->sgl, channel->sg_pages, 0, to_user);

	kfree(channel->sgl);

out_release_channel:
	vme_dma_channel_release(channel);

	/* Signal we're done in case we're in module exit */
	/* NOTE(review): channel is still dereferenced here after
	 * vme_dma_channel_release() — confirm the channel object
	 * remains valid until module teardown. */
	wake_up(&channel_wait[channel->num]);

	return rc;
}
Ejemplo n.º 17
0
/****************************************************************
 *	Name:	Irq_Handler	:LOCAL
 *
 *	Description:	Interrupt handler for the PCI-2000 adapters.
 *					Scans all registered adapters for a pending tag,
 *					matches it to the outstanding command, unmaps
 *					that command's DMA buffers, retrieves any error
 *					code from the controller and completes the op.
 *
 *	Parameters:		irq		- Hardware IRQ number.
 *					dev_id	- Device ID cookie (unused here).
 *					regs	- CPU registers at interrupt (unused here).
 *
 *	Returns:		Nothing.
 *
 ****************************************************************/
static void Irq_Handler (int irq, void *dev_id, struct pt_regs *regs)
	{
	struct Scsi_Host   *shost = NULL;	// Pointer to host data block
	PADAPTER2000		padapter;		// Pointer to adapter control structure
	PDEV2000			pdev;
	Scsi_Cmnd		   *SCpnt;
	UCHAR				tag = 0;
	UCHAR				tag0;
	ULONG				error;
	int					pun;
	int					bus;
	int					z;
    unsigned long		flags;

    /*
     * Disable interrupts, if they aren't already disabled and acquire
     * the I/O spinlock.
     */
    spin_lock_irqsave (&io_request_lock, flags);

	DEB(printk ("\npci2000 received interrupt "));
	for ( z = 0; z < NumAdapters;  z++ )										// scan for interrupt to process
		{
		if ( PsiHost[z]->irq == (UCHAR)(irq & 0xFF) )
			{
			tag = inb_p (HOSTDATA(PsiHost[z])->tag);
			if (  tag )
				{
				shost = PsiHost[z];
				break;
				}
			}
		}

	if ( !shost )
		{
		// No adapter on this IRQ has a pending tag: shared-IRQ spurious call
		DEB (printk ("\npci2000: not my interrupt"));
		goto irq_return;
		}

	padapter = HOSTDATA(shost);

	tag0 = tag & 0x7F;															// mask off the error bit
	for ( bus = 0;  bus < MAX_BUS;  bus++ )										// scan the busses
    	{
		for ( pun = 0;  pun < MAX_UNITS;  pun++ )								// scan the targets
    		{
			pdev = &padapter->dev[bus][pun];
			if ( !pdev->tag )
    			continue;
			if ( pdev->tag == tag0 )											// is this it?
				{
				pdev->tag = 0;
				SCpnt = pdev->SCpnt;
				goto unmapProceed;
    			}
			}
    	}

	// Tag did not match any outstanding command: just acknowledge it
	outb_p (0xFF, padapter->tag);												// clear the op interrupt
	outb_p (CMD_DONE, padapter->cmd);											// complete the op
	goto irq_return;;															// done, but, with what?

	// Unmap whatever DMA buffers were set up when the command was issued
unmapProceed:;
	if ( !bus )
		{
		// bus 0: some opcodes used a fixed-size single mapping (sense data,
		// capacity data) or no data at all — TODO confirm against issue path
		switch ( SCpnt->cmnd[0] )
			{
			case SCSIOP_TEST_UNIT_READY:
				pci_unmap_single (padapter->pdev, SCpnt->SCp.have_data_in, sizeof (SCpnt->sense_buffer), PCI_DMA_FROMDEVICE);
				goto irqProceed;
			case SCSIOP_READ_CAPACITY:
				pci_unmap_single (padapter->pdev, SCpnt->SCp.have_data_in, 8, PCI_DMA_FROMDEVICE);
				goto irqProceed;
			case SCSIOP_VERIFY:
			case SCSIOP_START_STOP_UNIT:
			case SCSIOP_MEDIUM_REMOVAL:
				goto irqProceed;
			}
		}
	if ( SCpnt->SCp.have_data_in )
		pci_unmap_single (padapter->pdev, SCpnt->SCp.have_data_in, SCpnt->request_bufflen, scsi_to_pci_dma_dir(SCpnt->sc_data_direction));
	else 
		{
		if ( SCpnt->use_sg )
			pci_unmap_sg (padapter->pdev, (struct scatterlist *)SCpnt->request_buffer, SCpnt->use_sg, scsi_to_pci_dma_dir(SCpnt->sc_data_direction));
		}

irqProceed:;
	if ( tag & ERR08_TAGGED )												// is there an error here?
		{
		if ( WaitReady (padapter) )
			{
			OpDone (SCpnt, DID_TIME_OUT << 16);
			goto irq_return;;
			}

		outb_p (tag0, padapter->mb0);										// get real error code
		outb_p (CMD_ERROR, padapter->cmd);
		if ( WaitReady (padapter) )											// wait for controller to suck up the op
			{
			OpDone (SCpnt, DID_TIME_OUT << 16);
			goto irq_return;;
			}

		error = inl (padapter->mb0);										// get error data
		outb_p (0xFF, padapter->tag);										// clear the op interrupt
		outb_p (CMD_DONE, padapter->cmd);									// complete the op

		DEB (printk ("status: %lX ", error));
		if ( error == 0x00020002 )											// is this error a check condition?
			{
			if ( bus )														// are we doint SCSI commands?
				{
				OpDone (SCpnt, (DID_OK << 16) | 2);
				goto irq_return;;
				}
			if ( *SCpnt->cmnd == SCSIOP_TEST_UNIT_READY )
				OpDone (SCpnt, (DRIVER_SENSE << 24) | (DID_OK << 16) | 2);	// test caller we have sense data too
			else
				OpDone (SCpnt, DID_ERROR << 16);
			goto irq_return;;
			}
		OpDone (SCpnt, DID_ERROR << 16);
		goto irq_return;;
		}

	// No error bit set: acknowledge the op and complete successfully
	outb_p (0xFF, padapter->tag);											// clear the op interrupt
	outb_p (CMD_DONE, padapter->cmd);										// complete the op
	OpDone (SCpnt, DID_OK << 16);

irq_return:;
    /*
     * Release the I/O spinlock and restore the original flags
     * which will enable interrupts if and only if they were
     * enabled on entry.
     */
    spin_unlock_irqrestore (&io_request_lock, flags);
	}
Ejemplo n.º 18
0
/*
 * sgiioc4_build_dma_table - translate a request's scatterlist into the
 * IOC4 hardware DMA (PRD) table.
 *
 * Every scatterlist segment is split into pieces that never cross a 64K
 * boundary, since one table entry can cover at most 0x10000 bytes from
 * its starting address.  Each entry is written as four words in the
 * IOC4 format: 0, big-endian address, 0, big-endian byte count.  The
 * final entry gets bit 31 of its byte-count word set as the
 * end-of-table marker.
 *
 * Returns the number of entries built, or 0 to make the caller revert
 * to PIO (empty scatterlist, or table overflow — in which case the
 * scatterlist mapping is torn down here).
 */
static unsigned int
sgiioc4_build_dma_table(ide_drive_t * drive, struct request *rq, int ddir)
{
	ide_hwif_t *hwif = HWIF(drive);
	unsigned int *prd = hwif->dmatable_cpu;
	unsigned int entries = 0;
	unsigned int nents;
	struct scatterlist *seg;

	hwif->sg_nents = nents = ide_build_sglist(drive, rq);

	if (!nents)
		return 0;	/* sglist of length Zero */

	for (seg = hwif->sg_table; nents && sg_dma_len(seg); seg++, nents--) {
		dma_addr_t addr = sg_dma_address(seg);
		int remaining = sg_dma_len(seg);

		while (remaining) {
			u32 bcount;

			if (entries++ >= IOC4_PRD_ENTRIES) {
				printk(KERN_WARNING
				       "%s: DMA table too small\n",
				       drive->name);
				goto use_pio_instead;
			}

			/* never let one entry cross a 64K boundary */
			bcount = 0x10000 - (addr & 0xffff);
			if (bcount > remaining)
				bcount = remaining;

			/* IOC4 dma-table entry: 0, addr, 0, count */
			*prd++ = 0x0;
			*prd++ = cpu_to_be32(addr);
			*prd++ = 0x0;
			*prd++ = cpu_to_be32(bcount);

			addr += bcount;
			remaining -= bcount;
		}
	}

	if (entries) {
		/* mark the last byte-count word as end-of-table */
		prd[-1] |= cpu_to_be32(0x80000000);
		return entries;
	}

use_pio_instead:
	pci_unmap_sg(hwif->pci_dev, hwif->sg_table, hwif->sg_nents,
		     hwif->sg_dma_direction);

	return 0;		/* revert to PIO for this request */
}
Ejemplo n.º 19
0
/*
 * asd_map_scatterlist - DMA-map a task's data buffer and fill in the
 * inline scatter-gather elements of its SCB.
 *
 * Three shapes are handled:
 *  - no data phase: nothing to do;
 *  - a single flat buffer (num_scatter == 0): one mapped element;
 *  - a scatterlist: up to 3 mapped segments fit in the inline elements,
 *    otherwise a full table is built in a coherent buffer and chained
 *    to from the third inline element.
 *
 * Returns 0 on success or -ENOMEM on mapping/allocation failure (the
 * scatterlist is unmapped again on the allocation-failure path).
 */
static inline int asd_map_scatterlist(struct sas_task *task,
				      struct sg_el *sg_arr,
				      gfp_t gfp_flags)
{
	struct asd_ascb *ascb = task->lldd_task;
	struct asd_ha_struct *asd_ha = ascb->ha;
	struct scatterlist *sg;
	int nents, err;

	/* No data phase: nothing to map. */
	if (task->data_dir == PCI_DMA_NONE)
		return 0;

	/* Single flat buffer: map it and emit one terminating element. */
	if (task->num_scatter == 0) {
		dma_addr_t addr = pci_map_single(asd_ha->pcidev, task->scatter,
						 task->total_xfer_len,
						 task->data_dir);
		sg_arr[0].bus_addr = cpu_to_le64((u64)addr);
		sg_arr[0].size = cpu_to_le32(task->total_xfer_len);
		sg_arr[0].flags |= ASD_SG_EL_LIST_EOL;
		return 0;
	}

	nents = pci_map_sg(asd_ha->pcidev, task->scatter, task->num_scatter,
			   task->data_dir);
	if (nents == 0)
		return -ENOMEM;

	if (nents <= 3) {
		/* Everything fits in the SCB's inline elements. */
		int i;

		for (i = 0, sg = task->scatter; i < nents; i++, sg++) {
			sg_arr[i].bus_addr =
				cpu_to_le64((u64)sg_dma_address(sg));
			sg_arr[i].size = cpu_to_le32((u32)sg_dma_len(sg));
		}
		sg_arr[i - 1].flags |= ASD_SG_EL_LIST_EOL;
		return 0;
	}

	/*
	 * More than three segments: build the complete table in a
	 * separate coherent buffer and chain to it from the SCB.
	 */
	{
		int i;

		ascb->sg_arr = asd_alloc_coherent(asd_ha,
						  nents * sizeof(struct sg_el),
						  gfp_flags);
		if (!ascb->sg_arr) {
			err = -ENOMEM;
			goto err_unmap;
		}
		for (i = 0, sg = task->scatter; i < nents; i++, sg++) {
			struct sg_el *el =
				&((struct sg_el *)ascb->sg_arr->vaddr)[i];

			el->bus_addr = cpu_to_le64((u64)sg_dma_address(sg));
			el->size = cpu_to_le32((u32)sg_dma_len(sg));
			if (i == nents - 1)
				el->flags |= ASD_SG_EL_LIST_EOL;
		}

		/* First two inline elements mirror the first two segments,
		 * with end-of-segment + offset pointing past themselves. */
		for (i = 0, sg = task->scatter; i < 2; i++, sg++) {
			sg_arr[i].bus_addr =
				cpu_to_le64((u64)sg_dma_address(sg));
			sg_arr[i].size = cpu_to_le32((u32)sg_dma_len(sg));
		}
		sg_arr[1].next_sg_offs = 2 * sizeof(*sg_arr);
		sg_arr[1].flags |= ASD_SG_EL_LIST_EOS;

		/* Third inline element points at the external table. */
		memset(&sg_arr[2], 0, sizeof(*sg_arr));
		sg_arr[2].bus_addr = cpu_to_le64((u64)ascb->sg_arr->dma_handle);
	}

	return 0;
err_unmap:
	pci_unmap_sg(asd_ha->pcidev, task->scatter, task->num_scatter,
		     task->data_dir);
	return err;
}