Example #1
static struct ehci_qh *ehci_qh_alloc (struct ehci_hcd *ehci, int flags)
{
    struct ehci_qh		*qh;
    //dma_addr_t		dma;

    void*		real;

    real = kmalloc( sizeof( struct ehci_qh ) + 32, MEMF_KERNEL | MEMF_CLEAR );
    if (!real)
        return NULL;

    /* round the pointer up to the next 32-byte boundary inside the
     * over-sized allocation; kfree() later needs qh_real, not qh */
    qh = ( struct ehci_qh* )( ( (uint32)real + 32 ) & ~31 );

#if 0
    qh = (struct ehci_qh *)
         pci_pool_alloc (ehci->qh_pool, flags, &dma);
#endif

    memset (qh, 0, sizeof *qh);

    atomic_set(&qh->refcount, 1);
    qh->qh_real = real;
    // INIT_LIST_HEAD (&qh->qh_list);
    INIT_LIST_HEAD (&qh->qtd_list);

    /* dummy td enables safe urb queuing */
    qh->dummy = ehci_qtd_alloc (ehci, flags);
    if (qh->dummy == 0) {
        ehci_dbg (ehci, "no dummy td\n");
        kfree( qh->qh_real );
//		pci_pool_free (ehci->qh_pool, qh, qh->qh_dma);
        qh = 0;
    }
    return qh;
}
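
The kmalloc-plus-mask pattern above stands in for pci_pool_alloc on a platform without DMA pools: the block is over-allocated by 32 bytes and the returned pointer is rounded up to the next 32-byte boundary, while the raw pointer is kept in qh->qh_real so kfree() sees the address kmalloc actually returned. A minimal, generic sketch of the same idiom; the helper name and the userspace calloc/free pairing are illustrative, not part of the driver:

#include <stdint.h>
#include <stdlib.h>

/* Illustrative helper: allocate 'size' bytes aligned to 'align' (a power of
 * two), keeping the raw pointer so the caller can later free() it. */
static void *alloc_aligned(size_t size, size_t align, void **raw_out)
{
	void *raw = calloc(1, size + align);	/* mirrors MEMF_CLEAR */

	if (!raw)
		return NULL;

	*raw_out = raw;	/* pass this, not the aligned pointer, to free() */
	return (void *)(((uintptr_t)raw + align) & ~(uintptr_t)(align - 1));
}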
Example #2
static struct ehci_qh *ehci_qh_alloc (struct ehci_hcd *ehci, int flags)
{
	struct ehci_qh		*qh;
	dma_addr_t		dma;

	qh = (struct ehci_qh *)
		pci_pool_alloc (ehci->qh_pool, flags, &dma);
	if (!qh)
		return qh;

	memset (qh, 0, sizeof *qh);
	atomic_set (&qh->refcount, 1);
	qh->qh_dma = dma;
	// INIT_LIST_HEAD (&qh->qh_list);
	INIT_LIST_HEAD (&qh->qtd_list);

	/* dummy td enables safe urb queuing */
	qh->dummy = ehci_qtd_alloc (ehci, flags);
	if (qh->dummy == 0) {
		ehci_dbg (ehci, "no dummy td\n");
		pci_pool_free (ehci->qh_pool, qh, qh->qh_dma);
		qh = 0;
	}
	return qh;
}
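
Both EHCI variants draw their queue heads from a pool set up during controller initialization. A hedged sketch of that setup; the helper name, the pdev handle and the 4096-byte allocation boundary are assumptions, while the 32-byte alignment matches what the hardware-visible queue head requires:

/* Hypothetical init helper: create the pool ehci_qh_alloc() draws from. */
static int ehci_qh_pool_init(struct ehci_hcd *ehci, struct pci_dev *pdev)
{
	ehci->qh_pool = pci_pool_create("ehci_qh", pdev,
					sizeof(struct ehci_qh),
					32,	/* byte alignment (assumed) */
					4096);	/* allocation boundary (assumed) */
	if (!ehci->qh_pool)
		return -ENOMEM;
	return 0;
}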
Example #3
/**
 * crystalhd_create_dio_pool - Allocate mem pool for DIO management.
 * @adp: Adapter instance
 * @max_pages: Max pages for size calculation.
 *
 * Return:
 *	system error.
 *
 * This routine creates a memory pool to hold dio contexts for
 * HW Direct IO operation.
 */
int crystalhd_create_dio_pool(struct crystalhd_adp *adp, uint32_t max_pages)
{
	struct device *dev;
	uint32_t asz = 0, i = 0;
	uint8_t	*temp;
	struct crystalhd_dio_req *dio;

	if (!adp || !max_pages) {
		printk(KERN_ERR "%s: Invalid arg\n", __func__);
		return -EINVAL;
	}

	dev = &adp->pdev->dev;

	/* Get dma memory for fill byte handling..*/
	adp->fill_byte_pool = pci_pool_create("crystalhd_fbyte",
					      adp->pdev, 8, 8, 0);
	if (!adp->fill_byte_pool) {
		dev_err(dev, "failed to create fill byte pool\n");
		return -ENOMEM;
	}

	/* Get the max size from user based on 420/422 modes */
	asz =  (sizeof(*dio->pages) * max_pages) +
	       (sizeof(*dio->sg) * max_pages) + sizeof(*dio);

	dev_dbg(dev, "Initializing Dio pool %d %d %x %p\n",
		BC_LINK_SG_POOL_SZ, max_pages, asz, adp->fill_byte_pool);

	for (i = 0; i < BC_LINK_SG_POOL_SZ; i++) {
		temp = kzalloc(asz, GFP_KERNEL);
		if ((temp) == NULL) {
			dev_err(dev, "Failed to alloc %d mem\n", asz);
			return -ENOMEM;
		}

		dio = (struct crystalhd_dio_req *)temp;
		temp += sizeof(*dio);
		dio->pages = (struct page **)temp;
		temp += (sizeof(*dio->pages) * max_pages);
		dio->sg = (struct scatterlist *)temp;
		dio->max_pages = max_pages;
		dio->fb_va = pci_pool_alloc(adp->fill_byte_pool, GFP_KERNEL,
					    &dio->fb_pa);
		if (!dio->fb_va) {
			dev_err(dev, "fill byte alloc failed.\n");
			kfree(dio);	/* dio is the base of the kzalloc'd block */
			return -ENOMEM;
		}

		crystalhd_free_dio(adp, dio);
	}

	return 0;
}
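
The error paths above can still leave earlier allocations behind (the fill-byte pool and any dio blocks already parked on the free list), so callers are expected to unwind with crystalhd_destroy_dio_pool(), which appears in Example #12 further down. A hypothetical caller sketch; the wrapper name is illustrative only:

static int crystalhd_setup_dma(struct crystalhd_adp *adp, uint32_t max_pages)
{
	int rc = crystalhd_create_dio_pool(adp, max_pages);

	if (rc)
		crystalhd_destroy_dio_pool(adp);	/* copes with partial state */
	return rc;
}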
Example #4
static struct ehci_qtd *ehci_qtd_alloc (struct ehci_hcd *ehci, int flags)
{
	struct ehci_qtd		*qtd;
	dma_addr_t		dma;

	qtd = pci_pool_alloc (ehci->qtd_pool, flags, &dma);
	if (qtd != 0) {
		ehci_qtd_init (qtd, dma);
	}
	return qtd;
}
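
ehci_qtd_alloc() defers the real work to ehci_qtd_init(), which is not shown on this page. Roughly, it clears the descriptor, records its bus address and marks the token halted so the controller ignores the qTD until it is queued. A sketch under those assumptions; the field names (qtd_dma, hw_token, hw_next, qtd_list) and the QTD_STS_HALT / EHCI_LIST_END constants are assumptions about the ehci headers of that era:

static inline void ehci_qtd_init(struct ehci_qtd *qtd, dma_addr_t dma)
{
	memset(qtd, 0, sizeof(*qtd));
	qtd->qtd_dma = dma;			   /* where the controller sees it */
	qtd->hw_token = cpu_to_le32(QTD_STS_HALT); /* inactive until queued */
	qtd->hw_next = EHCI_LIST_END;		   /* terminate the HW link */
	INIT_LIST_HEAD(&qtd->qtd_list);
}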
Example #5
/* EDs ... */
static struct ed *
ed_alloc (struct ohci_hcd *hc, int mem_flags)
{
	dma_addr_t	dma;
	struct ed	*ed;

	ed = pci_pool_alloc (hc->ed_cache, sizeof(*ed)/*mem_flags*/, &dma);
	if (ed) {
		memset (ed, 0, sizeof (*ed));
		INIT_LIST_HEAD (&ed->td_list);
		ed->dma = dma;
	}
	return ed;
}
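
The release side is symmetric: the ED goes back to the same pool together with the bus address stashed in ed->dma. A minimal sketch of that counterpart (the real OHCI helper may also defer the free until the controller has unlinked the ED):

static void
ed_free (struct ohci_hcd *hc, struct ed *ed)
{
	pci_pool_free (hc->ed_cache, ed, ed->dma);
}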
Example #6
static struct ehci_qtd *ehci_qtd_alloc (struct ehci_hcd *ehci, int flags)
{
    struct ehci_qtd		*qtd;
    void*		real;

    real = kmalloc( sizeof( struct ehci_qtd ) + 32, MEMF_KERNEL | MEMF_CLEAR );
    if (real == NULL)
        return NULL;

    /* round the pointer up to the next 32-byte boundary inside the
     * over-sized allocation */
    qtd = ( struct ehci_qtd* )( ((uint32)real + 32 ) & ~31 );
#if 0
    qtd = pci_pool_alloc (ehci->qtd_pool, flags, real);
#endif
    ehci_qtd_init (qtd, real);
    return qtd;
}
Example #7
/**
 * hinic_alloc_cmdq_buf - alloc buffer for sending command
 * @cmdqs: the cmdqs
 * @cmdq_buf: the buffer returned in this struct
 *
 * Return 0 - Success, negative - Failure
 **/
int hinic_alloc_cmdq_buf(struct hinic_cmdqs *cmdqs,
			 struct hinic_cmdq_buf *cmdq_buf)
{
	struct hinic_hwif *hwif = cmdqs->hwif;
	struct pci_dev *pdev = hwif->pdev;

	cmdq_buf->buf = pci_pool_alloc(cmdqs->cmdq_buf_pool, GFP_KERNEL,
				       &cmdq_buf->dma_addr);
	if (!cmdq_buf->buf) {
		dev_err(&pdev->dev, "Failed to allocate cmd from the pool\n");
		return -ENOMEM;
	}

	return 0;
}
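
Because the buffer keeps both its CPU address (cmdq_buf->buf) and its bus address (cmdq_buf->dma_addr), the release path needs nothing beyond the same struct. A hedged sketch of that counterpart:

void hinic_free_cmdq_buf(struct hinic_cmdqs *cmdqs,
			 struct hinic_cmdq_buf *cmdq_buf)
{
	pci_pool_free(cmdqs->cmdq_buf_pool, cmdq_buf->buf, cmdq_buf->dma_addr);
}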
Example #8
/* TDs ... */
static struct td *
td_alloc (struct ohci_hcd *hc, int mem_flags)
{
	dma_addr_t	dma;
	struct td	*td;

	td = pci_pool_alloc (hc->td_cache, sizeof(*td)/*mem_flags*/, &dma);
	if (td) {
		/* in case hc fetches it, make it look dead */
		memset (td, 0, sizeof *td);
		td->hwNextTD = cpu_to_le32 (dma);
		td->td_dma = dma;
		/* hashed in td_fill */
	}
	return td;
}
Example #9
/* Allocates request packet, called by gadget driver */
static struct usb_request *
udc_alloc_request(struct usb_ep *usbep, gfp_t gfp)
{
	struct udc_request	*req;
	struct udc_data_dma	*dma_desc;
	struct udc_ep	*ep;

	if (!usbep)
		return NULL;

	ep = container_of(usbep, struct udc_ep, ep);

	VDBG(ep->dev, "udc_alloc_req(): ep%d\n", ep->num);
	req = kzalloc(sizeof(struct udc_request), gfp);
	if (!req)
		return NULL;

	req->req.dma = DMA_DONT_USE;
	INIT_LIST_HEAD(&req->queue);

	if (ep->dma) {
		/* ep0 in requests are allocated from data pool here */
		dma_desc = pci_pool_alloc(ep->dev->data_requests, gfp,
						&req->td_phys);
		if (!dma_desc) {
			kfree(req);
			return NULL;
		}

		VDBG(ep->dev, "udc_alloc_req: req = %p dma_desc = %p, "
				"td_phys = %lx\n",
				req, dma_desc,
				(unsigned long)req->td_phys);
		/* prevent from using desc. - set HOST BUSY */
		dma_desc->status = AMD_ADDBITS(dma_desc->status,
						UDC_DMA_STP_STS_BS_HOST_BUSY,
						UDC_DMA_STP_STS_BS);
		dma_desc->bufptr = cpu_to_le32(DMA_DONT_USE);
		req->td_data = dma_desc;
		req->td_data_last = NULL;
		req->chain_len = 1;
	}

	return &req->req;
}
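
A request allocated this way owns up to two resources: the kzalloc'd struct udc_request and, on DMA-capable endpoints, the descriptor taken from ep->dev->data_requests. A hedged sketch of the matching free routine; a chained descriptor list (chain_len > 1) would need the same per-descriptor treatment, which is omitted here:

static void udc_free_request(struct usb_ep *usbep, struct usb_request *usbreq)
{
	struct udc_ep		*ep = container_of(usbep, struct udc_ep, ep);
	struct udc_request	*req = container_of(usbreq, struct udc_request,
						    req);

	if (req->td_data)
		pci_pool_free(ep->dev->data_requests, req->td_data,
			      req->td_phys);
	kfree(req);
}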
Example #10
int mthca_create_ah(struct mthca_dev *dev,
		    struct mthca_pd *pd,
		    struct ib_ah_attr *ah_attr,
		    struct mthca_ah *ah)
{
	u32 index = -1;
	struct mthca_av *av = NULL;

	ah->on_hca = 0;

	if (!atomic_read(&pd->sqp_count) &&
	    !(dev->mthca_flags & MTHCA_FLAG_DDR_HIDDEN)) {
		index = mthca_alloc(&dev->av_table.alloc);

		/* fall back to allocate in host memory */
		if (index == -1)
			goto host_alloc;

		av = kmalloc(sizeof *av, GFP_KERNEL);
		if (!av)
			goto host_alloc;

		ah->on_hca = 1;
		ah->avdma  = dev->av_table.ddr_av_base +
			index * MTHCA_AV_SIZE;
	}

 host_alloc:
	if (!ah->on_hca) {
		ah->av = pci_pool_alloc(dev->av_table.pool,
					SLAB_KERNEL, &ah->avdma);
		if (!ah->av)
			return -ENOMEM;

		av = ah->av;
	}

	ah->key = pd->ntmr.ibmr.lkey;

	memset(av, 0, MTHCA_AV_SIZE);

	av->port_pd = cpu_to_be32(pd->pd_num | (ah_attr->port_num << 24));
	av->g_slid  = ah_attr->src_path_bits;
	av->dlid    = cpu_to_be16(ah_attr->dlid);
	av->msg_sr  = (3 << 4) | /* 2K message */
		ah_attr->static_rate;
	av->sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28);
	if (ah_attr->ah_flags & IB_AH_GRH) {
		av->g_slid |= 0x80;
		av->gid_index = (ah_attr->port_num - 1) * dev->limits.gid_table_len +
			ah_attr->grh.sgid_index;
		av->hop_limit = ah_attr->grh.hop_limit;
		av->sl_tclass_flowlabel |=
			cpu_to_be32((ah_attr->grh.traffic_class << 20) |
				    ah_attr->grh.flow_label);
		memcpy(av->dgid, ah_attr->grh.dgid.raw, 16);
	} else {
		/* Arbel workaround -- low byte of GID must be 2 */
		av->dgid[3] = cpu_to_be32(2);
	}

	if (0) {
		int j;

		mthca_dbg(dev, "Created UDAV at %p/%08lx:\n",
			  av, (unsigned long) ah->avdma);
		for (j = 0; j < 8; ++j)
			printk(KERN_DEBUG "  [%2x] %08x\n",
			       j * 4, be32_to_cpu(((u32 *) av)[j]));
	}

	if (ah->on_hca) {
		memcpy_toio(dev->av_table.av_map + index * MTHCA_AV_SIZE,
			    av, MTHCA_AV_SIZE);
		kfree(av);
	}

	return 0;
}
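
Destroying the handle has to mirror the two allocation strategies above: an on-HCA AV returns its slot to dev->av_table.alloc, a host-memory AV goes back to the pci_pool. A hedged sketch; recovering the index from avdma the way shown here is an assumption about the real driver:

int mthca_destroy_ah(struct mthca_dev *dev, struct mthca_ah *ah)
{
	if (ah->on_hca)
		mthca_free(&dev->av_table.alloc,
			   (ah->avdma - dev->av_table.ddr_av_base) /
			   MTHCA_AV_SIZE);
	else
		pci_pool_free(dev->av_table.pool, ah->av, ah->avdma);

	return 0;
}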
Example #11
/**
 * ixgbe_fcoe_ddp_setup - called to set up ddp context
 * @netdev: the corresponding net_device
 * @xid: the exchange id requesting ddp
 * @sgl: the scatter-gather list for this request
 * @sgc: the number of scatter-gather items
 * @target_mode: 1 to set up the DDP context for target mode, 0 for initiator
 *
 * Returns : 1 for success and 0 for no ddp
 */
static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
				struct scatterlist *sgl, unsigned int sgc,
				int target_mode)
{
	struct ixgbe_adapter *adapter;
	struct ixgbe_hw *hw;
	struct ixgbe_fcoe *fcoe;
	struct ixgbe_fcoe_ddp *ddp;
	struct scatterlist *sg;
	unsigned int i, j, dmacount;
	unsigned int len;
	static const unsigned int bufflen = IXGBE_FCBUFF_MIN;
	unsigned int firstoff = 0;
	unsigned int lastsize;
	unsigned int thisoff = 0;
	unsigned int thislen = 0;
	u32 fcbuff, fcdmarw, fcfltrw, fcrxctl;
	dma_addr_t addr = 0;

	if (!netdev || !sgl || !sgc)
		return 0;

	adapter = netdev_priv(netdev);
	if (xid >= IXGBE_FCOE_DDP_MAX) {
		e_warn(drv, "xid=0x%x out-of-range\n", xid);
		return 0;
	}

	/* no DDP if we are already down or resetting */
	if (test_bit(__IXGBE_DOWN, &adapter->state) ||
	    test_bit(__IXGBE_RESETTING, &adapter->state))
		return 0;

	fcoe = &adapter->fcoe;
	if (!fcoe->pool) {
		e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid);
		return 0;
	}

	ddp = &fcoe->ddp[xid];
	if (ddp->sgl) {
		e_err(drv, "xid 0x%x w/ non-null sgl=%p nents=%d\n",
			xid, ddp->sgl, ddp->sgc);
		return 0;
	}
	ixgbe_fcoe_clear_ddp(ddp);

	/* setup dma from scsi command sgl */
	dmacount = pci_map_sg(adapter->pdev, sgl, sgc, DMA_FROM_DEVICE);
	if (dmacount == 0) {
		e_err(drv, "xid 0x%x DMA map error\n", xid);
		return 0;
	}

	/* alloc the udl from our ddp pool */
	ddp->udl = pci_pool_alloc(fcoe->pool, GFP_ATOMIC, &ddp->udp);
	if (!ddp->udl) {
		e_err(drv, "failed allocated ddp context\n");
		goto out_noddp_unmap;
	}
	ddp->sgl = sgl;
	ddp->sgc = sgc;

	j = 0;
	for_each_sg(sgl, sg, dmacount, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);
		while (len) {
			/* max number of buffers allowed in one DDP context */
			if (j >= IXGBE_BUFFCNT_MAX) {
				e_err(drv, "xid=%x:%d,%d,%d:addr=%llx "
				      "not enough descriptors\n",
					xid, i, j, dmacount, (u64)addr);
				goto out_noddp_free;
			}

			/* get the offset and length of the current buffer */
			thisoff = addr & ((dma_addr_t)bufflen - 1);
			thislen = min((bufflen - thisoff), len);
			/*
			 * all but the 1st buffer (j == 0)
			 * must be aligned on bufflen
			 */
			if ((j != 0) && (thisoff))
				goto out_noddp_free;
			/*
			 * all but the last buffer
			 * ((i == (dmacount - 1)) && (thislen == len))
			 * must end at bufflen
			 */
			if (((i != (dmacount - 1)) || (thislen != len))
			    && ((thislen + thisoff) != bufflen))
				goto out_noddp_free;

			ddp->udl[j] = (u64)(addr - thisoff);
			/* only the first buffer may have a non-zero offset */
			if (j == 0)
				firstoff = thisoff;
			len -= thislen;
			addr += thislen;
			j++;
		}
	}
Example #12
/**
 * crystalhd_dioq_fetch_wait - Fetch element from Head.
 * @hw: HW context that owns the receive-ready queue (hw->rx_rdyq)
 * @to_secs: Wait timeout in seconds.
 * @sig_pend: Set to 1 if the wait is interrupted by a pending signal.
 *
 * Return:
 *	element from the head.
 *
 * Return element from head if Q is not empty. Wait for new element
 * if Q is empty for Timeout seconds.
 */
void *crystalhd_dioq_fetch_wait(struct crystalhd_hw *hw, uint32_t to_secs, uint32_t *sig_pend)
{
    struct device *dev = chddev();
    unsigned long flags = 0;
    int rc = 0;

    crystalhd_rx_dma_pkt *r_pkt = NULL;
    crystalhd_dioq_t *ioq = hw->rx_rdyq;
    uint32_t picYcomp = 0;

    unsigned long fetchTimeout = jiffies + msecs_to_jiffies(to_secs * 1000);

    if (!ioq || (ioq->sig != BC_LINK_DIOQ_SIG) || !to_secs || !sig_pend) {
        dev_err(dev, "%s: Invalid arg\n", __func__);
        return r_pkt;
    }

    spin_lock_irqsave(&ioq->lock, flags);
#ifndef __APPLE__
    while (!time_after_eq(jiffies, fetchTimeout)) {
#else
    while (fetchTimeout >= jiffies) {
#endif
        if(ioq->count == 0) {
            spin_unlock_irqrestore(&ioq->lock, flags);
            crystalhd_wait_on_event(&ioq->event, (ioq->count > 0),
                                    250, rc, false);
        }
        else
            spin_unlock_irqrestore(&ioq->lock, flags);
        if (rc == 0) {
            // Found a packet. Check if it is a repeated picture or not
            // Drop the picture if it is a repeated picture
            // Lock against checks from get status calls
            if(down_interruptible(&hw->fetch_sem))
                goto sem_error;
#ifndef __APPLE__
            r_pkt = crystalhd_dioq_fetch(ioq);
#else
            r_pkt = (crystalhd_rx_dma_pkt*)crystalhd_dioq_fetch(ioq);
#endif
            // If this is a format change packet, return without checking anything
            if (r_pkt->flags & (COMP_FLAG_PIB_VALID | COMP_FLAG_FMT_CHANGE))
                goto sem_rel_return;
            if (hw->adp->pdev->device == BC_PCI_DEVID_LINK) {
                picYcomp = link_GetRptDropParam(hw, hw->PICHeight, hw->PICWidth, (void *)r_pkt);
            }
            else {
                // For Flea, we don't have the width and height handy since they
                // come in the PIB in the picture, so this function will also
                // populate the width and height
                picYcomp = flea_GetRptDropParam(hw, (void *)r_pkt);
                // For flea it is the above function that indicated format change
                if(r_pkt->flags & (COMP_FLAG_PIB_VALID | COMP_FLAG_FMT_CHANGE))
                    goto sem_rel_return;
            }
            if(!picYcomp || (picYcomp == hw->LastPicNo) ||
                    (picYcomp == hw->LastTwoPicNo)) {
                //Discard picture
                if(picYcomp != 0) {
                    hw->LastTwoPicNo = hw->LastPicNo;
                    hw->LastPicNo = picYcomp;
                }
                crystalhd_dioq_add(hw->rx_freeq, r_pkt, false, r_pkt->pkt_tag);
                r_pkt = NULL;
                up(&hw->fetch_sem);
            } else {
                if(hw->adp->pdev->device == BC_PCI_DEVID_LINK) {
                    if((picYcomp - hw->LastPicNo) > 1) {
                        dev_info(dev, "MISSING %u PICTURES\n", (picYcomp - hw->LastPicNo));
                    }
                }
                hw->LastTwoPicNo = hw->LastPicNo;
                hw->LastPicNo = picYcomp;
                goto sem_rel_return;
            }
        } else if (rc == -EINTR) {
            *sig_pend = 1;
            return NULL;
        }
        spin_lock_irqsave(&ioq->lock, flags);
    }
    dev_info(dev, "FETCH TIMEOUT\n");
    spin_unlock_irqrestore(&ioq->lock, flags);
    return r_pkt;
sem_error:
    return NULL;
sem_rel_return:
    up(&hw->fetch_sem);
    return r_pkt;
}

#ifdef __APPLE__
static bool CustomSegmentFunction(IODMACommand *target, IODMACommand::Segment64 segment, void *sglMem, UInt32 segmentIndex)
{
    struct scatterlist *sg = (scatterlist*)sglMem;
    sg[segmentIndex].dma_address = (uint32_t)segment.fIOVMAddr;
    sg[segmentIndex].dma_length = (unsigned int)segment.fLength;
    //MPCLOG(MPCLOG_DBG,"CustomSegmentFunction: 0x%X/%d/%d\n",(unsigned int)segment.fIOVMAddr,(unsigned int)segment.fLength, (unsigned int)segmentIndex);
    return true;
}
#endif
/**
 * crystalhd_map_dio - Map user address for DMA
 * @adp:	Adapter instance
 * @ubuff:	User buffer to map.
 * @ubuff_sz:	User buffer size.
 * @uv_offset:	UV buffer offset.
 * @en_422mode: TRUE:422 FALSE:420 Capture mode.
 * @dir_tx:	TRUE for Tx (To device from host)
 * @dio_hnd:	Handle to mapped DIO request.
 *
 * Return:
 *	Status.
 *
 * This routine maps a user address and locks its pages for DMA.
 *
 */
BC_STATUS crystalhd_map_dio(struct crystalhd_adp *adp, void *ubuff,
                            uint32_t ubuff_sz, uint32_t uv_offset,
                            bool en_422mode, bool dir_tx,
                            crystalhd_dio_req **dio_hnd)
{
    struct device *dev;
    crystalhd_dio_req	*dio;
    uint32_t start = 0, end = 0, count = 0;
#ifndef __APPLE__
    uint32_t spsz = 0;
    unsigned long uaddr = 0, uv_start = 0;
    int i = 0, rw = 0, res = 0, nr_pages = 0, skip_fb_sg = 0;
#else
    unsigned long uaddr = 0, uv_start = 0;
    int rw = 0;
    uint32_t nr_pages = 0;
#endif

    if (!adp || !ubuff || !ubuff_sz || !dio_hnd) {
        printk(KERN_ERR "%s: Invalid arg\n", __func__);
        return BC_STS_INV_ARG;
    }

    dev = &adp->pdev->dev;

    /* Compute pages */
    uaddr = (unsigned long)ubuff;
    count = ubuff_sz;
    end = (uaddr + count + PAGE_SIZE - 1) >> PAGE_SHIFT;
    start = uaddr >> PAGE_SHIFT;
    nr_pages = end - start;

    if (!count || ((uaddr + count) < uaddr)) {
        dev_err(dev, "User addr overflow!!\n");
        return BC_STS_INV_ARG;
    }

    dio = crystalhd_alloc_dio(adp);
    if (!dio) {
        dev_err(dev, "dio pool empty..\n");
        return BC_STS_INSUFF_RES;
    }

    if (dir_tx) {
        rw = WRITE;
        dio->direction = DMA_TO_DEVICE;
    } else {
        rw = READ;
        dio->direction = DMA_FROM_DEVICE;
    }

    if (nr_pages > dio->max_pages) {
        dev_err(dev, "max_pages(%d) exceeded(%d)!!\n",
                dio->max_pages, nr_pages);
        crystalhd_unmap_dio(adp, dio);
        return BC_STS_INSUFF_RES;
    }

#ifndef __APPLE__
    if (uv_offset) {
        uv_start = (uaddr + uv_offset)  >> PAGE_SHIFT;
        dio->uinfo.uv_sg_ix = uv_start - start;
        dio->uinfo.uv_sg_off = ((uaddr + uv_offset) & ~PAGE_MASK);
    }

    dio->fb_size = ubuff_sz & 0x03;
    if (dio->fb_size) {
        res = copy_from_user(dio->fb_va,
                             (void *)(uaddr + count - dio->fb_size),
                             dio->fb_size);
        if (res) {
            dev_err(dev, "failed %d to copy %u fill bytes from %p\n",
                    res, dio->fb_size,
                    (void *)(uaddr + count-dio->fb_size));
            crystalhd_unmap_dio(adp, dio);
            return BC_STS_INSUFF_RES;
        }
    }

    down_read(&current->mm->mmap_sem);
    res = get_user_pages(current, current->mm, uaddr, nr_pages, rw == READ,
                         0, dio->pages, NULL);
    up_read(&current->mm->mmap_sem);

    /* Save for release..*/
    dio->sig = crystalhd_dio_locked;
    if (res < nr_pages) {
        dev_err(dev, "get pages failed: %d-%d\n", nr_pages, res);
        dio->page_cnt = res;
        crystalhd_unmap_dio(adp, dio);
        return BC_STS_ERROR;
    }

    dio->page_cnt = nr_pages;
    /* Get scatter/gather */
    crystalhd_init_sg(dio->sg, dio->page_cnt);
    crystalhd_set_sg(&dio->sg[0], dio->pages[0], 0, uaddr & ~PAGE_MASK);
    if (nr_pages > 1) {
        dio->sg[0].length = PAGE_SIZE - dio->sg[0].offset;

#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 23)
#ifdef CONFIG_X86_64
        dio->sg[0].dma_length = dio->sg[0].length;
#endif
#endif
        count -= dio->sg[0].length;
        for (i = 1; i < nr_pages; i++) {
            if (count < 4) {
                spsz = count;
                skip_fb_sg = 1;
            } else {
                spsz = (count < PAGE_SIZE) ?
                       (count & ~0x03) : PAGE_SIZE;
            }
            crystalhd_set_sg(&dio->sg[i], dio->pages[i], spsz, 0);
            count -= spsz;
        }
    } else {
        if (count < 4) {
            dio->sg[0].length = count;
            skip_fb_sg = 1;
        } else {
            dio->sg[0].length = count - dio->fb_size;
        }
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 23)
#ifdef CONFIG_X86_64
        dio->sg[0].dma_length = dio->sg[0].length;
#endif
#endif
    }
    dio->sg_cnt = pci_map_sg(adp->pdev, dio->sg,
                             dio->page_cnt, dio->direction);
    if (dio->sg_cnt <= 0) {
        dev_err(dev, "sg map %d-%d\n", dio->sg_cnt, dio->page_cnt);
        crystalhd_unmap_dio(adp, dio);
        return BC_STS_ERROR;
    }
    if (dio->sg_cnt && skip_fb_sg)
        dio->sg_cnt -= 1;
#else
    IODMACommand          *dma_command;
    IOMemoryDescriptor    *mem_desc;
    IOReturn              result;

    if (uv_offset) {
        uv_start = (uaddr + uv_offset)  >> PAGE_SHIFT;
        dio->uinfo.uv_sg_ix = uv_start - start;
        dio->uinfo.uv_sg_off = ((uaddr + uv_offset) & PAGE_MASK);
    }

    dio->fb_size = ubuff_sz & 0x03;

    // map user memory into kernel memory
    mem_desc = IOMemoryDescriptor::withAddress(uaddr, count, dir_tx ? kIODirectionIn : kIODirectionOut,
               current_task() );
    if (mem_desc) {
        result = mem_desc->prepare();
        //IOLog("bc_link_map_dio:mem_desc=0x%X, prepare result 0x%X \n", (unsigned int)mem_desc, (int)result);
        dio->io_class = (void*)mem_desc;
    } else {
        dev_err(&adp->pdev->dev, "bc_link_map_dio:IOMemoryDescriptor::withAddress failed\n");
        crystalhd_free_dio(adp,dio);
        return BC_STS_INSUFF_RES;
    }

    // Save for release..
    dio->sig = crystalhd_dio_locked;

    // check transfer count, counts less than four are handled using only the fill byte page
    if (count > 3) {
        do {
            // 32 bit physical address generation using IODMACommand
            // any memory above 4Gb in the memory descriptor will be buffered
            // to memory below the 4G line, on machines without remapping HW support
            dma_command = IODMACommand::withSpecification(
                              // custom segment function
                              (IODMACommand::SegmentFunction)CustomSegmentFunction,
                              // numAddressBits
                              32,
                              // maxSegmentSize
                              PAGE_SIZE,
                              // mappingOptions - kMapped for DMA addresses
                              IODMACommand::kMapped,
                              // maxTransferSize - no restriction
                              0,
                              // alignment - no restriction
                              1 );
            if (!dma_command) {
                dev_err(&adp->pdev->dev, "IODMACommand::withSpecification failed\n");
                break;
            }

            //IOLog("bc_link_map_dio:dma_command=0x%X \n", (unsigned int)dma_command);
            // point IODMACommand at the memory descriptor, don't use auto prepare option
            result = dma_command->setMemoryDescriptor(mem_desc, false);
            if (kIOReturnSuccess != result) {
                dev_err(&adp->pdev->dev, "setMemoryDescriptor failed (0x%x)\n", result);
                break;
            }
            dio->io_class = (void*)dma_command;
            result = dma_command->prepare(0, count, true);
            //IOLog("bc_link_map_dio:dma_command->prepare() result 0x%X \n",(int)result);

            // generate scatter/gather list using custom segment function. This routine will make
            // sure that the first s/g entry will have the correct address and length for user
            // addresses that are not page aligned.
            UInt64 offset = 0;
            result = dma_command->gen32IOVMSegments(&offset, (IODMACommand::Segment32*)dio->sg, (UInt32*)&nr_pages);

            //IOLog("bc_link_map_dio:gen32IOVMSegments nr_pages %d, result %d\n", (int)nr_pages, (int)result);
            // if ending page is not end 4 byte aligned, decrease last page transfer length
            //  as those bytes will be handled using the fill byte page.
            if(dio->fb_size) {
                dio->sg[nr_pages-1].dma_length -= dio->fb_size;
                // check for last page == same size as dio->fb_size
                if (dio->sg[nr_pages-1].dma_length == 0) {
                    nr_pages--;
                }
            }
            // If need a fill byte page
            if(dio->fb_size) {
                UInt64          byte_count;
                UInt64          length;

                // manually copy those bytes into the fill byte page
                offset = count - dio->fb_size;
                length = dio->fb_size;
                byte_count = mem_desc->readBytes(offset, dio->fb_va, length);
            }
            dio->sg_cnt = nr_pages;
        } while(false);

        if (dio->sg_cnt <= 0) {
            dev_err(&adp->pdev->dev, "sg map %d \n",dio->sg_cnt);
            crystalhd_unmap_dio(adp,dio);
            return BC_STS_ERROR;
        }
    } else {
        // three bytes or less, handle this transfer using only the fill_byte page.
        UInt64          byte_count;
        UInt64          offset;
        UInt64          length;

        offset = 0;
        length = dio->fb_size;
        byte_count = mem_desc->readBytes(offset, dio->fb_va, length);
        dio->sg_cnt = 0;
        dio->sg[0].dma_length = count;
    }
#endif
    dio->sig = crystalhd_dio_sg_mapped;
    /* Fill in User info.. */
    dio->uinfo.xfr_len   = ubuff_sz;
#ifndef __APPLE__
    dio->uinfo.xfr_buff  = ubuff;
#else
    dio->uinfo.xfr_buff  = (uint8_t*)ubuff;
#endif
    dio->uinfo.uv_offset = uv_offset;
    dio->uinfo.b422mode  = en_422mode;
    dio->uinfo.dir_tx    = dir_tx;

    *dio_hnd = dio;

    return BC_STS_SUCCESS;
}

/**
 * crystalhd_unmap_dio - Release mapped resources
 * @adp: Adapter instance
 * @dio: DIO request instance
 *
 * Return:
 *	Status.
 *
 * This routine unmaps the user buffer pages.
 */
BC_STATUS crystalhd_unmap_dio(struct crystalhd_adp *adp, crystalhd_dio_req *dio)
{
#ifndef __APPLE__
    struct page *page = NULL;
    int j = 0;

    if (!adp || !dio) {
        printk(KERN_ERR "%s: Invalid arg\n", __func__);
        return BC_STS_INV_ARG;
    }

    if ((dio->page_cnt > 0) && (dio->sig != crystalhd_dio_inv)) {
        for (j = 0; j < dio->page_cnt; j++) {
            page = dio->pages[j];
            if (page) {
                if (!PageReserved(page) &&
                        (dio->direction == DMA_FROM_DEVICE))
                    SetPageDirty(page);
                page_cache_release(page);
            }
        }
    }
    if (dio->sig == crystalhd_dio_sg_mapped)
        pci_unmap_sg(adp->pdev, dio->sg, dio->page_cnt, dio->direction);
#else
    IODMACommand		*dma_command;
    IOMemoryDescriptor	*mem_desc;

    if(!adp || !dio ) {
        dev_err(chddev(), "bc_link_unmap_dio:Invalid arg \n");
        return BC_STS_INV_ARG;
    }
    dma_command = OSDynamicCast(IODMACommand, (OSMetaClassBase*)dio->io_class);
    //MPCLOG(MPCLOG_DBG, "bc_link_unmap_dio:dma_command=0x%X \n", (unsigned int)dma_command);
    if (dma_command) {
        // fetch current IOMemoryDescriptor before dma_command->clearMemoryDescriptor;
        mem_desc = (IOMemoryDescriptor*)dma_command->getMemoryDescriptor();
        dma_command->complete();
        dma_command->clearMemoryDescriptor();
        SAFE_RELEASE(dma_command);
        mem_desc->complete();
        SAFE_RELEASE(mem_desc);
        dio->io_class = NULL;
    }
#endif

    crystalhd_free_dio(adp, dio);

    return BC_STS_SUCCESS;
}
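
crystalhd_map_dio() and crystalhd_unmap_dio() bracket one hardware transfer: map pins the user pages and builds the scatter list, unmap releases the pages and returns the dio to the pool. A hypothetical caller sketch; the function name and the elided DMA programming are illustrative only:

static BC_STATUS xfer_user_buffer(struct crystalhd_adp *adp,
                                  void *ubuff, uint32_t ubuff_sz)
{
    crystalhd_dio_req *dio = NULL;
    BC_STATUS sts;

    sts = crystalhd_map_dio(adp, ubuff, ubuff_sz, 0 /* uv_offset */,
                            false /* 420 mode */, true /* Tx: to device */,
                            &dio);
    if (sts != BC_STS_SUCCESS)
        return sts;

    /* ... program the DMA engine from dio->sg / dio->sg_cnt ... */

    return crystalhd_unmap_dio(adp, dio);
}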

/**
 * crystalhd_create_dio_pool - Allocate mem pool for DIO management.
 * @adp: Adapter instance
 * @max_pages: Max pages for size calculation.
 *
 * Return:
 *	system error.
 *
 * This routine creates a memory pool to hold dio contexts for
 * HW Direct IO operation.
 */
int crystalhd_create_dio_pool(struct crystalhd_adp *adp, uint32_t max_pages)
{
    struct device *dev;
    uint32_t asz = 0, i = 0;
    uint8_t	*temp;
    crystalhd_dio_req *dio;

    if (!adp || !max_pages) {
        printk(KERN_ERR "%s: Invalid arg\n", __func__);
        return -EINVAL;
    }

    dev = &adp->pdev->dev;

    /* Get dma memory for fill byte handling..*/
    adp->fill_byte_pool = pci_pool_create("crystalhd_fbyte",
                                          adp->pdev, 8, 8, 0);
    if (!adp->fill_byte_pool) {
        dev_err(dev, "failed to create fill byte pool\n");
        return -ENOMEM;
    }

#ifndef __APPLE__
    /* Get the max size from user based on 420/422 modes */
    asz =  (sizeof(*dio->pages) * max_pages) +
           (sizeof(*dio->sg) * max_pages) + sizeof(*dio);
#else
    asz = (sizeof(*dio->sg) * max_pages) + sizeof(*dio);
#endif

    dev_dbg(dev, "Initializing Dio pool %d %d %x %p\n",
            BC_LINK_SG_POOL_SZ, max_pages, asz, adp->fill_byte_pool);

    for (i = 0; i < BC_LINK_SG_POOL_SZ; i++) {
        temp = (uint8_t *)kzalloc(asz, GFP_KERNEL);
        if ((temp) == NULL) {
            dev_err(dev, "Failed to alloc %d mem\n", asz);
            return -ENOMEM;
        }

        dio = (crystalhd_dio_req *)temp;
        temp += sizeof(*dio);
#ifndef __APPLE__
        dio->pages = (struct page **)temp;
        temp += (sizeof(*dio->pages) * max_pages);
#else
        temp += sizeof(*dio);
#endif
        dio->sg = (struct scatterlist *)temp;
        dio->max_pages = max_pages;
        dio->fb_va = pci_pool_alloc(adp->fill_byte_pool, GFP_KERNEL,
                                    &dio->fb_pa);
        if (!dio->fb_va) {
            dev_err(dev, "fill byte alloc failed.\n");
            kfree(dio);	/* dio is the base of the kzalloc'd block */
            return -ENOMEM;
        }

        crystalhd_free_dio(adp, dio);
    }

    return 0;
}

/**
 * crystalhd_destroy_dio_pool - Release DIO mem pool.
 * @adp: Adapter instance
 *
 * Return:
 *	none.
 *
 * This routine releases dio memory pool during close.
 */
void crystalhd_destroy_dio_pool(struct crystalhd_adp *adp)
{
    crystalhd_dio_req *dio;
    int count = 0;

    if (!adp) {
        printk(KERN_ERR "%s: Invalid arg\n", __func__);
        return;
    }

    do {
        dio = crystalhd_alloc_dio(adp);
        if (dio) {
            if (dio->fb_va)
                pci_pool_free(adp->fill_byte_pool,
                              dio->fb_va, dio->fb_pa);
            count++;
            kfree(dio);
        }
    } while (dio);

    if (adp->fill_byte_pool) {
        pci_pool_destroy(adp->fill_byte_pool);
        adp->fill_byte_pool = NULL;
    }

    dev_dbg(&adp->pdev->dev, "Released dio pool %d\n", count);
}
Example #13
/**
 * megaraid_mbox_setup_dma_pools - setup dma pool for command packets
 * @adapter		: HBA soft state
 *
 * Setup the dma pools for mailbox, passthru and extended passthru structures,
 * and scatter-gather lists.
 */
static int
megaraid_mbox_setup_dma_pools(adapter_t *adapter)
{
	mraid_device_t		*raid_dev = ADAP2RAIDDEV(adapter);
	struct mraid_pci_blk	*epthru_pci_blk;
	struct mraid_pci_blk	*sg_pci_blk;
	struct mraid_pci_blk	*mbox_pci_blk;
	int			i;



	// Allocate memory for 16-byte aligned mailboxes
	raid_dev->mbox_pool_handle = pci_pool_create("megaraid mbox pool",
						adapter->pdev,
						sizeof(mbox64_t) + 16,
						16, 0);

	if (raid_dev->mbox_pool_handle == NULL) {
		goto fail_setup_dma_pool;
	}

	mbox_pci_blk = raid_dev->mbox_pool;
	for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) {
		mbox_pci_blk[i].vaddr = pci_pool_alloc(
						raid_dev->mbox_pool_handle,
						GFP_KERNEL,
						&mbox_pci_blk[i].dma_addr);
		if (!mbox_pci_blk[i].vaddr) {
			goto fail_setup_dma_pool;
		}
	}

	/*
	 * Allocate memory for each embedded passthru structure pointer.
	 * Request a 128-byte aligned structure for each passthru command
	 * structure.
	 * Since passthru and extended passthru commands are mutually
	 * exclusive, they share a common memory pool. Passthru structures
	 * piggyback on the memory allocated for extended passthru, since
	 * passthru is the smaller of the two.
	 */
	raid_dev->epthru_pool_handle = pci_pool_create("megaraid mbox pthru",
			adapter->pdev, sizeof(mraid_epassthru_t), 128, 0);

	if (raid_dev->epthru_pool_handle == NULL) {
		goto fail_setup_dma_pool;
	}

	epthru_pci_blk = raid_dev->epthru_pool;
	for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) {
		epthru_pci_blk[i].vaddr = pci_pool_alloc(
						raid_dev->epthru_pool_handle,
						GFP_KERNEL,
						&epthru_pci_blk[i].dma_addr);
		if (!epthru_pci_blk[i].vaddr) {
			goto fail_setup_dma_pool;
		}
	}


	// Allocate memory for each scatter-gather list. Request 512-byte
	// alignment for each sg list
	raid_dev->sg_pool_handle = pci_pool_create("megaraid mbox sg",
					adapter->pdev,
					sizeof(mbox_sgl64) * MBOX_MAX_SG_SIZE,
					512, 0);

	if (raid_dev->sg_pool_handle == NULL) {
		goto fail_setup_dma_pool;
	}

	sg_pci_blk = raid_dev->sg_pool;
	for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) {
		sg_pci_blk[i].vaddr = pci_pool_alloc(
						raid_dev->sg_pool_handle,
						GFP_KERNEL,
						&sg_pci_blk[i].dma_addr);
		if (!sg_pci_blk[i].vaddr) {
			goto fail_setup_dma_pool;
		}
	}

	return 0;

fail_setup_dma_pool:
	megaraid_mbox_teardown_dma_pools(adapter);
	return -1;
}
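
Every failure funnels into fail_setup_dma_pool, so the teardown routine has to tolerate pools and blocks that were never allocated. A hedged sketch of that unwind; only the mailbox pool is spelled out, and the epthru and sg pools would follow the same free-then-destroy pattern:

static void
megaraid_mbox_teardown_dma_pools(adapter_t *adapter)
{
	mraid_device_t		*raid_dev = ADAP2RAIDDEV(adapter);
	struct mraid_pci_blk	*mbox_pci_blk = raid_dev->mbox_pool;
	int			i;

	for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) {
		if (!mbox_pci_blk[i].vaddr)
			continue;	/* never allocated */
		pci_pool_free(raid_dev->mbox_pool_handle,
			      mbox_pci_blk[i].vaddr,
			      mbox_pci_blk[i].dma_addr);
	}

	if (raid_dev->mbox_pool_handle)
		pci_pool_destroy(raid_dev->mbox_pool_handle);

	/* ... repeat the same sequence for epthru_pool/epthru_pool_handle
	 * and sg_pool/sg_pool_handle ... */
}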