Example #1
/* Function for free allocated Desc Memory. */
void RtmpFreeDescBuf(
	IN PPCI_DEV pPciDev,
	IN ULONG Length,
	IN VOID *VirtualAddress,
	IN NDIS_PHYSICAL_ADDRESS phy_addr)
{
	dma_addr_t DmaAddr = (dma_addr_t)(phy_addr);

	pci_free_consistent(pPciDev, Length, VirtualAddress, DmaAddr);
}
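
For reference, the length and DMA handle passed to pci_free_consistent() must match what pci_alloc_consistent() returned for the same device; the cast from NDIS_PHYSICAL_ADDRESS to dma_addr_t above only exists to satisfy that contract. A minimal, hypothetical sketch of the allocate/free pairing (not taken from any of the drivers shown here):

#include <linux/errno.h>
#include <linux/pci.h>

/* Hypothetical helper: allocate a coherent descriptor buffer, use it, then
 * release it with the same device, length, and DMA handle the allocator
 * returned. */
static int example_desc_roundtrip(struct pci_dev *pdev, unsigned long len)
{
	dma_addr_t dma_handle;
	void *cpu_addr;

	cpu_addr = pci_alloc_consistent(pdev, len, &dma_handle);
	if (!cpu_addr)
		return -ENOMEM;

	/* ... fill in descriptors, hand dma_handle to the hardware ... */

	pci_free_consistent(pdev, len, cpu_addr, dma_handle);
	return 0;
}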
Example #2
/* Function for free allocated Desc Memory. */
void RTMP_FreeDescMemory(struct rt_rtmp_adapter *pAd,
			 unsigned long Length,
			 void *VirtualAddress,
			 dma_addr_t PhysicalAddress)
{
	struct os_cookie *pObj = (struct os_cookie *)pAd->OS_Cookie;

	pci_free_consistent(pObj->pci_dev, Length, VirtualAddress,
			    PhysicalAddress);
}
Example #3
void vnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring)
{
	if (ring->descs) {
		pci_free_consistent(vdev->pdev,
			ring->size_unaligned,
			ring->descs_unaligned,
			ring->base_addr_unaligned);
		ring->descs = NULL;
	}
}
Example #4
void vnic_dev_unregister(struct vnic_dev *vdev)
{
	if (vdev) {
		if (vdev->notify)
			pci_free_consistent(vdev->pdev,
				sizeof(struct vnic_devcmd_notify),
				vdev->notify,
				vdev->notify_pa);
		if (vdev->stats)
			pci_free_consistent(vdev->pdev,
				sizeof(struct vnic_stats),
				vdev->stats, vdev->stats_pa);
		if (vdev->fw_info)
			pci_free_consistent(vdev->pdev,
				sizeof(struct vnic_devcmd_fw_info),
				vdev->fw_info, vdev->fw_info_pa);
		kfree(vdev);
	}
}
Example #5
void RTMP_FreeFirstTxBuffer(
	IN PPCI_DEV pPciDev,
	IN ULONG Length,
	IN BOOLEAN Cached,
	IN VOID *VirtualAddress,
	IN NDIS_PHYSICAL_ADDRESS phy_addr)
{
	dma_addr_t DmaAddr = (dma_addr_t)(phy_addr);
	pci_free_consistent(pPciDev, Length, VirtualAddress, DmaAddr);
}
Example #6
int
islpci_free_memory(islpci_private *priv)
{
	int counter;

	if (priv->device_base)
		iounmap(priv->device_base);
	priv->device_base = NULL;

	/* free consistent DMA area... */
	if (priv->driver_mem_address)
		pci_free_consistent(priv->pdev, HOST_MEM_BLOCK,
				    priv->driver_mem_address,
				    priv->device_host_address);

	/* clear some dangling pointers */
	priv->driver_mem_address = NULL;
	priv->device_host_address = 0;
	priv->device_psm_buffer = 0;
	priv->control_block = NULL;

	/* clean up mgmt rx buffers */
	for (counter = 0; counter < ISL38XX_CB_MGMT_QSIZE; counter++) {
		struct islpci_membuf *buf = &priv->mgmt_rx[counter];
		if (buf->pci_addr)
			pci_unmap_single(priv->pdev, buf->pci_addr,
					 buf->size, PCI_DMA_FROMDEVICE);
		buf->pci_addr = 0;
		if (buf->mem)
			kfree(buf->mem);
		buf->size = 0;
		buf->mem = NULL;
	}

	/* clean up data rx buffers */
	for (counter = 0; counter < ISL38XX_CB_RX_QSIZE; counter++) {
		if (priv->pci_map_rx_address[counter])
			pci_unmap_single(priv->pdev,
					 priv->pci_map_rx_address[counter],
					 MAX_FRAGMENT_SIZE_RX + 2,
					 PCI_DMA_FROMDEVICE);
		priv->pci_map_rx_address[counter] = 0;

		if (priv->data_low_rx[counter])
			dev_kfree_skb(priv->data_low_rx[counter]);
		priv->data_low_rx[counter] = NULL;
	}

	/* Free the access control list and the WPA list */
	prism54_acl_clean(&priv->acl);
	prism54_wpa_ie_clean(priv);
	mgt_clean(priv);

	return 0;
}
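
Example #6 above mixes the two DMA styles: coherent memory, allocated with pci_alloc_consistent() and released with pci_free_consistent(), and streaming mappings, created with pci_map_single() and torn down with pci_unmap_single() plus an ordinary kfree()/dev_kfree_skb() of the backing buffer. A schematic sketch of the streaming side, using hypothetical names:

#include <linux/pci.h>
#include <linux/slab.h>

/* Hypothetical streaming-DMA round trip: the buffer itself comes from the
 * normal allocator; only the mapping is created and torn down per transfer. */
static int example_streaming_rx(struct pci_dev *pdev, size_t size)
{
	void *buf = kmalloc(size, GFP_KERNEL);
	dma_addr_t bus_addr;

	if (!buf)
		return -ENOMEM;

	bus_addr = pci_map_single(pdev, buf, size, PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(pdev, bus_addr)) {
		kfree(buf);
		return -EIO;
	}

	/* ... let the device DMA into buf ... */

	pci_unmap_single(pdev, bus_addr, size, PCI_DMA_FROMDEVICE);
	kfree(buf);
	return 0;
}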
Example #7
static void
e1000_free_desc_rings(struct e1000_adapter *adapter)
{
	struct e1000_desc_ring *txdr = &adapter->test_tx_ring;
	struct e1000_desc_ring *rxdr = &adapter->test_rx_ring;
	struct pci_dev *pdev = adapter->pdev;
	int i;

	if(txdr->desc && txdr->buffer_info) {
		for(i = 0; i < txdr->count; i++) {
			if(txdr->buffer_info[i].dma)
				pci_unmap_single(pdev, txdr->buffer_info[i].dma,
						 txdr->buffer_info[i].length,
						 PCI_DMA_TODEVICE);
			if(txdr->buffer_info[i].skb)
				dev_kfree_skb(txdr->buffer_info[i].skb);
		}
	}

	if(rxdr->desc && rxdr->buffer_info) {
		for(i = 0; i < rxdr->count; i++) {
			if(rxdr->buffer_info[i].dma)
				pci_unmap_single(pdev, rxdr->buffer_info[i].dma,
						 rxdr->buffer_info[i].length,
						 PCI_DMA_FROMDEVICE);
			if(rxdr->buffer_info[i].skb)
				dev_kfree_skb(rxdr->buffer_info[i].skb);
		}
	}

	if(txdr->desc)
		pci_free_consistent(pdev, txdr->size, txdr->desc, txdr->dma);
	if(rxdr->desc)
		pci_free_consistent(pdev, rxdr->size, rxdr->desc, rxdr->dma);

	if(txdr->buffer_info)
		kfree(txdr->buffer_info);
	if(rxdr->buffer_info)
		kfree(rxdr->buffer_info);

	return;
}
Example #8
static void buffer_finish(struct vb2_buffer *vb)
{
    struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
    struct cx8800_dev *dev = vb->vb2_queue->drv_priv;
    struct cx88_buffer *buf = container_of(vbuf, struct cx88_buffer, vb);
    struct cx88_riscmem *risc = &buf->risc;

    if (risc->cpu)
        pci_free_consistent(dev->pci, risc->size, risc->cpu, risc->dma);
    memset(risc, 0, sizeof(*risc));
}
Example #9
void
osl_dma_free_consistent(osl_t *osh, void *va, uint size, ulong pa)
{
	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));

#ifdef __ARM_ARCH_7A__
	kfree(va);
#else
	pci_free_consistent(osh->pdev, size, va, (dma_addr_t)pa);
#endif
}
Example #10
File: device_main.c Project: Abioy/kasan
static void device_free_rings(struct vnt_private *pDevice)
{
	pci_free_consistent(pDevice->pcid,
			    pDevice->sOpts.nRxDescs0 * sizeof(SRxDesc) +
			    pDevice->sOpts.nRxDescs1 * sizeof(SRxDesc) +
			    pDevice->sOpts.nTxDescs[0] * sizeof(STxDesc) +
			    pDevice->sOpts.nTxDescs[1] * sizeof(STxDesc),
			    pDevice->aRD0Ring, pDevice->pool_dma);

	if (pDevice->tx0_bufs)
		pci_free_consistent(pDevice->pcid,
				    pDevice->sOpts.nTxDescs[0] * PKT_BUF_SZ +
				    pDevice->sOpts.nTxDescs[1] * PKT_BUF_SZ +
				    CB_BEACON_BUF_SIZE +
				    CB_MAX_BUF_SIZE,
				    pDevice->tx0_bufs, pDevice->tx_bufs_dma0);
}
Example #11
int vnic_dev_notify_unset(struct vnic_dev *vdev)
{
	if (vdev->notify) {
		pci_free_consistent(vdev->pdev,
			sizeof(struct vnic_devcmd_notify),
			vdev->notify,
			vdev->notify_pa);
	}

	return vnic_dev_notify_unsetcmd(vdev);
}
Example #12
/* Function for free allocated Desc Memory. */
void RTMP_FreeDescMemory(
	IN	PPCI_DEV				pPciDev,
	IN	ULONG					Length,
	IN	PVOID					VirtualAddress,
	IN	NDIS_PHYSICAL_ADDRESS	PhysicalAddress)
{
/*	POS_COOKIE pObj = (POS_COOKIE)pAd->OS_Cookie; */
	dma_addr_t DmaAddr = (dma_addr_t)(PhysicalAddress);

	pci_free_consistent(pPciDev, Length, VirtualAddress, DmaAddr);
}
Example #13
void RTMP_FreeFirstTxBuffer(
	IN	PRTMP_ADAPTER pAd,
	IN	ULONG	Length,
	IN	BOOLEAN	Cached,
	IN	PVOID	VirtualAddress,
	IN	NDIS_PHYSICAL_ADDRESS PhysicalAddress)
{
	POS_COOKIE pObj = (POS_COOKIE)pAd->OS_Cookie;

	pci_free_consistent(pObj->pci_dev, Length, VirtualAddress, PhysicalAddress);
}
Example #14
void RTMP_FreeFirstTxBuffer(struct rt_rtmp_adapter *pAd,
                            unsigned long Length,
                            IN BOOLEAN Cached,
                            void *VirtualAddress,
                            dma_addr_t PhysicalAddress)
{
    struct os_cookie *pObj = (struct os_cookie *)pAd->OS_Cookie;

    pci_free_consistent(pObj->pci_dev, Length, VirtualAddress,
                        PhysicalAddress);
}
Example #15
void dma_prog_region_free(struct dma_prog_region *prog)
{
	if (prog->kvirt) {
		pci_free_consistent(prog->dev, prog->n_pages << PAGE_SHIFT, prog->kvirt, prog->bus_addr);
	}

	prog->kvirt = NULL;
	prog->dev = NULL;
	prog->n_pages = 0;
	prog->bus_addr = 0;
}
Example #16
static int alloc_buffer(struct vino_device *v, int size)
{
	int count, i, j, err;

	err = i = 0;
	count = (size / PAGE_SIZE + 4) & ~3;
	v->desc = (unsigned long *) kmalloc(count * sizeof(unsigned long),
					    GFP_KERNEL);
	if (!v->desc)
		return -ENOMEM;

	v->dma_desc.cpu = pci_alloc_consistent(NULL, PAGE_RATIO * (count+4) *
					       sizeof(dma_addr_t),
					       &v->dma_desc.dma);
	if (!v->dma_desc.cpu) {
		err = -ENOMEM;
		goto out_free_desc;
	}
	while (i < count) {
		dma_addr_t dma;

		v->desc[i] = get_zeroed_page(GFP_KERNEL | GFP_DMA);
		if (!v->desc[i])
			break;
		dma = pci_map_single(NULL, (void *)v->desc[i], PAGE_SIZE,
				     PCI_DMA_FROMDEVICE);
		for (j = 0; j < PAGE_RATIO; j++)
			v->dma_desc.cpu[PAGE_RATIO * i + j ] = 
				dma + VINO_PAGE_SIZE * j;
		mem_map_reserve(virt_to_page(v->desc[i]));
		i++;
	}
	v->dma_desc.cpu[PAGE_RATIO * count] = VINO_DESC_STOP;
	if (i-- < count) {
		while (i >= 0) {
			mem_map_unreserve(virt_to_page(v->desc[i]));
			pci_unmap_single(NULL, v->dma_desc.cpu[PAGE_RATIO * i],
					 PAGE_SIZE, PCI_DMA_FROMDEVICE);
			free_page(v->desc[i]);
			i--;
		}
		pci_free_consistent(NULL,
				    PAGE_RATIO * (count+4) * sizeof(dma_addr_t),
				    (void *)v->dma_desc.cpu, v->dma_desc.dma);
		err = -ENOBUFS;
		goto out_free_desc;
	}
	v->page_count = count;
	return 0;

out_free_desc:
	kfree(v->desc);
	return err;
}
Example #17
void cx25821_free_mem_upstream_ch2(struct cx25821_dev *dev)
{
	if (dev->_is_running_ch2) {
		cx25821_stop_upstream_video_ch2(dev);
	}

	if (dev->_dma_virt_addr_ch2) {
		pci_free_consistent(dev->pci, dev->_risc_size_ch2,
				    dev->_dma_virt_addr_ch2,
				    dev->_dma_phys_addr_ch2);
		dev->_dma_virt_addr_ch2 = NULL;
	}

	if (dev->_data_buf_virt_addr_ch2) {
		pci_free_consistent(dev->pci, dev->_data_buf_size_ch2,
				    dev->_data_buf_virt_addr_ch2,
				    dev->_data_buf_phys_addr_ch2);
		dev->_data_buf_virt_addr_ch2 = NULL;
	}
}
Example #18
File: r8169.c Project: wxlong/Test
static int
rtl8169_close(struct net_device *dev)
{
    struct rtl8169_private *tp = dev->priv;
    struct pci_dev *pdev = tp->pci_dev;
    void *ioaddr = tp->mmio_addr;

    netif_stop_queue(dev);

    rtl8169_delete_timer(dev);

    spin_lock_irq(&tp->lock);

    /* Stop the chip's Tx and Rx DMA processes. */
    RTL_W8(ChipCmd, 0x00);

    /* Disable interrupts by clearing the interrupt mask. */
    RTL_W16(IntrMask, 0x0000);

    /* Update the error counts. */
    tp->stats.rx_missed_errors += RTL_R32(RxMissed);
    RTL_W32(RxMissed, 0);

    spin_unlock_irq(&tp->lock);

    synchronize_irq(dev->irq);
    free_irq(dev->irq, dev);

    rtl8169_tx_clear(tp);

    rtl8169_rx_clear(tp);

    pci_free_consistent(pdev, R8169_RX_RING_BYTES, tp->RxDescArray,
                        tp->RxPhyAddr);
    pci_free_consistent(pdev, R8169_TX_RING_BYTES, tp->TxDescArray,
                        tp->TxPhyAddr);
    tp->TxDescArray = NULL;
    tp->RxDescArray = NULL;

    return 0;
}
Example #19
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.
 */
static void b44_free_consistent(struct b44 *bp)
{
	if (bp->rx_buffers) {
		kfree(bp->rx_buffers);
		bp->rx_buffers = NULL;
	}
	if (bp->tx_buffers) {
		kfree(bp->tx_buffers);
		bp->tx_buffers = NULL;
	}
	if (bp->rx_ring) {
		pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
				    bp->rx_ring, bp->rx_ring_dma);
		bp->rx_ring = NULL;
	}
	if (bp->tx_ring) {
		pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
				    bp->tx_ring, bp->tx_ring_dma);
		bp->tx_ring = NULL;
	}
}
Example #20
/* Get info of a NIC partition */
int qlcnic_get_nic_info(struct qlcnic_adapter *adapter,
				struct qlcnic_info *npar_info, u8 func_id)
{
	int	err;
	dma_addr_t nic_dma_t;
	struct qlcnic_info *nic_info;
	void *nic_info_addr;
	size_t	nic_size = sizeof(struct qlcnic_info);

	nic_info_addr = pci_alloc_consistent(adapter->pdev,
		nic_size, &nic_dma_t);
	if (!nic_info_addr)
		return -ENOMEM;
	memset(nic_info_addr, 0, nic_size);

	nic_info = (struct qlcnic_info *) nic_info_addr;
	err = qlcnic_issue_cmd(adapter,
			adapter->ahw.pci_func,
			adapter->fw_hal_version,
			MSD(nic_dma_t),
			LSD(nic_dma_t),
			(func_id << 16 | nic_size),
			QLCNIC_CDRP_CMD_GET_NIC_INFO);

	if (err == QLCNIC_RCODE_SUCCESS) {
		npar_info->pci_func = le16_to_cpu(nic_info->pci_func);
		npar_info->op_mode = le16_to_cpu(nic_info->op_mode);
		npar_info->phys_port = le16_to_cpu(nic_info->phys_port);
		npar_info->switch_mode = le16_to_cpu(nic_info->switch_mode);
		npar_info->max_tx_ques = le16_to_cpu(nic_info->max_tx_ques);
		npar_info->max_rx_ques = le16_to_cpu(nic_info->max_rx_ques);
		npar_info->min_tx_bw = le16_to_cpu(nic_info->min_tx_bw);
		npar_info->max_tx_bw = le16_to_cpu(nic_info->max_tx_bw);
		npar_info->capabilities = le32_to_cpu(nic_info->capabilities);
		npar_info->max_mtu = le16_to_cpu(nic_info->max_mtu);

		dev_info(&adapter->pdev->dev,
			"phy port: %d switch_mode: %d,\n"
			"\tmax_tx_q: %d max_rx_q: %d min_tx_bw: 0x%x,\n"
			"\tmax_tx_bw: 0x%x max_mtu:0x%x, capabilities: 0x%x\n",
			npar_info->phys_port, npar_info->switch_mode,
			npar_info->max_tx_ques, npar_info->max_rx_ques,
			npar_info->min_tx_bw, npar_info->max_tx_bw,
			npar_info->max_mtu, npar_info->capabilities);
	} else {
		dev_err(&adapter->pdev->dev,
			"Failed to get nic info%d\n", err);
		err = -EIO;
	}

	pci_free_consistent(adapter->pdev, nic_size, nic_info_addr, nic_dma_t);
	return err;
}
Example #21
static void rt2x00pci_free_queue_dma(struct rt2x00_dev *rt2x00dev,
				     struct data_queue *queue)
{
	struct queue_entry_priv_pci *entry_priv =
	    queue->entries[0].priv_data;

	if (entry_priv->desc)
		pci_free_consistent(to_pci_dev(rt2x00dev->dev),
				  queue->limit * queue->desc_size,
				  entry_priv->desc, entry_priv->desc_dma);
	entry_priv->desc = NULL;
}
Example #22
File: pci.c Project: AppEngine/linux-2.6
static void rings_free(struct agnx_priv *priv)
{
	unsigned int len = priv->rx.size + priv->txm.size + priv->txd.size;
	unsigned long flags;
	AGNX_TRACE;

	spin_lock_irqsave(&priv->lock, flags);
	kfree(priv->rx.info);
	pci_free_consistent(priv->pdev, sizeof(struct agnx_desc) * len,
			    priv->rx.desc, priv->rx.dma);
	spin_unlock_irqrestore(&priv->lock, flags);
}
Example #23
void cx25821_free_mem_upstream(struct cx25821_channel *chan)
{
	struct cx25821_video_out_data *out = chan->out;
	struct cx25821_dev *dev = chan->dev;

	if (out->_is_running)
		cx25821_stop_upstream_video(chan);

	if (out->_dma_virt_addr) {
		pci_free_consistent(dev->pci, out->_risc_size,
				    out->_dma_virt_addr, out->_dma_phys_addr);
		out->_dma_virt_addr = NULL;
	}

	if (out->_data_buf_virt_addr) {
		pci_free_consistent(dev->pci, out->_data_buf_size,
				    out->_data_buf_virt_addr,
				    out->_data_buf_phys_addr);
		out->_data_buf_virt_addr = NULL;
	}
}
Example #24
int saa7164_buffer_dealloc(struct saa7164_tsport *port,
	struct saa7164_buffer *buf)
{
	struct saa7164_dev *dev = port->dev;

	if ((buf == 0) || (port == 0))
		return SAA_ERR_BAD_PARAMETER;

	dprintk(DBGLVL_BUF, "%s() deallocating buffer @ 0x%p\n", __func__, buf);

	if (buf->flags != SAA7164_BUFFER_FREE)
		log_warn(" freeing a non-free buffer\n");

	pci_free_consistent(port->dev->pci, buf->pci_size, buf->cpu, buf->dma);
	pci_free_consistent(port->dev->pci, buf->pt_size, buf->pt_cpu,
		buf->pt_dma);

	kfree(buf);

	return SAA_OK;
}
Example #25
/* Creates a dma map for the scatter-gather list entries */
static void __devinit
ide_dma_sgiioc4(ide_hwif_t * hwif, unsigned long dma_base)
{
	int num_ports = sizeof (ioc4_dma_regs_t);

	printk(KERN_INFO "%s: BM-DMA at 0x%04lx-0x%04lx\n", hwif->name,
	       dma_base, dma_base + num_ports - 1);

	if (!request_region(dma_base, num_ports, hwif->name)) {
		printk(KERN_ERR
		       "%s(%s) -- ERROR, Addresses 0x%p to 0x%p "
		       "ALREADY in use\n",
		       __FUNCTION__, hwif->name, (void *) dma_base,
		       (void *) dma_base + num_ports - 1);
		goto dma_alloc_failure;
	}

	hwif->dma_base = dma_base;
	hwif->dmatable_cpu = pci_alloc_consistent(hwif->pci_dev,
					  IOC4_PRD_ENTRIES * IOC4_PRD_BYTES,
					  &hwif->dmatable_dma);

	if (!hwif->dmatable_cpu)
		goto dma_alloc_failure;

	hwif->sg_max_nents = IOC4_PRD_ENTRIES;

	hwif->dma_base2 = (unsigned long)
		pci_alloc_consistent(hwif->pci_dev,
				     IOC4_IDE_CACHELINE_SIZE,
				     (dma_addr_t *) &(hwif->dma_status));

	if (!hwif->dma_base2)
		goto dma_base2alloc_failure;

	return;

dma_base2alloc_failure:
	pci_free_consistent(hwif->pci_dev,
			    IOC4_PRD_ENTRIES * IOC4_PRD_BYTES,
			    hwif->dmatable_cpu, hwif->dmatable_dma);
	printk(KERN_INFO
	       "%s() -- Error! Unable to allocate DMA Maps for drive %s\n",
	       __FUNCTION__, hwif->name);
	printk(KERN_INFO
	       "Changing from DMA to PIO mode for Drive %s\n", hwif->name);

dma_alloc_failure:
	/* Disable DMA because we could not allocate any DMA maps */
	hwif->autodma = 0;
	hwif->atapi_dma = 0;
}
Example #26
/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
 * If an SKB has been detached, the POOL needs to have its SKB set to NULL
 * This free routine walks the list of POOL entries and if SKB is set to
 * non NULL it is unmapped and freed
 */
void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
{
	int i;
	for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
		if (rxq->pool[i].page != NULL) {
			pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
				PAGE_SIZE << priv->hw_params.rx_page_order,
				PCI_DMA_FROMDEVICE);
			__free_pages(rxq->pool[i].page,
				     priv->hw_params.rx_page_order);
			rxq->pool[i].page = NULL;
			priv->alloc_rxb_page--;
		}
	}

	pci_free_consistent(priv->pci_dev, 4 * RX_QUEUE_SIZE, rxq->bd,
			    rxq->dma_addr);
	pci_free_consistent(priv->pci_dev, sizeof(struct iwl_rb_status),
			    rxq->rb_stts, rxq->rb_stts_dma);
	rxq->bd = NULL;
	rxq->rb_stts  = NULL;
}
Example #27
File: dl2k.c Project: 3null/fastsocket
static void __devexit
rio_remove1 (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata (pdev);

	if (dev) {
		struct netdev_private *np = netdev_priv(dev);

		unregister_netdev (dev);
		pci_free_consistent (pdev, RX_TOTAL_SIZE, np->rx_ring,
				     np->rx_ring_dma);
		pci_free_consistent (pdev, TX_TOTAL_SIZE, np->tx_ring,
				     np->tx_ring_dma);
#ifdef MEM_MAPPING
		iounmap ((char *) (dev->base_addr));
#endif
		free_netdev (dev);
		pci_release_regions (pdev);
		pci_disable_device (pdev);
	}
	pci_set_drvdata (pdev, NULL);
}
Example #28
void btcx_riscmem_free(struct pci_dev *pci,
		       struct btcx_riscmem *risc)
{
	if (NULL == risc->cpu)
		return;

	memcnt--;
	dprintk("btcx: riscmem free [%d] dma=%lx\n",
		memcnt, (unsigned long)risc->dma);

	pci_free_consistent(pci, risc->size, risc->cpu, risc->dma);
	memset(risc,0,sizeof(*risc));
}
Example #29
int mgmt_check_supported_fw(struct be_ctrl_info *ctrl,
				      struct beiscsi_hba *phba)
{
	struct be_dma_mem nonemb_cmd;
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_mgmt_controller_attributes *req;
	struct be_sge *sge = nonembedded_sgl(wrb);
	int status = 0;

	nonemb_cmd.va = pci_alloc_consistent(ctrl->pdev,
				sizeof(struct be_mgmt_controller_attributes),
				&nonemb_cmd.dma);
	if (nonemb_cmd.va == NULL) {
		SE_DEBUG(DBG_LVL_1,
			 "Failed to allocate memory for mgmt_check_supported_fw"
			 "\n");
		return -ENOMEM;
	}
	nonemb_cmd.size = sizeof(struct be_mgmt_controller_attributes);
	req = nonemb_cmd.va;
	memset(req, 0, sizeof(*req));
	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));
	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			   OPCODE_COMMON_GET_CNTL_ATTRIBUTES, sizeof(*req));
	sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd.dma));
	sge->pa_lo = cpu_to_le32(nonemb_cmd.dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(nonemb_cmd.size);
	status = be_mbox_notify(ctrl);
	if (!status) {
		struct be_mgmt_controller_attributes_resp *resp = nonemb_cmd.va;
		SE_DEBUG(DBG_LVL_8, "Firmware version of CMD: %s\n",
			resp->params.hba_attribs.flashrom_version_string);
		SE_DEBUG(DBG_LVL_8, "Firmware version is : %s\n",
			resp->params.hba_attribs.firmware_version_string);
		SE_DEBUG(DBG_LVL_8,
			"Developer Build, not performing version check...\n");
		phba->fw_config.iscsi_features =
				resp->params.hba_attribs.iscsi_features;
		SE_DEBUG(DBG_LVL_8, " phba->fw_config.iscsi_features = %d\n",
				      phba->fw_config.iscsi_features);
	} else
		SE_DEBUG(DBG_LVL_1, " Failed in mgmt_check_supported_fw\n");
	spin_unlock(&ctrl->mbox_lock);
	if (nonemb_cmd.va)
		pci_free_consistent(ctrl->pdev, nonemb_cmd.size,
				    nonemb_cmd.va, nonemb_cmd.dma);

	return status;
}
Example #30
static void __exit vino_exit(void)
{
	video_unregister_device(&Vino->chA.vdev);
	video_unregister_device(&Vino->chB.vdev);
	vino_i2c_del_bus();
	free_irq(SGI_VINO_IRQ, NULL);
	pci_unmap_single(NULL, Vino->dummy_dma.dma, PAGE_SIZE,
			 PCI_DMA_FROMDEVICE);
	pci_free_consistent(NULL, 4 * sizeof(dma_addr_t),
			    (void *)Vino->dummy_dma.cpu, Vino->dummy_dma.dma);
	free_page(Vino->dummy_desc);
	kfree(Vino);
	iounmap(vino);
}
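
Note for readers: pci_alloc_consistent()/pci_free_consistent() are thin compatibility wrappers around the generic DMA API and have since been removed from mainline kernels; equivalent code frees coherent memory through dma_free_coherent() on the underlying struct device. A hedged sketch of the equivalent call, assuming a valid struct pci_dev *pdev:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Sketch only: the modern equivalent of
 * pci_free_consistent(pdev, size, cpu_addr, dma_handle). */
static void example_free_coherent(struct pci_dev *pdev, size_t size,
				  void *cpu_addr, dma_addr_t dma_handle)
{
	dma_free_coherent(&pdev->dev, size, cpu_addr, dma_handle);
}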