Example #1
/**
 * emu10k1_voice_alloc_buffer - allocate the memory buffer for a voice
 *
 * Two page tables are kept for each buffer. One (dma_handle) keeps track
 * of the host memory pages used; the other (virtualpagetable) is passed
 * to the device so that it can do DMA to host memory.
 */
int emu10k1_voice_alloc_buffer(struct emu10k1_card *card, struct voice_mem *mem, u32 pages)
{
	u32 pageindex, pagecount;
	unsigned long busaddx;
	int i;

	DPD(2, "requested pages is: %d\n", pages);

	if ((mem->emupageindex = emu10k1_addxmgr_alloc(pages * PAGE_SIZE, card)) < 0)
	{
		DPF(1, "couldn't allocate emu10k1 address space\n");
		return -1;
	}

	/* Fill in virtual memory table */

	if ((mem->addr = pci_alloc_consistent(card->pci_dev, pages * PAGE_SIZE, &mem->dma_handle))
	    == NULL) {
		mem->pages = 0;
		DPF(1, "couldn't allocate dma memory\n");
		return -1;
	}

	for (pagecount = 0; pagecount < pages; pagecount++) {
		DPD(2, "Virtual Addx: %p\n", mem->addr + pagecount * PAGE_SIZE);

		for (i = 0; i < PAGE_SIZE / EMUPAGESIZE; i++) {
			busaddx = mem->dma_handle
				  + pagecount * PAGE_SIZE + i * EMUPAGESIZE;

			DPD(3, "Bus Addx: %#lx\n", busaddx);

			pageindex = mem->emupageindex + pagecount * PAGE_SIZE / EMUPAGESIZE + i;

			((u32 *) card->virtualpagetable.addr)[pageindex] = cpu_to_le32((busaddx * 2) | pageindex);
		}
	}

	mem->pages = pagecount;

	return 0;
}
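For symmetry, here is a minimal sketch of the matching free path. It is illustrative, not the verbatim driver function: emu10k1_addxmgr_free() is assumed to be the counterpart of emu10k1_addxmgr_alloc() used above, and the real driver additionally repoints the freed page-table entries at a silent page.

void emu10k1_voice_free_buffer(struct emu10k1_card *card, struct voice_mem *mem)
{
	/* Release the consistent buffer with the same size and DMA
	 * handle that pci_alloc_consistent() returned above. */
	if (mem->addr)
		pci_free_consistent(card->pci_dev, mem->pages * PAGE_SIZE,
				    mem->addr, mem->dma_handle);

	/* Give the emu10k1 address range back to the allocator. */
	emu10k1_addxmgr_free(card, mem->emupageindex);
	mem->pages = 0;
}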
Example #2
int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
{
	u64 a0, a1;
	int wait = 1000;

	if (!vdev->stats) {
		vdev->stats = pci_alloc_consistent(vdev->pdev,
			sizeof(struct vnic_stats), &vdev->stats_pa);
		if (!vdev->stats)
			return -ENOMEM;
	}

	*stats = vdev->stats;
	a0 = vdev->stats_pa;
	a1 = sizeof(struct vnic_stats);

	return vnic_dev_cmd(vdev, CMD_STATS_DUMP, &a0, &a1, wait);
}
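The stats buffer is allocated once and cached in vdev->stats. A teardown path must release it with the same size and DMA handle; a minimal sketch, assuming it lives in the driver's unregister routine:

	if (vdev->stats)
		pci_free_consistent(vdev->pdev, sizeof(struct vnic_stats),
			vdev->stats, vdev->stats_pa);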
Example #3
int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
{
	void *notify_addr;
	dma_addr_t notify_pa;

	if (vdev->notify || vdev->notify_pa) {
		pr_err("notify block %p still allocated", vdev->notify);
		return -EINVAL;
	}

	notify_addr = pci_alloc_consistent(vdev->pdev,
			sizeof(struct vnic_devcmd_notify),
			&notify_pa);
	if (!notify_addr)
		return -ENOMEM;

	return vnic_dev_notify_setcmd(vdev, notify_addr, notify_pa, intr);
}
Example #4
int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
{
	u64 a0, a1;
	int wait = 1000;

	if (!vdev->notify) {
		vdev->notify = pci_alloc_consistent(vdev->pdev,
			sizeof(struct vnic_devcmd_notify),
			&vdev->notify_pa);
		if (!vdev->notify)
			return -ENOMEM;
	}

	a0 = vdev->notify_pa;
	a1 = ((u64)intr << 32) & 0x0000ffff00000000ULL;
	a1 += sizeof(struct vnic_devcmd_notify);

	return vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
}
Example #5
static int
scsi_cmd_stack_setup(ctlr_info_t *h, struct cciss_scsi_adapter_data_t *sa)
{
	int i;
	struct cciss_scsi_cmd_stack_t *stk;
	size_t size;

	stk = &sa->cmd_stack;
	stk->nelems = cciss_tape_cmds + 2;
	sa->cmd_sg_list = cciss_allocate_sg_chain_blocks(h,
		h->chainsize, stk->nelems);
	if (!sa->cmd_sg_list && h->chainsize > 0)
		return -ENOMEM;

	size = sizeof(struct cciss_scsi_cmd_stack_elem_t) * stk->nelems;

	/* Check alignment, see cciss_cmd.h near CommandList_struct def. */
	BUILD_BUG_ON((sizeof(*stk->pool) % COMMANDLIST_ALIGNMENT) != 0);
	/* pci_alloc_consistent guarantees 32-bit DMA address will be used */
	stk->pool = (struct cciss_scsi_cmd_stack_elem_t *)
		pci_alloc_consistent(h->pdev, size, &stk->cmd_pool_handle);

	if (stk->pool == NULL) {
		cciss_free_sg_chain_blocks(sa->cmd_sg_list, stk->nelems);
		sa->cmd_sg_list = NULL;
		return -ENOMEM;
	}
	stk->elem = kmalloc(sizeof(stk->elem[0]) * stk->nelems, GFP_KERNEL);
	if (!stk->elem) {
		pci_free_consistent(h->pdev, size, stk->pool,
		stk->cmd_pool_handle);
		return -1;
	}
	for (i = 0; i < stk->nelems; i++) {
		stk->elem[i] = &stk->pool[i];
		stk->elem[i]->busaddr = (__u32) (stk->cmd_pool_handle + 
			(sizeof(struct cciss_scsi_cmd_stack_elem_t) * i));
		stk->elem[i]->cmdindex = i;
	}
	stk->top = stk->nelems-1;
	return 0;
}
Example #6
int dma_prog_region_alloc(struct dma_prog_region *prog, unsigned long n_bytes,
			  struct pci_dev *dev)
{
	/* round up to page size */
	n_bytes = PAGE_ALIGN(n_bytes);

	prog->n_pages = n_bytes >> PAGE_SHIFT;

	prog->kvirt = pci_alloc_consistent(dev, n_bytes, &prog->bus_addr);
	if (!prog->kvirt) {
		printk(KERN_ERR
		       "dma_prog_region_alloc: pci_alloc_consistent() failed\n");
		dma_prog_region_free(prog);
		return -ENOMEM;
	}

	prog->dev = dev;

	return 0;
}
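The matching free routine has to be safe on the error path above, which calls it before prog->dev is even assigned; a sketch that frees only when kvirt is non-NULL and resets the bookkeeping fields:

void dma_prog_region_free(struct dma_prog_region *prog)
{
	if (prog->kvirt)
		pci_free_consistent(prog->dev, prog->n_pages << PAGE_SHIFT,
				    prog->kvirt, prog->bus_addr);

	prog->n_pages = 0;
	prog->kvirt = NULL;
	prog->bus_addr = 0;
}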
Example #7
/* Issue a mailbox command to dump RISC RAM. */
int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf,
		u32 ram_addr, int word_count)
{
	int status;
	char *my_buf;
	dma_addr_t buf_dma;

	my_buf = pci_alloc_consistent(qdev->pdev, word_count * sizeof(u32),
					&buf_dma);
	if (!my_buf)
		return -EIO;

	status = ql_mb_dump_ram(qdev, buf_dma, ram_addr, word_count);
	if (!status)
		memcpy(buf, my_buf, word_count * sizeof(u32));

	pci_free_consistent(qdev->pdev, word_count * sizeof(u32), my_buf,
				buf_dma);
	return status;
}
Example #8
void*
osl_dma_alloc_consistent(osl_t *osh, uint size, uint16 align_bits, uint *alloced, ulong *pap)
{
	void *va;
	uint16 align = (1 << align_bits);
	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));

	if (!ISALIGNED(DMA_CONSISTENT_ALIGN, align))
		size += align;
	*alloced = size;

#ifdef __ARM_ARCH_7A__
	va = kmalloc(size, GFP_ATOMIC | __GFP_ZERO);
	if (va)
		*pap = (ulong)__virt_to_phys((ulong)va);
#else
	va = pci_alloc_consistent(osh->pdev, size, (dma_addr_t*)pap);
#endif
	return va;
}
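A plausible matching free routine, mirroring the #ifdef split above (a sketch, not taken verbatim from the source):

void
osl_dma_free_consistent(osl_t *osh, void *va, uint size, ulong pa)
{
	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));

#ifdef __ARM_ARCH_7A__
	kfree(va);
#else
	pci_free_consistent(osh->pdev, size, va, (dma_addr_t)pa);
#endif
}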
Example #9
static int
lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
		u32 buf_len, void *buf)
{
	struct be_dma_mem read_cmd;
	u32 read_len = 0, total_read_len = 0, chunk_size;
	u32 eof = 0;
	u8 addn_status;
	int status = 0;

	read_cmd.size = LANCER_READ_FILE_CHUNK;
	read_cmd.va = pci_alloc_consistent(adapter->pdev, read_cmd.size,
			&read_cmd.dma);

	if (!read_cmd.va) {
		dev_err(&adapter->pdev->dev,
				"Memory allocation failure while reading dump\n");
		return -ENOMEM;
	}

	while ((total_read_len < buf_len) && !eof) {
		chunk_size = min_t(u32, (buf_len - total_read_len),
				LANCER_READ_FILE_CHUNK);
		chunk_size = ALIGN(chunk_size, 4);
		status = lancer_cmd_read_object(adapter, &read_cmd, chunk_size,
				total_read_len, file_name, &read_len,
				&eof, &addn_status);
		if (!status) {
			memcpy(buf + total_read_len, read_cmd.va, read_len);
			total_read_len += read_len;
			eof &= LANCER_READ_FILE_EOF_MASK;
		} else {
			status = -EIO;
			break;
		}
	}
	pci_free_consistent(adapter->pdev, read_cmd.size, read_cmd.va,
			read_cmd.dma);

	return status;
}
Example #10
static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw,
				 unsigned int prio, unsigned int entries)
{
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_tx_desc *ring;
	dma_addr_t dma;
	u32 nextdescaddress;
	int i;

	ring = pci_alloc_consistent(rtlpci->pdev,
				    sizeof(*ring) * entries, &dma);

	if (!ring || (unsigned long)ring & 0xFF) {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
			 ("Cannot allocate TX ring (prio = %d)\n", prio));
		return -ENOMEM;
	}

	memset(ring, 0, sizeof(*ring) * entries);
	rtlpci->tx_ring[prio].desc = ring;
	rtlpci->tx_ring[prio].dma = dma;
	rtlpci->tx_ring[prio].idx = 0;
	rtlpci->tx_ring[prio].entries = entries;
	skb_queue_head_init(&rtlpci->tx_ring[prio].queue);

	RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
		 ("queue:%d, ring_addr:%p\n", prio, ring));

	for (i = 0; i < entries; i++) {
		nextdescaddress = (u32) dma + ((i + 1) % entries) *
					      sizeof(*ring);

		rtlpriv->cfg->ops->set_desc((u8 *)&(ring[i]),
					    true, HW_DESC_TX_NEXTDESC_ADDR,
					    (u8 *)&nextdescaddress);
	}

	return 0;
}
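The ring must later be released with the same byte count and DMA handle recorded above; a minimal teardown sketch (the driver's real free routine would also unmap and free any skbs still queued on the ring):

	pci_free_consistent(rtlpci->pdev,
			    sizeof(*rtlpci->tx_ring[prio].desc) *
			    rtlpci->tx_ring[prio].entries,
			    rtlpci->tx_ring[prio].desc,
			    rtlpci->tx_ring[prio].dma);
	rtlpci->tx_ring[prio].desc = NULL;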
Example #11
int wlRxRingAlloc(struct net_device *netdev)
{
    struct wlprivate *wlpptr = NETDEV_PRIV_P(struct wlprivate, netdev);

    WLDBG_ENTER_INFO(DBG_LEVEL_12, "allocating %i (0x%x) bytes",
                     MAX_NUM_RX_RING_BYTES, MAX_NUM_RX_RING_BYTES);

    ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].pRxRing =
        (wlrxdesc_t *) pci_alloc_consistent(wlpptr->pPciDev,
                                            MAX_NUM_RX_RING_BYTES,
                                            &((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].pPhysRxRing);

    if (((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].pRxRing == NULL)
    {
        WLDBG_ERROR(DBG_LEVEL_12, "can not alloc mem");
        return FAIL;
    }
    memset(((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].pRxRing, 0x00, MAX_NUM_RX_RING_BYTES);
    WLDBG_EXIT_INFO(DBG_LEVEL_12, "RX ring vaddr: 0x%x paddr: 0x%x",
                    ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].pRxRing, ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].pPhysRxRing);
    return SUCCESS;
}
Example #12
int vnic_dev_init_prov2(struct vnic_dev *vdev, u8 *buf, u32 len)
{
	u64 a0, a1 = len;
	int wait = 1000;
	dma_addr_t prov_pa;
	void *prov_buf;
	int ret;

	prov_buf = pci_alloc_consistent(vdev->pdev, len, &prov_pa);
	if (!prov_buf)
		return -ENOMEM;

	memcpy(prov_buf, buf, len);

	a0 = prov_pa;

	ret = vnic_dev_cmd(vdev, CMD_INIT_PROV_INFO2, &a0, &a1, wait);

	pci_free_consistent(vdev->pdev, len, prov_buf, prov_pa);

	return ret;
}
Example #13
int flexcop_dma_allocate(struct pci_dev *pdev,
		struct flexcop_dma *dma, u32 size)
{
	u8 *tcpu;
	dma_addr_t tdma = 0;

	if (size % 2) {
		err("dma buffersize has to be even.");
		return -EINVAL;
	}

	if ((tcpu = pci_alloc_consistent(pdev, size, &tdma)) != NULL) {
		dma->pdev = pdev;
		dma->cpu_addr0 = tcpu;
		dma->dma_addr0 = tdma;
		dma->cpu_addr1 = tcpu + size/2;
		dma->dma_addr1 = tdma + size/2;
		dma->size = size/2;
		return 0;
	}
	return -ENOMEM;
}
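Because dma->size stores only half of the allocation, the free path must hand size * 2 back to pci_free_consistent(); a sketch assuming the fields set above:

void flexcop_dma_free(struct flexcop_dma *dma)
{
	/* cpu_addr0/dma_addr0 point at the start of the single
	 * allocation that was split into two halves. */
	pci_free_consistent(dma->pdev, dma->size * 2,
			dma->cpu_addr0, dma->dma_addr0);
	memset(dma, 0, sizeof(struct flexcop_dma));
}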
Example #14
static int rtl8180_init_rx_ring(struct ieee80211_hw *dev)
{
	struct rtl8180_priv *priv = dev->priv;
	struct rtl8180_rx_desc *entry;
	int i;

	priv->rx_ring = pci_alloc_consistent(priv->pdev,
					     sizeof(*priv->rx_ring) * 32,
					     &priv->rx_ring_dma);

	if (!priv->rx_ring || (unsigned long)priv->rx_ring & 0xFF) {
		printk(KERN_ERR "%s: Cannot allocate RX ring\n",
		       wiphy_name(dev->wiphy));
		return -ENOMEM;
	}

	memset(priv->rx_ring, 0, sizeof(*priv->rx_ring) * 32);
	priv->rx_idx = 0;

	for (i = 0; i < 32; i++) {
		struct sk_buff *skb = dev_alloc_skb(MAX_RX_SIZE);
		dma_addr_t *mapping;
		entry = &priv->rx_ring[i];
		if (!skb)
			return 0;

		priv->rx_buf[i] = skb;
		mapping = (dma_addr_t *)skb->cb;
		*mapping = pci_map_single(priv->pdev, skb_tail_pointer(skb),
					  MAX_RX_SIZE, PCI_DMA_FROMDEVICE);
		entry->rx_buf = cpu_to_le32(*mapping);
		entry->flags = cpu_to_le32(RTL818X_RX_DESC_FLAG_OWN |
					   MAX_RX_SIZE);
	}
	entry->flags |= cpu_to_le32(RTL818X_RX_DESC_FLAG_EOR);
	return 0;
}
Example #15
int btcx_riscmem_alloc(struct pci_dev *pci,
		       struct btcx_riscmem *risc,
		       unsigned int size)
{
	__le32 *cpu;
	dma_addr_t dma = 0;

	if (NULL != risc->cpu && risc->size < size)
		btcx_riscmem_free(pci,risc);
	if (NULL == risc->cpu) {
		cpu = pci_alloc_consistent(pci, size, &dma);
		if (NULL == cpu)
			return -ENOMEM;
		risc->cpu  = cpu;
		risc->dma  = dma;
		risc->size = size;

		memcnt++;
		dprintk("btcx: riscmem alloc [%d] dma=%lx cpu=%p size=%d\n",
			memcnt, (unsigned long)dma, cpu, size);
	}
	memset(risc->cpu,0,risc->size);
	return 0;
}
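The reuse logic above depends on btcx_riscmem_free() resetting the struct so a later allocation starts clean; a sketch of what it plausibly does, including the memcnt debug counter that the alloc path increments:

void btcx_riscmem_free(struct pci_dev *pci, struct btcx_riscmem *risc)
{
	if (NULL == risc->cpu)
		return;
	memcnt--;
	pci_free_consistent(pci, risc->size, risc->cpu, risc->dma);
	memset(risc, 0, sizeof(*risc));
}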
Example #16
int vnic_dev_fw_info(struct vnic_dev *vdev,
	struct vnic_devcmd_fw_info **fw_info)
{
	u64 a0, a1 = 0;
	int wait = 1000;
	int err = 0;

	if (!vdev->fw_info) {
		vdev->fw_info = pci_alloc_consistent(vdev->pdev,
			sizeof(struct vnic_devcmd_fw_info),
			&vdev->fw_info_pa);
		if (!vdev->fw_info)
			return -ENOMEM;

		a0 = vdev->fw_info_pa;

		/* only get fw_info once and cache it */
		err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO, &a0, &a1, wait);
	}

	*fw_info = vdev->fw_info;

	return err;
}
Example #17
int wlTxRingAlloc(struct net_device *netdev)
{
    struct wlprivate *wlpptr = NETDEV_PRIV_P(struct wlprivate, netdev);
    int num;
    UINT8 *mem = (UINT8 *) pci_alloc_consistent(wlpptr->pPciDev,
                 MAX_NUM_TX_RING_BYTES *NUM_OF_DESCRIPTOR_DATA,
                 &((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].pPhysTxRing);
    for(num =0; num < NUM_OF_DESCRIPTOR_DATA; num++)
    {

        WLDBG_ENTER_INFO(DBG_LEVEL_12, "allocating %i (0x%x) bytes",MAX_NUM_TX_RING_BYTES, MAX_NUM_TX_RING_BYTES);
        ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[num].pTxRing =(wltxdesc_t *) (mem +num*MAX_NUM_TX_RING_BYTES);
        ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[num].pPhysTxRing = (dma_addr_t)((UINT32)((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].pPhysTxRing+num*MAX_NUM_TX_RING_BYTES);
        if (((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[num].pTxRing == NULL)
        {
            WLDBG_ERROR(DBG_LEVEL_12, "can not alloc mem");
            return FAIL;
        }
        memset(((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[num].pTxRing, 0x00, MAX_NUM_TX_RING_BYTES);
        WLDBG_EXIT_INFO(DBG_LEVEL_12, "TX ring vaddr: 0x%x paddr: 0x%x",
                        ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[num].pTxRing, ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[num].pPhysTxRing);
    }
    return SUCCESS;
}
Example #18
 /**
  * pm8001_mem_alloc - allocate memory for pm8001.
  * @pdev: pci device.
  * @virt_addr: the allocated (aligned) virtual address
  * @pphys_addr: the aligned physical (bus) address
  * @pphys_addr_hi: high 32 bits of the physical address.
  * @pphys_addr_lo: low 32 bits of the physical address.
  * @mem_size: memory size.
  * @align: required alignment, in bytes.
  */
int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr,
	dma_addr_t *pphys_addr, u32 *pphys_addr_hi,
	u32 *pphys_addr_lo, u32 mem_size, u32 align)
{
	caddr_t mem_virt_alloc;
	dma_addr_t mem_dma_handle;
	u64 phys_align;
	u64 align_offset = 0;
	if (align)
		align_offset = (dma_addr_t)align - 1;
	mem_virt_alloc =
		pci_alloc_consistent(pdev, mem_size + align, &mem_dma_handle);
	if (!mem_virt_alloc) {
		pm8001_printk("memory allocation error\n");
		return -1;
	}
	memset((void *)mem_virt_alloc, 0, mem_size+align);
	*pphys_addr = mem_dma_handle;
	phys_align = (*pphys_addr + align_offset) & ~align_offset;
	*virt_addr = (void *)mem_virt_alloc + phys_align - *pphys_addr;
	*pphys_addr_hi = upper_32_bits(phys_align);
	*pphys_addr_lo = lower_32_bits(phys_align);
	return 0;
}
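A hypothetical usage sketch: the function over-allocates by align bytes, rounds the bus address up to the requested boundary, and offsets the virtual pointer by the same amount, so the caller gets matching aligned virtual and bus addresses (values below are illustrative):

	void *va;
	dma_addr_t pa;
	u32 pa_hi, pa_lo;

	/* 1 KiB region aligned to a 64-byte boundary */
	if (pm8001_mem_alloc(pdev, &va, &pa, &pa_hi, &pa_lo, 1024, 64))
		return -ENOMEM;
	/* va and pa now both sit on a 64-byte boundary */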
Example #19
static int
be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
			uint8_t *data)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_dma_mem eeprom_cmd;
	struct be_cmd_resp_seeprom_read *resp;
	int status;

	if (!eeprom->len)
		return -EINVAL;

	eeprom->magic = BE_VENDOR_ID | (adapter->pdev->device<<16);

	memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem));
	eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read);
	eeprom_cmd.va = pci_alloc_consistent(adapter->pdev, eeprom_cmd.size,
				&eeprom_cmd.dma);

	if (!eeprom_cmd.va) {
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure. Could not read eeprom\n");
		return -ENOMEM;
	}

	status = be_cmd_get_seeprom_data(adapter, &eeprom_cmd);

	if (!status) {
		resp = (struct be_cmd_resp_seeprom_read *) eeprom_cmd.va;
		memcpy(data, resp->seeprom_data + eeprom->offset, eeprom->len);
	}
	pci_free_consistent(adapter->pdev, eeprom_cmd.size, eeprom_cmd.va,
			eeprom_cmd.dma);

	return status;
}
Example #20
static int ioctl_send_fib(struct aac_dev * dev, void __user *arg)
{
	struct hw_fib * kfib;
	struct fib *fibptr;
	struct hw_fib * hw_fib = (struct hw_fib *)0;
	dma_addr_t hw_fib_pa = (dma_addr_t)0LL;
	unsigned size;
	int retval;

	if (dev->in_reset) {
		return -EBUSY;
	}
	fibptr = aac_fib_alloc(dev);
	if(fibptr == NULL) {
		return -ENOMEM;
	}

	kfib = fibptr->hw_fib_va;
	
	if (copy_from_user((void *)kfib, arg, sizeof(struct aac_fibhdr))) {
		aac_fib_free(fibptr);
		return -EFAULT;
	}
	
	size = le16_to_cpu(kfib->header.Size) + sizeof(struct aac_fibhdr);
	if (size < le16_to_cpu(kfib->header.SenderSize))
		size = le16_to_cpu(kfib->header.SenderSize);
	if (size > dev->max_fib_size) {
		dma_addr_t daddr;

		if (size > 2048) {
			retval = -EINVAL;
			goto cleanup;
		}

		kfib = pci_alloc_consistent(dev->pdev, size, &daddr);
		if (!kfib) {
			retval = -ENOMEM;
			goto cleanup;
		}

		
		hw_fib = fibptr->hw_fib_va;
		hw_fib_pa = fibptr->hw_fib_pa;
		fibptr->hw_fib_va = kfib;
		fibptr->hw_fib_pa = daddr;
		memset(((char *)kfib) + dev->max_fib_size, 0, size - dev->max_fib_size);
		memcpy(kfib, hw_fib, dev->max_fib_size);
	}

	if (copy_from_user(kfib, arg, size)) {
		retval = -EFAULT;
		goto cleanup;
	}

	if (kfib->header.Command == cpu_to_le16(TakeABreakPt)) {
		aac_adapter_interrupt(dev);
		
		kfib->header.XferState = 0;
	} else {
		retval = aac_fib_send(le16_to_cpu(kfib->header.Command), fibptr,
				le16_to_cpu(kfib->header.Size) , FsaNormal,
				1, 1, NULL, NULL);
		if (retval) {
			goto cleanup;
		}
		if (aac_fib_complete(fibptr) != 0) {
			retval = -EINVAL;
			goto cleanup;
		}
	}
	

	retval = 0;
	if (copy_to_user(arg, (void *)kfib, size))
		retval = -EFAULT;
cleanup:
	if (hw_fib) {
		pci_free_consistent(dev->pdev, size, kfib, fibptr->hw_fib_pa);
		fibptr->hw_fib_pa = hw_fib_pa;
		fibptr->hw_fib_va = hw_fib;
	}
	if (retval != -EINTR)
		aac_fib_free(fibptr);
	return retval;
}
Example #21
static int cx25821_upstream_buffer_prepare_ch2(struct cx25821_dev *dev,
					       struct sram_channel *sram_ch,
					       int bpl)
{
	int ret = 0;
	dma_addr_t dma_addr;
	dma_addr_t data_dma_addr;

	if (dev->_dma_virt_addr_ch2 != NULL) {
		pci_free_consistent(dev->pci, dev->upstream_riscbuf_size_ch2,
				    dev->_dma_virt_addr_ch2,
				    dev->_dma_phys_addr_ch2);
	}

	dev->_dma_virt_addr_ch2 = pci_alloc_consistent(dev->pci,
			dev->upstream_riscbuf_size_ch2, &dma_addr);
	dev->_dma_virt_start_addr_ch2 = dev->_dma_virt_addr_ch2;
	dev->_dma_phys_start_addr_ch2 = dma_addr;
	dev->_dma_phys_addr_ch2 = dma_addr;
	dev->_risc_size_ch2 = dev->upstream_riscbuf_size_ch2;

	if (!dev->_dma_virt_addr_ch2) {
		pr_err("FAILED to allocate memory for Risc buffer! Returning\n");
		return -ENOMEM;
	}

	/* Initialize n bytes at this address to 0 */
	memset(dev->_dma_virt_addr_ch2, 0, dev->_risc_size_ch2);

	if (dev->_data_buf_virt_addr_ch2 != NULL) {
		pci_free_consistent(dev->pci, dev->upstream_databuf_size_ch2,
				    dev->_data_buf_virt_addr_ch2,
				    dev->_data_buf_phys_addr_ch2);
	}
	/* For Video Data buffer allocation */
	dev->_data_buf_virt_addr_ch2 = pci_alloc_consistent(dev->pci,
			dev->upstream_databuf_size_ch2, &data_dma_addr);
	dev->_data_buf_phys_addr_ch2 = data_dma_addr;
	dev->_data_buf_size_ch2 = dev->upstream_databuf_size_ch2;

	if (!dev->_data_buf_virt_addr_ch2) {
		pr_err("FAILED to allocate memory for data buffer! Returning\n");
		return -ENOMEM;
	}

	/* Initialize n bytes at this address to 0 */
	memset(dev->_data_buf_virt_addr_ch2, 0, dev->_data_buf_size_ch2);

	ret = cx25821_openfile_ch2(dev, sram_ch);
	if (ret < 0)
		return ret;

	/* Creating RISC programs */
	ret = cx25821_risc_buffer_upstream_ch2(dev, dev->pci, 0, bpl,
						dev->_lines_count_ch2);
	if (ret < 0) {
		pr_info("Failed creating Video Upstream Risc programs!\n");
		goto error;
	}

	return 0;

error:
	return ret;
}
Example #22
/*
 * Allocate and register buffer for WQEs.  qp->rq.max, sq.max,
 * rq.max_gs and sq.max_gs must all be assigned.
 * mthca_alloc_wqe_buf will calculate rq.wqe_shift and
 * sq.wqe_shift (as well as send_wqe_offset, is_direct, and
 * queue)
 */
static int mthca_alloc_wqe_buf(struct mthca_dev *dev,
			       struct mthca_pd *pd,
			       struct mthca_qp *qp)
{
	int size;
	int i;
	int npages, shift;
	dma_addr_t t;
	u64 *dma_list = NULL;
	int err = -ENOMEM;

	size = sizeof (struct mthca_next_seg) +
		qp->rq.max_gs * sizeof (struct mthca_data_seg);

	for (qp->rq.wqe_shift = 6; 1 << qp->rq.wqe_shift < size;
	     qp->rq.wqe_shift++)
		; /* nothing */

	size = sizeof (struct mthca_next_seg) +
		qp->sq.max_gs * sizeof (struct mthca_data_seg);
	if (qp->transport == MLX)
		size += 2 * sizeof (struct mthca_data_seg);
	else if (qp->transport == UD)
		size += sizeof (struct mthca_ud_seg);
	else /* bind seg is as big as atomic + raddr segs */
		size += sizeof (struct mthca_bind_seg);

	for (qp->sq.wqe_shift = 6; 1 << qp->sq.wqe_shift < size;
	     qp->sq.wqe_shift++)
		; /* nothing */

	qp->send_wqe_offset = ALIGN(qp->rq.max << qp->rq.wqe_shift,
				    1 << qp->sq.wqe_shift);
	size = PAGE_ALIGN(qp->send_wqe_offset +
			  (qp->sq.max << qp->sq.wqe_shift));

	qp->wrid = kmalloc((qp->rq.max + qp->sq.max) * sizeof (u64),
			   GFP_KERNEL);
	if (!qp->wrid)
		goto err_out;

	if (size <= MTHCA_MAX_DIRECT_QP_SIZE) {
		qp->is_direct = 1;
		npages = 1;
		shift = get_order(size) + PAGE_SHIFT;

		if (0)
			mthca_dbg(dev, "Creating direct QP of size %d (shift %d)\n",
				  size, shift);

		qp->queue.direct.buf = pci_alloc_consistent(dev->pdev, size, &t);
		if (!qp->queue.direct.buf)
			goto err_out;

		pci_unmap_addr_set(&qp->queue.direct, mapping, t);

		memset(qp->queue.direct.buf, 0, size);

		while (t & ((1 << shift) - 1)) {
			--shift;
			npages *= 2;
		}

		dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
		if (!dma_list)
			goto err_out_free;

		for (i = 0; i < npages; ++i)
			dma_list[i] = t + i * (1 << shift);
	} else {
		qp->is_direct = 0;
		npages = size / PAGE_SIZE;
		shift = PAGE_SHIFT;

		if (0)
			mthca_dbg(dev, "Creating indirect QP with %d pages\n", npages);

		dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
		if (!dma_list)
			goto err_out;

		qp->queue.page_list = kmalloc(npages *
					      sizeof *qp->queue.page_list,
					      GFP_KERNEL);
		if (!qp->queue.page_list)
			goto err_out;

		for (i = 0; i < npages; ++i) {
			qp->queue.page_list[i].buf =
				pci_alloc_consistent(dev->pdev, PAGE_SIZE, &t);
			if (!qp->queue.page_list[i].buf)
				goto err_out_free;

			memset(qp->queue.page_list[i].buf, 0, PAGE_SIZE);

			pci_unmap_addr_set(&qp->queue.page_list[i], mapping, t);
			dma_list[i] = t;
		}
	}

	err = mthca_mr_alloc_phys(dev, pd->pd_num, dma_list, shift,
				  npages, 0, size,
				  MTHCA_MPT_FLAG_LOCAL_WRITE |
				  MTHCA_MPT_FLAG_LOCAL_READ,
				  &qp->mr);
	if (err)
		goto err_out_free;

	kfree(dma_list);
	return 0;

 err_out_free:
	if (qp->is_direct) {
		pci_free_consistent(dev->pdev, size,
				    qp->queue.direct.buf,
				    pci_unmap_addr(&qp->queue.direct, mapping));
	} else
		for (i = 0; i < npages; ++i) {
			if (qp->queue.page_list[i].buf)
				pci_free_consistent(dev->pdev, PAGE_SIZE,
						    qp->queue.page_list[i].buf,
						    pci_unmap_addr(&qp->queue.page_list[i],
								   mapping));

		}

 err_out:
	kfree(qp->wrid);
	kfree(dma_list);
	return err;
}
Example #23
static void load_code(struct icom_port *icom_port)
{
	const struct firmware *fw;
	char __iomem *iram_ptr;
	int index;
	int status = 0;
	void __iomem *dram_ptr = icom_port->dram;
	dma_addr_t temp_pci;
	unsigned char *new_page = NULL;
	unsigned char cable_id = NO_CABLE;
	struct pci_dev *dev = icom_port->adapter->pci_dev;

	/* Clear out any pending interrupts */
	writew(0x3FFF, icom_port->int_reg);

	trace(icom_port, "CLEAR_INTERRUPTS", 0);

	/* Stop processor */
	stop_processor(icom_port);

	/* Zero out DRAM */
	memset_io(dram_ptr, 0, 512);

	/* Load Call Setup into Adapter */
	if (request_firmware(&fw, "icom_call_setup.bin", &dev->dev) < 0) {
		dev_err(&dev->dev,"Unable to load icom_call_setup.bin firmware image\n");
		status = -1;
		goto load_code_exit;
	}

	if (fw->size > ICOM_DCE_IRAM_OFFSET) {
		dev_err(&dev->dev, "Invalid firmware image for icom_call_setup.bin found.\n");
		release_firmware(fw);
		status = -1;
		goto load_code_exit;
	}

	iram_ptr = (char __iomem *)icom_port->dram + ICOM_IRAM_OFFSET;
	for (index = 0; index < fw->size; index++)
		writeb(fw->data[index], &iram_ptr[index]);

	release_firmware(fw);

	/* Load Resident DCE portion of Adapter */
	if (request_firmware(&fw, "icom_res_dce.bin", &dev->dev) < 0) {
		dev_err(&dev->dev,"Unable to load icom_res_dce.bin firmware image\n");
		status = -1;
		goto load_code_exit;
	}

	if (fw->size > ICOM_IRAM_SIZE) {
		dev_err(&dev->dev, "Invalid firmware image for icom_res_dce.bin found.\n");
		release_firmware(fw);
		status = -1;
		goto load_code_exit;
	}

	iram_ptr = (char __iomem *) icom_port->dram + ICOM_IRAM_OFFSET;
	for (index = ICOM_DCE_IRAM_OFFSET; index < fw->size; index++)
		writeb(fw->data[index], &iram_ptr[index]);

	release_firmware(fw);

	/* Set Hardware level */
	if ((icom_port->adapter->version | ADAPTER_V2) == ADAPTER_V2)
		writeb(V2_HARDWARE, &(icom_port->dram->misc_flags));

	/* Start the processor in Adapter */
	start_processor(icom_port);

	writeb((HDLC_PPP_PURE_ASYNC | HDLC_FF_FILL),
	       &(icom_port->dram->HDLCConfigReg));
	writeb(0x04, &(icom_port->dram->FlagFillIdleTimer));	/* 0.5 seconds */
	writeb(0x00, &(icom_port->dram->CmdReg));
	writeb(0x10, &(icom_port->dram->async_config3));
	writeb((ICOM_ACFG_DRIVE1 | ICOM_ACFG_NO_PARITY | ICOM_ACFG_8BPC |
		ICOM_ACFG_1STOP_BIT), &(icom_port->dram->async_config2));

	/*Set up data in icom DRAM to indicate where personality
	 *code is located and its length.
	 */
	new_page = pci_alloc_consistent(dev, 4096, &temp_pci);

	if (!new_page) {
		dev_err(&dev->dev, "Can not allocate DMA buffer\n");
		status = -1;
		goto load_code_exit;
	}

	if (request_firmware(&fw, "icom_asc.bin", &dev->dev) < 0) {
		dev_err(&dev->dev,"Unable to load icom_asc.bin firmware image\n");
		status = -1;
		goto load_code_exit;
	}

	if (fw->size > ICOM_DCE_IRAM_OFFSET) {
		dev_err(&dev->dev, "Invalid firmware image for icom_asc.bin found.\n");
		release_firmware(fw);
		status = -1;
		goto load_code_exit;
	}

	for (index = 0; index < fw->size; index++)
		new_page[index] = fw->data[index];

	writeb((char) ((fw->size + 16)/16), &icom_port->dram->mac_length);
	writel(temp_pci, &icom_port->dram->mac_load_addr);

	release_firmware(fw);

	/*Setting the syncReg to 0x80 causes adapter to start downloading
	   the personality code into adapter instruction RAM.
	   Once code is loaded, it will begin executing and, based on
	   information provided above, will start DMAing data from
	   shared memory to adapter DRAM.
	 */
	/* the wait loop below verifies this write operation has been done
	   and processed
	*/
	writeb(START_DOWNLOAD, &icom_port->dram->sync);

	/* Wait max 1 Sec for data download and processor to start */
	for (index = 0; index < 10; index++) {
		msleep(100);
		if (readb(&icom_port->dram->misc_flags) & ICOM_HDW_ACTIVE)
			break;
	}

	if (index == 10)
		status = -1;

	/*
	 * check Cable ID
	 */
	cable_id = readb(&icom_port->dram->cable_id);

	if (cable_id & ICOM_CABLE_ID_VALID) {
		/* Get cable ID into the lower 4 bits (standard form) */
		cable_id = (cable_id & ICOM_CABLE_ID_MASK) >> 4;
		icom_port->cable_id = cable_id;
	} else {
		dev_err(&dev->dev, "Invalid or no cable attached\n");
		icom_port->cable_id = NO_CABLE;
	}

load_code_exit:

	if (status != 0) {
		/* Clear out any pending interrupts */
		writew(0x3FFF, icom_port->int_reg);

		/* Turn off port */
		writeb(ICOM_DISABLE, &(icom_port->dram->disable));

		/* Stop processor */
		stop_processor(icom_port);

		dev_err(&dev->dev, "Port not operational\n");
	}

	if (new_page != NULL)
		pci_free_consistent(dev, 4096, new_page, temp_pci);
}
Example #24
static int __devinit get_port_memory(struct icom_port *icom_port)
{
	int index;
	unsigned long stgAddr;
	unsigned long startStgAddr;
	unsigned long offset;
	struct pci_dev *dev = icom_port->adapter->pci_dev;

	icom_port->xmit_buf =
	    pci_alloc_consistent(dev, 4096, &icom_port->xmit_buf_pci);
	if (!icom_port->xmit_buf) {
		dev_err(&dev->dev, "Can not allocate Transmit buffer\n");
		return -ENOMEM;
	}

	trace(icom_port, "GET_PORT_MEM",
	      (unsigned long) icom_port->xmit_buf);

	icom_port->recv_buf =
	    pci_alloc_consistent(dev, 4096, &icom_port->recv_buf_pci);
	if (!icom_port->recv_buf) {
		dev_err(&dev->dev, "Can not allocate Receive buffer\n");
		free_port_memory(icom_port);
		return -ENOMEM;
	}
	trace(icom_port, "GET_PORT_MEM",
	      (unsigned long) icom_port->recv_buf);

	icom_port->statStg =
	    pci_alloc_consistent(dev, 4096, &icom_port->statStg_pci);
	if (!icom_port->statStg) {
		dev_err(&dev->dev, "Can not allocate Status buffer\n");
		free_port_memory(icom_port);
		return -ENOMEM;
	}
	trace(icom_port, "GET_PORT_MEM",
	      (unsigned long) icom_port->statStg);

	icom_port->xmitRestart =
	    pci_alloc_consistent(dev, 4096, &icom_port->xmitRestart_pci);
	if (!icom_port->xmitRestart) {
		dev_err(&dev->dev,
			"Can not allocate xmit Restart buffer\n");
		free_port_memory(icom_port);
		return -ENOMEM;
	}

	memset(icom_port->statStg, 0, 4096);

	/* FODs: Frame Out Descriptor Queue, this is a FIFO queue that
           indicates that frames are to be transmitted
	*/

	stgAddr = (unsigned long) icom_port->statStg;
	for (index = 0; index < NUM_XBUFFS; index++) {
		trace(icom_port, "FOD_ADDR", stgAddr);
		stgAddr = stgAddr + sizeof(icom_port->statStg->xmit[0]);
		if (index < (NUM_XBUFFS - 1)) {
			memset(&icom_port->statStg->xmit[index], 0, sizeof(struct xmit_status_area));
			icom_port->statStg->xmit[index].leLengthASD =
			    (unsigned short int) cpu_to_le16(XMIT_BUFF_SZ);
			trace(icom_port, "FOD_ADDR", stgAddr);
			trace(icom_port, "FOD_XBUFF",
			      (unsigned long) icom_port->xmit_buf);
			icom_port->statStg->xmit[index].leBuffer =
			    cpu_to_le32(icom_port->xmit_buf_pci);
		} else if (index == (NUM_XBUFFS - 1)) {
			memset(&icom_port->statStg->xmit[index], 0, sizeof(struct xmit_status_area));
			icom_port->statStg->xmit[index].leLengthASD =
			    (unsigned short int) cpu_to_le16(XMIT_BUFF_SZ);
			trace(icom_port, "FOD_XBUFF",
			      (unsigned long) icom_port->xmit_buf);
			icom_port->statStg->xmit[index].leBuffer =
			    cpu_to_le32(icom_port->xmit_buf_pci);
		} else {
			memset(&icom_port->statStg->xmit[index], 0, sizeof(struct xmit_status_area));
		}
	}
	/* FIDs */
	startStgAddr = stgAddr;

	/* fill in every entry, even if no buffer */
	for (index = 0; index <  NUM_RBUFFS; index++) {
		trace(icom_port, "FID_ADDR", stgAddr);
		stgAddr = stgAddr + sizeof(icom_port->statStg->rcv[0]);
		icom_port->statStg->rcv[index].leLength = 0;
		icom_port->statStg->rcv[index].WorkingLength =
		    (unsigned short int) cpu_to_le16(RCV_BUFF_SZ);
		if (index < (NUM_RBUFFS - 1) ) {
			offset = stgAddr - (unsigned long) icom_port->statStg;
			icom_port->statStg->rcv[index].leNext =
			      cpu_to_le32(icom_port-> statStg_pci + offset);
			trace(icom_port, "FID_RBUFF",
			      (unsigned long) icom_port->recv_buf);
			icom_port->statStg->rcv[index].leBuffer =
			    cpu_to_le32(icom_port->recv_buf_pci);
		} else if (index == (NUM_RBUFFS -1) ) {
			offset = startStgAddr - (unsigned long) icom_port->statStg;
			icom_port->statStg->rcv[index].leNext =
			    cpu_to_le32(icom_port-> statStg_pci + offset);
			trace(icom_port, "FID_RBUFF",
			      (unsigned long) icom_port->recv_buf + 2048);
			icom_port->statStg->rcv[index].leBuffer =
			    cpu_to_le32(icom_port->recv_buf_pci + 2048);
		} else {
			icom_port->statStg->rcv[index].leNext = 0;
			icom_port->statStg->rcv[index].leBuffer = 0;
		}
	}

	return 0;
}
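Every error path above leans on free_port_memory(); a sketch of that helper, assuming it releases whichever of the four 4096-byte buffers were successfully allocated:

static void free_port_memory(struct icom_port *icom_port)
{
	struct pci_dev *dev = icom_port->adapter->pci_dev;

	if (icom_port->recv_buf) {
		pci_free_consistent(dev, 4096, icom_port->recv_buf,
				    icom_port->recv_buf_pci);
		icom_port->recv_buf = NULL;
	}
	if (icom_port->xmit_buf) {
		pci_free_consistent(dev, 4096, icom_port->xmit_buf,
				    icom_port->xmit_buf_pci);
		icom_port->xmit_buf = NULL;
	}
	if (icom_port->statStg) {
		pci_free_consistent(dev, 4096, icom_port->statStg,
				    icom_port->statStg_pci);
		icom_port->statStg = NULL;
	}
	if (icom_port->xmitRestart) {
		pci_free_consistent(dev, 4096, icom_port->xmitRestart,
				    icom_port->xmitRestart_pci);
		icom_port->xmitRestart = NULL;
	}
}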
Example #25
int cx25821_upstream_buffer_prepare(struct cx25821_dev *dev,
                                    struct sram_channel *sram_ch, int bpl)
{
    int ret = 0;
    dma_addr_t dma_addr;
    dma_addr_t data_dma_addr;

    if (dev->_dma_virt_addr != NULL)
        pci_free_consistent(dev->pci, dev->upstream_riscbuf_size,
                            dev->_dma_virt_addr, dev->_dma_phys_addr);

    dev->_dma_virt_addr = pci_alloc_consistent(dev->pci,
                          dev->upstream_riscbuf_size, &dma_addr);
    dev->_dma_virt_start_addr = dev->_dma_virt_addr;
    dev->_dma_phys_start_addr = dma_addr;
    dev->_dma_phys_addr = dma_addr;
    dev->_risc_size = dev->upstream_riscbuf_size;

    if (!dev->_dma_virt_addr) {
        pr_err("FAILED to allocate memory for Risc buffer! Returning\n");
        return -ENOMEM;
    }

    /* Clear memory at address */
    memset(dev->_dma_virt_addr, 0, dev->_risc_size);

    if (dev->_data_buf_virt_addr != NULL)
        pci_free_consistent(dev->pci, dev->upstream_databuf_size,
                            dev->_data_buf_virt_addr,
                            dev->_data_buf_phys_addr);
    /* For Video Data buffer allocation */
    dev->_data_buf_virt_addr = pci_alloc_consistent(dev->pci,
                               dev->upstream_databuf_size, &data_dma_addr);
    dev->_data_buf_phys_addr = data_dma_addr;
    dev->_data_buf_size = dev->upstream_databuf_size;

    if (!dev->_data_buf_virt_addr) {
        pr_err("FAILED to allocate memory for data buffer! Returning\n");
        return -ENOMEM;
    }

    /* Clear memory at address */
    memset(dev->_data_buf_virt_addr, 0, dev->_data_buf_size);

    ret = cx25821_openfile(dev, sram_ch);
    if (ret < 0)
        return ret;

    /* Create RISC programs */
    ret = cx25821_risc_buffer_upstream(dev, dev->pci, 0, bpl,
                                       dev->_lines_count);
    if (ret < 0) {
        pr_info("Failed creating Video Upstream Risc programs!\n");
        goto error;
    }

    return 0;

error:
    return ret;
}
Example #26
static int
rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct netdev_private *np;
	static int card_idx;
	int chip_idx = ent->driver_data;
	int err, irq;
	void __iomem *ioaddr;
	static int version_printed;
	void *ring_space;
	dma_addr_t ring_dma;

	if (!version_printed++)
		printk ("%s", version);

	err = pci_enable_device (pdev);
	if (err)
		return err;

	irq = pdev->irq;
	err = pci_request_regions (pdev, "dl2k");
	if (err)
		goto err_out_disable;

	pci_set_master (pdev);

	err = -ENOMEM;

	dev = alloc_etherdev (sizeof (*np));
	if (!dev)
		goto err_out_res;
	SET_NETDEV_DEV(dev, &pdev->dev);

	np = netdev_priv(dev);

	/* IO registers range. */
	ioaddr = pci_iomap(pdev, 0, 0);
	if (!ioaddr)
		goto err_out_dev;
	np->eeprom_addr = ioaddr;

#ifdef MEM_MAPPING
	/* MM registers range. */
	ioaddr = pci_iomap(pdev, 1, 0);
	if (!ioaddr)
		goto err_out_iounmap;
#endif
	np->ioaddr = ioaddr;
	np->chip_id = chip_idx;
	np->pdev = pdev;
	spin_lock_init (&np->tx_lock);
	spin_lock_init (&np->rx_lock);

	/* Parse manual configuration */
	np->an_enable = 1;
	np->tx_coalesce = 1;
	if (card_idx < MAX_UNITS) {
		if (media[card_idx] != NULL) {
			np->an_enable = 0;
			if (strcmp (media[card_idx], "auto") == 0 ||
			    strcmp (media[card_idx], "autosense") == 0 ||
			    strcmp (media[card_idx], "0") == 0 ) {
				np->an_enable = 2;
			} else if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
			    strcmp (media[card_idx], "4") == 0) {
				np->speed = 100;
				np->full_duplex = 1;
			} else if (strcmp (media[card_idx], "100mbps_hd") == 0 ||
				   strcmp (media[card_idx], "3") == 0) {
				np->speed = 100;
				np->full_duplex = 0;
			} else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
				   strcmp (media[card_idx], "2") == 0) {
				np->speed = 10;
				np->full_duplex = 1;
			} else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
				   strcmp (media[card_idx], "1") == 0) {
				np->speed = 10;
				np->full_duplex = 0;
			} else if (strcmp (media[card_idx], "1000mbps_fd") == 0 ||
				 strcmp (media[card_idx], "6") == 0) {
				np->speed=1000;
				np->full_duplex=1;
			} else if (strcmp (media[card_idx], "1000mbps_hd") == 0 ||
				 strcmp (media[card_idx], "5") == 0) {
				np->speed = 1000;
				np->full_duplex = 0;
			} else {
				np->an_enable = 1;
			}
		}
		if (jumbo[card_idx] != 0) {
			np->jumbo = 1;
			dev->mtu = MAX_JUMBO;
		} else {
			np->jumbo = 0;
			if (mtu[card_idx] > 0 && mtu[card_idx] < PACKET_SIZE)
				dev->mtu = mtu[card_idx];
		}
		np->vlan = (vlan[card_idx] > 0 && vlan[card_idx] < 4096) ?
		    vlan[card_idx] : 0;
		if (rx_coalesce > 0 && rx_timeout > 0) {
			np->rx_coalesce = rx_coalesce;
			np->rx_timeout = rx_timeout;
			np->coalesce = 1;
		}
		np->tx_flow = (tx_flow == 0) ? 0 : 1;
		np->rx_flow = (rx_flow == 0) ? 0 : 1;

		if (tx_coalesce < 1)
			tx_coalesce = 1;
		else if (tx_coalesce > TX_RING_SIZE-1)
			tx_coalesce = TX_RING_SIZE - 1;
	}
	dev->netdev_ops = &netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->ethtool_ops = &ethtool_ops;
#if 0
	dev->features = NETIF_F_IP_CSUM;
#endif
	pci_set_drvdata (pdev, dev);

	ring_space = pci_alloc_consistent (pdev, TX_TOTAL_SIZE, &ring_dma);
	if (!ring_space)
		goto err_out_iounmap;
	np->tx_ring = ring_space;
	np->tx_ring_dma = ring_dma;

	ring_space = pci_alloc_consistent (pdev, RX_TOTAL_SIZE, &ring_dma);
	if (!ring_space)
		goto err_out_unmap_tx;
	np->rx_ring = ring_space;
	np->rx_ring_dma = ring_dma;

	/* Parse eeprom data */
	parse_eeprom (dev);

	/* Find PHY address */
	err = find_miiphy (dev);
	if (err)
		goto err_out_unmap_rx;

	/* Fiber device? */
	np->phy_media = (dr16(ASICCtrl) & PhyMedia) ? 1 : 0;
	np->link_status = 0;
	/* Set media and reset PHY */
	if (np->phy_media) {
		/* default Auto-Negotiation for fiber devices */
	 	if (np->an_enable == 2) {
			np->an_enable = 1;
		}
		mii_set_media_pcs (dev);
	} else {
		/* Auto-Negotiation is mandatory for 1000BASE-T,
		   IEEE 802.3ab Annex 28D page 14 */
		if (np->speed == 1000)
			np->an_enable = 1;
		mii_set_media (dev);
	}

	err = register_netdev (dev);
	if (err)
		goto err_out_unmap_rx;

	card_idx++;

	printk (KERN_INFO "%s: %s, %pM, IRQ %d\n",
		dev->name, np->name, dev->dev_addr, irq);
	if (tx_coalesce > 1)
		printk(KERN_INFO "tx_coalesce:\t%d packets\n",
				tx_coalesce);
	if (np->coalesce)
		printk(KERN_INFO
		       "rx_coalesce:\t%d packets\n"
		       "rx_timeout: \t%d ns\n",
				np->rx_coalesce, np->rx_timeout*640);
	if (np->vlan)
		printk(KERN_INFO "vlan(id):\t%d\n", np->vlan);
	return 0;

err_out_unmap_rx:
	pci_free_consistent (pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
err_out_unmap_tx:
	pci_free_consistent (pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
err_out_iounmap:
#ifdef MEM_MAPPING
	pci_iounmap(pdev, np->ioaddr);
#endif
	pci_iounmap(pdev, np->eeprom_addr);
err_out_dev:
	free_netdev (dev);
err_out_res:
	pci_release_regions (pdev);
err_out_disable:
	pci_disable_device (pdev);
	return err;
}
Example #27
static int hififo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
    int i;
    int rc;
    dev_t dev = 0;
    char tmpstr[16];
    struct hififo_dev *drvdata;
    struct hififo_fifo * fifo;

    drvdata = devm_kzalloc(&pdev->dev,
                           sizeof(struct hififo_dev),
                           GFP_KERNEL);
    if (!drvdata) {
        printk(KERN_ERR DEVICE_NAME ": failed to alloc drvdata\n");
        return -ENOMEM;
    }

    rc = pcim_enable_device(pdev);
    if(rc) {
        printk(KERN_ERR DEVICE_NAME ": pcim_enable_device() failed\n");
        return rc;
    }

    rc = pci_request_regions(pdev, DEVICE_NAME);
    if(rc < 0) {
        printk(KERN_ERR DEVICE_NAME ": pci_request_regions() failed\n");
        return rc;
    }

    printk(KERN_INFO DEVICE_NAME\
           ": Found Harmon Instruments PCI Express interface board\n");
    pci_set_drvdata(pdev, drvdata);

    pci_set_master(pdev); /* returns void */

    pci_set_dma_mask(pdev, 0xFFFFFFFFFFFFFFFF);

    pci_set_consistent_dma_mask(pdev, 0xFFFFFFFFFFFFFFFF);

    rc = pci_enable_msi(pdev);
    if(rc < 0) {
        printk(KERN_ERR DEVICE_NAME ": pci_enable_msi() failed\n");
        return rc;
    }

    rc = devm_request_irq(&pdev->dev,
                          pdev->irq,
                          (irq_handler_t) hififo_interrupt,
                          0, /* flags */
                          DEVICE_NAME,
                          drvdata);
    if(rc) {
        printk(KERN_ERR DEVICE_NAME ": request_irq() failed\n");
        return rc;
    }

    drvdata->pio_reg_base = (u64 *) pcim_iomap(pdev, 0, 0);
    printk(KERN_INFO DEVICE_NAME					\
           ": pci_resource_start(dev, 0) = 0x%.8llx, virt = 0x%.16llx\n",
           (u64) pci_resource_start(pdev, 0),
           (u64) drvdata->pio_reg_base);

    for(i=0; i<8; i++)
        printk("bar0[%d] = %.8x\n", i, (u32) readreg(drvdata, i));
    drvdata->idreg = readreg(drvdata, REG_ID);
    drvdata->build = readreg(drvdata, REG_BUILD);
    printk(KERN_INFO DEVICE_NAME " FPGA build = 0x%.8X\n", drvdata->build);
    drvdata->nfifos = 0;
    for(i=0; i<MAX_FIFOS; i++) {
        if(drvdata->idreg & (1 << i))
            drvdata->nfifos ++;
    }

    if(drvdata->nfifos == 0) {
        printk(KERN_INFO DEVICE_NAME ": no fifos reported on card\n");
        return -1;
    }

    /* reset it */
    writereg(drvdata, 0xFFFF, REG_RESET_SET);
    udelay(10); /* wait for completion of anything that was running */
    writereg(drvdata, 0xFFFF, REG_RESET_CLEAR);

    rc = alloc_chrdev_region(&dev, 0, drvdata->nfifos, DEVICE_NAME);
    if (rc) {
        printk(KERN_ERR DEVICE_NAME ": alloc_chrdev_region() failed\n");
        return rc;
    }

    drvdata->major = MAJOR(dev);

    for(i=0; i<MAX_FIFOS; i++) {
        if((drvdata->idreg & (1 << i)) == 0)
            continue; /* fifo not present */
        fifo = devm_kzalloc(&pdev->dev,
                            sizeof(struct hififo_fifo),
                            GFP_KERNEL);
        if (!fifo) {
            printk(KERN_ERR DEVICE_NAME\
                   ": failed to alloc hififo_fifo\n");
            return -ENOMEM;
        }
        drvdata->fifo[i] = fifo;
        if(i<MAX_FIFOS/2) {
            cdev_init(&fifo->cdev, &fops_fpc); /* returns void */
            fifo->cdev.ops = &fops_fpc;
        }
        else {
            cdev_init(&fifo->cdev, &fops_tpc); /* returns void */
            fifo->cdev.ops = &fops_tpc;
        }

        fifo->cdev.owner = THIS_MODULE;

        rc = cdev_add (&fifo->cdev, MKDEV(MAJOR(dev), i), 1);
        if (rc) {
            printk(KERN_NOTICE DEVICE_NAME\
                   ": Error %d adding cdev\n", rc);
            return rc;
        }
        sprintf(tmpstr, "hififo_%d_%d", hififo_count, i);
        device_create(hififo_class,
                      NULL,
                      MKDEV(MAJOR(dev), i),
                      NULL,
                      tmpstr);
        fifo->n = i;
        spin_lock_init(&fifo->lock_open);
        fifo->local_base = drvdata->pio_reg_base+8+i;
        init_waitqueue_head(&fifo->queue);
        fifo->build = drvdata->build;
        mutex_init(&fifo->sem);
        fifo->ring = pci_alloc_consistent(pdev,
                                          BUFFER_SIZE,
                                          &fifo->ring_dma_addr);
        if (!fifo->ring)
            return -ENOMEM;
    }
    hififo_count++;
    /* enable interrupts */
    writereg(drvdata, 0xFFFF, REG_INTERRUPT);
    return 0;
}
Example #28
static int p54p_probe(struct pci_dev *pdev,
				const struct pci_device_id *id)
{
	struct p54p_priv *priv;
	struct ieee80211_hw *dev;
	unsigned long mem_addr, mem_len;
	int err;

	pci_dev_get(pdev);
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable new PCI device\n");
		return err;
	}

	mem_addr = pci_resource_start(pdev, 0);
	mem_len = pci_resource_len(pdev, 0);
	if (mem_len < sizeof(struct p54p_csr)) {
		dev_err(&pdev->dev, "Too short PCI resources\n");
		err = -ENODEV;
		goto err_disable_dev;
	}

	err = pci_request_regions(pdev, "p54pci");
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources\n");
		goto err_disable_dev;
	}

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (!err)
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err) {
		dev_err(&pdev->dev, "No suitable DMA available\n");
		goto err_free_reg;
	}

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);

	pci_write_config_byte(pdev, 0x40, 0);
	pci_write_config_byte(pdev, 0x41, 0);

	dev = p54_init_common(sizeof(*priv));
	if (!dev) {
		dev_err(&pdev->dev, "ieee80211 alloc failed\n");
		err = -ENOMEM;
		goto err_free_reg;
	}

	priv = dev->priv;
	priv->pdev = pdev;

	init_completion(&priv->fw_loaded);
	SET_IEEE80211_DEV(dev, &pdev->dev);
	pci_set_drvdata(pdev, dev);

	priv->map = ioremap(mem_addr, mem_len);
	if (!priv->map) {
		dev_err(&pdev->dev, "Cannot map device memory\n");
		err = -ENOMEM;
		goto err_free_dev;
	}

	priv->ring_control = pci_alloc_consistent(pdev, sizeof(*priv->ring_control),
						  &priv->ring_control_dma);
	if (!priv->ring_control) {
		dev_err(&pdev->dev, "Cannot allocate rings\n");
		err = -ENOMEM;
		goto err_iounmap;
	}
	priv->common.open = p54p_open;
	priv->common.stop = p54p_stop;
	priv->common.tx = p54p_tx;

	spin_lock_init(&priv->lock);
	tasklet_init(&priv->tasklet, p54p_tasklet, (unsigned long)dev);

	err = request_firmware_nowait(THIS_MODULE, 1, "isl3886pci",
				      &priv->pdev->dev, GFP_KERNEL,
				      priv, p54p_firmware_step2);
	if (!err)
		return 0;

	pci_free_consistent(pdev, sizeof(*priv->ring_control),
			    priv->ring_control, priv->ring_control_dma);

 err_iounmap:
	iounmap(priv->map);

 err_free_dev:
	p54_free_common(dev);

 err_free_reg:
	pci_release_regions(pdev);
 err_disable_dev:
	pci_disable_device(pdev);
	pci_dev_put(pdev);
	return err;
}
Example #29
static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw)
{
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_rx_desc *entry = NULL;
	int i, rx_queue_idx;
	u8 tmp_one = 1;

	/*
	 *rx_queue_idx 0:RX_MPDU_QUEUE
	 *rx_queue_idx 1:RX_CMD_QUEUE
	 */
	for (rx_queue_idx = 0; rx_queue_idx < RTL_PCI_MAX_RX_QUEUE;
	     rx_queue_idx++) {
		rtlpci->rx_ring[rx_queue_idx].desc =
		    pci_alloc_consistent(rtlpci->pdev,
					 sizeof(*rtlpci->rx_ring[rx_queue_idx].
						desc) * rtlpci->rxringcount,
					 &rtlpci->rx_ring[rx_queue_idx].dma);

		if (!rtlpci->rx_ring[rx_queue_idx].desc ||
		    (unsigned long)rtlpci->rx_ring[rx_queue_idx].desc & 0xFF) {
			RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
				 ("Cannot allocate RX ring\n"));
			return -ENOMEM;
		}

		memset(rtlpci->rx_ring[rx_queue_idx].desc, 0,
		       sizeof(*rtlpci->rx_ring[rx_queue_idx].desc) *
		       rtlpci->rxringcount);

		rtlpci->rx_ring[rx_queue_idx].idx = 0;

		for (i = 0; i < rtlpci->rxringcount; i++) {
			struct sk_buff *skb =
			    dev_alloc_skb(rtlpci->rxbuffersize);
			u32 bufferaddress;
			if (!skb)
				return 0;
			entry = &rtlpci->rx_ring[rx_queue_idx].desc[i];

			/*skb->dev = dev; */

			rtlpci->rx_ring[rx_queue_idx].rx_buf[i] = skb;

			/*
			 *just set skb->cb to mapping addr
			 *for pci_unmap_single use
			 */
			*((dma_addr_t *) skb->cb) =
			    pci_map_single(rtlpci->pdev, skb_tail_pointer(skb),
					   rtlpci->rxbuffersize,
					   PCI_DMA_FROMDEVICE);

			bufferaddress = (u32)(*((dma_addr_t *)skb->cb));
			rtlpriv->cfg->ops->set_desc((u8 *)entry, false,
						    HW_DESC_RXBUFF_ADDR,
						    (u8 *)&bufferaddress);
			rtlpriv->cfg->ops->set_desc((u8 *)entry, false,
						    HW_DESC_RXPKT_LEN,
						    (u8 *)&rtlpci->
						    rxbuffersize);
			rtlpriv->cfg->ops->set_desc((u8 *) entry, false,
						    HW_DESC_RXOWN,
						    (u8 *)&tmp_one);
		}

		rtlpriv->cfg->ops->set_desc((u8 *) entry, false,
					    HW_DESC_RXERO, (u8 *)&tmp_one);
	}
	return 0;
}
Example #30
static int vbi_workaround(struct saa7146_dev *dev)
{
	struct saa7146_vv *vv = dev->vv_data;

	u32          *cpu;
	dma_addr_t   dma_addr;

	int count = 0;
	int i;

	DECLARE_WAITQUEUE(wait, current);

	DEB_VBI("dev:%p\n", dev);

	/* once again, a bug in the saa7146: the brs acquisition
	   is buggy and especially the BXO-counter does not work
	   as specified. there is this workaround, but please
	   don't let me explain it. ;-) */

	cpu = pci_alloc_consistent(dev->pci, 4096, &dma_addr);
	if (NULL == cpu)
		return -ENOMEM;

	/* setup some basic programming, just for the workaround */
	saa7146_write(dev, BASE_EVEN3,	dma_addr);
	saa7146_write(dev, BASE_ODD3,	dma_addr+vbi_pixel_to_capture);
	saa7146_write(dev, PROT_ADDR3,	dma_addr+4096);
	saa7146_write(dev, PITCH3,	vbi_pixel_to_capture);
	saa7146_write(dev, BASE_PAGE3,	0x0);
	saa7146_write(dev, NUM_LINE_BYTE3, (2<<16)|((vbi_pixel_to_capture)<<0));
	saa7146_write(dev, MC2, MASK_04|MASK_20);

	/* load brs-control register */
	WRITE_RPS1(CMD_WR_REG | (1 << 8) | (BRS_CTRL/4));
	/* BXO = 1h, BRS to outbound */
	WRITE_RPS1(0xc000008c);
	/* wait for vbi_a or vbi_b*/
	if ( 0 != (SAA7146_USE_PORT_B_FOR_VBI & dev->ext_vv_data->flags)) {
		DEB_D("...using port b\n");
		WRITE_RPS1(CMD_PAUSE | CMD_OAN | CMD_SIG1 | CMD_E_FID_B);
		WRITE_RPS1(CMD_PAUSE | CMD_OAN | CMD_SIG1 | CMD_O_FID_B);
/*
		WRITE_RPS1(CMD_PAUSE | MASK_09);
*/
	} else {
		DEB_D("...using port a\n");
		WRITE_RPS1(CMD_PAUSE | MASK_10);
	}
	/* upload brs */
	WRITE_RPS1(CMD_UPLOAD | MASK_08);
	/* load brs-control register */
	WRITE_RPS1(CMD_WR_REG | (1 << 8) | (BRS_CTRL/4));
	/* BYO = 1, BXO = NQBIL (=1728 for PAL, for NTSC this is 858*2) - NumByte3 (=1440) = 288 */
	WRITE_RPS1(((1728-(vbi_pixel_to_capture)) << 7) | MASK_19);
	/* wait for brs_done */
	WRITE_RPS1(CMD_PAUSE | MASK_08);
	/* upload brs */
	WRITE_RPS1(CMD_UPLOAD | MASK_08);
	/* load video-dma3 NumLines3 and NumBytes3 */
	WRITE_RPS1(CMD_WR_REG | (1 << 8) | (NUM_LINE_BYTE3/4));
	/* dev->vbi_count*2 lines, 720 pixel (= 1440 Bytes) */
	WRITE_RPS1((2 << 16) | (vbi_pixel_to_capture));
	/* load brs-control register */
	WRITE_RPS1(CMD_WR_REG | (1 << 8) | (BRS_CTRL/4));
	/* Set BRS right: note: this is an experimental value for BXO (=> PAL!) */
	WRITE_RPS1((540 << 7) | (5 << 19));  // 5 == vbi_start
	/* wait for brs_done */
	WRITE_RPS1(CMD_PAUSE | MASK_08);
	/* upload brs and video-dma3*/
	WRITE_RPS1(CMD_UPLOAD | MASK_08 | MASK_04);
	/* load mc2 register: enable dma3 */
	WRITE_RPS1(CMD_WR_REG | (1 << 8) | (MC1/4));
	WRITE_RPS1(MASK_20 | MASK_04);
	/* generate interrupt */
	WRITE_RPS1(CMD_INTERRUPT);
	/* stop rps1 */
	WRITE_RPS1(CMD_STOP);

	/* we have to do the workaround twice to be sure that
	   everything is ok */
	for(i = 0; i < 2; i++) {

		/* indicate to the irq handler that we do the workaround */
		saa7146_write(dev, MC2, MASK_31|MASK_15);

		saa7146_write(dev, NUM_LINE_BYTE3, (1<<16)|(2<<0));
		saa7146_write(dev, MC2, MASK_04|MASK_20);

		/* enable rps1 irqs */
		SAA7146_IER_ENABLE(dev,MASK_28);

		/* prepare to wait to be woken up by the irq-handler */
		add_wait_queue(&vv->vbi_wq, &wait);
		current->state = TASK_INTERRUPTIBLE;

		/* start rps1 to enable workaround */
		saa7146_write(dev, RPS_ADDR1, dev->d_rps1.dma_handle);
		saa7146_write(dev, MC1, (MASK_13 | MASK_29));

		schedule();

		DEB_VBI("brs bug workaround %d/1\n", i);

		remove_wait_queue(&vv->vbi_wq, &wait);
		current->state = TASK_RUNNING;

		/* disable rps1 irqs */
		SAA7146_IER_DISABLE(dev,MASK_28);

		/* stop video-dma3 */
		saa7146_write(dev, MC1, MASK_20);

		if(signal_pending(current)) {

			DEB_VBI("aborted (rps:0x%08x)\n",
				saa7146_read(dev, RPS_ADDR1));

			/* stop rps1 for sure */
			saa7146_write(dev, MC1, MASK_29);

			pci_free_consistent(dev->pci, 4096, cpu, dma_addr);
			return -EINTR;
		}
	}

	pci_free_consistent(dev->pci, 4096, cpu, dma_addr);
	return 0;
}