Example #1
/* Allocates a contiguous real buffer and creates mappings over it.
 * Returns the virtual address of the buffer and sets dma_handle
 * to the dma address (mapping) of the first page.
 */
static void *pci_iommu_alloc_coherent(struct device *hwdev, size_t size,
			   dma_addr_t *dma_handle, gfp_t flag)
{
	return iommu_alloc_coherent(device_to_table(hwdev), size, dma_handle,
			device_to_mask(hwdev), flag,
			pcibus_to_node(to_pci_dev(hwdev)->bus));
}
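For context, a driver never calls this hook directly: it allocates through the generic DMA API, and the platform dispatches the call here. A minimal caller-side sketch (the helper name and the one-page size are illustrative, not from the file above):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Hypothetical caller: request one page of coherent DMA memory via the
 * generic API; on this platform the request is serviced by
 * pci_iommu_alloc_coherent() above. */
static void *example_alloc_cmd_ring(struct pci_dev *pdev,
				    dma_addr_t *bus_addr)
{
	return dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
				  bus_addr, GFP_KERNEL);
}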
Example #2
File: pci_dma.c  Project: 274914765/C
/**
 * sn_dma_alloc_coherent - allocate memory for coherent DMA
 * @dev: device to allocate for
 * @size: size of the region
 * @dma_handle: DMA (bus) address
 * @flags: memory allocation flags
 *
 * dma_alloc_coherent() returns a pointer to a memory region suitable for
 * coherent DMA traffic to/from a PCI device.  On SN platforms, this means
 * that @dma_handle will have the %PCIIO_DMA_CMD flag set.
 *
 * This interface is usually used for "command" streams (e.g. the command
 * queue for a SCSI controller).  See Documentation/DMA-API.txt for
 * more information.
 */
void *sn_dma_alloc_coherent(struct device *dev, size_t size,
                dma_addr_t *dma_handle, gfp_t flags)
{
    void *cpuaddr;
    unsigned long phys_addr;
    int node;
    struct pci_dev *pdev = to_pci_dev(dev);
    struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);

    BUG_ON(dev->bus != &pci_bus_type);

    /*
     * Allocate the memory.
     */
    node = pcibus_to_node(pdev->bus);
    if (likely(node >= 0)) {
        struct page *p = alloc_pages_node(node, flags, get_order(size));

        if (likely(p))
            cpuaddr = page_address(p);
        else
            return NULL;
    } else
        cpuaddr = (void *)__get_free_pages(flags, get_order(size));

    if (unlikely(!cpuaddr))
        return NULL;

    memset(cpuaddr, 0x0, size);

    /* physical addr. of the memory we just got */
    phys_addr = __pa(cpuaddr);

    /*
     * 64 bit address translations should never fail.
     * 32 bit translations can fail if there are insufficient mapping
     * resources.
     */

    *dma_handle = provider->dma_map_consistent(pdev, phys_addr, size,
                           SN_DMA_ADDR_PHYS);
    if (!*dma_handle) {
        printk(KERN_ERR "%s: out of ATEs\n", __func__);
        free_pages((unsigned long)cpuaddr, get_order(size));
        return NULL;
    }

    return cpuaddr;
}
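The release path has to undo both halves of this allocation: drop the provider's ATE mapping, then return the pages. A hedged sketch of what the matching free routine likely looks like, modeled on the allocator above (the provider hook name dma_unmap and its direction argument are assumptions about the SN provider interface, not taken from this file):

/* Sketch of the counterpart to sn_dma_alloc_coherent(); treat the
 * provider hook as an approximation of the real SN interface. */
void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
                          dma_addr_t dma_handle)
{
    struct pci_dev *pdev = to_pci_dev(dev);
    struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);

    BUG_ON(dev->bus != &pci_bus_type);

    /* Assumed hook: release the mapping created by
     * provider->dma_map_consistent() in the allocator above. */
    provider->dma_unmap(pdev, dma_handle, DMA_BIDIRECTIONAL);
    free_pages((unsigned long)cpu_addr, get_order(size));
}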
Example #3
/* Allocate DMA memory on node near device */
noinline static void *
dma_alloc_pages(struct device *dev, gfp_t gfp, unsigned order)
{
	struct page *page;
	int node;
#ifdef CONFIG_PCI
	if (dev->bus == &pci_bus_type)
		node = pcibus_to_node(to_pci_dev(dev)->bus);
	else
#endif
		node = numa_node_id();

	if (node < first_node(node_online_map))
		node = first_node(node_online_map);

	page = alloc_pages_node(node, gfp, order);
	return page ? page_address(page) : NULL;
}
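Note that this helper takes a page order, not a byte count, so callers convert with get_order() first. A hypothetical call-site fragment, assuming dev, gfp and size are in scope:

	/* Hypothetical call site: round the byte count up to a page
	 * order before requesting node-local pages; __GFP_ZERO saves
	 * a separate memset of the returned buffer. */
	void *buf = dma_alloc_pages(dev, gfp | __GFP_ZERO,
				    get_order(size));
	if (!buf)
		return NULL;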
Example #4
/* Set correct numa_node information for AMD NB functions */
static void quirk_amd_nb_node(struct pci_dev *dev)
{
	struct pci_dev *nb_ht;
	unsigned int devfn;
	u32 node;
	u32 val;

	devfn = PCI_DEVFN(PCI_SLOT(dev->devfn), 0);
	nb_ht = pci_get_slot(dev->bus, devfn);
	if (!nb_ht)
		return;

	pci_read_config_dword(nb_ht, 0x60, &val);
	node = pcibus_to_node(dev->bus) | (val & 7);
	/*
	 * Some hardware may return an invalid node ID,
	 * so check it first:
	 */
	if (node_online(node))
		set_dev_node(&dev->dev, node);
	pci_dev_put(nb_ht);
}
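A quirk like this only takes effect once it is registered for the matching vendor/device IDs through the kernel's PCI fixup machinery. A sketch of one such registration (the kernel registers this quirk for several AMD northbridge function IDs; this shows just one, chosen as an example):

/* Run quirk_amd_nb_node() after probing any matching AMD K8
 * northbridge function. */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB,
			quirk_amd_nb_node);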