Example #1
static gfp_t __dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
		u64 *phys_mask)
{
	if (dev->bus_dma_mask && dev->bus_dma_mask < dma_mask)
		dma_mask = dev->bus_dma_mask;

	if (force_dma_unencrypted())
		*phys_mask = __dma_to_phys(dev, dma_mask);
	else
		*phys_mask = dma_to_phys(dev, dma_mask);

	/*
	 * Optimistically try the zone that the physical address mask falls
	 * into first.  If that returns memory that isn't actually addressable,
	 * we will fall back to the next lower zone and try again.
	 *
	 * Note that GFP_DMA32 and GFP_DMA are no-ops without the corresponding
	 * zones.
	 */
	if (*phys_mask <= DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
		return GFP_DMA;
	if (*phys_mask <= DMA_BIT_MASK(32))
		return GFP_DMA32;
	return 0;
}
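A hedged sketch of how a caller typically consumes this helper, modeled on the retry loop in kernel/dma/direct.c of the same era; example_alloc_addressable() and the simplified addressability check are assumptions, not code from the source:

static struct page *example_alloc_addressable(struct device *dev,
		size_t size, gfp_t gfp)
{
	u64 phys_mask;
	struct page *page;

	gfp |= __dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
					     &phys_mask);
again:
	page = alloc_pages(gfp, get_order(size));
	if (page && page_to_phys(page) + size - 1 > phys_mask) {
		/* Not actually addressable: retry one zone lower. */
		__free_pages(page, get_order(size));
		page = NULL;

		if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
		    !(gfp & (GFP_DMA32 | GFP_DMA))) {
			gfp |= GFP_DMA32;
			goto again;
		}
		if (IS_ENABLED(CONFIG_ZONE_DMA) && !(gfp & GFP_DMA)) {
			gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}
	}
	return page;
}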
Example #2
/*
 * Allocates/reserves the Platform memory resources early in the boot process.
 * This ignores any resources that are designated IORESOURCE_IO
 */
void __init platform_alloc_bootmem(void)
{
	int i;
	int total = 0;

	/*
	 * Get persistent memory data from the command line before allocating
	 * resources. This needs to happen before normal command-line parsing
	 * is done.
	 */
	pmem_setup_resource();

	/* Loop through looking for resources that want a particular address */
	for (i = 0; gp_resources[i].flags != 0; i++) {
		int size = resource_size(&gp_resources[i]);
		if ((gp_resources[i].start != 0) &&
			((gp_resources[i].flags & IORESOURCE_MEM) != 0)) {
			reserve_bootmem(dma_to_phys(gp_resources[i].start),
				size, 0);
			total += resource_size(&gp_resources[i]);
			pr_info("reserve resource %s at %08x (%u bytes)\n",
				gp_resources[i].name, gp_resources[i].start,
				resource_size(&gp_resources[i]));
		}
	}

	/* Loop through assigning addresses for those that are left */
	for (i = 0; gp_resources[i].flags != 0; i++) {
		int size = resource_size(&gp_resources[i]);
		if ((gp_resources[i].start == 0) &&
			((gp_resources[i].flags & IORESOURCE_MEM) != 0)) {
			void *mem = alloc_bootmem_pages(size);

			if (mem == NULL) {
				pr_err("Unable to allocate bootmem pages for %s\n",
				       gp_resources[i].name);
			} else {
				gp_resources[i].start =
					phys_to_dma(virt_to_phys(mem));
				gp_resources[i].end =
					gp_resources[i].start + size - 1;
				total += size;
				pr_info("allocate resource %s at %08x "
						"(%u bytes)\n",
					gp_resources[i].name,
					gp_resources[i].start, size);
			}
		}
	}

	pr_info("Total Platform driver memory allocation: 0x%08x\n", total);

	/* indicate resources that are platform I/O related */
	for (i = 0; gp_resources[i].flags != 0; i++) {
		if ((gp_resources[i].start != 0) &&
			((gp_resources[i].flags & IORESOURCE_IO) != 0)) {
			pr_info("reserved platform resource %s at %08x\n",
				gp_resources[i].name, gp_resources[i].start);
		}
	}
}
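For illustration only, the table both loops walk is a zero-terminated array of struct resource; the entry names and addresses below are invented. An entry with .start == 0 is allocated by the second loop (its size encoded in .end), while an entry with a fixed .start is reserved as-is by the first loop:

static struct resource gp_resources[] = {
	{
		.name  = "example framebuffer",	/* hypothetical entry */
		.start = 0,			/* 0: allocate at boot */
		.end   = 0x00400000 - 1,	/* encodes the size */
		.flags = IORESOURCE_MEM,
	},
	{
		.name  = "example mailbox",	/* hypothetical entry */
		.start = 0x10000000,		/* fixed: reserved as-is */
		.end   = 0x10000fff,
		.flags = IORESOURCE_MEM,
	},
	{ .flags = 0 },				/* terminator */
};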
Example #3
int mei_txe_dma_setup(struct mei_device *dev)
{
	struct mei_txe_hw *hw = to_txe_hw(dev);
	int err;

	err = mei_reserver_dma_acpi(dev);
	if (err)
		err = mei_alloc_dma(dev);

	if (err)
		return err;

	err = mei_txe_setup_satt2(dev,
		dma_to_phys(&dev->pdev->dev, hw->pool_paddr), hw->pool_size);

	if (err) {
		if (hw->pool_release)
			hw->pool_release(hw);
		return err;
	}

	hw->mdev = mei_mm_init(&dev->pdev->dev,
		hw->pool_vaddr, hw->pool_paddr, hw->pool_size);

	/* mei_mm_init() can return NULL as well as an ERR_PTR() */
	if (IS_ERR_OR_NULL(hw->mdev))
		return hw->mdev ? PTR_ERR(hw->mdev) : -ENOMEM;

	return 0;
}
Example #4
void x86_swiotlb_free_coherent(struct device *dev, size_t size,
				      void *vaddr, dma_addr_t dma_addr,
				      unsigned long attrs)
{
	if (is_swiotlb_buffer(dma_to_phys(dev, dma_addr)))
		swiotlb_free_coherent(dev, size, vaddr, dma_addr);
	else
		dma_generic_free_coherent(dev, size, vaddr, dma_addr, attrs);
}
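The dispatch above exists because the matching allocator may hand out memory from either pool. A sketch of the allocation-side counterpart, reconstructed from memory of the same-era arch/x86 code (treat the exact body as an assumption):

void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
				 dma_addr_t *dma_handle, gfp_t flags,
				 unsigned long attrs)
{
	void *vaddr;

	/* Try the generic path first and fall back to the swiotlb pool. */
	flags |= __GFP_NOWARN;
	vaddr = dma_generic_alloc_coherent(hwdev, size, dma_handle, flags,
					   attrs);
	if (vaddr)
		return vaddr;

	return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
}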
Example #6
void arch_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	if (!__free_from_pool(vaddr, PAGE_ALIGN(size))) {
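		/*
		 * The allocation was remapped into the vmalloc area, so the
		 * vaddr passed in is an alias; recover the linear-map address
		 * from the DMA handle, drop the alias, then free the pages.
		 */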
		void *kaddr = phys_to_virt(dma_to_phys(dev, dma_handle));

		vunmap(vaddr);
		dma_direct_free_pages(dev, size, kaddr, dma_handle, attrs);
	}
}
Example #7
static void __init zone_sizes_init(unsigned long min, unsigned long max)
{
	struct memblock_region *reg;
	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
	unsigned long max_dma = min;
#ifdef CONFIG_ZONE_DMA
	unsigned long max_dma_phys, dma_end;
#endif
	memset(zone_size, 0, sizeof(zone_size));

#ifdef CONFIG_ZONE_DMA
#ifdef CONFIG_ZONE_DMA_ALLOW_CUSTOM_SIZE
	max_dma_phys = (unsigned long)dma_to_phys(NULL,
			(min << PAGE_SHIFT) + ZONE_DMA_SIZE_BYTES + 1);
#else
	max_dma_phys = (unsigned long)dma_to_phys(NULL, DMA_BIT_MASK(32) + 1);
#endif /* CONFIG_ZONE_DMA_ALLOW_CUSTOM_SIZE */
	max_dma = max(min, min(max, max_dma_phys >> PAGE_SHIFT));
	zone_size[ZONE_DMA] = max_dma - min;
#endif /* CONFIG_ZONE_DMA */
	zone_size[ZONE_NORMAL] = max - max_dma;

	memcpy(zhole_size, zone_size, sizeof(zhole_size));

	for_each_memblock(memory, reg) {
		unsigned long start = memblock_region_memory_base_pfn(reg);
		unsigned long end = memblock_region_memory_end_pfn(reg);

		if (start >= max)
			continue;

#ifdef CONFIG_ZONE_DMA
		if (start < max_dma) {
			dma_end = min(end, max_dma);
			zhole_size[ZONE_DMA] -= dma_end - start;
		}
#endif
		if (end > max_dma) {
			unsigned long normal_end = min(end, max);
			unsigned long normal_start = max(start, max_dma);
			zhole_size[ZONE_NORMAL] -= normal_end - normal_start;
		}
	}

	free_area_init_node(0, zone_size, min, zhole_size);
}
Example #8
void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	phys_addr_t phys = dma_to_phys(dev, addr);

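	/*
	 * Give ownership back to the CPU (cache maintenance and bounce-buffer
	 * copy-back) before the swiotlb slot is released below.
	 */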
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		dma_direct_sync_single_for_cpu(dev, addr, size, dir);

	if (unlikely(is_swiotlb_buffer(phys)))
		swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
}
Example #9
void dma_direct_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = dma_to_phys(dev, addr);

	if (unlikely(is_swiotlb_buffer(paddr)))
		swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE);

	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_device(dev, paddr, size, dir);
}
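For contrast, the CPU-direction twin performs the same two steps in the opposite order: architecture cache maintenance first, then the bounce-buffer copy-back. A sketch along the lines of the same-era kernel/dma/direct.c (treat the details as assumptions):

void dma_direct_sync_single_for_cpu(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = dma_to_phys(dev, addr);

	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_cpu(dev, paddr, size, dir);

	if (unlikely(is_swiotlb_buffer(paddr)))
		swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU);
}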
Example #10
/*
 * Create a scatter-list for the already-allocated DMA buffer.
 * This function could be replaced by dma_common_get_sgtable
 * once it becomes available.
 */
static int ion_cma_get_sgtable(struct device *dev, struct sg_table *sgt,
			       void *cpu_addr, dma_addr_t handle, size_t size)
{
	struct page *page = phys_to_page(dma_to_phys(dev, handle));
	struct scatterlist *sg;
	int ret;

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (unlikely(ret))
		return ret;

	sg = sgt->sgl;
	sg_set_page(sg, page, PAGE_ALIGN(size), 0);
	sg_dma_address(sg) = sg_phys(sg);

	return 0;
}
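Once available, the common helper would reduce this to a thin wrapper. A hedged sketch (the wrapper name is invented, and dma_common_get_sgtable()'s argument list has varied across kernel versions, so this is an assumption):

static int ion_cma_get_sgtable_common(struct device *dev, struct sg_table *sgt,
				      void *cpu_addr, dma_addr_t handle,
				      size_t size)
{
	/* Delegate page lookup and single-entry table setup to the core. */
	return dma_common_get_sgtable(dev, sgt, cpu_addr, handle, size);
}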
Example #11
static void __dma_free_coherent(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle,
				struct dma_attrs *attrs)
{
	bool freed;
	phys_addr_t paddr = dma_to_phys(dev, dma_handle);

	if (dev == NULL) {
		WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
		return;
	}

	freed = dma_release_from_contiguous(dev,
					phys_to_page(paddr),
					PAGE_ALIGN(size) >> PAGE_SHIFT);
	if (!freed)
		swiotlb_free_coherent(dev, size, vaddr, dma_handle);
}
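The branches above mirror the allocation path: CMA pages go back through dma_release_from_contiguous(), everything else through swiotlb. A from-memory sketch of the same-era arm64 allocator (treat details such as the __GFP_WAIT test as assumptions):

static void *__dma_alloc_coherent(struct device *dev, size_t size,
				  dma_addr_t *dma_handle, gfp_t flags,
				  struct dma_attrs *attrs)
{
	if (dev == NULL) {
		WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
		return NULL;
	}

	if (IS_ENABLED(CONFIG_ZONE_DMA) &&
	    dev->coherent_dma_mask <= DMA_BIT_MASK(32))
		flags |= GFP_DMA;

	if (dev_get_cma_area(dev) && (flags & __GFP_WAIT)) {
		struct page *page;
		void *addr;

		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
						 get_order(size));
		if (!page)
			return NULL;

		*dma_handle = phys_to_dma(dev, page_to_phys(page));
		addr = page_address(page);
		memset(addr, 0, size);
		return addr;
	}

	return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
}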
Example #12
static int mei_txe_pci_resume(struct device *device)
{
    struct pci_dev *pdev = to_pci_dev(device);
    struct mei_device *dev;
    struct mei_txe_hw *hw;
    int err;

    dev = pci_get_drvdata(pdev);
    if (!dev)
        return -ENODEV;

    pci_enable_msi(pdev);

    mei_clear_interrupts(dev);

    /* request and enable interrupt */
    if (pci_dev_msi_enabled(pdev))
        err = request_threaded_irq(pdev->irq,
                                   NULL,
                                   mei_txe_irq_thread_handler,
                                   IRQF_ONESHOT, KBUILD_MODNAME, dev);
    else
        err = request_threaded_irq(pdev->irq,
                                   mei_txe_irq_quick_handler,
                                   mei_txe_irq_thread_handler,
                                   IRQF_SHARED, KBUILD_MODNAME, dev);
    if (err) {
        dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n",
                pdev->irq);
        return err;
    }

    hw = to_txe_hw(dev);
    err = mei_txe_setup_satt2(dev,
                              dma_to_phys(&dev->pdev->dev, hw->pool_paddr), hw->pool_size);
    if (err)
        return err;

    err = mei_restart(dev);

    return err;
}
Example #13
void arch_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page *page;

	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) {
		page = vaddr;
	} else if (platform_vaddr_uncached(vaddr)) {
		page = virt_to_page(platform_vaddr_to_cached(vaddr));
	} else {
#ifdef CONFIG_MMU
		dma_common_free_remap(vaddr, size, VM_MAP);
#endif
		page = pfn_to_page(PHYS_PFN(dma_to_phys(dev, dma_handle)));
	}

	if (!dma_release_from_contiguous(dev, page, count))
		__free_pages(page, get_order(size));
}
Example #14
static void __init zone_sizes_init(unsigned long min, unsigned long max)
{
	struct memblock_region *reg;
	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
	unsigned long max_dma = min;

	memset(zone_size, 0, sizeof(zone_size));

	/* 4GB maximum for 32-bit only capable devices */
	if (IS_ENABLED(CONFIG_ZONE_DMA)) {
		unsigned long max_dma_phys =
			(unsigned long)dma_to_phys(NULL, DMA_BIT_MASK(32) + 1);
		max_dma = max(min, min(max, max_dma_phys >> PAGE_SHIFT));
		zone_size[ZONE_DMA] = max_dma - min;
	}
	zone_size[ZONE_NORMAL] = max - max_dma;

	memcpy(zhole_size, zone_size, sizeof(zhole_size));

	for_each_memblock(memory, reg) {
		unsigned long start = memblock_region_memory_base_pfn(reg);
		unsigned long end = memblock_region_memory_end_pfn(reg);

		if (start >= max)
			continue;

		if (IS_ENABLED(CONFIG_ZONE_DMA) && start < max_dma) {
			unsigned long dma_end = min(end, max_dma);
			zhole_size[ZONE_DMA] -= dma_end - start;
		}

		if (end > max_dma) {
			unsigned long normal_end = min(end, max);
			unsigned long normal_start = max(start, max_dma);
			zhole_size[ZONE_NORMAL] -= normal_end - normal_start;
		}
	}

	free_area_init_node(0, zone_size, min, zhole_size);
}
Example #15
/**
 * mei_txe_probe - Device Initialization Routine
 *
 * @pdev: PCI device structure
 * @ent: entry in mei_txe_pci_tbl
 *
 * returns 0 on success, <0 on failure.
 */
static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
    struct mei_device *dev;
    struct mei_txe_hw *hw;
    int err;
    int i;

    /* enable pci dev */
    err = pci_enable_device(pdev);
    if (err) {
        dev_err(&pdev->dev, "failed to enable pci device.\n");
        goto end;
    }
    /* set PCI host mastering  */
    pci_set_master(pdev);
    /* pci request regions for mei driver */
    err = pci_request_regions(pdev, KBUILD_MODNAME);
    if (err) {
        dev_err(&pdev->dev, "failed to get pci regions.\n");
        goto disable_device;
    }

    err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
    if (err) {
        err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
        if (err) {
            dev_err(&pdev->dev, "No suitable DMA available.\n");
            goto release_regions;
        }
    }

    /* allocates and initializes the mei dev structure */
    dev = mei_txe_dev_init(pdev);
    if (!dev) {
        err = -ENOMEM;
        goto release_regions;
    }
    hw = to_txe_hw(dev);


    err = mei_reserver_dma_acpi(dev);
    if (err)
        err = mei_alloc_dma(dev);
    if (err)
        goto free_device;

    /* mapping  IO device memory */
    for (i = SEC_BAR; i < NUM_OF_MEM_BARS; i++) {
        hw->mem_addr[i] = pci_iomap(pdev, i, 0);
        if (!hw->mem_addr[i]) {
            dev_err(&pdev->dev, "mapping I/O device memory failure.\n");
            err = -ENOMEM;
            goto free_device;
        }
    }


    pci_enable_msi(pdev);

    /* clear spurious interrupts */
    mei_clear_interrupts(dev);

    /* request and enable interrupt  */
    if (pci_dev_msi_enabled(pdev))
        err = request_threaded_irq(pdev->irq,
                                   NULL,
                                   mei_txe_irq_thread_handler,
                                   IRQF_ONESHOT, KBUILD_MODNAME, dev);
    else
        err = request_threaded_irq(pdev->irq,
                                   mei_txe_irq_quick_handler,
                                   mei_txe_irq_thread_handler,
                                   IRQF_SHARED, KBUILD_MODNAME, dev);
    if (err) {
        dev_err(&pdev->dev, "mei: request_threaded_irq failure. irq = %d\n",
                pdev->irq);
        goto free_device;
    }

    if (mei_start(dev)) {
        dev_err(&pdev->dev, "init hw failure.\n");
        err = -ENODEV;
        goto release_irq;
    }

    err = mei_txe_setup_satt2(dev,
                              dma_to_phys(&dev->pdev->dev, hw->pool_paddr), hw->pool_size);
    if (err)
        goto release_irq;


    err = mei_register(dev);
    if (err)
        goto release_irq;

    pci_set_drvdata(pdev, dev);

    hw->mdev = mei_mm_init(&dev->pdev->dev,
                           hw->pool_vaddr, hw->pool_paddr, hw->pool_size);

    if (IS_ERR_OR_NULL(hw->mdev)) {
        /* mei_mm_init() can return NULL as well as an ERR_PTR() */
        err = hw->mdev ? PTR_ERR(hw->mdev) : -ENOMEM;
        goto deregister_mei;
    }

    pm_runtime_set_autosuspend_delay(&pdev->dev, MEI_TXI_RPM_TIMEOUT);
    pm_runtime_use_autosuspend(&pdev->dev);

    pm_runtime_mark_last_busy(&pdev->dev);

    /*
     * For non-wakeable HW the runtime PM framework
     * can't be used at the PCI device level.
     * Use domain runtime PM callbacks instead.
     */
    if (!pci_dev_run_wake(pdev))
        mei_txe_set_pm_domain(dev);

    pm_runtime_put_noidle(&pdev->dev);

    if (!nopg)
        pm_runtime_allow(&pdev->dev);

    return 0;

deregister_mei:
    mei_deregister(dev);
release_irq:

    mei_cancel_work(dev);

    /* disable interrupts */
    mei_disable_interrupts(dev);

    free_irq(pdev->irq, dev);
    pci_disable_msi(pdev);

free_device:
    if (hw->pool_release)
        hw->pool_release(hw);

    mei_txe_pci_iounmap(pdev, hw);

    kfree(dev);
release_regions:
    pci_release_regions(pdev);
disable_device:
    pci_disable_device(pdev);
end:
    dev_err(&pdev->dev, "initialization failed.\n");
    return err;
}
Example #16
long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
		dma_addr_t dma_addr)
{
	return __phys_to_pfn(dma_to_phys(dev, dma_addr));
}
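A hedged sketch of a typical consumer: mapping the coherent buffer into user space from an mmap handler (example_mmap_coherent() is an invented name; the pattern follows the generic coherent-mmap helpers):

static int example_mmap_coherent(struct device *dev,
		struct vm_area_struct *vma, void *cpu_addr,
		dma_addr_t dma_addr, size_t size)
{
	unsigned long pfn = arch_dma_coherent_to_pfn(dev, cpu_addr, dma_addr);

	/* Install the buffer's page frames into the user VMA. */
	return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
			       size, vma->vm_page_prot);
}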