Example #1
File: msi.c Project: Moretti0/gg
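/*
 * Chained MSI demultiplexer for the i.MX PCIe host controller: it scans the
 * eight 32-bit MSI pending registers and, for every set bit located with
 * __fls(), forwards the corresponding virtual IRQ via generic_handle_irq().
 */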
static void imx_msi_handler(unsigned int irq, struct irq_desc *desc)
{
	int i, j;
	unsigned int status;
	struct irq_chip *chip = irq_get_chip(irq);
	unsigned int base_irq = IRQ_IMX_MSI_0;

	chained_irq_enter(chip, desc);
	for (i = 0; i < 8; i++) {
		status = imx_pcie_msi_pending(i);
		while (status) {
			j = __fls(status);
			generic_handle_irq(base_irq + j);
			status &= ~(1 << j);
		}
		base_irq += 32;
	}
	if (intd_active) {
		pr_info("%s intd\n", __func__);
		generic_handle_irq(MXC_INT_PCIE_0B);
	}
	chained_irq_exit(chip, desc);
}
Example #2
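/*
 * Search a bitmap whose bits are numbered from the most significant bit of
 * each word downwards (an "inverse", MSB-first ordering): returns the number
 * of the next set bit at or after @offset, or @size if no further bit is set.
 */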
unsigned long find_next_bit_inv(const unsigned long *addr, unsigned long size,
				unsigned long offset)
{
	const unsigned long *p = addr + (offset / BITS_PER_LONG);
	unsigned long result = offset & ~(BITS_PER_LONG - 1);
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset %= BITS_PER_LONG;
	if (offset) {
		tmp = *(p++);
		tmp &= (~0UL >> offset);
		if (size < BITS_PER_LONG)
			goto found_first;
		if (tmp)
			goto found_middle;
		size -= BITS_PER_LONG;
		result += BITS_PER_LONG;
	}
	while (size & ~(BITS_PER_LONG-1)) {
		if ((tmp = *(p++)))
			goto found_middle;
		result += BITS_PER_LONG;
		size -= BITS_PER_LONG;
	}
	if (!size)
		return result;
	tmp = *p;
found_first:
	tmp &= (~0UL << (BITS_PER_LONG - size));
	if (!tmp)		/* Are any bits set? */
		return result + size;	/* Nope. */
found_middle:
	return result + (__fls(tmp) ^ (BITS_PER_LONG - 1));
}
Example #3
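// Build the legacy MultiProcessor (MP) configuration table; __fls() is used
// below to round the logical-CPUs-per-package count up to a power of two.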
void
mptable_setup(void)
{
    if (! CONFIG_MPTABLE)
        return;

    dprintf(3, "init MPTable\n");

    // Config structure in temp area.
    struct mptable_config_s *config = malloc_tmp(32*1024);
    if (!config) {
        warn_noalloc();
        return;
    }
    memset(config, 0, sizeof(*config));
    config->signature = MPCONFIG_SIGNATURE;
    config->spec = 4;
    memcpy(config->oemid, BUILD_CPUNAME8, sizeof(config->oemid));
    memcpy(config->productid, "0.1         ", sizeof(config->productid));
    config->lapic = BUILD_APIC_ADDR;

    // Detect cpu info
    u32 cpuid_signature, ebx, ecx, cpuid_features;
    cpuid(1, &cpuid_signature, &ebx, &ecx, &cpuid_features);
    if (! cpuid_signature) {
        // Use default values.
        cpuid_signature = 0x600;
        cpuid_features = 0x201;
    }
    int pkgcpus = 1;
    if (cpuid_features & (1 << 28)) {
        /* Only populate the MPS tables with the first logical CPU in
           each package */
        pkgcpus = (ebx >> 16) & 0xff;
        pkgcpus = 1 << (__fls(pkgcpus - 1) + 1); /* round up to power of 2 */
    }
Example #4
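/*
 * Reserve carve-out regions (framebuffer, kernel info, ION heaps) from the
 * top of low memory.  __fls() is used to snap a non-power-of-two memory size
 * to a power of two: up when the second-highest bit is set, otherwise down.
 */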
void __init owl_reserve(void)
{
	phys_addr_t phy_mem_size, phy_mem_end;
	unsigned int owl_ion0_start = 0;
	unsigned int owl_ion1_start = 0;

	phy_mem_size = memblock_phys_mem_size();
	if (phy_mem_size & (phy_mem_size - 1)) { /* != 2^n ? */
		uint _tmp = __fls(phy_mem_size);
		if (_tmp > 0 && (phy_mem_size & (1U << (_tmp - 1)))) {
			/* close to next boundary */
			_tmp++;
			phy_mem_size =
				(_tmp >= sizeof(phy_mem_size) * 8) ? phy_mem_size : (1U << _tmp);
		} else {
			phy_mem_size = 1U << _tmp;
		}
	}
	s_phy_mem_size_saved = phy_mem_size;
	phy_mem_end = arm_lowmem_limit;
	pr_info("%s: physical memory size %u bytes, end @0x%x\n",
		__func__, (unsigned int)phy_mem_size, (unsigned int)phy_mem_end);

	memblock_reserve(0, 0x4000); /* reserve low 16K for DDR dqs training */

	of_scan_flat_dt(early_init_dt_scan_ion, (void*)phy_mem_size);

	phy_mem_end -= owl_fb_size;
#ifdef CONFIG_VIDEO_OWL_DSS
	owl_fb_start = phy_mem_end;
	memblock_reserve(owl_fb_start, owl_fb_size);
#endif

	phy_mem_end -= owl_kinfo_size;
	owl_kinfo_start = phy_mem_end;
	memblock_reserve(owl_kinfo_start, owl_kinfo_size);

#ifdef CONFIG_ION
	phy_mem_end -= owl_ion0_size;
	owl_ion0_start = phy_mem_end;
	owl_pdev_ion_data.heaps[0].base = owl_ion0_start;
	owl_pdev_ion_data.heaps[0].size = owl_ion0_size;

	/* ion_pmem */
#ifdef CONFIG_CMA
	phy_mem_end -= owl_ion1_size;
	owl_ion1_start = phy_mem_end; /* fake, not used. */
	owl_pdev_ion_data.heaps[1].base = 0;
	owl_pdev_ion_data.heaps[1].size = 0; /* prevent ion_reserve() from digging */
	owl_pdev_ion_data.heaps[1].priv = &(owl_pdev_ion_device.dev);
	dma_contiguous_set_global_reserve_size(owl_ion1_size); /* set size of the CMA global area */
#else /* no CMA */
	phy_mem_end -= owl_ion1_size;
	owl_ion1_start = phy_mem_end;
	owl_pdev_ion_data.heaps[1].base = owl_ion1_start;
	owl_pdev_ion_data.heaps[1].size = owl_ion1_size;
#endif
	ion_reserve(&owl_pdev_ion_data);
#endif

	printk(KERN_INFO "Reserved memory %uMB\n",
		(owl_ion0_size + owl_ion1_size) >> 20);
	printk(KERN_INFO
		"   FB:     0x%08x, %uMB\n"
		"   KINFO:  0x%08x, %uMB\n"
		"   ION0:   0x%08x, %uMB\n"
		"   ION1:   0x%08x, %uMB\n",
		owl_fb_start, owl_fb_size >> 20,
		owl_kinfo_start, owl_kinfo_size >> 20,
		owl_ion0_start, owl_ion0_size >> 20,
		owl_ion1_start, owl_ion1_size >> 20);
}
Example #5
File: io_uring.c Project: arh/fio
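/*
 * fio helper: derive a power-of-two ring depth from the requested depth via
 * the most-significant-bit index of (depth - 1).
 */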
static unsigned roundup_pow2(unsigned depth)
{
	return 1UL << __fls(depth - 1);
}
Example #6
/**
 * Computes block write protection registers from range
 *
 * @param start Desired protection start offset
 * @param len Desired protection length
 * @param sr1 Output pointer for status register 1
 * @param sr2 Output pointer for status register 2
 *
 * @return EC_SUCCESS, or non-zero if any error.
 */
static int protect_to_reg(unsigned int start, unsigned int len,
		uint8_t *sr1, uint8_t *sr2)
{
	char cmp = 0;
	char sec = 0;
	char tb = 0;
	char bp = 0;
	int blocks;
	int size;

	/* Bad pointers */
	if (!sr1 || !sr2 || *sr1 == -1 || *sr2 == -1)
		return EC_ERROR_INVAL;

	/* Invalid data */
	if ((start && !len) || start + len > CONFIG_FLASH_SIZE)
		return EC_ERROR_INVAL;

	/* Set complement bit based on whether length is power of 2 */
	if ((len & (len - 1)) != 0) {
		cmp = 1;
		start = start + len;
		len = CONFIG_FLASH_SIZE - len;
	}

	/* Set bottom/top bit based on start address */
	/* Do not set if len == 0 or len == CONFIG_FLASH_SIZE */
	if (!start && (len % CONFIG_FLASH_SIZE))
		tb = 1;

	/* Set sector bit and determine block length based on protect length */
	if (len == 0 || len >= 128 * 1024) {
		sec = 0;
		size = 64 * 1024;
	} else if (len >= 4 * 1024 && len <= 32 * 1024) {
		sec = 1;
		size = 2 * 1024;
	} else
		return EC_ERROR_INVAL;

	/* Determine number of blocks */
	if (len % size != 0)
		return EC_ERROR_INVAL;
	blocks = len / size;

	/* Determine bp = log2(blocks) with log2(0) = 0 */
	bp = blocks ? __fls(blocks) : 0;

	/* Clear bits */
	*sr1 &= ~(SPI_FLASH_SR1_SEC | SPI_FLASH_SR1_TB
		| SPI_FLASH_SR1_BP2 | SPI_FLASH_SR1_BP1 | SPI_FLASH_SR1_BP0);
	*sr2 &= ~SPI_FLASH_SR2_CMP;

	/* Set bits */
	*sr1 |= (sec ? SPI_FLASH_SR1_SEC : 0) | (tb ? SPI_FLASH_SR1_TB : 0)
		| (bp << 2);
	*sr2 |= (cmp ? SPI_FLASH_SR2_CMP : 0);

	/* Set SRP0 so status register can't be changed */
	*sr1 |= SPI_FLASH_SR1_SRP0;

	return EC_SUCCESS;
}
Example #7
int iommu_map(struct iommu_domain *domain, unsigned long iova,
	      phys_addr_t paddr, size_t size, int prot)
{
	unsigned long orig_iova = iova;
	unsigned int min_pagesz;
	size_t orig_size = size;
	int ret = 0;

	if (unlikely(domain->ops->map == NULL))
		return -ENODEV;

	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);

	/*
	 * both the virtual address and the physical one, as well as
	 * the size of the mapping, must be aligned (at least) to the
	 * size of the smallest page supported by the hardware
	 */
	if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
		pr_err("unaligned: iova 0x%lx pa 0x%lx size 0x%lx min_pagesz "
			"0x%x\n", iova, (unsigned long)paddr,
			(unsigned long)size, min_pagesz);
		return -EINVAL;
	}

	pr_debug("map: iova 0x%lx pa 0x%lx size 0x%lx\n", iova,
				(unsigned long)paddr, (unsigned long)size);

	while (size) {
		unsigned long pgsize, addr_merge = iova | paddr;
		unsigned int pgsize_idx;

		/* Max page size that still fits into 'size' */
		pgsize_idx = __fls(size);

		/* need to consider alignment requirements ? */
		if (likely(addr_merge)) {
			/* Max page size allowed by both iova and paddr */
			unsigned int align_pgsize_idx = __ffs(addr_merge);

			pgsize_idx = min(pgsize_idx, align_pgsize_idx);
		}

		/* build a mask of acceptable page sizes */
		pgsize = (1UL << (pgsize_idx + 1)) - 1;

		/* throw away page sizes not supported by the hardware */
		pgsize &= domain->ops->pgsize_bitmap;

		/* make sure we're still sane */
		BUG_ON(!pgsize);

		/* pick the biggest page */
		pgsize_idx = __fls(pgsize);
		pgsize = 1UL << pgsize_idx;

		pr_debug("mapping: iova 0x%lx pa 0x%lx pgsize %lu\n", iova,
					(unsigned long)paddr, pgsize);

		ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
		if (ret)
			break;

		iova += pgsize;
		paddr += pgsize;
		size -= pgsize;
	}

	/* unroll mapping in case something went wrong */
	if (ret)
		iommu_unmap(domain, orig_iova, orig_size - size);

	return ret;
}
Example #8
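/*
 * Map a scatter-gather list into a device's I/O virtual address space: an
 * IOVA range is taken from the IOVMM gen_pool (the allocation order is
 * derived with __fls() from the mapping size, capped at 1 MiB) and each
 * physically contiguous chunk is then mapped with iommu_map().
 */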
dma_addr_t iovmm_map(struct device *dev, struct scatterlist *sg, off_t offset,
								size_t size)
{
	off_t start_off;
	dma_addr_t addr, start = 0;
	size_t mapped_size = 0;
	struct exynos_vm_region *region;
	struct exynos_iovmm *vmm = exynos_get_iovmm(dev);
	int order;
	int ret;
	int count = 0;
#ifdef CONFIG_EXYNOS_IOVMM_ALIGN64K
	size_t iova_size = 0;
#endif
	for (; sg_dma_len(sg) < offset; sg = sg_next(sg))
		offset -= sg_dma_len(sg);

	start_off = offset_in_page(sg_phys(sg) + offset);
	size = PAGE_ALIGN(size + start_off);

	order = __fls(min_t(size_t, size, SZ_1M));

	region = kmalloc(sizeof(*region), GFP_KERNEL);
	if (!region) {
		ret = -ENOMEM;
		goto err_map_nomem;
	}

#ifdef CONFIG_EXYNOS_IOVMM_ALIGN64K
	iova_size = ALIGN(size, SZ_64K);
	start = (dma_addr_t)gen_pool_alloc_aligned(vmm->vmm_pool, iova_size,
									order);
#else
	start = (dma_addr_t)gen_pool_alloc(vmm->vmm_pool, size);
#endif
	if (!start) {
		ret = -ENOMEM;
		goto err_map_noiomem;
	}

	addr = start;
	do {
		phys_addr_t phys;
		size_t len;

		phys = sg_phys(sg);
		len = sg_dma_len(sg);

		/* if back to back sg entries are contiguous consolidate them */
		while (sg_next(sg) &&
		       sg_phys(sg) + sg_dma_len(sg) == sg_phys(sg_next(sg))) {
			len += sg_dma_len(sg_next(sg));
			sg = sg_next(sg);
		}

		if (offset > 0) {
			len -= offset;
			phys += offset;
			offset = 0;
		}

		if (offset_in_page(phys)) {
			len += offset_in_page(phys);
			phys = round_down(phys, PAGE_SIZE);
		}

		len = PAGE_ALIGN(len);

		if (len > (size - mapped_size))
			len = size - mapped_size;

		ret = iommu_map(vmm->domain, addr, phys, len, 0);
		if (ret)
			break;

		addr += len;
		mapped_size += len;
	} while ((sg = sg_next(sg)) && (mapped_size < size));
	BUG_ON(mapped_size > size);

	if (mapped_size < size) {
		pr_err("IOVMM: iovmm_map failed as mapped_size (%zu) < size (%zu)\n",
			mapped_size, size);
		goto err_map_map;
	}

#ifdef CONFIG_EXYNOS_IOVMM_ALIGN64K
	if (iova_size != size) {
		addr = start + size;
		size = iova_size;

		for (; addr < start + size; addr += PAGE_SIZE) {
			ret = iommu_map(vmm->domain, addr,
				page_to_phys(ZERO_PAGE(0)), PAGE_SIZE, 0);
			if (ret)
				goto err_map_map;

			mapped_size += PAGE_SIZE;
		}
	}
#endif

	region->start = start + start_off;
	region->size = size;

	INIT_LIST_HEAD(&region->node);

	spin_lock(&vmm->lock);

	list_add(&region->node, &vmm->regions_list);

	spin_unlock(&vmm->lock);

	dev_dbg(dev, "IOVMM: Allocated VM region @ %#x/%#X bytes.\n",
					region->start, region->size);

	return region->start;

err_map_map:
	iommu_unmap(vmm->domain, start, mapped_size);
	gen_pool_free(vmm->vmm_pool, start, size);
err_map_noiomem:
	kfree(region);
err_map_nomem:
	dev_dbg(dev, "IOVMM: Failed to allocate VM region for %#zx bytes.\n",
		size);
	return (dma_addr_t)ret;
}