Example #1
static void __init arm_bootmem_init(unsigned long start_pfn,
	unsigned long end_pfn)
{
	struct memblock_region *reg;
	unsigned int boot_pages;
	phys_addr_t bitmap;
	pg_data_t *pgdat;

	/*
	 * Allocate the bootmem bitmap page.  This must be in a region
	 * of memory which has already been mapped.
	 */
	boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
	bitmap = memblock_alloc_base(boot_pages << PAGE_SHIFT, L1_CACHE_BYTES,
				__pfn_to_phys(end_pfn));

	/*
	 * Initialise the bootmem allocator, handing the
	 * memory banks over to bootmem.
	 */
	node_set_online(0);
	pgdat = NODE_DATA(0);
	init_bootmem_node(pgdat, __phys_to_pfn(bitmap), start_pfn, end_pfn);

	/* Free the lowmem regions from memblock into bootmem. */
	for_each_memblock(memory, reg) {
		unsigned long start = memblock_region_memory_base_pfn(reg);
		unsigned long end = memblock_region_memory_end_pfn(reg);

		if (end >= end_pfn)
			end = end_pfn;
		if (start >= end)
			break;

		free_bootmem(__pfn_to_phys(start), (end - start) << PAGE_SHIFT);
	}
}
Example #2
static void ramfile_vma_close(struct vm_area_struct *vma)
{
	struct ramfile_desc *prf;
	unsigned long usize = vma->vm_end - vma->vm_start;

	/* Fill in the ramfile desc (header) */
	prf = (struct ramfile_desc *)__phys_to_virt(__pfn_to_phys(vma->vm_pgoff));
	prf->payload_size = usize;
	prf->flags = RAMFILE_PHYCONT;
	memset((void*)&prf->reserved[0], 0, sizeof(prf->reserved));
	ramdump_attach_ramfile(prf);
#ifdef RAMFILE_DEBUG
	printk(KERN_ERR "ramfile close 0x%x - linked into RDC\n", (unsigned)prf);
#endif
}
Example #3
static int
remap_area_supersections(unsigned long virt, unsigned long pfn,
			 unsigned long size, unsigned long flags)
{
	unsigned long prot, addr = virt, end = virt + size;
	pgd_t *pgd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	prot = PMD_TYPE_SECT | PMD_SECT_SUPER | PMD_SECT_AP_WRITE |
			PMD_DOMAIN(DOMAIN_IO) |
			(flags & (L_PTE_CACHEABLE | L_PTE_BUFFERABLE));

	/*
	 * ARMv6 and above need XN set to prevent speculative prefetches
	 * hitting IO.
	 */
	if (cpu_architecture() >= CPU_ARCH_ARMv6)
		prot |= PMD_SECT_XN;

	pgd = pgd_offset_k(virt);
	do {
		unsigned long super_pmd_val, i;

		super_pmd_val = __pfn_to_phys(pfn) | prot;
		super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;

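		/*
		 * A 16MB supersection is described by 16 identical first-level
		 * entries (8 pgd slots, two pmd entries each); bits [35:32] of
		 * the physical address are carried in bits [23:20] of each entry.
		 */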
		for (i = 0; i < 8; i++) {
			pmd_t *pmd = pmd_offset(pgd, addr);

			pmd[0] = __pmd(super_pmd_val);
			pmd[1] = __pmd(super_pmd_val);
			flush_pmd_entry(pmd);

			addr += PGDIR_SIZE;
			pgd++;
		}

		pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
	} while (addr < end);

	return 0;
}
Example #4
static int __init cma_init_reserved_areas(void)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = cma_activate_area(&cma_areas[i]);
		if (ret)
			return ret;
		pr_err("cma base %llx  cma size %lu\n",
				__pfn_to_phys(cma_areas[i].base_pfn),
				(cma_areas[i].count<<PAGE_SHIFT)/SZ_1M);
	}

	hisi_cma_dev_init();

	return 0;
}
Example #5
void __iomem *davinci_ioremap(unsigned long p, size_t size, unsigned int type)
{
	struct map_desc *desc = davinci_soc_info.io_desc;
	int desc_num = davinci_soc_info.io_desc_num;
	int i;

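	/*
	 * Serve the request from one of the SoC's static I/O mappings when
	 * the range falls entirely inside one; otherwise fall back to a
	 * dynamic ioremap.
	 */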
	for (i = 0; i < desc_num; i++, desc++) {
		unsigned long iophys = __pfn_to_phys(desc->pfn);
		unsigned long iosize = desc->length;

		if (p >= iophys && (p + size) <= (iophys + iosize))
			return __io(desc->virtual + p - iophys);
	}

	return __arm_ioremap_caller(p, size, type,
					__builtin_return_address(0));
}
Example #6
	/* Reserve the lowmem memblock reserved regions in bootmem. */
	for_each_memblock(reserved, reg) {
		// start: 0x40004
		unsigned long start = memblock_region_reserved_base_pfn(reg);
		// end: 0x40008
		unsigned long end = memblock_region_reserved_end_pfn(reg);

		// end: 0x40008, end_pfn: 0x4f800
		if (end >= end_pfn)
			end = end_pfn;
		// start: 0x40004, end: 0x40008
		if (start >= end)
			break;

		// __pfn_to_phys(0x40004): 0x40004000, (end - start) << PAGE_SHIFT: 0x4000
		// BOOTMEM_DEFAULT: 0
		reserve_bootmem(__pfn_to_phys(start),
			        (end - start) << PAGE_SHIFT, BOOTMEM_DEFAULT);
	}
Example #7
static void *
__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
	const void *caller)
{
	struct vm_struct *area;
	unsigned long addr;

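	/*
	 * Reserve a virtually contiguous area (also usable for mapping into
	 * user space) and remap the buffer's physical range into it with the
	 * requested pgprot.
	 */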
	area = get_vm_area_caller(size, VM_DMA | VM_USERMAP, caller);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;
	area->phys_addr = __pfn_to_phys(page_to_pfn(page));

	if (ioremap_page_range(addr, addr + size, area->phys_addr, prot)) {
		vunmap((void *)addr);
		return NULL;
	}
	return (void *)addr;
}
Example #8
/**
 * The caller has to make sure that there is enough guard
 * vm area allocated, so that the allignment adjustment done here
 * does not overflow the vm area. Unlike ioremap, this function cant
 * take care of this, as the vm area is pre-allocated
 * by calling plat_get_vm_area.
 */
void __iomem *plat_ioremap_ns(unsigned long vaddr, unsigned long size,
		phys_addr_t phys_addr)
{
	unsigned long pfn;
	unsigned long offset;

	pfn = __phys_to_pfn(phys_addr);
	offset = phys_addr & ~PAGE_MASK;

	size = PAGE_ALIGN(offset + size);

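	/*
	 * Map the page-aligned physical range into the caller-provided VM
	 * area with shared device memory attributes.
	 */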
	if (ioremap_page_range(vaddr, vaddr + size, __pfn_to_phys(pfn),
		__pgprot(PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
			L_PTE_SHARED))) {
			pr_err("ERROR: ns_ioremap failed\n");
			return (void __iomem *)NULL;
	}

	return (void __iomem *)(vaddr + offset);
}
Example #9
static int remap_area_pages(unsigned long start, unsigned long pfn,
			    size_t size, const struct mem_type *type)
{
	unsigned long addr = start;
	unsigned long next, end = start + size;
	unsigned long phys_addr = __pfn_to_phys(pfn);
	pgd_t *pgd;
	int err = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
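	/*
	 * Walk the kernel page tables one pgd entry at a time, filling in
	 * the lower levels for each sub-range via remap_area_pmd().
	 */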
	do {
		next = pgd_addr_end(addr, end);
		err = remap_area_pmd(pgd, addr, next, phys_addr, type);
		if (err)
			break;
		phys_addr += next - addr;
	} while (pgd++, addr = next, addr != end);

	return err;
}
Example #10
bool is_cma_area(struct page *page)
{
	int i;
	phys_addr_t start;
	phys_addr_t cma_start;
	phys_addr_t cma_end;

	start = page_to_phys(page);
	
	for (i = 0; i < cma_area_count; i++) {
		cma_start = __pfn_to_phys(cma_areas[i].base_pfn);
		cma_end = cma_start + (cma_areas[i].count << PAGE_SHIFT);

		if (start >= cma_start && start < cma_end) {
			//pr_err("is_cma_area %lx \n", start);
			return true;
		}
	}

	return false;
}
Example #11
static int
remap_area_supersections(unsigned long virt, unsigned long pfn,
			 size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	pgd = pgd_offset_k(virt);
	pud = pud_offset(pgd, addr);
	pmd = pmd_offset(pud, addr);
	do {
		unsigned long super_pmd_val, i;

		super_pmd_val = __pfn_to_phys(pfn) | type->prot_sect |
				PMD_SECT_SUPER;
		super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;

		for (i = 0; i < 8; i++) {
			pmd[0] = __pmd(super_pmd_val);
			pmd[1] = __pmd(super_pmd_val);
			flush_pmd_entry(pmd);

			addr += PMD_SIZE;
			pmd += 2;
		}

		pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
	} while (addr < end);

	return 0;
}
Example #12
static void __dma_clear_buffer(struct page *page, size_t size)
{
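	/*
	 * Zero the buffer and push the zeroes out to RAM so the device sees
	 * them; highmem pages have no permanent kernel mapping and are
	 * cleared one page at a time through kmap_atomic().
	 */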
	if (!PageHighMem(page)) {
		void *ptr = page_address(page);
		if (ptr) {
			memset(ptr, 0, size);
			dmac_flush_range(ptr, ptr + size);
			outer_flush_range(__pa(ptr), __pa(ptr) + size);
		}
	} else {
		phys_addr_t base = __pfn_to_phys(page_to_pfn(page));
		phys_addr_t end = base + size;
		while (size > 0) {
			void *ptr = kmap_atomic(page);
			memset(ptr, 0, PAGE_SIZE);
			dmac_flush_range(ptr, ptr + PAGE_SIZE);
			kunmap_atomic(ptr);
			page++;
			size -= PAGE_SIZE;
		}
		outer_flush_range(base, end);
	}
}
Example #13
static void * __meminit altmap_alloc_block_buf(unsigned long size,
		struct vmem_altmap *altmap)
{
	unsigned long pfn, nr_pfns;
	void *ptr;

	if (size & ~PAGE_MASK) {
		pr_warn_once("%s: allocations must be multiple of PAGE_SIZE (%ld)\n",
				__func__, size);
		return NULL;
	}

	nr_pfns = size >> PAGE_SHIFT;
	pfn = vmem_altmap_alloc(altmap, nr_pfns);
	if (pfn < ULONG_MAX)
		ptr = __va(__pfn_to_phys(pfn));
	else
		ptr = NULL;
	pr_debug("%s: pfn: %#lx alloc: %ld align: %ld nr: %#lx\n",
			__func__, pfn, altmap->alloc, altmap->align, nr_pfns);

	return ptr;
}
Example #14
static int remap_area_pages(unsigned long start, unsigned long pfn,
			    unsigned long size, unsigned long flags)
{
	unsigned long addr = start;
	unsigned long next, end = start + size;
	unsigned long phys_addr = __pfn_to_phys(pfn);
	pgprot_t prot = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
				 L_PTE_DIRTY | L_PTE_WRITE | flags);
	pgd_t *pgd;
	int err = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = remap_area_pmd(pgd, addr, next, phys_addr, prot);
		if (err)
			break;
		phys_addr += next - addr;
	} while (pgd++, addr = next, addr != end);

	return err;
}
Example #15
static int
remap_area_sections(unsigned long virt, unsigned long pfn,
		    size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pgd_t *pgd;

	unmap_area_sections(virt, size);

	pgd = pgd_offset_k(addr);
	do {
		pmd_t *pmd = pmd_offset((pud_t *)pgd, addr);

		set_pmd(pmd, __pmd(__pfn_to_phys(pfn) | type->prot_sect));
		pfn += SZ_4M >> PAGE_SHIFT;
		flush_pmd_entry(pmd);

		addr += PGDIR_SIZE;
		pgd++;
	} while (addr < end);

	return 0;
}
Example #16
static void *
__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
	const void *caller)
{
	struct vm_struct *area;
	unsigned long addr;

	/*
	 * DMA allocation can be mapped to user space, so lets
	 * set VM_USERMAP flags too.
	 */
	area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP,
				  caller);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;
	area->phys_addr = __pfn_to_phys(page_to_pfn(page));

	if (ioremap_page_range(addr, addr + size, area->phys_addr, prot)) {
		vunmap((void *)addr);
		return NULL;
	}
	return (void *)addr;
}
Example #17
int page_is_ram(unsigned long pfn)
{
	return memblock_is_memory(__pfn_to_phys(pfn));
}
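For reference, the conversions used throughout these examples are plain shifts by PAGE_SHIFT. On the 32-bit ARM kernels these snippets come from, the helpers are defined in asm/memory.h roughly as sketched below (the exact casts and header vary by architecture and kernel version):

#define __pfn_to_phys(pfn)	((phys_addr_t)(pfn) << PAGE_SHIFT)
#define __phys_to_pfn(paddr)	((unsigned long)((paddr) >> PAGE_SHIFT))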
Example #18
void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
	unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
	const struct mem_type *type;
	int err;
	unsigned long addr;
 	struct vm_struct * area;

#ifndef CONFIG_ARM_LPAE
	/*
	 * High mappings must be supersection aligned
	 */
	if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
		return NULL;
#endif

	type = get_mem_type(mtype);
	if (!type)
		return NULL;

	/*
	 * Page align the mapping size, taking account of any offset.
	 */
	size = PAGE_ALIGN(offset + size);

	/*
	 * Try to reuse one of the static mapping whenever possible.
	 */
	read_lock(&vmlist_lock);
	for (area = vmlist; area; area = area->next) {
		if (!size || (sizeof(phys_addr_t) == 4 && pfn >= 0x100000))
			break;
		if (!(area->flags & VM_ARM_STATIC_MAPPING))
			continue;
		if ((area->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
			continue;
		if (__phys_to_pfn(area->phys_addr) > pfn ||
		    __pfn_to_phys(pfn) + size-1 > area->phys_addr + area->size-1)
			continue;
		/* we can drop the lock here as we know *area is static */
		read_unlock(&vmlist_lock);
		addr = (unsigned long)area->addr;
		addr += __pfn_to_phys(pfn) - area->phys_addr;
		return (void __iomem *) (offset + addr);
	}
	read_unlock(&vmlist_lock);

#if 0 /* HACK - do allow RAM to be mapped, the problems are a bit overrated */
	/*
	 * Don't allow RAM to be mapped - this causes problems with ARMv6+
	 */
	if (WARN_ON(pfn_valid(pfn)))
		return NULL;
#endif

	area = get_vm_area_caller(size, VM_IOREMAP, caller);
 	if (!area)
 		return NULL;
 	addr = (unsigned long)area->addr;

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
	if (DOMAIN_IO == 0 &&
	    (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
	       cpu_is_xsc3()) && pfn >= 0x100000 &&
	       !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_supersections(addr, pfn, size, type);
	} else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_sections(addr, pfn, size, type);
	} else
#endif
		err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn),
					 __pgprot(type->prot_pte));

	if (err) {
 		vunmap((void *)addr);
 		return NULL;
 	}

	flush_cache_vmap(addr, addr + size);
	return (void __iomem *) (offset + addr);
}
Example #19
unsigned long __pfn_to_bus(unsigned long pfn)
{
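    /*
     * Translate a RAM pfn into the corresponding bus address: start from
     * the physical address and apply the SDRAM offset as seen from the
     * framebuffer bus.
     */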
    return __pfn_to_phys(pfn) + (fb_bus_sdram_offset() - PHYS_OFFSET);
}
Example #20
void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
                                        unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
    const struct mem_type *type;
    int err;
    unsigned long addr;
    struct vm_struct * area;

#ifndef CONFIG_ARM_LPAE
    if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
        return NULL;
#endif

    type = get_mem_type(mtype);
    if (!type)
        return NULL;

    size = PAGE_ALIGN(offset + size);

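    /* Try to reuse one of the static mappings whenever possible. */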
    read_lock(&vmlist_lock);
    for (area = vmlist; area; area = area->next) {
        if (!size || (sizeof(phys_addr_t) == 4 && pfn >= 0x100000))
            break;
        if (!(area->flags & VM_ARM_STATIC_MAPPING))
            continue;
        if ((area->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
            continue;
        if (__phys_to_pfn(area->phys_addr) > pfn ||
                __pfn_to_phys(pfn) + size-1 > area->phys_addr + area->size-1)
            continue;

        read_unlock(&vmlist_lock);
        addr = (unsigned long)area->addr;
        addr += __pfn_to_phys(pfn) - area->phys_addr;
        return (void __iomem *) (offset + addr);
    }
    read_unlock(&vmlist_lock);

    if (WARN_ON(pfn_valid(pfn)))
        return NULL;

    area = get_vm_area_caller(size, VM_IOREMAP, caller);
    if (!area)
        return NULL;
    addr = (unsigned long)area->addr;

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
    if (DOMAIN_IO == 0 &&
            (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
             cpu_is_xsc3()) && pfn >= 0x100000 &&
            !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
        area->flags |= VM_ARM_SECTION_MAPPING;
        err = remap_area_supersections(addr, pfn, size, type);
    } else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
        area->flags |= VM_ARM_SECTION_MAPPING;
        err = remap_area_sections(addr, pfn, size, type);
    } else
#endif
        err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn),
                                 __pgprot(type->prot_pte));

    if (err) {
        vunmap((void *)addr);
        return NULL;
    }

    flush_cache_vmap(addr, addr + size);
    return (void __iomem *) (offset + addr);
}
Example #21
/**
 * @brief Driver write function
 *
 * @param[in] *file   : file
 * @param[in] *buffer : write parameters
 * @param[in] count   : write size
 * @param[in] *f_pos  : file read/write position
 * @retval write_size     : success, size written
 * @retval EFAULT         : failure, bad address
 * @retval ENOENT         : failure, no such file or directory
 * @retval ENOSPC         : failure, no space left on device
 * @retval EINVAL         : failure, invalid argument
 * @retval D_SHDMA_RET_NG : failure, error
 * @exception none
 * @see       none
 */
static ssize_t shdma_write(
    struct file    *file,
    const char __user    *buffer,
    size_t         count,
    loff_t         *f_pos )
{
	int ret = D_SHDMA_RET_OK;
	unsigned int i,j;
	int err = D_SHDMA_RET_OK;
	int result_chk = D_SHDMA_RET_OK;
	struct vm_area_struct *vma;
	unsigned long pfn = 0;
	ion_phys_addr_t src_phys = 0;
	unsigned long dst_phys = 0;
	size_t src_len;
	unsigned long trans_size = 0;
	unsigned long shdma_trans_num_rows = 0;
	unsigned long dma_trans_num_rows = 0;
	unsigned long dma_trans_num_rows_rem = 0;
	unsigned addr_offset = 0;
	struct ion_handle *shdma_src_handle;
	struct shdma_dmov_exec_cmdptr_cmd cmd[3];
	struct shdma_command_t shdma_cmd[D_SHDMA_CHANNEL_MAX];
	unsigned int id[D_SHDMA_CHANNEL_MAX] = { DMOV_SHDMA_CH1, DMOV_SHDMA_CH2, DMOV_SHDMA_CH3 };
	unsigned long width_yuv = 0;
	unsigned long height_y = 0;
	unsigned long height_uv = 0;
	unsigned long ysize_align = 0;
	unsigned long uvsize_align = 0;
	int ion_ret = 0;


	/** <ol><li>Start of processing */
	SHDMA_DEBUG_MSG_ENTER(0, 0, 0);

	/** <li>Acquire the driver write semaphore */
	down( &write_sem );

	/** <li>Initialization */
	/** <ol><li>NULL-check the arguments */
	if( file == NULL || buffer == NULL || count <= 0 || f_pos == NULL ){
		printk("***ERROR: argument NULL    file = %p  buffer = %p  count = 0x%x  f_pos = %p\n", file, buffer, count, f_pos );
		up( &write_sem );
		return -EINVAL;
	}

	/** <li>Copy the parameters passed from the upper layer */
	if (copy_from_user(&tci, buffer, sizeof(tci))){
		printk("***ERROR: fault copy write data parameter.\n" );
		up( &write_sem );
		return -EFAULT;
	}

	/** <li>NULL-check the source and destination transfer addresses */
	if( tci[0].dst_handle == NULL || tci[0].src_handle == NULL ){
		printk("***ERROR: fault transfer address NULL   src = %p  dst = %p\n", tci[0].src_handle, tci[0].dst_handle );
		up( &write_sem );
		return -EINVAL;
	}

	/** <li>Check the transfer width and height */
	if(( tci[0].height < D_SHDMA_CHANNEL_MAX ) || ( tci[0].src_stride == 0  )){
		printk("***ERROR: argument ERROR   height = %d  width = %ld\n", tci[0].height, tci[0].src_stride );
		up( &write_sem );		
		return -EINVAL;
	}
	if(( tci[0].src_stride % D_SHDMA_ODD_CHECK ) != 0 ){	/* an odd width should never happen, so return an error */
		printk("***ERROR: argument ERROR width is odd number   width = %ld\n", tci[0].src_stride );
		up( &write_sem );
		return -EINVAL;
	}

	/** <li>Initialize local variables */
	memset( &cmd, 0, sizeof(struct shdma_dmov_exec_cmdptr_cmd) * 3 );
	for( i = 0; i < D_SHDMA_CHANNEL_MAX; i++ ){
		memset( &shdma_cmd[i], 0, sizeof(struct shdma_command_t));
	}
	/** </ol>*/

	/** <li>Obtain the physical addresses */
	/** <ol><li>Obtain the source physical address */
	shdma_src_handle = (struct ion_handle *)tci[0].src_handle;
	ion_ret = ion_phys( shdma_src_handle->client, shdma_src_handle, &src_phys, &src_len);
	if( src_phys == 0 || src_len < 1 || ion_ret < 0 ){
		printk("***ERROR: get src_phys fault.\n");
		up( &write_sem );
		return -EFAULT;
	}

	/** <li>Obtain the destination physical address */
	vma = find_vma( current->mm, (unsigned int )tci[0].dst_handle );
	if( vma == NULL ){
		printk("***ERROR: get vma fault.\n");
		up( &write_sem );
		return -ENOENT;
	}
	follow_pfn( vma, (unsigned int)tci[0].dst_handle, &pfn );
	dst_phys = __pfn_to_phys( pfn );
	/** </ol> */

	/** <li>Allocate the DMA transfer parameter buffers */
	for( i = 0; i < D_SHDMA_CHANNEL_MAX; i++ ){	/** <ol><li>Allocate one per DMA channel */
		/** <ol><li>Allocate the DMA transfer parameter area */
		shdma_cmd[i].cmd_ptr = kmalloc(sizeof(dmov_box), GFP_KERNEL | __GFP_DMA);
		if( shdma_cmd[i].cmd_ptr == NULL ){
			printk("***ERROR: fault allocate buffer cmd_ptr  num = 0x%x .\n" , i);
			if( i != 0 ){
				for( j = 0; j < (i - 1); j++ ){
					kfree(shdma_cmd[j].cmd_ptr);
				}
			}
			up( &write_sem );
			return -ENOSPC;
		}
	}
	for( i = 0; i < D_SHDMA_CHANNEL_MAX; i++ ){
		/** <li>Allocate the area for the DMA transfer parameter start address */
		shdma_cmd[i].cmd_ptr_ptr = kmalloc(sizeof(u32), GFP_KERNEL | __GFP_DMA);
		if( shdma_cmd[i].cmd_ptr_ptr == NULL ){
			printk("***ERROR: fault allocate buffer cmd_ptr_ptr  num = 0x%x .\n" , i);
			if( i != 0 ){
				for( j = 0; j < (i - 1); j++ ){
					kfree(shdma_cmd[j].cmd_ptr_ptr);
				}
			}
			for( j = 0; j < D_SHDMA_CHANNEL_MAX; j++ ){
				kfree(shdma_cmd[j].cmd_ptr);
			}
			up( &write_sem );
			return -ENOSPC;
		}
	}
	/** </ol></ol> */

	/** <li>Calculate the transfer size */
	/** <li>Alignment adjustment */
	if(( tci[0].src_stride % D_SHDMA_ALIGN_128 ) != 0 ){	/* align the Y and UV plane width */
		width_yuv = ((( tci[0].src_stride /
				D_SHDMA_ALIGN_128 ) +
				D_SHDMA_ALIGN_ADJUST ) *
				D_SHDMA_ALIGN_128 );		/* align to 128 bytes */
	} else {
		width_yuv = tci[0].src_stride;
	}

	if(( tci[0].height % D_SHDMA_ALIGN_32 ) != 0 ){		/* align the Y plane height */
		height_y = ((( tci[0].height /
				D_SHDMA_ALIGN_32 ) +
				D_SHDMA_ALIGN_ADJUST ) *
				D_SHDMA_ALIGN_32 );		/* align to 32 bytes */
	} else {
		height_y = tci[0].height;
	}

	if((( tci[0].height / D_SHDMA_ALIGN_HEIGHT_UV ) %
			D_SHDMA_ALIGN_32 ) != 0 ){		/* align the UV plane height */
		height_uv = (((( tci[0].height /
				D_SHDMA_ALIGN_HEIGHT_UV ) /
				D_SHDMA_ALIGN_32 ) +
				D_SHDMA_ALIGN_ADJUST ) *
				D_SHDMA_ALIGN_32 );		/* align to 32 bytes */
	} else {
		height_uv = tci[0].height / D_SHDMA_ALIGN_HEIGHT_UV;
	}

	if(( width_yuv * height_y ) % D_SHDMA_ALIGN_8192 ){	/* align the Y plane size */
		ysize_align = ((( width_yuv * height_y /
				D_SHDMA_ALIGN_8192 ) +
				D_SHDMA_ALIGN_ADJUST ) *
				D_SHDMA_ALIGN_8192 );		/* align to 8 Kbytes */
	} else {
		ysize_align = width_yuv * height_y;
	}

	if(( width_yuv * height_uv ) % D_SHDMA_ALIGN_8192 ){	/* align the UV plane size */
		uvsize_align = ((( width_yuv * height_uv /
				D_SHDMA_ALIGN_8192 ) +
				D_SHDMA_ALIGN_ADJUST ) *
				D_SHDMA_ALIGN_8192 );		/* align to 8 Kbytes */
	} else {
		uvsize_align = width_yuv * height_uv;
	}

	shdma_trans_num_rows = (( ysize_align + uvsize_align ) /
					D_SHDMA_ALIGN_8192 );		/** <li>The number of DMA box transfers is the YUV area size divided by 8K */
	trans_size = D_SHDMA_ALIGN_8192;				/** <li>One DMA box transfer is set to 8K */
	dma_trans_num_rows = shdma_trans_num_rows / D_SHDMA_CHANNEL_MAX;	/** <li>Compute the number of box transfers per DMA channel */
	dma_trans_num_rows_rem = shdma_trans_num_rows % D_SHDMA_CHANNEL_MAX;	/** <li>Compute the remainder of box transfers per DMA channel */
	if( trans_size > D_SHDMA_TRANS_MAX_SIZE ){	/** <li>A box larger than 65535 bytes cannot be transferred due to a hardware limit, so return an error */
		printk("***ERROR: Size over for DMA transfer.\n");
		for( i = 0; i < D_SHDMA_CHANNEL_MAX; i++ ){
			kfree(shdma_cmd[i].cmd_ptr);
			kfree(shdma_cmd[i].cmd_ptr_ptr);
		}
		up( &write_sem );
		return -EINVAL;
	}
	/** </ol> */

	/** <li>Set up the DMA transfer parameters */
	for( i = 0; i < D_SHDMA_CHANNEL_MAX; i++ ){	/** <ol><li>Set up each DMA channel */
		if( i == D_SHDMA_CHANNEL_MAX - 1){	/** <ol><li>The last DMA channel also transfers the remainder of the boxes */
			dma_trans_num_rows += dma_trans_num_rows_rem;
		}
		shdma_cmd[i].cmd_ptr->cmd = CMD_PTR_LP | CMD_MODE_BOX;	/** <li>Box-mode transfer */
		shdma_cmd[i].cmd_ptr->src_row_addr = (unsigned int)src_phys + addr_offset;	/** <li>Set the source address */
		shdma_cmd[i].cmd_ptr->dst_row_addr = (unsigned int)dst_phys + addr_offset;	/** <li>Set the destination address */
		shdma_cmd[i].cmd_ptr->src_dst_len =			/** <li>Set the size of one box transfer */
				(( trans_size & D_SHDMA_PARAM_MASK ) << D_SHDMA_SRC_PARAM_SHIFT ) |
				( trans_size & D_SHDMA_PARAM_MASK );
		shdma_cmd[i].cmd_ptr->num_rows =			/** <li>Set the number of boxes to transfer */
				(( dma_trans_num_rows & D_SHDMA_PARAM_MASK ) << D_SHDMA_SRC_PARAM_SHIFT ) |
				( dma_trans_num_rows & D_SHDMA_PARAM_MASK );
		shdma_cmd[i].cmd_ptr->row_offset =			/** <li>Set the transfer offset */
				(( trans_size & D_SHDMA_PARAM_MASK ) << D_SHDMA_SRC_PARAM_SHIFT ) |
				( trans_size & D_SHDMA_PARAM_MASK );
		/** <li>Advance the transfer address offset */
		addr_offset += trans_size * dma_trans_num_rows;
	}
	/** </ol></ol> */

	/** <li>Map the DMA transfer parameters */
	for( i = 0; i < D_SHDMA_CHANNEL_MAX; i++ ){	/** <ol><li>Map for each DMA channel */
		/** <ol><li>Obtain the physical address of the DMA transfer parameter area */
		shdma_cmd[i].map_cmd = dma_map_single( NULL, shdma_cmd[i].cmd_ptr,
					sizeof(*shdma_cmd[i].cmd_ptr), DMA_TO_DEVICE );
		if( shdma_cmd[i].map_cmd == 0 ){
			printk("***ERROR: fault cmd_ptr mapping.  num = 0x%x\n", i);
			if( i != 0 ){
				for( j = 0; j < (i - 1); j++ ){
					dma_unmap_single( NULL, shdma_cmd[j].map_cmd,
						sizeof(*shdma_cmd[j].cmd_ptr), DMA_TO_DEVICE );
				}
			}
			for( j = 0; j < D_SHDMA_CHANNEL_MAX; j++ ){
				kfree(shdma_cmd[j].cmd_ptr_ptr);
				kfree(shdma_cmd[j].cmd_ptr);
			}
			up( &write_sem );
			return -EFAULT;
		}
		/** <li>Store the physical address of the DMA transfer parameters in the parameter pointer area */
		*shdma_cmd[i].cmd_ptr_ptr = CMD_PTR_ADDR(shdma_cmd[i].map_cmd) | CMD_PTR_LP;
	}
	for( i = 0; i < D_SHDMA_CHANNEL_MAX; i++ ){
		/** <li>Obtain the physical address of the DMA transfer parameter pointer area */
		err = shdma_cmd[i].map_cmd_ptr = dma_map_single( NULL, shdma_cmd[i].cmd_ptr_ptr,
					sizeof(*shdma_cmd[i].cmd_ptr_ptr), DMA_TO_DEVICE );
		if( err == 0 ){
			printk("***ERROR: fault cmd_ptr_ptr mapping.  num = 0x%x\n", i);
			if( i != 0 ){
				for( j = 0; j < (i - 1); j++ ){
					dma_unmap_single( NULL, shdma_cmd[j].map_cmd_ptr,
						sizeof(*shdma_cmd[j].cmd_ptr_ptr), DMA_TO_DEVICE );
				}
			}
			for( j = 0; j < D_SHDMA_CHANNEL_MAX; j++ ){
				dma_unmap_single( NULL, shdma_cmd[j].map_cmd,
					sizeof(*shdma_cmd[j].cmd_ptr), DMA_TO_DEVICE );
				kfree(shdma_cmd[j].cmd_ptr_ptr);
				kfree(shdma_cmd[j].cmd_ptr);
			}
			up( &write_sem );
			return -EFAULT;
		}
	}
	/** </ol></ol> */

	/** <li>Fill the DMA transfer structures */
	for( i = 0; i < D_SHDMA_CHANNEL_MAX; i++ ){	/** <ol><li>Set up each DMA channel */
		cmd[i].dmov_cmd.cmdptr = DMOV_CMD_PTR_LIST | DMOV_CMD_ADDR(shdma_cmd[i].map_cmd_ptr);
		cmd[i].dmov_cmd.complete_func = shdma_complete_func;
		cmd[i].dmov_cmd.exec_func = NULL;
		cmd[i].id = id[i];
		cmd[i].result = 0;
	}
	/** </ol> */

	/** <li>Set the DMA completion count to the number of channels */
	atomic_set( &atomic_shdma, D_SHDMA_CHANNEL_MAX );

	/** <li>Call the DMA transfer start function */
	for( i = 0; i < D_SHDMA_CHANNEL_MAX; i++ ){
		msm_dmov_enqueue_cmd( cmd[i].id, &cmd[i].dmov_cmd );
	}

	/** <li>Wait until the DMA completion count reaches 0 or below */
	wait_event( wq, ( atomic_read( &atomic_shdma ) <= 0 ));

	/** <li>Check the DMA transfer results */
	for( i = 0; i < D_SHDMA_CHANNEL_MAX; i++){	/** <ol><li>Check each DMA channel */
		if( cmd[i].result != D_SHDMA_DMOV_RESULT_OK ){	/** <li>Log a message if the DMA transfer result is NG */
			result_chk = D_SHDMA_RET_NG;
			printk("***ERROR: dma id:%d result:0x%08x \n***flush: 0x%08x 0x%08x 0x%08x 0x%08x\n",
					id[i], cmd[i].result, cmd[i].err.flush[0],
					cmd[i].err.flush[1], cmd[i].err.flush[2], cmd[i].err.flush[3]);
		}
	}
	/** </ol>*/

	/** <li>Free the memory that was allocated */
	for( i = 0; i < D_SHDMA_CHANNEL_MAX; i++ ){
		dma_unmap_single( NULL, (dma_addr_t)shdma_cmd[i].map_cmd_ptr,
					sizeof(*shdma_cmd[i].cmd_ptr_ptr), DMA_TO_DEVICE );
		dma_unmap_single( NULL, shdma_cmd[i].map_cmd,
					sizeof(*shdma_cmd[i].cmd_ptr), DMA_TO_DEVICE );
		kfree(shdma_cmd[i].cmd_ptr_ptr);
		kfree(shdma_cmd[i].cmd_ptr);
	}

	/** <li>Return the DMA transfer result */
	if( result_chk == 0 ){
		ret = count;
	} else {
		ret = result_chk;
	}

	/** <li>Release the driver write semaphore */
	up( &write_sem );

	SHDMA_DEBUG_MSG_EXIT();
	/** <li>End of processing</ol>*/

	return ret;
}
Example #22
void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
	unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
	const struct mem_type *type;
	int err;
	unsigned long addr;
 	struct vm_struct * area;

	/*
	 * High mappings must be supersection aligned
	 */
	if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
		return NULL;

	/*
	 * Don't allow RAM to be mapped - this causes problems with ARMv6+
	 */
	if (pfn_valid(pfn)) {
		printk(KERN_WARNING "BUG: Your driver calls ioremap() on system memory.  This leads\n"
		       KERN_WARNING "to architecturally unpredictable behaviour on ARMv6+, and ioremap()\n"
		       KERN_WARNING "will fail in the next kernel release.  Please fix your driver.\n");
		WARN_ON(1);
	}

	type = get_mem_type(mtype);
	if (!type)
		return NULL;

	/*
	 * Page align the mapping size, taking account of any offset.
	 */
	size = PAGE_ALIGN(offset + size);

	area = get_vm_area_caller(size, VM_IOREMAP, caller);
 	if (!area)
 		return NULL;
 	addr = (unsigned long)area->addr;

#ifndef CONFIG_SMP
	if (DOMAIN_IO == 0 &&
	    (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
	       cpu_is_xsc3()) && pfn >= 0x100000 &&
	       !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_supersections(addr, pfn, size, type);
	} else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_sections(addr, pfn, size, type);
	} else
#endif
		err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn),
					 __pgprot(type->prot_pte));

	if (err) {
 		vunmap((void *)addr);
 		return NULL;
 	}

	flush_cache_vmap(addr, addr + size);
	return (void __iomem *) (offset + addr);
}
Example #23
static int cma_info_show(struct seq_file *s, void *unused)
{
	struct cma *cma = dev_get_cma_area(NULL);
	unsigned long start = 0, set = 0, end = 0, sum = 0;
	int nr_per_order[32];
	int i, total = 0, order, order_max = 0;
	struct page *pg;
	phys_addr_t fm = __pfn_to_phys(cma->base_pfn);
	phys_addr_t to = __pfn_to_phys(cma->base_pfn + cma->count - 1);

	seq_printf(s, "CMA Region: pfn(0x%lx:0x%lx) phy(%pa:%pa)\n",
		cma->base_pfn, cma->base_pfn + cma->count - 1, &fm, &to);

	seq_printf(s, "\n( Un-Set    )           [ Set       ]\n");
	while (1) {
		set = find_next_bit(cma->bitmap, cma->count, start);
		if (set >= cma->count)
			break;
		end = find_next_zero_bit(cma->bitmap, cma->count, set);

		if (set > 0)
			seq_printf(s, "(0x%5lx:0x%5lx) %5ld ",
				cma->base_pfn + start, cma->base_pfn + set - 1,
				set - start);
		else
			seq_printf(s, "%16.s", "");

		seq_printf(s, "\t[0x%5lx:0x%5lx] %5ld\n", cma->base_pfn + set,
			cma->base_pfn + end - 1, end - set);

		start = end;
		sum += (end - set);
	}

	if (start < cma->count)
		seq_printf(s, "(0x%5lx:0x%5lx) %5ld\n",
			cma->base_pfn + start, cma->base_pfn + cma->count - 1,
			cma->count - start);

	seq_printf(s, "Total: %16ld%24ld%12ld(pages)\n",
		cma->count - sum, sum, cma->count);

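	/*
	 * Count contiguous runs of pages that are both unallocated in the
	 * CMA bitmap and have no users, and bucket each run by its order.
	 */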
	for (i = 0; i < 32; i++)
		nr_per_order[i] = 0;
	pg = pfn_to_page(cma->base_pfn);
	start = -1;
	for (i = 0; i < cma->count; i++, pg++) {
		if (!test_bit(i, cma->bitmap) && !page_count(pg)) {
			if (start == -1)
				start = i;
			end = i;

			if (i < (cma->count - 1))
				continue;
		}
		if (start != -1) {
			total += (end - start + 1);
			order = fls(end - start + 1) - 1;

			nr_per_order[order]++;
			start = -1;
			if (order_max < order)
				order_max = order;
		}
	}

	seq_printf(s, "\nIdle pages per order, total: %d\nOrder:", total);
	for (i = 0; i <= order_max; i++)
		seq_printf(s, "%6d ", i);

	seq_printf(s, "\nCount:");
	for (i = 0; i <= order_max; i++)
		seq_printf(s, "%6d ", nr_per_order[i]);
	seq_printf(s, "\n");

	return 0;
}