Example #1
/* Cleaning up the mapping when the module is unloaded is almost... too easy. */
static void unmap_switcher(void)
{
	unsigned int i;

	/* vunmap() undoes *both* map_vm_area() and __get_vm_area(). */
	vunmap(switcher_text_vma->addr);
	vunmap(switcher_stacks_vma->addr);
	/* Now we just need to free the pages we copied the switcher into */
	for (i = 0; i < TOTAL_SWITCHER_PAGES; i++)
		__free_pages(lg_switcher_pages[i], 0);
	kfree(lg_switcher_pages);
}
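The comment above alludes to the setup this undoes. The real lguest code builds the mapping with __get_vm_area() plus map_vm_area() so it can pick fixed addresses; the sketch below is a simplified, hypothetical equivalent built on vmap(), reusing lg_switcher_pages and TOTAL_SWITCHER_PAGES from the snippet and assuming everything else (it needs <linux/vmalloc.h>, <linux/slab.h> and <linux/gfp.h>).

/* Hypothetical, simplified counterpart to unmap_switcher(): allocate the
 * switcher pages and give them one contiguous kernel-virtual mapping.
 * vmap() stands in for the __get_vm_area()/map_vm_area() pair that the
 * real lguest code uses to control the mapping address. */
static int map_switcher_sketch(void)
{
	unsigned int i;
	void *addr;

	lg_switcher_pages = kcalloc(TOTAL_SWITCHER_PAGES,
				    sizeof(lg_switcher_pages[0]), GFP_KERNEL);
	if (!lg_switcher_pages)
		return -ENOMEM;

	for (i = 0; i < TOTAL_SWITCHER_PAGES; i++) {
		lg_switcher_pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!lg_switcher_pages[i])
			goto free_pages;
	}

	/* One virtually contiguous view over the whole page array; a real
	 * implementation would keep 'addr' around for the later vunmap(). */
	addr = vmap(lg_switcher_pages, TOTAL_SWITCHER_PAGES, VM_MAP, PAGE_KERNEL);
	if (!addr)
		goto free_pages;

	return 0;

free_pages:
	while (i-- > 0)
		__free_page(lg_switcher_pages[i]);
	kfree(lg_switcher_pages);
	return -ENOMEM;
}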
Example #2
static int __init init_vdso(void)
{
	struct mips_vdso *vdso;

	vdso_page = alloc_page(GFP_KERNEL);
	if (!vdso_page)
		panic("Cannot allocate vdso");

	vdso = vmap(&vdso_page, 1, 0, PAGE_KERNEL);
	if (!vdso)
		panic("Cannot map vdso");
	clear_page(vdso);

	install_trampoline(vdso->rt_signal_trampoline, __NR_rt_sigreturn);
#ifdef CONFIG_32BIT
	install_trampoline(vdso->signal_trampoline, __NR_sigreturn);
#else
	install_trampoline(vdso->n32_rt_signal_trampoline,
			   __NR_N32_rt_sigreturn);
	install_trampoline(vdso->o32_signal_trampoline, __NR_O32_sigreturn);
	install_trampoline(vdso->o32_rt_signal_trampoline,
			   __NR_O32_rt_sigreturn);
#endif

	vunmap(vdso);

	return 0;
}
Example #3
/**
 * OS specific free function.
 */
DECLHIDDEN(void) rtR0MemFree(PRTMEMHDR pHdr)
{
    IPRT_LINUX_SAVE_EFL_AC();

    pHdr->u32Magic += 1;
    if (pHdr->fFlags & RTMEMHDR_FLAG_KMALLOC)
        kfree(pHdr);
#ifdef RTMEMALLOC_EXEC_HEAP
    else if (pHdr->fFlags & RTMEMHDR_FLAG_EXEC_HEAP)
    {
        RTSpinlockAcquire(g_HeapExecSpinlock);
        RTHeapSimpleFree(g_HeapExec, pHdr);
        RTSpinlockRelease(g_HeapExecSpinlock);
    }
#endif
#ifdef RTMEMALLOC_EXEC_VM_AREA
    else if (pHdr->fFlags & RTMEMHDR_FLAG_EXEC_VM_AREA)
    {
        PRTMEMLNXHDREX pHdrEx    = RT_FROM_MEMBER(pHdr, RTMEMLNXHDREX, Hdr);
        size_t         iPage     = pHdrEx->pVmArea->nr_pages;
        struct page  **papPages  = pHdrEx->pVmArea->pages;
        void          *pvMapping = pHdrEx->pVmArea->addr;

        vunmap(pvMapping);

        while (iPage-- > 0)
            __free_page(papPages[iPage]);
        kfree(papPages);
    }
#endif
    else
        vfree(pHdr);

    IPRT_LINUX_RESTORE_EFL_AC();
}
Example #4
void __iounmap(volatile void __iomem *io_addr)
{
	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
	struct vm_struct *vm;

	read_lock(&vmlist_lock);
	for (vm = vmlist; vm; vm = vm->next) {
		if (vm->addr > addr)
			break;
		if (!(vm->flags & VM_IOREMAP))
			continue;
		/* If this is a static mapping we must leave it alone */
		if ((vm->flags & VM_ARM_STATIC_MAPPING) &&
		    (vm->addr <= addr) && (vm->addr + vm->size > addr)) {
			read_unlock(&vmlist_lock);
			return;
		}
#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
		/*
		 * If this is a section based mapping we need to handle it
		 * specially as the VM subsystem does not know how to handle
		 * such a beast.
		 */
		if ((vm->addr == addr) &&
		    (vm->flags & VM_ARM_SECTION_MAPPING)) {
			unmap_area_sections((unsigned long)vm->addr, vm->size);
			break;
		}
#endif
	}
	read_unlock(&vmlist_lock);

	vunmap(addr);
}
Example #5
/*
 * process xsdt table and load tables with sig, or all if nil.
 * (XXX: should be able to search for sig, oemid, oemtblid)
 */
static int
acpixsdtload(char *sig)
{
	int i, l, t, unmap, found;
	uintptr_t dhpa;
	uint8_t *sdt;
	char tsig[5];

	found = 0;
	for(i = 0; i < xsdt->len; i += xsdt->asize){
		if(xsdt->asize == 8)
			dhpa = l64get(xsdt->p+i);
		else
			dhpa = l32get(xsdt->p+i);
		if((sdt = sdtmap(dhpa, &l, 1)) == nil)
			continue;
		unmap = 1;
		memmove(tsig, sdt, 4);
		tsig[4] = 0;
		if(sig == nil || strcmp(sig, tsig) == 0){
			DBG("acpi: %s addr %#p\n", tsig, sdt);
			for(t = 0; t < nelem(ptables); t++)
				if(strcmp(tsig, ptables[t].sig) == 0){
					dumptable(tsig, sdt, l);
					unmap = ptables[t].f(sdt, l) == nil;
					found = 1;
					break;
				}
		}
		if(unmap)
			vunmap(sdt, l);
	}
	return found;
}
Example #6
int memory_poke_kernel_address(const void *addr, word value)
{
    struct page *page;
    void *page_addr;

    if ((word)addr & (sizeof(word) - 1)) {
        ERROR(-ERROR_POINT);
    }

    if (__module_address((unsigned long)addr) == NULL) {
        page = virt_to_page(addr);
    } else {
        page = vmalloc_to_page(addr);
    }
    if (!page) {
        ERROR(-ERROR_POINT);
    }

    page_addr = vmap(&page, 1, VM_MAP, PAGE_KERNEL);
    if (!page_addr) {
        ERROR(-ERROR_MEM);
    }
    *(word *)(page_addr + ((word)addr & (PAGE_SIZE - 1))) = value;
    vunmap(page_addr);
    return 0;
}
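The trick here is worth calling out: rather than writing through the original (possibly write-protected) address, the code looks up the backing struct page and creates a second, writable kernel mapping of that single page with vmap(), then writes through the alias at the same in-page offset. Below is a minimal self-contained sketch of the same idea, using plain errno returns instead of the ERROR() macro and is_vmalloc_addr() as a simplification of the __module_address() test; the function name is hypothetical.

/* Hypothetical sketch: write one long through a temporary writable alias
 * of the page backing 'addr'. Same technique as memory_poke_kernel_address()
 * above, but with standard errno returns. */
static int poke_via_alias(void *addr, unsigned long value)
{
	struct page *page;
	void *alias;

	if (!IS_ALIGNED((unsigned long)addr, sizeof(unsigned long)))
		return -EINVAL;

	/* Module text/data lives in the vmalloc area, core kernel in the
	 * direct map; pick the right page lookup accordingly (simplified
	 * compared to the __module_address() check above). */
	if (is_vmalloc_addr(addr))
		page = vmalloc_to_page(addr);
	else
		page = virt_to_page(addr);
	if (!page)
		return -EFAULT;

	/* Fresh writable mapping of just that one page. */
	alias = vmap(&page, 1, VM_MAP, PAGE_KERNEL);
	if (!alias)
		return -ENOMEM;

	*(unsigned long *)(alias + offset_in_page(addr)) = value;

	vunmap(alias);
	return 0;
}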
Example #7
void mlx4_en_unmap_buffer(struct mlx4_buf *buf)
{
	if (BITS_PER_LONG == 64 || buf->nbufs == 1)
		return;

	vunmap(buf->direct.buf);
}
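The map step this undoes is not part of this listing. It only does real work on 32-bit kernels, where a multi-chunk buffer has no single virtually contiguous view and one has to be stitched together with vmap(). A sketch of what that counterpart looks like; the page_list field is assumed from the mlx4 driver rather than taken from the snippet above:

/* Sketch of the mapping counterpart: on 32-bit, build one virtual
 * mapping over the per-page chunks of a fragmented mlx4_buf. */
int mlx4_en_map_buffer(struct mlx4_buf *buf)
{
	struct page **pages;
	int i;

	if (BITS_PER_LONG == 64 || buf->nbufs == 1)
		return 0;

	pages = kmalloc_array(buf->nbufs, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	for (i = 0; i < buf->nbufs; ++i)
		pages[i] = virt_to_page(buf->page_list[i].buf);

	buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL);
	kfree(pages);

	return buf->direct.buf ? 0 : -ENOMEM;
}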
Example #8
int videobuf_dma_free(struct videobuf_dmabuf *dma)
{
	int i;
	MAGIC_CHECK(dma->magic, MAGIC_DMABUF);
	BUG_ON(dma->sglen);

	if (dma->pages) {
		for (i = 0; i < dma->nr_pages; i++)
			put_page(dma->pages[i]);
		kfree(dma->pages);
		dma->pages = NULL;
	}

	if (dma->dma_addr) {
		for (i = 0; i < dma->nr_pages; i++) {
			void *addr;

			addr = page_address(dma->vaddr_pages[i]);
			dma_free_coherent(dma->dev, PAGE_SIZE, addr,
					  dma->dma_addr[i]);
		}
		kfree(dma->dma_addr);
		dma->dma_addr = NULL;
		kfree(dma->vaddr_pages);
		dma->vaddr_pages = NULL;
		vunmap(dma->vaddr);
		dma->vaddr = NULL;
	}

	if (dma->bus_addr)
		dma->bus_addr = 0;
	dma->direction = DMA_NONE;

	return 0;
}
Example #9
void __iounmap(volatile void __iomem *io_addr)
{
	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
#ifndef CONFIG_SMP
	struct vm_struct *vm;

	/*
	 * If this is a section based mapping we need to handle it
	 * specially as the VM subsystem does not know how to handle
	 * such a beast.
	 */
	read_lock(&vmlist_lock);
	for (vm = vmlist; vm; vm = vm->next) {
		if ((vm->flags & VM_IOREMAP) && (vm->addr == addr)) {
			if (vm->flags & VM_ARM_SECTION_MAPPING) {
				unmap_area_sections((unsigned long)vm->addr,
						    vm->size);
			}
			break;
		}
	}
	read_unlock(&vmlist_lock);
#endif

	vunmap(addr);
}
Example #10
int snd_free_sgbuf_pages(struct snd_dma_buffer *dmab)
{
	struct snd_sg_buf *sgbuf = dmab->private_data;
	struct snd_dma_buffer tmpb;
	int i;

	if (! sgbuf)
		return -EINVAL;

	tmpb.dev.type = SNDRV_DMA_TYPE_DEV;
	tmpb.dev.dev = sgbuf->dev;
	for (i = 0; i < sgbuf->pages; i++) {
		tmpb.area = sgbuf->table[i].buf;
		tmpb.addr = sgbuf->table[i].addr;
		tmpb.bytes = PAGE_SIZE;
		snd_dma_free_pages(&tmpb);
	}
	if (dmab->area)
		vunmap(dmab->area);
	dmab->area = NULL;

	kfree(sgbuf->table);
	kfree(sgbuf->page_table);
	kfree(sgbuf);
	dmab->private_data = NULL;
	
	return 0;
}
Example #11
static void vgem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	struct drm_vgem_gem_object *bo = to_vgem_bo(obj);

	vunmap(vaddr);
	vgem_unpin_pages(bo);
}
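For reference, the vmap side of this pairing pins the object's pages and maps them contiguously. A sketch, assuming vgem_pin_pages() returns the page array (or an ERR_PTR) as the counterpart of vgem_unpin_pages() above:

/* Sketch of the prime vmap counterpart: pin the object's backing pages
 * and build a contiguous kernel mapping over them. */
static void *vgem_prime_vmap(struct drm_gem_object *obj)
{
	struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
	long n_pages = obj->size >> PAGE_SHIFT;
	struct page **pages;

	pages = vgem_pin_pages(bo);
	if (IS_ERR(pages))
		return NULL;

	/* PAGE_KERNEL (cached) is assumed here; a real driver may want
	 * pgprot_writecombine() to match how userspace maps the buffer. */
	return vmap(pages, n_pages, 0, PAGE_KERNEL);
}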
Example #12
void __iounmap(volatile void __iomem *io_addr)
{
    void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
    struct vm_struct *vm;

    read_lock(&vmlist_lock);
    for (vm = vmlist; vm; vm = vm->next) {
        if (vm->addr > addr)
            break;
        if (!(vm->flags & VM_IOREMAP))
            continue;

        if ((vm->flags & VM_ARM_STATIC_MAPPING) &&
                (vm->addr <= addr) && (vm->addr + vm->size > addr)) {
            read_unlock(&vmlist_lock);
            return;
        }
#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
        if ((vm->addr == addr) &&
                (vm->flags & VM_ARM_SECTION_MAPPING)) {
            unmap_area_sections((unsigned long)vm->addr, vm->size);
            break;
        }
#endif
    }
    read_unlock(&vmlist_lock);

    vunmap(addr);
}
Example #13
static void *
__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
	const void *caller)
{
	struct vm_struct *area;
	unsigned long addr;

	/*
	 * DMA allocation can be mapped to user space, so lets
	 * set VM_USERMAP flags too.
	 */
	area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP,
				  caller);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;
	area->phys_addr = __pfn_to_phys(page_to_pfn(page));
#ifdef CONFIG_L4
	area->phys_addr = virt_to_phys((void *)(page_to_pfn(page) << PAGE_SHIFT));
#endif

	if (ioremap_page_range(addr, addr + size, area->phys_addr, prot)) {
		vunmap((void *)addr);
		return NULL;
	}

	l4x_map_pages(addr, page_to_pfn(page) << PAGE_SHIFT, size);

	return (void *)addr;
}
Example #14
void mipi_lli_remove_driver(struct mipi_lli *lli)
{
	free_irq(lli->irq_sig, lli);
	vunmap(lli->shdmem_addr);

	g_lli = NULL;
}
Example #15
void udl_gem_vunmap(struct udl_gem_object *obj)
{
	if (obj->vmapping)
		vunmap(obj->vmapping);

	udl_gem_put_pages(obj);
}
Example #16
static void lgx_free_buffer(struct inno_lgx *lgx)
{
	//int i;
	//struct page *page = NULL;
	struct inno_buffer *inno_buf = &lgx->inno_buffer;

	/*
	 * vunmap will do TLB flush for us.
	 */
	down(&inno_buf->sem);
#if 0                       //xingyu buffer issue
	vunmap(inno_buf->vaddr);
	inno_buf->vaddr = NULL;

	for (i = 0; i < inno_buf->page_num; i++) {
		page = inno_buf->pages[i];
		//ClearPageReserved(page);
		__free_page(page);
	}

	kfree(inno_buf->pages);
#else
	if (inno_buf->vaddr != NULL) {
#ifndef _buffer_global		// buffer alloc modify xingyu 0714
		kfree(inno_buf->vaddr);
#endif
		inno_buf->vaddr = NULL;
		inno_buf->start = NULL;
		inno_buf->bufsize = 0;
	}
#endif
	up(&inno_buf->sem);

	memset(inno_buf, 0, sizeof(struct inno_buffer));
}
Example #17
void spu_free_lscsa(struct spu_state *csa)
{
	unsigned char *p;
	int i;

	if (!csa->use_big_pages) {
		spu_free_lscsa_std(csa);
		return;
	}
	csa->use_big_pages = 0;

	if (csa->lscsa == NULL)
		goto free_pages;

	for (p = csa->lscsa->ls; p < csa->lscsa->ls + LS_SIZE; p += PAGE_SIZE)
		ClearPageReserved(vmalloc_to_page(p));

	vunmap(csa->lscsa);
	csa->lscsa = NULL;

 free_pages:

	for (i = 0; i < SPU_LSCSA_NUM_BIG_PAGES; i++)
		if (csa->lscsa_pages[i])
			__free_pages(csa->lscsa_pages[i], SPU_64K_PAGE_ORDER);
}
Example #18
void __iounmap(volatile void __iomem *io_addr)
{
	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
#ifndef CONFIG_SMP
	struct vm_struct **p, *tmp;

	/*
	 * If this is a section based mapping we need to handle it
	 * specially as the VM subsystem does not know how to handle
	 * such a beast. We need the lock here b/c we need to clear
	 * all the mappings before the area can be reclaimed
	 * by someone else.
	 */
	write_lock(&vmlist_lock);
	for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
		if ((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) {
			if (tmp->flags & VM_ARM_SECTION_MAPPING) {
				unmap_area_sections((unsigned long)tmp->addr,
						    tmp->size);
			}
			break;
		}
	}
	write_unlock(&vmlist_lock);
#endif

	vunmap(addr);
}
Example #19
int snd_free_sgbuf_pages(struct snd_dma_buffer *dmab)
{
	struct snd_sg_buf *sgbuf = dmab->private_data;
	struct snd_dma_buffer tmpb;
	int i;

	if (! sgbuf)
		return -EINVAL;

	if (dmab->area)
		vunmap(dmab->area);
	dmab->area = NULL;

	tmpb.dev.type = SNDRV_DMA_TYPE_DEV;
	tmpb.dev.dev = sgbuf->dev;
	for (i = 0; i < sgbuf->pages; i++) {
		if (!(sgbuf->table[i].addr & ~PAGE_MASK))
			continue; /* continuous pages */
		tmpb.area = sgbuf->table[i].buf;
		tmpb.addr = sgbuf->table[i].addr & PAGE_MASK;
		tmpb.bytes = (sgbuf->table[i].addr & ~PAGE_MASK) << PAGE_SHIFT;
		snd_dma_free_pages(&tmpb);
	}

	kfree(sgbuf->table);
	kfree(sgbuf->page_table);
	kfree(sgbuf);
	dmab->private_data = NULL;

	return 0;
}
Example #20
static ssize_t isert_read(struct file *filp, char __user *buf, size_t count,
			  loff_t *f_pos)
{
	struct isert_conn_dev *dev = filp->private_data;
	size_t to_read;

	if (dev->state == CS_DISCONNECTED)
		return -EPIPE;

	if (will_read_block(dev)) {
		int ret;
		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;
		ret = wait_event_freezable(dev->waitqueue,
			!will_read_block(dev));
		if (ret < 0)
			return ret;
	}

	to_read = min(count, dev->read_len);
	if (copy_to_user(buf, dev->read_buf, to_read))
		return -EFAULT;

	dev->read_len -= to_read;
	dev->read_buf += to_read;

	switch (dev->state) {
	case CS_REQ_BHS:
		if (dev->read_len == 0) {
			dev->read_len = dev->login_req->bufflen;
			dev->sg_virt = isert_vmap_sg(dev->pages,
						     dev->login_req->sg,
						     dev->login_req->sg_cnt);
			if (!dev->sg_virt)
				return -ENOMEM;
			dev->read_buf = dev->sg_virt + ISER_HDRS_SZ;
			dev->state = CS_REQ_DATA;
		}
		break;

	case CS_REQ_DATA:
		if (dev->read_len == 0) {
			vunmap(dev->sg_virt);
			dev->sg_virt = NULL;

			spin_lock(&dev->pdu_lock);
			dev->login_req = NULL;
			dev->state = CS_REQ_FINISHED;
			spin_unlock(&dev->pdu_lock);
		}
		break;

	default:
		PRINT_ERROR("Invalid state in %s (%d)\n", __func__,
			    dev->state);
		to_read = 0;
	}

	return to_read;
}
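The isert_vmap_sg() helper used above is not shown in this listing; conceptually it collects the struct page behind each scatterlist entry into the preallocated dev->pages array and vmap()s them so the login payload can be read as one flat buffer. A hedged sketch of that helper:

/* Sketch of what isert_vmap_sg() is expected to do: gather the pages
 * referenced by the scatterlist and map them contiguously. Assumes the
 * 'pages' array is large enough for sg_cnt entries. */
static void *isert_vmap_sg(struct page **pages, struct scatterlist *sg,
			   int sg_cnt)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, sg_cnt, i)
		pages[i] = sg_page(s);

	return vmap(pages, sg_cnt, 0, PAGE_KERNEL);
}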
Example #21
void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
{
	unsigned long last_addr, addr;
	unsigned long offset = phys_addr & ~PAGE_MASK;
	struct vm_struct *area;

	pgprot_t prot = __pgprot(_PAGE_PRESENT|_PAGE_READ|_PAGE_WRITE
					|(__HEXAGON_C_DEV << 6));

	last_addr = phys_addr + size - 1;

	/* Reject zero size and address wraparound. */
	if (!size || (last_addr < phys_addr))
		return NULL;

	/* Page-align the mapping size, including the in-page offset. */
	size = PAGE_ALIGN(offset + size);

	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;

	if (ioremap_page_range(addr, addr+size, phys_addr, prot)) {
		vunmap((void *)addr);
		return NULL;
	}

	return (void __iomem *) (offset + addr);
}
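The matching teardown for a mapping created this way only has to mask the in-page offset back off and pass the page-aligned address to vunmap(); a hypothetical sketch:

/* Sketch of the matching iounmap: drop the offset that ioremap_nocache()
 * added and release the page-table mapping. */
void __iounmap(const volatile void __iomem *addr)
{
	vunmap((void *)((unsigned long)addr & PAGE_MASK));
}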
Example #22
void __iomem * __ioremap(phys_t phys_addr, phys_t size, unsigned long flags)
{
	struct vm_struct * area;
	unsigned long offset;
	phys_t last_addr;
	void * addr;

	phys_addr = fixup_bigphys_addr(phys_addr, size);

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;
#if !defined(CONFIG_BRCM_UPPER_768MB)
	/*
	 * Map uncached objects in the low 512mb of address space using KSEG1,
	 * otherwise map using page tables.
	 */
	if (IS_LOW512(phys_addr) && IS_LOW512(last_addr) &&
	    flags == _CACHE_UNCACHED)
		return (void __iomem *) CKSEG1ADDR(phys_addr);
#endif

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (phys_addr < virt_to_phys(high_memory)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);

		for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
			if(!PageReserved(page))
				return NULL;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	addr = area->addr;
	if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) {
		vunmap(addr);
		return NULL;
	}

	return (void __iomem *) (offset + (char *)addr);
}
Example #23
void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
	unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
	const struct mem_type *type;
	int err;
	unsigned long addr;
	struct vm_struct *area;

	/*
	 * High mappings must be supersection aligned
	 */
	if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
		return NULL;

	/*
	 * Don't allow RAM to be mapped - this causes problems with ARMv6+
	 */
#ifndef CONFIG_SQUASHFS_DEBUGGER_AUTO_DIAGNOSE
	if (WARN_ON(pfn_valid(pfn)))
		return NULL;
#endif

	type = get_mem_type(mtype);
	if (!type)
		return NULL;

	/*
	 * Page align the mapping size, taking account of any offset.
	 */
	size = PAGE_ALIGN(offset + size);

	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;

#ifndef CONFIG_SMP
	if (DOMAIN_IO == 0 &&
	    (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
	       cpu_is_xsc3()) && pfn >= 0x100000 &&
	       !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_supersections(addr, pfn, size, type);
	} else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_sections(addr, pfn, size, type);
	} else
#endif
		err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn),
					 __pgprot(type->prot_pte));

	if (err) {
		vunmap((void *)addr);
		return NULL;
	}

	flush_cache_vmap(addr, addr + size);
	return (void __iomem *) (offset + addr);
}
Example #24
static void kvm_del_vq(struct virtqueue *vq)
{
	struct kvm_vqinfo *vqi = vq->priv;

	vring_del_virtqueue(vq);
	vunmap(vqi->pages);
	kfree(vqi);
}
Example #25
void mlx4_en_unmap_buffer(struct mlx4_buf *buf)
{
	if (buf->direct.buf != NULL || buf->nbufs == 1)
		return;

	vunmap(buf->direct.buf);
	buf->direct.buf = NULL;
}
Example #26
int rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
    const size_t cPages = cb >> PAGE_SHIFT;
    struct page *pDummyPage;
    struct page **papPages;

    /* check for unsupported stuff. */
    AssertMsgReturn(pvFixed == (void *)-1, ("%p\n", pvFixed), VERR_NOT_SUPPORTED);
    if (uAlignment > PAGE_SIZE)
        return VERR_NOT_SUPPORTED;

    /*
     * Allocate a dummy page and create a page pointer array for vmap such that
     * the dummy page is mapped all over the reserved area.
     */
    pDummyPage = alloc_page(GFP_HIGHUSER);
    if (!pDummyPage)
        return VERR_NO_MEMORY;
    papPages = RTMemAlloc(sizeof(*papPages) * cPages);
    if (papPages)
    {
        void *pv;
        size_t iPage = cPages;
        while (iPage-- > 0)
            papPages[iPage] = pDummyPage;
# ifdef VM_MAP
        pv = vmap(papPages, cPages, VM_MAP, PAGE_KERNEL_RO);
# else
        pv = vmap(papPages, cPages, VM_ALLOC, PAGE_KERNEL_RO);
# endif
        RTMemFree(papPages);
        if (pv)
        {
            PRTR0MEMOBJLNX pMemLnx = (PRTR0MEMOBJLNX)rtR0MemObjNew(sizeof(*pMemLnx), RTR0MEMOBJTYPE_RES_VIRT, pv, cb);
            if (pMemLnx)
            {
                pMemLnx->Core.u.ResVirt.R0Process = NIL_RTR0PROCESS;
                pMemLnx->cPages = 1;
                pMemLnx->apPages[0] = pDummyPage;
                *ppMem = &pMemLnx->Core;
                return VINF_SUCCESS;
            }
            vunmap(pv);
        }
    }
    __free_page(pDummyPage);
    return VERR_NO_MEMORY;

#else   /* < 2.4.22 */
    /*
     * Could probably use ioremap here, but the caller is in a better position than us
     * to select some safe physical memory.
     */
    return VERR_NOT_SUPPORTED;
#endif
}
Example #27
void arch_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	if (!__free_from_pool(vaddr, PAGE_ALIGN(size))) {
		void *kaddr = phys_to_virt(dma_to_phys(dev, dma_handle));

		vunmap(vaddr);
		dma_direct_free_pages(dev, size, kaddr, dma_handle, attrs);
	}
}
Example #28
void msm_gem_vunmap(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
		return;

	vunmap(msm_obj->vaddr);
	msm_obj->vaddr = NULL;
}
Example #29
static void*
axpdealloc(Ctlr* ctlr)
{
	int i;

	for(i = 0; i < 16; i++){
		if(ctlr->cc[i].name != nil)
			free(ctlr->cc[i].name);
	}
	if(ctlr->reg != nil)
		vunmap(ctlr->reg, ctlr->pcidev->mem[0].size);
	if(ctlr->mem != nil)
		vunmap(ctlr->mem, ctlr->pcidev->mem[2].size);
	if(ctlr->name != nil)
		free(ctlr->name);
	free(ctlr);

	return nil;
}
Example #30
void udl_gem_vunmap(struct udl_gem_object *obj)
{
	if (obj->base.import_attach) {
		dma_buf_vunmap(obj->base.import_attach->dmabuf, obj->vmapping);
		return;
	}

	vunmap(obj->vmapping);

	udl_gem_put_pages(obj);
}
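The vmap side mirrors this split: imported dma-bufs are mapped through the exporter via dma_buf_vmap(), while locally backed objects get their pages and are mapped with vmap(). A sketch, with udl_gem_get_pages() and obj->pages assumed as the counterparts of udl_gem_put_pages() above:

/* Sketch of the mapping counterpart: dma-buf imports go through the
 * exporter's vmap, everything else through a plain vmap() of the
 * object's pages. */
int udl_gem_vmap(struct udl_gem_object *obj)
{
	int page_count = obj->base.size / PAGE_SIZE;
	int ret;

	if (obj->base.import_attach) {
		obj->vmapping = dma_buf_vmap(obj->base.import_attach->dmabuf);
		if (!obj->vmapping)
			return -ENOMEM;
		return 0;
	}

	ret = udl_gem_get_pages(obj);
	if (ret)
		return ret;

	obj->vmapping = vmap(obj->pages, page_count, 0, PAGE_KERNEL);
	if (!obj->vmapping)
		return -ENOMEM;
	return 0;
}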