Example 1
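Xen paravirtual MMU code that pins a page-table page: it marks the page PG_pinned and, for a lowmem page, uses lowmem_page_address() to obtain its kernel virtual address and queues a multicall that remaps it read-only (PAGE_KERNEL_RO). The return value tells the caller whether a kmap flush is still needed, which is the case only for a previously unpinned highmem page.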
static int pin_page(struct page *page, unsigned flags)
{
	unsigned pgfl = test_and_set_bit(PG_pinned, &page->flags);
	int flush;

	if (pgfl)
		flush = 0;		/* already pinned */
	else if (PageHighMem(page))
		/* kmaps need flushing if we found an unpinned
		   highpage */
		flush = 1;
	else {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		struct multicall_space mcs = __xen_mc_entry(0);

		flush = 0;

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL_RO),
					flags);
	}

	return flush;
}
Example 2
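An implementation of __flush_anon_page() that handles data-cache aliasing between an anonymous page's kernel mapping and its user-space address. Under CONFIG_RALINK_SOC an extra highmem branch checks aliasing against the address returned by lowmem_page_address() and flushes through kmap_coherent() or kmap_atomic() as appropriate; the generic branch flushes through kmap_coherent() or the page's direct mapping.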
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
#ifdef CONFIG_RALINK_SOC
	if (!PageHighMem(page)) {
		unsigned long addr = (unsigned long) page_address(page);

		if (pages_do_alias(addr, vmaddr & PAGE_MASK)) {
			if (page_mapped(page) && !Page_dcache_dirty(page)) {
				void *kaddr;

				kaddr = kmap_coherent(page, vmaddr);
				flush_data_cache_page((unsigned long)kaddr);
				kunmap_coherent();
			} else {
				flush_data_cache_page(addr);
				ClearPageDcacheDirty(page);
			}
		}
	} else {
		void *laddr = lowmem_page_address(page);

		if (pages_do_alias((unsigned long)laddr, vmaddr & PAGE_MASK)) {
			if (page_mapped(page) && !Page_dcache_dirty(page)) {
				void *kaddr;

				kaddr = kmap_coherent(page, vmaddr);
				flush_data_cache_page((unsigned long)kaddr);
				kunmap_coherent();
			} else {
				void *kaddr;

				kaddr = kmap_atomic(page, KM_PTE1);
				flush_data_cache_page((unsigned long)kaddr);
				kunmap_atomic(kaddr, KM_PTE1);
				ClearPageDcacheDirty(page);
			}
		}
	}
#else
	unsigned long addr = (unsigned long) page_address(page);

	if (pages_do_alias(addr, vmaddr)) {
		if (page_mapped(page) && !Page_dcache_dirty(page)) {
			void *kaddr;

			kaddr = kmap_coherent(page, vmaddr);
			flush_data_cache_page((unsigned long)kaddr);
			kunmap_coherent();
		} else
			flush_data_cache_page(addr);
	}
#endif
}
Example 3
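The counterpart to Example 1: unpin_page() clears PG_pinned and, if the page was pinned and is not in highmem, queues a multicall that restores the writable PAGE_KERNEL mapping at the address returned by lowmem_page_address(). Unpinning never requires a flush.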
static int unpin_page(struct page *page, unsigned flags)
{
	unsigned pgfl = test_and_clear_bit(PG_pinned, &page->flags);

	if (pgfl && !PageHighMem(page)) {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		struct multicall_space mcs = __xen_mc_entry(0);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL),
					flags);
	}

	return 0;		/* never need to flush on unpin */
}
Example 4
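A descriptor-translation helper, apparently from a Talitos (t23xwc) crypto driver: it rewrites the descriptor pair's logical buffer pointer into a physical address via virt_to_phys(lowmem_page_address(...)).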
RMstatus xwcMemTranslateLogical(uint32_t    entity,
                                void       *desc,
                                void       *auxmap)
{
    T2DPD   *pd = desc;

#ifdef OBSOLETE
    uint8_t *ud;
    ud = pd->pair[entity].ptr;
#endif

    pd->pair[entity].ptr =
        (void *)virt_to_phys(lowmem_page_address(pd->pair[entity].ptr));

    return RM_OK;
}
Example 5
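The user-virtual variant from the same driver: it pins the user buffer with get_user_pages(), then either stores a single physical address (when the buffer fits in one page) or builds a hardware scatterlist, computing each segment's physical address with virt_to_phys() on lowmem_page_address() of the pinned page plus the in-page offset.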
RMstatus xwcMemTranslateUserVirtual(uint32_t    entity,
                                    void       *desc,
                                    void       *auxmap)
{
    unsigned       i;
    linkEntry     *hwsgl;
    size_t         count;
    int            pageEst;
    T2DPD         *pd = desc;
    T2DESC_AUXMAP *pdmap = auxmap;

    /* Estimate the number of needed page pointers */
    pageEst = (((unsigned long)pd->pair[entity].ptr & ~PAGE_MASK) +
               pd->pair[entity].size + ~PAGE_MASK) >> PAGE_SHIFT;

    /* Allocate list of pointers to pages for this user buffer reference */
    pdmap->pair[entity].pages = vmalloc(pageEst * sizeof(pdmap->pair[entity].pages));
    if (pdmap->pair[entity].pages == NULL)
        return RM_NO_MEMORY;

    /* Lock this process' pages and map them. The descriptor pair pointer */
    /* still references the user's buffer at this point                   */
    down_read(&current->mm->mmap_sem);
    pdmap->pair[entity].pageCt =
        get_user_pages(current,
                       current->mm,
                       (unsigned long)pd->pair[entity].ptr,
                       pageEst,
                       WRITE, 1,
                       pdmap->pair[entity].pages,
                       NULL);
    up_read(&current->mm->mmap_sem);

    /* here for development, remove once stabilized */
    if (pageEst != pdmap->pair[entity].pageCt)
        printk("t23xwc: user page estimate = %d, actual = %d\n", pageEst, pdmap->pair[entity].pageCt);

    if (pdmap->pair[entity].pageCt > pageEst)
        panic("t23xwc - user pages mapped exceeds estimate\n");

    /* Needed user pages are now mapped. If data element fits in 1 page, then */
    /* we can just do a physical pointer, no scatterlist is needed. If it     */
    /* exceeds one page, we must have a scatterlist                           */

    if (pdmap->pair[entity].pageCt > 1) /* Does entry span pages? */
    {
        /* Allocate "hardware" scatterlist */
        hwsgl = kmalloc(pageEst * sizeof(linkEntry), GFP_KERNEL | GFP_DMA);
        if (hwsgl == NULL)
        {
            /* Out of kmalloc() space, gotta bail. Release mapped pages */
            for (i = 0; i < pdmap->pair[entity].pageCt; i++)
                page_cache_release(pdmap->pair[entity].pages[i]);

            /* Free allocated page list */
            vfree(pdmap->pair[entity].pages);

            return RM_NO_MEMORY;
        }

        count = pd->pair[entity].size;

        hwsgl[0].segAddr =
            (unsigned char *)virt_to_phys(lowmem_page_address(pdmap->pair[entity].pages[0]) +
                                          ((unsigned long)pd->pair[entity].ptr & ~PAGE_MASK));

        hwsgl[0].chainCtrl = 0;
        hwsgl[0].extAddr   = 0;

        if (pdmap->pair[entity].pageCt > 1)
        {
            hwsgl[0].segLen = PAGE_SIZE - ((unsigned long)pd->pair[entity].ptr & ~PAGE_MASK);
            count -= hwsgl[0].segLen;
            for (i = 1; i < pdmap->pair[entity].pageCt; i++)
            {
                hwsgl[i].segLen    = count < PAGE_SIZE ? count : PAGE_SIZE;
                hwsgl[i].segAddr   = (unsigned char *)
                                     virt_to_phys(lowmem_page_address(pdmap->pair[entity].pages[i]));
                hwsgl[i].extAddr   = 0;
                hwsgl[i].chainCtrl = 0;
                count -= PAGE_SIZE;
            }
        }
        else
            hwsgl[0].segLen = pd->pair[entity].size;

        /* mark the last entry in the Talitos scatterlist */
        hwsgl[pdmap->pair[entity].pageCt - 1].chainCtrl = LAST_ENTRY;

        /* Point the descriptor pair to the Talitos scatterlist */
        pd->pair[entity].ptr     = (unsigned char *)virt_to_phys(hwsgl);
        pd->pair[entity].extent |= JUMPTABLE;
    }
    else
        pd->pair[entity].ptr =
            (unsigned char *)virt_to_phys(lowmem_page_address(pdmap->pair[entity].pages[0]) +
                                          ((unsigned long)pd->pair[entity].ptr & ~PAGE_MASK));

    return RM_OK;
}