Example no. 1
/*
 * Prints chunks (marking them with letters) for @nr_pages starting
 * at @start (virtual).
 */
USED static void print_chunks(void *start, int nr_pages)
{
    char chunks[1001], current='A';
    int order, count;
    chunk_head_t *head;
    unsigned long pfn_start = virt_to_pfn(start);
   
    memset(chunks, (int)'_', 1000);
    if(nr_pages > 1000)
    {
        DEBUG("Can only print 1000 pages. Increase buffer size.");
        nr_pages = 1000;  /* clamp so chunks[nr_pages] below stays in bounds */
    }
    
    for(order=0; order < FREELIST_SIZE; order++)
    {
        head = free_head[order];
        while(!FREELIST_EMPTY(head))
        {
            for(count = 0; count < 1UL << head->level; count++)
            {
                if(count + virt_to_pfn(head) - pfn_start < 1000)
                    chunks[count + virt_to_pfn(head) - pfn_start] = current;
            }
            head = head->next;
            current++;
        }
    }
    chunks[nr_pages] = '\0';
    printk("%s\n", chunks);
}
Example no. 2
int pfn_is_nosave(unsigned long pfn)
{
	unsigned long nosave_begin_pfn = virt_to_pfn(&__nosave_begin);
	unsigned long nosave_end_pfn = virt_to_pfn(&__nosave_end - 1);

	return (pfn >= nosave_begin_pfn) && (pfn <= nosave_end_pfn);
}
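Every example on this page leans on the same small helper. As a point of reference, on several Linux architectures (e.g. arm, m68k) virt_to_pfn() is a one-line macro over the physical address; this is a minimal sketch, and the exact definition varies per architecture and per project:

/* Minimal sketch of the helper every example here uses; definitions
 * vary across architectures and projects. */
#define virt_to_pfn(kaddr)  (__pa(kaddr) >> PAGE_SHIFT)
#define pfn_to_virt(pfn)    __va((pfn) << PAGE_SHIFT)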
Example no. 3
/*
 * Build the initial pagetable.
 */
static void build_pagetable(unsigned long *start_pfn, unsigned long *max_pfn)
{
    unsigned long start_address, end_address;
    unsigned long pfn_to_map, pt_pfn = *start_pfn;
    static mmu_update_t mmu_updates[L1_PAGETABLE_ENTRIES + 1];
    pgentry_t *tab = (pgentry_t *)start_info.pt_base, page;
    unsigned long pt_mfn = pfn_to_mfn(virt_to_pfn(start_info.pt_base));
    unsigned long offset;
    int count = 0;
    int rc;

    /* Be conservative: even if we know there will be more pages already
       mapped, start the loop at the very beginning. */
    pfn_to_map = *start_pfn;

    if ( *max_pfn >= virt_to_pfn(HYPERVISOR_VIRT_START) )
    {
        minios_printk("WARNING: Mini-OS trying to use Xen virtual space. "
               "Truncating memory from %dMB to ",
               ((unsigned long)pfn_to_virt(*max_pfn) -
                (unsigned long)&_text)>>20);
        *max_pfn = virt_to_pfn(HYPERVISOR_VIRT_START - PAGE_SIZE);
        minios_printk("%dMB\n",
               ((unsigned long)pfn_to_virt(*max_pfn) - 
                (unsigned long)&_text)>>20);
    }
Example no. 4
/*
 * Build the initial pagetable.
 */
static void build_pagetable(unsigned long *start_pfn, unsigned long *max_pfn)
{
    unsigned long start_address, end_address;
    unsigned long pfn_to_map, pt_pfn = *start_pfn;
    static mmu_update_t mmu_updates[L1_PAGETABLE_ENTRIES + 1];
    pgentry_t *tab = (pgentry_t *)start_info.pt_base, page;
    unsigned long pt_mfn = pfn_to_mfn(virt_to_pfn(start_info.pt_base));
    unsigned long offset;
    int count = 0;
    int rc;

    pfn_to_map = 
        (start_info.nr_pt_frames - NOT_L1_FRAMES) * L1_PAGETABLE_ENTRIES;

    if ( *max_pfn >= virt_to_pfn(HYPERVISOR_VIRT_START) )
    {
        printk("WARNING: Mini-OS trying to use Xen virtual space. "
               "Truncating memory from %dMB to ",
               ((unsigned long)pfn_to_virt(*max_pfn) -
                (unsigned long)&_text)>>20);
        *max_pfn = virt_to_pfn(HYPERVISOR_VIRT_START - PAGE_SIZE);
        printk("%dMB\n",
               ((unsigned long)pfn_to_virt(*max_pfn) - 
                (unsigned long)&_text)>>20);
    }
Example no. 5
void free_pages(void *pointer, int order)
{
    chunk_head_t *freed_ch, *to_merge_ch;
    chunk_tail_t *freed_ct;
    unsigned long mask;
    
    /* First free the chunk */
    map_free(virt_to_pfn(pointer), 1UL << order);
    
    /* Create free chunk */
    freed_ch = (chunk_head_t *)pointer;
    freed_ct = (chunk_tail_t *)((char *)pointer + (1UL<<(order + PAGE_SHIFT)))-1;
    
    /* Now, possibly we can coalesce chunks together */
    while(order < FREELIST_SIZE)
    {
        mask = 1UL << (order + PAGE_SHIFT);
        if((unsigned long)freed_ch & mask) 
        {
            to_merge_ch = (chunk_head_t *)((char *)freed_ch - mask);
            if(allocated_in_map(virt_to_pfn(to_merge_ch)) ||
                    to_merge_ch->level != order)
                break;
            
            /* Merge with predecessor */
            freed_ch = to_merge_ch;   
        }
        else 
        {
            to_merge_ch = (chunk_head_t *)((char *)freed_ch + mask);
            if(allocated_in_map(virt_to_pfn(to_merge_ch)) ||
                    to_merge_ch->level != order)
                break;
            
            /* Merge with successor */
            freed_ct = (chunk_tail_t *)((char *)to_merge_ch + mask) - 1;
        }
        
        /* We are committed to merging; unlink the chunk */
        *(to_merge_ch->pprev) = to_merge_ch->next;
        to_merge_ch->next->pprev = to_merge_ch->pprev;
        
        order++;
    }

    /* Link the new chunk */
    freed_ch->level = order;
    freed_ch->next  = free_head[order];
    freed_ch->pprev = &free_head[order];
    freed_ct->level = order;
    
    freed_ch->next->pprev = &freed_ch->next;
    free_head[order] = freed_ch;   
   
}
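For context, here is a minimal usage sketch of the allocator pair above, assuming the Mini-OS-style API in which alloc_pages(order) returns the virtual address of 2^order contiguous pages (or 0 on failure):

/* Usage sketch under the assumed Mini-OS-style API: take 2^2 = 4
 * contiguous pages, then return them so they can coalesce again. */
void *p = (void *)alloc_pages(2);
if (p)
    free_pages(p, 2);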
Example no. 6
void __init kasan_init(void)
{
	u64 kimg_shadow_start, kimg_shadow_end;
	u64 mod_shadow_start, mod_shadow_end;
	struct memblock_region *reg;
	int i;

	kimg_shadow_start = (u64)kasan_mem_to_shadow(_text) & PAGE_MASK;
	kimg_shadow_end = PAGE_ALIGN((u64)kasan_mem_to_shadow(_end));

	mod_shadow_start = (u64)kasan_mem_to_shadow((void *)MODULES_VADDR);
	mod_shadow_end = (u64)kasan_mem_to_shadow((void *)MODULES_END);

	/*
	 * We are going to perform proper setup of shadow memory.
	 * First we must unmap the early shadow (the clear_pgds() call
	 * below). However, instrumented code cannot execute without
	 * shadow memory, so tmp_pg_dir is used to keep the early shadow
	 * mapped until the full shadow setup is finished.
	 */
	memcpy(tmp_pg_dir, swapper_pg_dir, sizeof(tmp_pg_dir));
	dsb(ishst);
	cpu_replace_ttbr1(lm_alias(tmp_pg_dir));

	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);

	kasan_map_populate(kimg_shadow_start, kimg_shadow_end,
			   early_pfn_to_nid(virt_to_pfn(lm_alias(_text))));

	kasan_populate_early_shadow((void *)KASAN_SHADOW_START,
				    (void *)mod_shadow_start);
	kasan_populate_early_shadow((void *)kimg_shadow_end,
				    kasan_mem_to_shadow((void *)PAGE_OFFSET));

	if (kimg_shadow_start > mod_shadow_end)
		kasan_populate_early_shadow((void *)mod_shadow_end,
					    (void *)kimg_shadow_start);

	for_each_memblock(memory, reg) {
		void *start = (void *)__phys_to_virt(reg->base);
		void *end = (void *)__phys_to_virt(reg->base + reg->size);

		if (start >= end)
			break;

		kasan_map_populate((unsigned long)kasan_mem_to_shadow(start),
				   (unsigned long)kasan_mem_to_shadow(end),
				   early_pfn_to_nid(virt_to_pfn(start)));
	}
Example no. 7
static struct virtqueue *vm_setup_vq(struct virtio_device *vdev,
				     unsigned index,
				     void (*callback)(struct virtqueue *vq),
				     const char *name)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	struct vring_virtqueue *vq;
	void *queue;
	unsigned num = VIRTIO_MMIO_QUEUE_NUM_MIN;

	vq = calloc(1, sizeof(*vq));
	queue = memalign(PAGE_SIZE, VIRTIO_MMIO_QUEUE_SIZE_MIN);
	if (!vq || !queue) {
		free(vq);	/* free(NULL) is a no-op */
		free(queue);
		return NULL;
	}

	writel(index, vm_dev->base + VIRTIO_MMIO_QUEUE_SEL);

	assert(readl(vm_dev->base + VIRTIO_MMIO_QUEUE_NUM_MAX) >= num);

	if (readl(vm_dev->base + VIRTIO_MMIO_QUEUE_PFN) != 0) {
		printf("%s: virtqueue %d already setup! base=%p\n",
				__func__, index, vm_dev->base);
		free(vq);
		free(queue);
		return NULL;
	}

	writel(num, vm_dev->base + VIRTIO_MMIO_QUEUE_NUM);
	writel(VIRTIO_MMIO_VRING_ALIGN,
			vm_dev->base + VIRTIO_MMIO_QUEUE_ALIGN);
	writel(virt_to_pfn(queue), vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);

	vring_init_virtqueue(vq, index, num, VIRTIO_MMIO_VRING_ALIGN,
			     vdev, queue, vm_notify, callback, name);

	return &vq->vq;
}
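Note the legacy virtio-mmio transport above publishes the ring as a page frame number. A one-line sketch of how the guest-physical base is recovered from that value (assuming the guest page size in use is PAGE_SIZE):

/* Sketch: the guest-physical ring address implied by the PFN written
 * to VIRTIO_MMIO_QUEUE_PFN, assuming a PAGE_SIZE guest page. */
unsigned long ring_gpa = (unsigned long)virt_to_pfn(queue) << PAGE_SHIFT;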
Example no. 8
/*
 * load_gdt for early boot, when the gdt is only mapped once
 */
static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
{
	unsigned long va = dtr->address;
	unsigned int size = dtr->size + 1;
	unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
	unsigned long frames[pages];
	int f;

	/*
	 * A GDT can be up to 64k in size, which corresponds to 8192
	 * 8-byte entries, or 16 4k pages.
	 */

	BUG_ON(size > 65536);
	BUG_ON(va & ~PAGE_MASK);

	for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
		pte_t pte;
		unsigned long pfn, mfn;

		pfn = virt_to_pfn(va);
		mfn = pfn_to_mfn(pfn);

		pte = pfn_pte(pfn, PAGE_KERNEL_RO);

		if (HYPERVISOR_update_va_mapping((unsigned long)va, pte, 0))
			BUG();

		frames[f] = mfn;
	}

	if (HYPERVISOR_set_gdt(frames, size / sizeof(struct desc_struct)))
		BUG();
}
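The pfn_to_mfn() call above converts a guest pseudo-physical frame into the machine frame the hypervisor expects. As a hedged sketch, classic PV guests keep this as a table lookup (Mini-OS style; Linux's version adds identity and foreign-frame special cases):

/* Hedged sketch of the classic PV pfn -> mfn translation;
 * phys_to_machine_mapping is the guest's p2m table. */
#define pfn_to_mfn(_pfn) (phys_to_machine_mapping[(_pfn)])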
Example no. 9
/* now sets up tables using sun3 PTEs rather than i386 as before. --m */
void __init paging_init(void)
{
	pgd_t * pg_dir;
	pte_t * pg_table;
	int i;
	unsigned long address;
	unsigned long next_pgtable;
	unsigned long bootmem_end;
	unsigned long zones_size[MAX_NR_ZONES] = { 0, };
	unsigned long size;


#ifdef TEST_VERIFY_AREA
	wp_works_ok = 0;
#endif
	empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
	memset(empty_zero_page, 0, PAGE_SIZE);

	address = PAGE_OFFSET;
	pg_dir = swapper_pg_dir;
	memset (swapper_pg_dir, 0, sizeof (swapper_pg_dir));
	memset (kernel_pg_dir,  0, sizeof (kernel_pg_dir));

	size = num_pages * sizeof(pte_t);
	size = (size + PAGE_SIZE) & ~(PAGE_SIZE-1);

	next_pgtable = (unsigned long)alloc_bootmem_pages(size);
	bootmem_end = (next_pgtable + size + PAGE_SIZE) & PAGE_MASK;

	/* Map whole memory from PAGE_OFFSET (0x0E000000) */
	pg_dir += PAGE_OFFSET >> PGDIR_SHIFT;

	while (address < (unsigned long)high_memory) {
		pg_table = (pte_t *) __pa (next_pgtable);
		next_pgtable += PTRS_PER_PTE * sizeof (pte_t);
		pgd_val(*pg_dir) = (unsigned long) pg_table;
		pg_dir++;

		/* now change pg_table to kernel virtual addresses */
		pg_table = (pte_t *) __va ((unsigned long) pg_table);
		for (i=0; i<PTRS_PER_PTE; ++i, ++pg_table) {
			pte_t pte = pfn_pte(virt_to_pfn(address), PAGE_INIT);
			if (address >= (unsigned long)high_memory)
				pte_val (pte) = 0;
			set_pte (pg_table, pte);
			address += PAGE_SIZE;
		}
	}

	mmu_emu_init(bootmem_end);

	current->mm = NULL;

	/* memory sizing is a hack stolen from motorola.c; hope it works for us */
	zones_size[ZONE_DMA] = ((unsigned long)high_memory - PAGE_OFFSET) >> PAGE_SHIFT;

	free_area_init(zones_size);

}
Example no. 10
/*
 * Prints allocation[0/1] for @nr_pages, starting at @start
 * address (virtual).
 */
USED static void print_allocation(void *start, int nr_pages)
{
    unsigned long pfn_start = virt_to_pfn(start);
    int count;
    for(count = 0; count < nr_pages; count++)
    {
        if(allocated_in_map(pfn_start + count))
            printk("1");
        else
            printk("0");
    }

    printk("\n");
}
Example no. 11
static int xsd_kva_mmap(struct file *file, struct vm_area_struct *vma)
{
	size_t size = vma->vm_end - vma->vm_start;

	if ((size > PAGE_SIZE) || (vma->vm_pgoff != 0))
		return -EINVAL;

	if (remap_pfn_range(vma, vma->vm_start,
			    virt_to_pfn(xen_store_interface),
			    size, vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}
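A hypothetical user-space counterpart to the handler above: mmap()ing the backing file maps the single xen_store_interface page into the caller. The device path here is an assumption, not taken from the source:

/* Hypothetical consumer; the xenfs path below is an assumption. */
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

static void *map_xenstore_page(void)
{
	int fd = open("/proc/xen/xsd_kva", O_RDWR);
	void *p = (fd < 0) ? MAP_FAILED :
		mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (fd >= 0)
		close(fd);	/* the mapping stays valid after close() */
	return (p == MAP_FAILED) ? NULL : p;
}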
Example no. 12
/*
 * ColdFire paging_init derived from sun3.
 */
void __init paging_init(void)
{
	pgd_t *pg_dir;
	pte_t *pg_table;
	unsigned long address, size;
	unsigned long next_pgtable, bootmem_end;
	unsigned long zones_size[MAX_NR_ZONES];
	enum zone_type zone;
	int i;

	empty_zero_page = (void *) alloc_bootmem_pages(PAGE_SIZE);
	memset((void *) empty_zero_page, 0, PAGE_SIZE);

	pg_dir = swapper_pg_dir;
	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

	size = num_pages * sizeof(pte_t);
	size = (size + PAGE_SIZE) & ~(PAGE_SIZE-1);
	next_pgtable = (unsigned long) alloc_bootmem_pages(size);

	bootmem_end = (next_pgtable + size + PAGE_SIZE) & PAGE_MASK;
	pg_dir += PAGE_OFFSET >> PGDIR_SHIFT;

	address = PAGE_OFFSET;
	while (address < (unsigned long)high_memory) {
		pg_table = (pte_t *) next_pgtable;
		next_pgtable += PTRS_PER_PTE * sizeof(pte_t);
		pgd_val(*pg_dir) = (unsigned long) pg_table;
		pg_dir++;

		/* now change pg_table to kernel virtual addresses */
		for (i = 0; i < PTRS_PER_PTE; ++i, ++pg_table) {
			pte_t pte = pfn_pte(virt_to_pfn(address), PAGE_INIT);
			if (address >= (unsigned long) high_memory)
				pte_val(pte) = 0;

			set_pte(pg_table, pte);
			address += PAGE_SIZE;
		}
	}

	current->mm = NULL;

	for (zone = 0; zone < MAX_NR_ZONES; zone++)
		zones_size[zone] = 0x0;
	zones_size[ZONE_DMA] = num_pages;
	free_area_init(zones_size);
}
Example no. 13
void sanity_check(void)
{
    int x;
    chunk_head_t *head;

    for (x = 0; x < FREELIST_SIZE; x++) {
        for (head = free_head[x]; !FREELIST_EMPTY(head); head = head->next) {
            ASSERT(!allocated_in_map(virt_to_pfn(head)));
            if (head->next)
                ASSERT(head->next->pprev == &head->next);
        }
        if (free_head[x]) {
            ASSERT(free_head[x]->pprev == &free_head[x]);
        }
    }
}
Example no. 14
static void __init kasan_early_pte_populate(pmd_t *pmd, unsigned long addr,
					unsigned long end)
{
	pte_t *pte;
	unsigned long next;

	if (pmd_none(*pmd))
		pmd_populate_kernel(&init_mm, pmd, kasan_zero_pte);

	pte = pte_offset_kimg(pmd, addr);
	do {
		next = addr + PAGE_SIZE;
		set_pte(pte, pfn_pte(virt_to_pfn(kasan_zero_page),
					PAGE_KERNEL));
	} while (pte++, addr = next, addr != end && pte_none(*pte));
}
Example no. 15
static int xenbus_backend_mmap(struct file *file, struct vm_area_struct *vma)
{
	size_t size = vma->vm_end - vma->vm_start;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if ((size > PAGE_SIZE) || (vma->vm_pgoff != 0))
		return -EINVAL;

	if (remap_pfn_range(vma, vma->vm_start,
			    virt_to_pfn(xen_store_interface),
			    size, vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}
Example no. 16
void
init_gnttab(void)
{
    struct gnttab_setup_table setup;
    unsigned long frames[NR_GRANT_FRAMES];
    int i;

#ifdef GNT_DEBUG
    memset(inuse, 1, sizeof(inuse));
#endif
    for (i = NR_RESERVED_ENTRIES; i < NR_GRANT_ENTRIES; i++)
        put_free_entry(i);

    if (!xen_feature(XENFEAT_auto_translated_physmap)) {
        setup.dom = DOMID_SELF;
        setup.nr_frames = NR_GRANT_FRAMES;
        set_xen_guest_handle(setup.frame_list, frames);

        HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1);
        gnttab_table = map_frames(frames, NR_GRANT_FRAMES);
    }
    else {
        struct xen_add_to_physmap xatp;

        /* When the p2m is autotranslated, map_frames works differently:
         * gnttab_table is simply mapped by Mini-OS itself at vaddrs equal
         * to the paddrs, and the frames are then handed to Xen below for
         * p2m mapping. */
        gnttab_table = map_frames(NULL, NR_GRANT_FRAMES);

        for(i = NR_GRANT_FRAMES - 1; i >= 0; i--) {
            xatp.domid = DOMID_SELF;
            xatp.idx = i;
            xatp.space = XENMAPSPACE_grant_table;
            xatp.gpfn = 
                virt_to_pfn((unsigned long)gnttab_table + (i << PAGE_SHIFT));
            if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp) != 0) {
                printk("grant table add_to_physmap failed\n");
                break;
            }
        }
    }
    printk("gnttab_table mapped at %p.\n", gnttab_table);
}
Example no. 17
static unsigned long dvma_page(unsigned long kaddr, unsigned long vaddr)
{
	unsigned long pte;
	unsigned long j;
	pte_t ptep;

	j = *(volatile unsigned long *)kaddr;
	*(volatile unsigned long *)kaddr = j;

	ptep = pfn_pte(virt_to_pfn(kaddr), PAGE_KERNEL);
	pte = pte_val(ptep);
	if(ptelist[(vaddr & 0xff000) >> PAGE_SHIFT] != pte) {
		sun3_put_pte(vaddr, pte);
		ptelist[(vaddr & 0xff000) >> PAGE_SHIFT] = pte;
	}

	return (vaddr + (kaddr & ~PAGE_MASK));

}
Example no. 18
int gnttable_init(void)
{
	int				dom0_id = 0;
	unsigned long	vaddr;
	unsigned long	mfn;
	
	vaddr = alloc_pages(0);
	mfn   = pfn_to_mfn(virt_to_pfn(vaddr));

//	printf("[gnttab_test_dom1] set grant table entry %d\n", GNTTAB_REF_NUM);
//	printf("[gnttab_test_dom1] vaddr = 0x%lx, mfn = 0x%lx\n", vaddr, mfn);
	gnttab_grant_foreign_access_ref(GNTTAB_REF_NUM, dom0_id, mfn, 1);

	shared_ring = (shared_ring_t *) vaddr;
	
	shared_ring->start = 0;
	shared_ring->end = 0;
	
	return 0;
}
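The snippet uses a shared_ring_t defined elsewhere in that project. A hypothetical layout, inferred only from the start/end fields the example touches:

/* Hypothetical definition, inferred from the fields used above; the
 * real one lives elsewhere in that project. */
typedef struct shared_ring {
	unsigned int start;	/* consumer index */
	unsigned int end;	/* producer index */
	char buf[];		/* payload filling the rest of the page */
} shared_ring_t;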
Example no. 19
File: dvma.c Project: 274914765/C
inline unsigned long dvma_page(unsigned long kaddr, unsigned long vaddr)
{
    unsigned long pte;
    unsigned long j;
    pte_t ptep;

    j = *(volatile unsigned long *)kaddr;
    *(volatile unsigned long *)kaddr = j;

    ptep = pfn_pte(virt_to_pfn(kaddr), PAGE_KERNEL);
    pte = pte_val(ptep);
//        printk("dvma_remap: addr %lx -> %lx pte %08lx len %x\n",
//               kaddr, vaddr, pte, len);
    if(ptelist[(vaddr & 0xff000) >> PAGE_SHIFT] != pte) {
        sun3_put_pte(vaddr, pte);
        ptelist[(vaddr & 0xff000) >> PAGE_SHIFT] = pte;
    }

    return (vaddr + (kaddr & ~PAGE_MASK));

}
Example no. 20
static void
grant_ring(struct scsifilt *sf)
{
    DOMAIN_ID domid = sf->backend_domid;
    ULONG_PTR frame = virt_to_pfn(sf->ring_shared);
    int i;

    ungrant_ring(sf);

    for (i = 0; i < (1 << sf->ring_order); i++) {
        sf->ring_gref[i] = GnttabGrantForeignAccessCache(domid,
                                                         frame,
                                                         GRANT_MODE_RW,
                                                         sf->grant_cache);

        /* The grant cache always contains enough grefs to cover the
           ring itself, so this can never fail. */
        XM_ASSERT(!is_null_GRANT_REF(sf->ring_gref[i]));

        frame++;
    }
}
Example no. 21
/*
 * Copies length bytes, starting at src_start, into a new page, performs
 * cache maintenance, then maps it at the specified low address as
 * executable.
 *
 * This is used by hibernate to copy the code it needs to execute when
 * overwriting the kernel text. This function generates a new set of page
 * tables, which it loads into ttbr0.
 *
 * Length is provided as we probably only want 4K of data, even on a 64K
 * page system.
 */
static int create_safe_exec_page(void *src_start, size_t length,
				 unsigned long dst_addr,
				 phys_addr_t *phys_dst_addr,
				 void *(*allocator)(gfp_t mask),
				 gfp_t mask)
{
	int rc = 0;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	unsigned long dst = (unsigned long)allocator(mask);

	if (!dst) {
		rc = -ENOMEM;
		goto out;
	}

	memcpy((void *)dst, src_start, length);
	flush_icache_range(dst, dst + length);

	pgdp = pgd_offset_raw(allocator(mask), dst_addr);
	if (pgd_none(READ_ONCE(*pgdp))) {
		pudp = allocator(mask);
		if (!pudp) {
			rc = -ENOMEM;
			goto out;
		}
		pgd_populate(&init_mm, pgdp, pudp);
	}

	pudp = pud_offset(pgdp, dst_addr);
	if (pud_none(READ_ONCE(*pudp))) {
		pmdp = allocator(mask);
		if (!pmdp) {
			rc = -ENOMEM;
			goto out;
		}
		pud_populate(&init_mm, pudp, pmdp);
	}

	pmdp = pmd_offset(pudp, dst_addr);
	if (pmd_none(READ_ONCE(*pmdp))) {
		ptep = allocator(mask);
		if (!ptep) {
			rc = -ENOMEM;
			goto out;
		}
		pmd_populate_kernel(&init_mm, pmdp, ptep);
	}

	ptep = pte_offset_kernel(pmdp, dst_addr);
	set_pte(ptep, pfn_pte(virt_to_pfn(dst), PAGE_KERNEL_EXEC));

	/*
	 * Load our new page tables. A strict BBM approach requires that we
	 * ensure that TLBs are free of any entries that may overlap with the
	 * global mappings we are about to install.
	 *
	 * For a real hibernate/resume cycle TTBR0 currently points to a zero
	 * page, but TLBs may contain stale ASID-tagged entries (e.g. for EFI
	 * runtime services), while for a userspace-driven test_resume cycle it
	 * points to userspace page tables (and we must point it at a zero page
	 * ourselves). Elsewhere we only (un)install the idmap with preemption
	 * disabled, so T0SZ should be as required regardless.
	 */
	cpu_set_reserved_ttbr0();
	local_flush_tlb_all();
	write_sysreg(phys_to_ttbr(virt_to_phys(pgdp)), ttbr0_el1);
	isb();

	*phys_dst_addr = virt_to_phys((void *)dst);

out:
	return rc;
}
Example no. 22
void __init kasan_init(void)
{
	u64 kimg_shadow_start, kimg_shadow_end;
	u64 mod_shadow_start, mod_shadow_end;
	struct memblock_region *reg;
	int i;

	kimg_shadow_start = (u64)kasan_mem_to_shadow(_text);
	kimg_shadow_end = (u64)kasan_mem_to_shadow(_end);

	mod_shadow_start = (u64)kasan_mem_to_shadow((void *)MODULES_VADDR);
	mod_shadow_end = (u64)kasan_mem_to_shadow((void *)MODULES_END);

	/*
	 * We are going to perform proper setup of shadow memory.
	 * First we must unmap the early shadow (the clear_pgds() call
	 * below). However, instrumented code cannot execute without
	 * shadow memory, so tmp_pg_dir is used to keep the early shadow
	 * mapped until the full shadow setup is finished.
	 */
	memcpy(tmp_pg_dir, swapper_pg_dir, sizeof(tmp_pg_dir));
	dsb(ishst);
	cpu_replace_ttbr1(lm_alias(tmp_pg_dir));

	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);

	vmemmap_populate(kimg_shadow_start, kimg_shadow_end,
			 pfn_to_nid(virt_to_pfn(_text)));

	/*
	 * vmemmap_populate() has populated the shadow region that covers the
	 * kernel image with SWAPPER_BLOCK_SIZE mappings, so we have to round
	 * the start and end addresses to SWAPPER_BLOCK_SIZE as well, to prevent
	 * kasan_populate_zero_shadow() from replacing the page table entries
	 * (PMD or PTE) at the edges of the shadow region for the kernel
	 * image.
	 */
	kimg_shadow_start = round_down(kimg_shadow_start, SWAPPER_BLOCK_SIZE);
	kimg_shadow_end = round_up(kimg_shadow_end, SWAPPER_BLOCK_SIZE);

	kasan_populate_zero_shadow((void *)KASAN_SHADOW_START,
				   (void *)mod_shadow_start);
	kasan_populate_zero_shadow((void *)kimg_shadow_end,
				   kasan_mem_to_shadow((void *)PAGE_OFFSET));

	if (kimg_shadow_start > mod_shadow_end)
		kasan_populate_zero_shadow((void *)mod_shadow_end,
					   (void *)kimg_shadow_start);

	for_each_memblock(memory, reg) {
		void *start = (void *)__phys_to_virt(reg->base);
		void *end = (void *)__phys_to_virt(reg->base + reg->size);

		if (start >= end)
			break;

		/*
		 * end + 1 here is intentional. We check several shadow bytes in
		 * advance to slightly speed up fastpath. In some rare cases
		 * we could cross boundary of mapped shadow, so we just map
		 * some more here.
		 */
		vmemmap_populate((unsigned long)kasan_mem_to_shadow(start),
				(unsigned long)kasan_mem_to_shadow(end) + 1,
				pfn_to_nid(virt_to_pfn(start)));
	}
Example no. 23
static int
dom0_memory_reserve(uint32_t rsv_size)
{
	uint64_t pfn, vstart, vaddr;
	uint32_t i, num_block, size, allocated_size = 0;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)
	dma_addr_t dma_handle;
#endif

	/* 2M as memory block */
	num_block = rsv_size / SIZE_PER_BLOCK;

	rsv_mm_info = vmalloc(sizeof(struct memblock_info) * num_block);
	if (!rsv_mm_info) {
		XEN_ERR("Unable to allocate device memory information\n");
		return -ENOMEM;
	}
	memset(rsv_mm_info, 0, sizeof(struct memblock_info) * num_block);

	/* try to allocate 4M at a time */
	for (i = 0; i < num_block; i += 2) {
		vstart = (unsigned long)
			__get_free_pages(GFP_ATOMIC, MAX_NUM_ORDER);
		if (vstart == 0)
			break;

		dom0_dev.num_bigblock = i / 2 + 1;
		allocated_size =  SIZE_PER_BLOCK * (i + 2);

		/* size of 4M */
		size = DOM0_MEMBLOCK_SIZE * 2;

		vaddr = vstart;
		while (size > 0) {
			SetPageReserved(virt_to_page(vaddr));
			vaddr += PAGE_SIZE;
			size -= PAGE_SIZE;
		}

		pfn = virt_to_pfn(vstart);
		rsv_mm_info[i].pfn = pfn;
		rsv_mm_info[i].vir_addr = vstart;
		rsv_mm_info[i + 1].pfn =
				pfn + DOM0_MEMBLOCK_SIZE / PAGE_SIZE;
		rsv_mm_info[i + 1].vir_addr =
				vstart + DOM0_MEMBLOCK_SIZE;
	}

	/* if allocating 4M failed, fall back to allocating 2M at a time */
	for (; i < num_block; i++) {
		vstart = (unsigned long)
			__get_free_pages(GFP_ATOMIC, DOM0_CONTIG_NUM_ORDER);
		if (vstart == 0) {
			XEN_ERR("allocate memory fail.\n");
			dom0_memory_free(allocated_size);
			return -ENOMEM;
		}

		allocated_size += SIZE_PER_BLOCK;

		size = DOM0_MEMBLOCK_SIZE;
		vaddr = vstart;
		while (size > 0) {
			SetPageReserved(virt_to_page(vaddr));
			vaddr += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
		pfn = virt_to_pfn(vstart);
		rsv_mm_info[i].pfn = pfn;
		rsv_mm_info[i].vir_addr = vstart;
	}

	sort_viraddr(rsv_mm_info, num_block);

	for (i = 0; i < num_block; i++) {

		/*
		 * This API is used to exchange MFNs to obtain a block of
		 * contiguous physical addresses; its maximum size is 2M.
		 */
	#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)
		if (xen_create_contiguous_region(rsv_mm_info[i].vir_addr,
				DOM0_CONTIG_NUM_ORDER, 0) == 0) {
	#else
		if (xen_create_contiguous_region(rsv_mm_info[i].pfn * PAGE_SIZE,
				DOM0_CONTIG_NUM_ORDER, 0, &dma_handle) == 0) {
	#endif
			rsv_mm_info[i].exchange_flag = 1;
			rsv_mm_info[i].mfn =
				pfn_to_mfn(rsv_mm_info[i].pfn);
			rsv_mm_info[i].used = 0;
		} else {
			XEN_ERR("exchange memeory fail\n");
			rsv_mm_info[i].exchange_flag = 0;
			dom0_dev.fail_times++;
			if (dom0_dev.fail_times > MAX_EXCHANGE_FAIL_TIME) {
				dom0_memory_free(rsv_size);
				return  -EFAULT;
			}
		}
	}

	return 0;
}

static int
dom0_prepare_memsegs(struct memory_info *meminfo, struct dom0_mm_data *mm_data)
{
	uint32_t num_block;
	int idx;

	/* check if there is a free name buffer */
	memcpy(mm_data->name, meminfo->name, DOM0_NAME_MAX);
	mm_data->name[DOM0_NAME_MAX - 1] = '\0';
	idx = dom0_find_mempos();
	if (idx < 0)
		return -1;

	num_block = meminfo->size / SIZE_PER_BLOCK;
	/* find free memory and new memory segments*/
	find_free_memory(num_block, mm_data);
	find_memseg(num_block, mm_data);

	/* update private memory data */
	mm_data->refcnt++;
	mm_data->mem_size = meminfo->size;

	/* update global memory data */
	dom0_dev.mm_data[idx] = mm_data;
	dom0_dev.num_mem_ctx++;
	dom0_dev.used_memsize += mm_data->mem_size;

	return 0;
}
Example no. 24
void free_num_pages(void *pointer, unsigned long num_pages) {
    unsigned long first_page;

    first_page = virt_to_pfn(pointer);
    bitmap_free(first_page, num_pages);
}
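A hedged sketch of the allocation path implied by free_num_pages() above; bitmap_alloc_range() and its error convention are assumptions, not the project's actual API:

/* Hedged counterpart sketch; bitmap_alloc_range() is a hypothetical
 * helper returning the first PFN of a free run, or -1UL on failure. */
void *alloc_num_pages(unsigned long num_pages) {
    unsigned long first_page = bitmap_alloc_range(num_pages);
    if (first_page == (unsigned long)-1)
        return NULL;
    return (void *)pfn_to_virt(first_page);
}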