Code example #1
File: x86_xpmap.c    Project: yazshel/netbsd-kernel
vaddr_t
xen_pmap_bootstrap(void)
{
	int count, oldcount;
	long mapsize;
	vaddr_t bootstrap_tables, init_tables;

	memset(xpq_idx_array, 0, sizeof xpq_idx_array);

	xpmap_phys_to_machine_mapping =
	    (unsigned long *)xen_start_info.mfn_list;
	init_tables = xen_start_info.pt_base;
	__PRINTK(("xen_arch_pmap_bootstrap init_tables=0x%lx\n", init_tables));

	/* Space after the Xen bootstrap tables should be free */
	bootstrap_tables = xen_start_info.pt_base +
		(xen_start_info.nr_pt_frames * PAGE_SIZE);

	/*
	 * Calculate how much space we need. First, everything mapped before
	 * the Xen bootstrap tables.
	 */
	mapsize = init_tables - KERNTEXTOFF;
	/* after the tables we'll have:
	 *  - UAREA
	 *  - dummy user PGD (x86_64)
	 *  - HYPERVISOR_shared_info
	 *  - early_zerop
	 *  - ISA I/O mem (if needed)
	 */
	mapsize += UPAGES * NBPG;	/* UAREA */
#ifdef __x86_64__
	mapsize += NBPG;		/* dummy user PGD */
#endif
	mapsize += NBPG;		/* HYPERVISOR_shared_info */
	mapsize += NBPG;		/* early_zerop */

#ifdef DOM0OPS
	if (xendomain_is_dom0()) {
		/* space for ISA I/O mem */
		mapsize += IOM_SIZE;
	}
#endif
	/* At this point, mapsize doesn't include the table size. */

#ifdef __x86_64__
	count = TABLE_L2_ENTRIES;
#else
	count = (mapsize + (NBPD_L2 -1)) >> L2_SHIFT;
#endif /* __x86_64__ */
	
	/* now compute how many L2 pages we need exactly */
	XENPRINTK(("bootstrap_final mapsize 0x%lx count %d\n", mapsize, count));
	while (mapsize + (count + l2_4_count) * PAGE_SIZE + KERNTEXTOFF >
	    ((long)count << L2_SHIFT) + KERNBASE) {
		count++;
	}
#ifndef __x86_64__
	/*
	 * One more L2 page: we'll allocate several pages after kva_start
	 * in pmap_bootstrap() before pmap_growkernel(), which have not been
	 * counted here. It's not a big issue to allocate one more L2 as
	 * pmap_growkernel() will be called anyway.
	 */
	count++;
	nkptp[1] = count;
#endif

	/*
	 * Install bootstrap pages. We may need more L2 pages than the final
	 * table will have, as the bootstrap tables are installed after the
	 * final table.
	 */
	oldcount = count;

bootstrap_again:
	XENPRINTK(("bootstrap_again oldcount %d\n", oldcount));
	/*
	 * The Xen space we'll reclaim may not be enough for our new page
	 * tables; move the bootstrap tables if necessary.
	 */
	if (bootstrap_tables < init_tables + ((count + l2_4_count) * PAGE_SIZE))
		bootstrap_tables = init_tables +
					((count + l2_4_count) * PAGE_SIZE);
	/* make sure we have enough to map the bootstrap_tables */
	if (bootstrap_tables + ((oldcount + l2_4_count) * PAGE_SIZE) > 
	    ((long)oldcount << L2_SHIFT) + KERNBASE) {
		oldcount++;
		goto bootstrap_again;
	}

	/* Create temporary tables */
	xen_bootstrap_tables(xen_start_info.pt_base, bootstrap_tables,
		xen_start_info.nr_pt_frames, oldcount, 0);

	/* Create final tables */
	xen_bootstrap_tables(bootstrap_tables, init_tables,
	    oldcount + l2_4_count, count, 1);

	/* zero out free space after tables */
	memset((void *)(init_tables + ((count + l2_4_count) * PAGE_SIZE)), 0,
	    (UPAGES + 1) * NBPG);

	/* Finally, flush TLB. */
	xpq_queue_tlb_flush();

	return (init_tables + ((count + l2_4_count) * PAGE_SIZE));
}
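
The least obvious step above is the L2 page-count rounding: the page tables themselves occupy virtual space after the region being mapped, so the loop keeps adding L2 pages until everything from KERNTEXTOFF up to and including the tables fits under the range those L2 pages cover starting at KERNBASE. The standalone sketch below reproduces that arithmetic so it can be traced in isolation; it is not taken from the NetBSD sources, and PAGE_SIZE, L2_SHIFT, KERNBASE, KERNTEXTOFF, l2_4_count and the 6 MB mapsize are hypothetical stand-ins for the real kernel values.

/*
 * Hypothetical, self-contained sketch of the i386 L2 page-count rounding.
 * All constants are illustrative stand-ins, not the NetBSD definitions.
 */
#include <stdio.h>

#define PAGE_SIZE   4096UL
#define L2_SHIFT    22			/* assumption: 4 MB per L2 entry (non-PAE i386) */
#define NBPD_L2     (1UL << L2_SHIFT)
#define KERNBASE    0xc0000000UL	/* assumption */
#define KERNTEXTOFF 0xc0100000UL	/* assumption */

int
main(void)
{
	unsigned long mapsize = 6UL * 1024 * 1024;	/* hypothetical: 6 MB to map */
	unsigned long l2_4_count = 1;			/* hypothetical upper-level page count */
	unsigned long count;

	/* Initial estimate: round mapsize up to whole L2 entries. */
	count = (mapsize + (NBPD_L2 - 1)) >> L2_SHIFT;

	/*
	 * The tables themselves sit after the mapped area, so keep adding
	 * L2 pages until the mapped area plus the tables fits below
	 * KERNBASE + count * NBPD_L2.
	 */
	while (KERNTEXTOFF + mapsize + (count + l2_4_count) * PAGE_SIZE >
	    KERNBASE + (count << L2_SHIFT))
		count++;

	printf("L2 pages needed: %lu\n", count);
	return 0;
}

With these numbers the initial estimate of two L2 pages already leaves room for the tables, so the loop never iterates; a larger mapsize would drive count up.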
Code example #2
File: x86_xpmap.c    Project: ryo/netbsd-src
/*
 * Xen locore: get rid of the Xen bootstrap tables. Build and switch to new page
 * tables.
 */
vaddr_t
xen_locore(void)
{
	size_t count, oldcount, mapsize;
	vaddr_t bootstrap_tables, init_tables;

	xen_init_features();

	memset(xpq_idx_array, 0, sizeof(xpq_idx_array));

	xpmap_phys_to_machine_mapping =
	    (unsigned long *)xen_start_info.mfn_list;

	/* Space after the Xen bootstrap tables should be free */
	init_tables = xen_start_info.pt_base;
	bootstrap_tables = init_tables +
	    (xen_start_info.nr_pt_frames * PAGE_SIZE);

	/*
	 * Calculate how much space we need. First, everything mapped before
	 * the Xen bootstrap tables.
	 */
	mapsize = init_tables - KERNTEXTOFF;
	/* after the tables we'll have:
	 *  - UAREA
	 *  - dummy user PGD (x86_64)
	 *  - HYPERVISOR_shared_info
	 *  - early_zerop
	 *  - ISA I/O mem (if needed)
	 */
	mapsize += UPAGES * PAGE_SIZE;	/* UAREA */
#ifdef __x86_64__
	mapsize += PAGE_SIZE;		/* dummy user PGD */
#endif
	mapsize += PAGE_SIZE;		/* HYPERVISOR_shared_info */
	mapsize += PAGE_SIZE;		/* early_zerop */
#ifdef DOM0OPS
	if (xendomain_is_dom0()) {
		mapsize += IOM_SIZE;
	}
#endif

	/*
	 * At this point, mapsize doesn't include the table size.
	 */
#ifdef __x86_64__
	count = TABLE_L2_ENTRIES;
#else
	count = (mapsize + (NBPD_L2 - 1)) >> L2_SHIFT;
#endif

	/*
	 * Now compute how many L2 pages we need exactly. This is useful only
	 * on i386, since the initial count for amd64 is already enough.
	 */
	while (KERNTEXTOFF + mapsize + (count + l2_4_count) * PAGE_SIZE >
	    KERNBASE + (count << L2_SHIFT)) {
		count++;
	}

#ifndef __x86_64__
	/*
	 * One more L2 page: we'll allocate several pages after kva_start
	 * in pmap_bootstrap() before pmap_growkernel(), which have not been
	 * counted here. It's not a big issue to allocate one more L2 as
	 * pmap_growkernel() will be called anyway.
	 */
	count++;
	nkptp[1] = count;
#endif

	/*
	 * Install bootstrap pages. We may need more L2 pages than the final
	 * table will have, as the bootstrap tables are installed after the
	 * final table.
	 */
	oldcount = count;

bootstrap_again:

	/*
	 * The Xen space we'll reclaim may not be enough for our new page
	 * tables; move the bootstrap tables if necessary.
	 */
	if (bootstrap_tables < init_tables + ((count + l2_4_count) * PAGE_SIZE))
		bootstrap_tables = init_tables +
		    ((count + l2_4_count) * PAGE_SIZE);

	/*
	 * Make sure the number of L2 pages we have is enough to map everything
	 * from KERNBASE to the bootstrap tables themselves.
	 */
	if (bootstrap_tables + ((oldcount + l2_4_count) * PAGE_SIZE) > 
	    KERNBASE + (oldcount << L2_SHIFT)) {
		oldcount++;
		goto bootstrap_again;
	}

	/* Create temporary tables */
	xen_bootstrap_tables(init_tables, bootstrap_tables,
	    xen_start_info.nr_pt_frames, oldcount, false);

	/* Create final tables */
	xen_bootstrap_tables(bootstrap_tables, init_tables,
	    oldcount + l2_4_count, count, true);

	/* Zero out free space after tables */
	memset((void *)(init_tables + ((count + l2_4_count) * PAGE_SIZE)), 0,
	    (UPAGES + 1) * PAGE_SIZE);

	/* Finally, flush TLB. */
	xpq_queue_tlb_flush();

	return (init_tables + ((count + l2_4_count) * PAGE_SIZE));
}
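
This later revision of the same function (renamed xen_locore) tightens the counters to size_t, uses PAGE_SIZE instead of NBPG, passes booleans to xen_bootstrap_tables() and calls xen_init_features() up front, but the bootstrap_again relocation logic is unchanged: the temporary tables are pushed past the room reserved for the final tables, and oldcount grows until the temporary tables can map themselves. The standalone sketch below walks that fixed point with concrete numbers; it is not taken from the NetBSD sources, and every constant is a hypothetical stand-in.

/*
 * Hypothetical, self-contained sketch of the bootstrap_again fixed point.
 * All values are illustrative stand-ins, not the NetBSD definitions.
 */
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define L2_SHIFT  22			/* assumption: 4 MB per L2 entry */
#define KERNBASE  0xc0000000UL		/* assumption */

int
main(void)
{
	unsigned long init_tables = 0xc0400000UL;	/* hypothetical pt_base */
	unsigned long nr_pt_frames = 4;			/* hypothetical Xen-provided frames */
	unsigned long l2_4_count = 1;			/* hypothetical upper-level page count */
	unsigned long count = 2;			/* final L2 page count, as computed above */
	unsigned long bootstrap_tables, oldcount;

	/* Space after the Xen-provided tables is the first candidate spot. */
	bootstrap_tables = init_tables + nr_pt_frames * PAGE_SIZE;
	oldcount = count;

again:
	/* Push the temporary tables past the room needed by the final ones. */
	if (bootstrap_tables < init_tables + (count + l2_4_count) * PAGE_SIZE)
		bootstrap_tables = init_tables + (count + l2_4_count) * PAGE_SIZE;

	/* Grow oldcount until the temporary tables map themselves too. */
	if (bootstrap_tables + (oldcount + l2_4_count) * PAGE_SIZE >
	    KERNBASE + (oldcount << L2_SHIFT)) {
		oldcount++;
		goto again;
	}

	printf("bootstrap tables at 0x%lx, %lu temporary L2 pages\n",
	    bootstrap_tables, oldcount);
	return 0;
}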