Example 1
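/*
 * Grant-table mapping via XENMEM_add_to_physmap (the HVM approach): Xen
 * places each grant-table frame at a guest-physical address derived from
 * resume_frames, and the frames are then wired into the KVA range
 * reserved with kmem_alloc_nofault().  shared and resume_frames are
 * file-scope variables defined elsewhere in the source file.
 */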
static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
{
    struct xen_add_to_physmap xatp;
    unsigned int i = end_idx;

    /*
     * Loop backwards, so that the first hypercall has the largest index,
     * ensuring that the table will grow only once.
     */
    do {
        xatp.domid = DOMID_SELF;
        xatp.idx = i;
        xatp.space = XENMAPSPACE_grant_table;
        xatp.gpfn = (resume_frames >> PAGE_SHIFT) + i;
        if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
            panic("HYPERVISOR_memory_op failed to map gnttab");
    } while (i-- > start_idx);

    if (shared == NULL) {
        vm_offset_t area;

        area = kmem_alloc_nofault(kernel_map,
                                  PAGE_SIZE * max_nr_grant_frames());
        KASSERT(area, ("can't allocate VM space for grant table"));
        shared = (grant_entry_t *)area;
    }

    for (i = start_idx; i <= end_idx; i++) {
        pmap_kenter((vm_offset_t) shared + i * PAGE_SIZE,
                    resume_frames + i * PAGE_SIZE);
    }

    return (0);
}
Example 2
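/*
 * bus_space(9) mapping for the Samsung S3C2xx0: reuse a static devmap
 * entry when one covers the request; otherwise reserve KVA with
 * kmem_alloc_nofault(), enter the pages with pmap_kenter(), and strip
 * the cache bits from each PTE unless a cacheable mapping was requested.
 */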
int
s3c2xx0_bs_map(void *t, bus_addr_t bpa, bus_size_t size,
               int flag, bus_space_handle_t *bshp)
{
    u_long startpa, endpa, pa;
    vm_offset_t va;
    pt_entry_t *pte;
    const struct pmap_devmap *pd;

    if ((pd = pmap_devmap_find_pa(bpa, size)) != NULL) {
        /* Device was statically mapped. */
        *bshp = pd->pd_va + (bpa - pd->pd_pa);
        return (0);
    }

    startpa = trunc_page(bpa);
    endpa = round_page(bpa + size);

    va = kmem_alloc_nofault(kernel_map, endpa - startpa);
    if (!va)
        return (ENOMEM);

    *bshp = (bus_space_handle_t) (va + (bpa - startpa));

    for (pa = startpa; pa < endpa; pa += PAGE_SIZE, va += PAGE_SIZE) {
        pmap_kenter(va, pa);
        pte = vtopte(va);
        if ((flag & BUS_SPACE_MAP_CACHEABLE) == 0)
            *pte &= ~L2_S_CACHE_MASK;
    }
    return (0);
}
Example 3
/*
 * Attach - find resources and talk to Xen.
 */
static int
xenpci_attach(device_t dev)
{
	int error;
	struct xenpci_softc *scp = device_get_softc(dev);
	struct xen_add_to_physmap xatp;
	vm_offset_t shared_va;

	error = xenpci_allocate_resources(dev);
	if (error)
		goto errexit;

	scp->phys_next = rman_get_start(scp->res_memory);

	error = xenpci_init_hypercall_stubs(dev, scp);
	if (error)
		goto errexit;

	setup_xen_features();

	xenpci_alloc_space_int(scp, PAGE_SIZE, &shared_info_pa); 

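	/*
	 * Ask Xen to place the shared info page at the physical address
	 * carved out above, then give it a kernel virtual mapping.
	 */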
	xatp.domid = DOMID_SELF;
	xatp.idx = 0;
	xatp.space = XENMAPSPACE_shared_info;
	xatp.gpfn = shared_info_pa >> PAGE_SHIFT;
	if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
		panic("HYPERVISOR_memory_op failed");

	shared_va = kmem_alloc_nofault(kernel_map, PAGE_SIZE);
	pmap_kenter(shared_va, shared_info_pa);
	HYPERVISOR_shared_info = (void *) shared_va;

	/*
	 * Hook the irq up to evtchn
	 */
	xenpci_irq_init(dev, scp);
	xenpci_set_callback(dev);

	return (bus_generic_attach(dev));

errexit:
	/*
	 * Undo anything we may have done.
	 */
	xenpci_deallocate_resources(dev);
	return (error);
}
Example 4
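/*
 * Map a single vm_page_t into kernel virtual address space.  The version
 * guard reflects FreeBSD 10's VM API change, in which kmem_alloc_nofault()
 * was replaced by kva_alloc().
 */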
Mapping
OS_MapPageHandle(PageHandle handle)     // IN
{
#if __FreeBSD_version < 1000000
   vm_offset_t res = kmem_alloc_nofault(kernel_map, PAGE_SIZE);
#else
   vm_offset_t res = kva_alloc(PAGE_SIZE);
#endif

   vm_page_t page = (vm_page_t)handle;

   if (!res) {
      return MAPPING_INVALID;
   }

   pmap_qenter(res, &page, 1);

   return (Mapping)res;
}
Example 5
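/*
 * PV counterpart of the HVM gnttab_map() above: GNTTABOP_setup_table
 * returns the machine frames backing the grant table, which are then
 * mapped read/write into the KVA range reserved with
 * kmem_alloc_nofault().
 */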
static int
gnttab_map(unsigned int start_idx, unsigned int end_idx)
{
    struct gnttab_setup_table setup;
    u_long *frames;

    unsigned int nr_gframes = end_idx + 1;
    int i, rc;

    frames = malloc(nr_gframes * sizeof(unsigned long), M_DEVBUF, M_NOWAIT);
    if (!frames)
        return (ENOMEM);

    setup.dom        = DOMID_SELF;
    setup.nr_frames  = nr_gframes;
    set_xen_guest_handle(setup.frame_list, frames);

    rc = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1);
    if (rc == -ENOSYS) {
        free(frames, M_DEVBUF);
        return (ENOSYS);
    }
    KASSERT(!(rc || setup.status),
            ("unexpected result from grant_table_op"));

    if (shared == NULL) {
        vm_offset_t area;

        area = kmem_alloc_nofault(kernel_map,
                                  PAGE_SIZE * max_nr_grant_frames());
        KASSERT(area, ("can't allocate VM space for grant table"));
        shared = (grant_entry_t *)area;
    }

    for (i = 0; i < nr_gframes; i++)
        PT_SET_MA(((caddr_t)shared) + i*PAGE_SIZE,
                  ((vm_paddr_t)frames[i]) << PAGE_SHIFT | PG_RW | PG_V);

    free(frames, M_DEVBUF);

    return (0);
}
Example 6
/*
 * Allocate a pool of sf_bufs (sendfile(2) or "super-fast" if you prefer. :-))
 */
static void
sf_buf_init(void *arg)
{       
	struct sf_buf *sf_bufs;
	vm_offset_t sf_base;
	int i;
				        
	nsfbufs = NSFBUFS;
	TUNABLE_INT_FETCH("kern.ipc.nsfbufs", &nsfbufs);
		
	sf_buf_active = hashinit(nsfbufs, M_TEMP, &sf_buf_hashmask);
	TAILQ_INIT(&sf_buf_freelist);
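	/*
	 * Reserve KVA for the whole pool up front; physical pages are
	 * entered into these mappings later, when a buffer is actually
	 * handed out.
	 */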
	sf_base = kmem_alloc_nofault(kernel_map, nsfbufs * PAGE_SIZE);
	sf_bufs = malloc(nsfbufs * sizeof(struct sf_buf), M_TEMP,
	    M_NOWAIT | M_ZERO);
	for (i = 0; i < nsfbufs; i++) {
		sf_bufs[i].kva = sf_base + i * PAGE_SIZE;
		TAILQ_INSERT_TAIL(&sf_buf_freelist, &sf_bufs[i], free_entry);
	}
	sf_buf_alloc_want = 0; 
	mtx_init(&sf_buf_lock, "sf_buf", NULL, MTX_DEF);
}
Example 7
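/*
 * Bring up a secondary vCPU on Xen/i386 PV: allocate pages for a private
 * page directory, PDPT and boot stack, copy the kernel mappings from
 * cpu0's IdlePTD, then describe the new vCPU in a vcpu_guest_context_t
 * and start it with VCPUOP_initialise/VCPUOP_up.
 */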
static void
cpu_initialize_context(unsigned int cpu)
{
	/*
	 * vcpu_guest_context_t is too large to allocate on the stack.
	 * Hence we allocate it statically and protect it with a lock.
	 */
	vm_page_t m[NPGPTD + 2];
	static vcpu_guest_context_t ctxt;
	vm_offset_t boot_stack;
	vm_offset_t newPTD;
	vm_paddr_t ma[NPGPTD];
	int i;

	/*
	 * Pages 0-3: new page directory (PTD)
	 * Page  4:   boot stack
	 * Page  5:   PDPT
	 */
	for (i = 0; i < NPGPTD + 2; i++) {
		m[i] = vm_page_alloc(NULL, 0,
		    VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
		    VM_ALLOC_ZERO);
		pmap_zero_page(m[i]);
	}
	boot_stack = kmem_alloc_nofault(kernel_map, PAGE_SIZE);
	newPTD = kmem_alloc_nofault(kernel_map, NPGPTD * PAGE_SIZE);
	ma[0] = VM_PAGE_TO_MACH(m[0])|PG_V;

#ifdef PAE	
	pmap_kenter(boot_stack, VM_PAGE_TO_PHYS(m[NPGPTD + 1]));
	for (i = 0; i < NPGPTD; i++) {
		((vm_paddr_t *)boot_stack)[i] =
		ma[i] = VM_PAGE_TO_MACH(m[i])|PG_V;
	}
#endif	

	/*
	 * Copy cpu0 IdlePTD to new IdlePTD - copying only
	 * kernel mappings
	 */
	pmap_qenter(newPTD, m, 4);
	
	memcpy((uint8_t *)newPTD + KPTDI*sizeof(vm_paddr_t),
	    (uint8_t *)PTOV(IdlePTD) + KPTDI*sizeof(vm_paddr_t),
	    nkpt*sizeof(vm_paddr_t));

	pmap_qremove(newPTD, 4);
	kmem_free(kernel_map, newPTD, 4 * PAGE_SIZE);
	/*
	 * map actual idle stack to boot_stack
	 */
	pmap_kenter(boot_stack, VM_PAGE_TO_PHYS(m[NPGPTD]));

	xen_pgdpt_pin(VM_PAGE_TO_MACH(m[NPGPTD + 1]));
	rw_wlock(&pvh_global_lock);
	for (i = 0; i < 4; i++) {
		int pdir = (PTDPTDI + i) / NPDEPG;
		int curoffset = (PTDPTDI + i) % NPDEPG;
		
		xen_queue_pt_update((vm_paddr_t)
		    ((ma[pdir] & ~PG_V) + (curoffset*sizeof(vm_paddr_t))), 
		    ma[i]);
	}
	PT_UPDATES_FLUSH();
	rw_wunlock(&pvh_global_lock);
	
	memset(&ctxt, 0, sizeof(ctxt));
	ctxt.flags = VGCF_IN_KERNEL;
	ctxt.user_regs.ds = GSEL(GDATA_SEL, SEL_KPL);
	ctxt.user_regs.es = GSEL(GDATA_SEL, SEL_KPL);
	ctxt.user_regs.fs = GSEL(GPRIV_SEL, SEL_KPL);
	ctxt.user_regs.gs = GSEL(GDATA_SEL, SEL_KPL);
	ctxt.user_regs.cs = GSEL(GCODE_SEL, SEL_KPL);
	ctxt.user_regs.ss = GSEL(GDATA_SEL, SEL_KPL);
	ctxt.user_regs.eip = (unsigned long)init_secondary;
	ctxt.user_regs.eflags = PSL_KERNEL | 0x1000; /* IOPL_RING1 */

	memset(&ctxt.fpu_ctxt, 0, sizeof(ctxt.fpu_ctxt));

	smp_trap_init(ctxt.trap_ctxt);

	ctxt.ldt_ents = 0;
	ctxt.gdt_frames[0] = (uint32_t)((uint64_t)vtomach(bootAPgdt) >> PAGE_SHIFT);
	ctxt.gdt_ents      = 512;

#ifdef __i386__
	ctxt.user_regs.esp = boot_stack + PAGE_SIZE;

	ctxt.kernel_ss = GSEL(GDATA_SEL, SEL_KPL);
	ctxt.kernel_sp = boot_stack + PAGE_SIZE;

	ctxt.event_callback_cs     = GSEL(GCODE_SEL, SEL_KPL);
	ctxt.event_callback_eip    = (unsigned long)Xhypervisor_callback;
	ctxt.failsafe_callback_cs  = GSEL(GCODE_SEL, SEL_KPL);
	ctxt.failsafe_callback_eip = (unsigned long)failsafe_callback;

	ctxt.ctrlreg[3] = VM_PAGE_TO_MACH(m[NPGPTD + 1]);
#else /* __x86_64__ */
	ctxt.user_regs.esp = idle->thread.rsp0 - sizeof(struct pt_regs);
	ctxt.kernel_ss = GSEL(GDATA_SEL, SEL_KPL);
	ctxt.kernel_sp = idle->thread.rsp0;

	ctxt.event_callback_eip    = (unsigned long)hypervisor_callback;
	ctxt.failsafe_callback_eip = (unsigned long)failsafe_callback;
	ctxt.syscall_callback_eip  = (unsigned long)system_call;

	ctxt.ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(init_level4_pgt));

	ctxt.gs_base_kernel = (unsigned long)(cpu_pda(cpu));
#endif

	printf("gdtpfn=%lx pdptpfn=%lx\n",
	    ctxt.gdt_frames[0],
	    ctxt.ctrlreg[3] >> PAGE_SHIFT);

	PANIC_IF(HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, &ctxt));
	DELAY(3000);
	PANIC_IF(HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL));
}
Example 8
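/*
 * Create the system shared page: take a permanently resident page from an
 * OBJT_PHYS object (so it is never paged out) and map it into the kernel
 * via kmem_alloc_nofault() + pmap_qenter().
 */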
static void
shared_page_init(void *dummy __unused)
{
	vm_page_t m;
	vm_offset_t addr;

	sx_init(&shared_page_alloc_sx, "shpsx");
	shared_page_obj = vm_pager_allocate(OBJT_PHYS, 0, PAGE_SIZE,
	    VM_PROT_DEFAULT, 0, NULL);
	VM_OBJECT_LOCK(shared_page_obj);
	m = vm_page_grab(shared_page_obj, 0, VM_ALLOC_RETRY | VM_ALLOC_NOBUSY |
	    VM_ALLOC_ZERO);
	m->valid = VM_PAGE_BITS_ALL;
	VM_OBJECT_UNLOCK(shared_page_obj);
	addr = kmem_alloc_nofault(kernel_map, PAGE_SIZE);
	pmap_qenter(addr, &m, 1);
	shared_page_mapping = (char *)addr;
}

SYSINIT(shp, SI_SUB_EXEC, SI_ORDER_FIRST, (sysinit_cfunc_t)shared_page_init,
    NULL);

Example 9
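/*
 * sparc64 bus_space(9) normally uses physical ASIs, so a virtual mapping
 * is only built when BUS_SPACE_MAP_LINEAR is requested, either at the
 * caller-supplied VA or in KVA obtained from kmem_alloc_nofault().
 */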
int
sparc64_bus_mem_map(bus_space_tag_t tag, bus_addr_t addr, bus_size_t size,
    int flags, vm_offset_t vaddr, bus_space_handle_t *hp)
{
	vm_offset_t sva;
	vm_offset_t va;
	vm_paddr_t pa;
	vm_size_t vsz;
	u_long pm_flags;

	/*
	 * Given that we use physical access for bus_space(9) there's no need
	 * to map anything in unless BUS_SPACE_MAP_LINEAR is requested.
	 */
	if ((flags & BUS_SPACE_MAP_LINEAR) == 0) {
		*hp = addr;
		return (0);
	}

	if (tag->bst_cookie == NULL) {
		printf("%s: resource cookie not set\n", __func__);
		return (EINVAL);
	}

	size = round_page(size);
	if (size == 0) {
		printf("%s: zero size\n", __func__);
		return (EINVAL);
	}

	switch (tag->bst_type) {
	case PCI_CONFIG_BUS_SPACE:
	case PCI_IO_BUS_SPACE:
	case PCI_MEMORY_BUS_SPACE:
		pm_flags = TD_IE;
		break;
	default:
		pm_flags = 0;
		break;
	}

	if ((flags & BUS_SPACE_MAP_CACHEABLE) == 0)
		pm_flags |= TD_E;

	if (vaddr != 0L)
		sva = trunc_page(vaddr);
	else {
		if ((sva = kmem_alloc_nofault(kernel_map, size)) == 0)
			panic("%s: cannot allocate virtual memory", __func__);
	}

	pa = trunc_page(addr);
	if ((flags & BUS_SPACE_MAP_READONLY) == 0)
		pm_flags |= TD_W;

	va = sva;
	vsz = size;
	do {
		pmap_kenter_flags(va, pa, pm_flags);
		va += PAGE_SIZE;
		pa += PAGE_SIZE;
	} while ((vsz -= PAGE_SIZE) > 0);
	tlb_range_demap(kernel_pmap, sva, sva + size - 1);

	/* Note: we preserve the page offset. */
	rman_set_virtual(tag->bst_cookie, (void *)(sva | (addr & PAGE_MASK)));
	return (0);
}