Example no. 1
/*
 * MD-specific resume preparation (creating resume time pagetables,
 * stacks, etc).
 */
void
hibernate_prepare_resume_machdep(union hibernate_info *hib_info)
{
	paddr_t pa, piglet_end;
	vaddr_t va;

	/*
	 * At this point we know the piglet's physical space was left unused
	 * by the suspending kernel, but the VAs used by the suspending
	 * kernel may or may not be available to us here in the resuming
	 * kernel, so we allocate a new range of VAs for the piglet. Those
	 * VAs are temporary and will cease to exist as soon as we switch
	 * to the resume PT, so we need to ensure that any VAs required
	 * during inflate are also entered into that map.
	 */

	hib_info->piglet_va = (vaddr_t)km_alloc(HIBERNATE_CHUNK_SIZE*3,
	    &kv_any, &kp_none, &kd_nowait);
	if (!hib_info->piglet_va)
		panic("Unable to allocate vaddr for hibernate resume piglet\n");

	piglet_end = hib_info->piglet_pa + HIBERNATE_CHUNK_SIZE*3;

	for (pa = hib_info->piglet_pa, va = hib_info->piglet_va;
	    pa <= piglet_end; pa += PAGE_SIZE, va += PAGE_SIZE)
		pmap_kenter_pa(va, pa, VM_PROT_ALL);

	pmap_activate(curproc);
}
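
A possible inverse of the preparation above, as a minimal sketch: the helper
name is hypothetical, but it uses only OpenBSD interfaces already visible in
the example (pmap_kremove() to drop the temporary mappings, km_free() as the
counterpart of km_alloc()).

void
hibernate_release_resume_piglet(union hibernate_info *hib_info)
{
	/* Drop the temporary kernel mappings entered above. */
	pmap_kremove(hib_info->piglet_va, HIBERNATE_CHUNK_SIZE*3);

	/* Return the VA range to the kernel VA allocator. */
	km_free((void *)hib_info->piglet_va, HIBERNATE_CHUNK_SIZE*3,
	    &kv_any, &kp_none);
}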
Example no. 2
/*
 * Prepare context switch from oldlwp to newlwp.
 * This code is shared by cpu_switch and cpu_switchto.
 */
struct lwp *
cpu_switch_prepare(struct lwp *oldlwp, struct lwp *newlwp)
{

	newlwp->l_stat = LSONPROC;

	if (newlwp != oldlwp) {
		struct proc *p = newlwp->l_proc;

		curpcb = newlwp->l_md.md_pcb;
		pmap_activate(newlwp);

		/* Check for Restartable Atomic Sequences. */
		if (!LIST_EMPTY(&p->p_raslist)) {
			caddr_t pc;

			pc = ras_lookup(p,
				(caddr_t)newlwp->l_md.md_regs->tf_spc);
			if (pc != (caddr_t) -1)
				newlwp->l_md.md_regs->tf_spc = (int) pc;
		}
	}

	curlwp = newlwp;
	return (newlwp);
}
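
The restartable-atomic-sequence fixup above, isolated as a minimal sketch
(the helper name is hypothetical; ras_lookup() and the sh3 tf_spc field are
taken from the example): if the saved PC falls inside a registered RAS, it
is rewound to the start of that sequence so the interrupted sequence reruns
from the top after the switch.

static void
ras_fixup(struct proc *p, struct trapframe *tf)
{
	caddr_t pc;

	if (LIST_EMPTY(&p->p_raslist))
		return;
	pc = ras_lookup(p, (caddr_t)tf->tf_spc);
	if (pc != (caddr_t)-1)
		tf->tf_spc = (int)pc;	/* restart the sequence */
}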
Example no. 3
void
userret(struct lwp *l)
{
#if defined(__PROG32) && defined(ARM_MMU_EXTENDED)
	/*
	 * If our ASID got released, access via TTBR0 will have been disabled.
	 * So if it is disabled, activate the lwp again to get a new ASID.
	 */
#ifdef __HAVE_PREEMPTION
	kpreempt_disable();
#endif
	KASSERT(curcpu()->ci_pmap_cur == l->l_proc->p_vmspace->vm_map.pmap);
	if (__predict_false(armreg_ttbcr_read() & TTBCR_S_PD0)) {
		pmap_activate(l);
	}
	KASSERT(!(armreg_ttbcr_read() & TTBCR_S_PD0));
#ifdef __HAVE_PREEMPTION
	kpreempt_enable();
#endif
#endif

	/* Invoke MI userret code */
	mi_userret(l);

#if defined(__PROG32) && defined(DIAGNOSTIC)
	KASSERT(VALID_R15_PSR(lwp_trapframe(l)->tf_pc,
	    lwp_trapframe(l)->tf_spsr));
#endif
}
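
The ASID-revocation test above as a standalone predicate, a minimal sketch
assuming the armreg_ttbcr_read() accessor and TTBCR_S_PD0 bit used in the
example: while PD0 is set, translation table walks through TTBR0 (the user
half of the address space) are disabled, so the lwp must be re-activated to
obtain a fresh ASID.

static inline bool
user_ttbr0_disabled(void)
{
	/* PD0 set => TTBR0 walks disabled => our ASID was released. */
	return (armreg_ttbcr_read() & TTBCR_S_PD0) != 0;
}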
Example no. 4
/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the kernel stack and pcb, making the child
 * ready to run, and marking it so that it can return differently
 * than the parent.  Returns 1 in the child process, 0 in the parent.
 */
void
cpu_fork(struct proc *p1, struct proc *p2, void *stack, size_t stacksize,
    void (*func)(void *), void *arg)
{
	struct pcb *pcb = &p2->p_addr->u_pcb;
	struct trapframe *tf;
	struct switchframe *sf;

	/*
	 * If fpuproc != p1, then the fpu h/w state is irrelevant and the
	 * state had better already be in the pcb.  This is true for forks
	 * but not for dumps.
	 *
	 * If fpuproc == p1, then we have to save the fpu h/w state to
	 * p1's pcb so that we can copy it.
	 */
	if (p1->p_addr->u_pcb.pcb_fpcpu != NULL)
		fpusave_proc(p1, 1);

	p2->p_md.md_flags = p1->p_md.md_flags;

#ifdef DIAGNOSTIC
	if (p1 != curproc && p1 != &proc0)
		panic("cpu_fork: curproc");
#endif
	*pcb = p1->p_addr->u_pcb;

	/*
	 * Activate the address space.
	 */
	pmap_activate(p2);

	/* Record where this process's kernel stack is */
	pcb->pcb_kstack = (u_int64_t)p2->p_addr + USPACE - 16;

	/*
	 * Copy the trapframe.
	 */
	p2->p_md.md_regs = tf = (struct trapframe *)pcb->pcb_kstack - 1;
	*tf = *p1->p_md.md_regs;

	setredzone(p2);

	/*
	 * If specified, give the child a different stack.
	 */
	if (stack != NULL)
		tf->tf_rsp = (u_int64_t)stack + stacksize;

	sf = (struct switchframe *)tf - 1;
	sf->sf_r12 = (u_int64_t)func;
	sf->sf_r13 = (u_int64_t)arg;
	sf->sf_rip = (u_int64_t)proc_trampoline;
	pcb->pcb_rsp = (u_int64_t)sf;
	pcb->pcb_rbp = 0;
}
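
A hedged C rendering of what the switchframe above arranges (the real
proc_trampoline is assembly, so this sketch is illustrative only): once
cpu_switchto() restores pcb_rsp and "returns" through sf_rip, the trampoline
calls func(arg) with the values parked in the callee-saved slots sf_r12 and
sf_r13, then continues to the user-mode return path via the trapframe built
just above the switchframe.

static void
proc_trampoline_sketch(void (*func)(void *), void *arg)
{
	func(arg);	/* e.g. child_return(p2) for a forked child */
	/* ...then return to user mode through the trapframe. */
}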
Example no. 5
/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the kernel stack and pcb, making the child
 * ready to run, and marking it so that it can return differently
 * than the parent.
 */
void
cpu_fork(struct proc *p1, struct proc *p2, void *stack, void *tcb,
    void (*func)(void *), void *arg)
{
	struct cpu_info *ci = curcpu();
	struct pcb *pcb = &p2->p_addr->u_pcb;
	struct pcb *pcb1 = &p1->p_addr->u_pcb;
	struct trapframe *tf;
	struct switchframe *sf;

	/* Save the fpu h/w state to p1's pcb so that we can copy it. */
	if (p1 != &proc0 && (ci->ci_flags & CPUF_USERXSTATE))
		fpusave(&pcb1->pcb_savefpu);

	p2->p_md.md_flags = p1->p_md.md_flags;

#ifdef DIAGNOSTIC
	if (p1 != curproc && p1 != &proc0)
		panic("cpu_fork: curproc");
#endif
	*pcb = *pcb1;

	/*
	 * Activate the address space.
	 */
	pmap_activate(p2);

	/* Record where this process's kernel stack is */
	pcb->pcb_kstack = (u_int64_t)p2->p_addr + USPACE - 16 -
	    (arc4random() & PAGE_MASK & ~_STACKALIGNBYTES);

	/*
	 * Copy the trapframe.
	 */
	p2->p_md.md_regs = tf = (struct trapframe *)pcb->pcb_kstack - 1;
	*tf = *p1->p_md.md_regs;

	setguardpage(p2);

	/*
	 * If specified, give the child a different stack and/or TCB
	 */
	if (stack != NULL)
		tf->tf_rsp = (u_int64_t)stack;
	if (tcb != NULL)
		pcb->pcb_fsbase = (u_int64_t)tcb;

	sf = (struct switchframe *)tf - 1;
	sf->sf_r12 = (u_int64_t)func;
	sf->sf_r13 = (u_int64_t)arg;
	sf->sf_rip = (u_int64_t)proc_trampoline;
	pcb->pcb_rsp = (u_int64_t)sf;
	pcb->pcb_rbp = 0;
}
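
The stack-gap arithmetic above in isolation, a minimal sketch assuming the
amd64 values PAGE_MASK == PAGE_SIZE - 1 and _STACKALIGNBYTES == 15: the
random offset is confined to a single page and rounded down to a 16-byte
boundary, so the kernel stack top varies per process while remaining
ABI-aligned.

u_int64_t
kstack_gap(void)
{
	/* Yields 0 .. PAGE_SIZE-16 in 16-byte steps. */
	return (arc4random() & PAGE_MASK & ~_STACKALIGNBYTES);
}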
Example no. 6
/*
 * Find the highest-priority runnable process and switch to it.
 */
int
cpu_switch(struct lwp *l1, struct lwp *newl)
{
	int which;
	struct prochd *q;
	struct lwp *l2;
	struct proc *p2;
	/*
	 * We enter here with interrupts blocked and sched_lock held.
	 */

#if 0
	printf("cpu_switch: %p ->", l1);
#endif
	curlwp = NULL;
	curpcb = NULL;
	while (sched_whichqs == 0)
		idle();
	which = ffs(sched_whichqs) - 1;
	q = &sched_qs[which];
	l2 = q->ph_link;
	remrunqueue(l2);
	want_resched = 0;
#ifdef LOCKDEBUG
	sched_unlock_idle();
#endif
	/* p->p_cpu initialized in fork1() for single-processor */
	l2->l_stat = LSONPROC;
	curlwp = l2;
	curpcb = &curlwp->l_addr->u_pcb;
#if 0
	printf(" %p\n", l2);
#endif
	if (l2 == l1)
		return (0);
	pmap_deactivate(l1);
	pmap_activate(l2);

	/* Check for Restartable Atomic Sequences. */
	p2 = l2->l_proc;
	if (!LIST_EMPTY(&p2->p_raslist)) {
		struct trapframe *tf = l2->l_addr->u_pcb.pcb_tf;
		caddr_t pc;

		pc = ras_lookup(p2, (caddr_t) tf->tf_pc);
		if (pc != (caddr_t) -1)
			tf->tf_pc = (register_t) pc;
	}

	cpu_loswitch(&l1->l_addr->u_pcb.pcb_sf, l2->l_addr->u_pcb.pcb_sf);
	/* We only get back here after the other process has run. */
	return (1);
}
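
The run-queue selection above, isolated as a minimal sketch (the helper name
is hypothetical; sched_whichqs, sched_qs and remrunqueue() come from the
example): bit n of sched_whichqs is set iff sched_qs[n] is non-empty, so
ffs() finds the highest-priority non-empty queue. The caller must guarantee
the mask is non-zero, which the idle loop above ensures.

static struct lwp *
pick_next_lwp(void)
{
	int which = ffs(sched_whichqs) - 1;	/* lowest set bit */
	struct lwp *l2 = sched_qs[which].ph_link;

	remrunqueue(l2);
	return l2;
}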
Example no. 7
/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the pcb, and set up the stack so that the child is
 * ready to run and return to user mode.
 */
void
cpu_fork(register struct thread *td1, register struct proc *p2,
    struct thread *td2, int flags)
{
	struct pcb *pcb2;
	struct trapframe *tf;
	struct switchframe *sf;
	struct mdproc *mdp2;

	if ((flags & RFPROC) == 0)
		return;
	pcb2 = (struct pcb *)(td2->td_kstack +
	    td2->td_kstack_pages * PAGE_SIZE) - 1;
#ifdef __XSCALE__
#ifndef CPU_XSCALE_CORE3
	pmap_use_minicache(td2->td_kstack, td2->td_kstack_pages * PAGE_SIZE);
#endif
#endif
	td2->td_pcb = pcb2;
	bcopy(td1->td_pcb, pcb2, sizeof(*pcb2));
	mdp2 = &p2->p_md;
	bcopy(&td1->td_proc->p_md, mdp2, sizeof(*mdp2));
	pcb2->un_32.pcb32_sp = td2->td_kstack +
	    USPACE_SVC_STACK_TOP - sizeof(*pcb2);
	pcb2->pcb_vfpcpu = -1;
	pcb2->pcb_vfpstate.fpscr = VFPSCR_DN | VFPSCR_FZ;
	pmap_activate(td2);
	td2->td_frame = tf = (struct trapframe *)STACKALIGN(
	    pcb2->un_32.pcb32_sp - sizeof(struct trapframe));
	*tf = *td1->td_frame;
	sf = (struct switchframe *)tf - 1;
	sf->sf_r4 = (u_int)fork_return;
	sf->sf_r5 = (u_int)td2;
	sf->sf_pc = (u_int)fork_trampoline;
	tf->tf_spsr &= ~PSR_C_bit;
	tf->tf_r0 = 0;
	tf->tf_r1 = 0;
	pcb2->un_32.pcb32_sp = (u_int)sf;
	KASSERT((pcb2->un_32.pcb32_sp & 7) == 0,
	    ("cpu_fork: Incorrect stack alignment"));

	/* Setup to release spin count in fork_exit(). */
	td2->td_md.md_spinlock_count = 1;
	td2->td_md.md_saved_cspr = 0;
#ifdef ARM_TP_ADDRESS
	td2->td_md.md_tp = *(register_t *)ARM_TP_ADDRESS;
#else
	td2->td_md.md_tp = (register_t)get_tls();
#endif
}
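
A hedged sketch of the STACKALIGN() rounding used above (the real macro
lives in FreeBSD's ARM machine headers; STACKALIGN_SKETCH is a hypothetical
stand-in): AAPCS requires an 8-byte-aligned stack pointer at public
interfaces, so the trapframe address is rounded down to an 8-byte boundary,
which the KASSERT near the end of the function re-checks.

#define STACKALIGN_SKETCH(p)	((u_int)(p) & ~7U)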
Example no. 8
int
mp_cpu_start(struct cpu_info *ci)
{
	unsigned short dwordptr[2];

	/*
	 * "The BSP must initialize CMOS shutdown code to 0Ah ..."
	 */

	outb(IO_RTC, NVRAM_RESET);
	outb(IO_RTC+1, NVRAM_RESET_JUMP);

	/*
	 * "and the warm reset vector (DWORD based at 40:67) to point
	 * to the AP startup code ..."
	 */

	dwordptr[0] = 0;
	dwordptr[1] = MP_TRAMPOLINE >> 4;

	pmap_activate(curproc);

	pmap_kenter_pa(0, 0, PROT_READ | PROT_WRITE);
	memcpy((u_int8_t *)0x467, dwordptr, 4);
	pmap_kremove(0, PAGE_SIZE);

#if NLAPIC > 0
	/*
	 * ... prior to executing the following sequence:"
	 */

	if (ci->ci_flags & CPUF_AP) {
		i386_ipi_init(ci->ci_apicid);

		delay(10000);

		if (cpu_feature & CPUID_APIC) {
			i386_ipi(MP_TRAMPOLINE / PAGE_SIZE, ci->ci_apicid,
			    LAPIC_DLMODE_STARTUP);
			delay(200);

			i386_ipi(MP_TRAMPOLINE / PAGE_SIZE, ci->ci_apicid,
			    LAPIC_DLMODE_STARTUP);
			delay(200);
		}
	}
#endif
	return (0);
}
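
The warm-reset vector written above, repackaged as a standalone helper (the
function name is hypothetical; the layout follows the example): the DWORD at
0x40:0x67 (linear 0x467) is a real-mode far pointer stored offset first,
then segment, so a zero offset with segment MP_TRAMPOLINE >> 4 makes the AP
begin execution at MP_TRAMPOLINE. Page 0 must be mapped while the copy runs,
as the example arranges with pmap_kenter_pa()/pmap_kremove().

static void
set_warm_reset_vector(paddr_t tramp)
{
	unsigned short dwordptr[2];

	dwordptr[0] = 0;		/* real-mode offset */
	dwordptr[1] = tramp >> 4;	/* real-mode segment */
	memcpy((u_int8_t *)0x467, dwordptr, sizeof(dwordptr));
}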
Example no. 9
/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the pcb, and set up the stack so that the child is
 * ready to run and return to user mode.
 */
void
cpu_fork(register struct thread *td1, register struct proc *p2,
    struct thread *td2, int flags)
{
	struct pcb *pcb1, *pcb2;
	struct trapframe *tf;
	struct switchframe *sf;
	struct mdproc *mdp2;

	if ((flags & RFPROC) == 0)
		return;
	pcb1 = td1->td_pcb;
	pcb2 = (struct pcb *)(td2->td_kstack + td2->td_kstack_pages * PAGE_SIZE) - 1;
#ifdef __XSCALE__
#ifndef CPU_XSCALE_CORE3
	pmap_use_minicache(td2->td_kstack, td2->td_kstack_pages * PAGE_SIZE);
	if (td2->td_altkstack)
		pmap_use_minicache(td2->td_altkstack, td2->td_altkstack_pages *
		    PAGE_SIZE);
#endif
#endif
	td2->td_pcb = pcb2;
	bcopy(td1->td_pcb, pcb2, sizeof(*pcb2));
	mdp2 = &p2->p_md;
	bcopy(&td1->td_proc->p_md, mdp2, sizeof(*mdp2));
	pcb2->un_32.pcb32_und_sp = td2->td_kstack + USPACE_UNDEF_STACK_TOP;
	pcb2->un_32.pcb32_sp = td2->td_kstack +
	    USPACE_SVC_STACK_TOP - sizeof(*pcb2);
	pmap_activate(td2);
	td2->td_frame = tf =
	    (struct trapframe *)pcb2->un_32.pcb32_sp - 1;
	*tf = *td1->td_frame;
	sf = (struct switchframe *)tf - 1;
	sf->sf_r4 = (u_int)fork_return;
	sf->sf_r5 = (u_int)td2;
	sf->sf_pc = (u_int)fork_trampoline;
	tf->tf_spsr &= ~PSR_C_bit;
	tf->tf_r0 = 0;
	tf->tf_r1 = 0;
	pcb2->un_32.pcb32_sp = (u_int)sf;

	/* Setup to release spin count in fork_exit(). */
	td2->td_md.md_spinlock_count = 1;
	td2->td_md.md_saved_cspr = 0;
	td2->td_md.md_tp = *(uint32_t **)ARM_TP_ADDRESS;
}
Example no. 10
/*
 * Prepare context switch from oproc to nproc.
 * This code is used by cpu_switchto.
 */
void
cpu_switch_prepare(struct proc *oproc, struct proc *nproc)
{
	nproc->p_stat = SONPROC;

	if (oproc && (oproc->p_md.md_flags & MDP_STEP))
		_reg_write_2(SH_(BBRB), 0);

	curpcb = nproc->p_md.md_pcb;
	pmap_activate(nproc);

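	/*
	 * MDP_STEP: hardware single-step via the SH user break controller,
	 * channel B. The writes below arm a break on the next instruction
	 * fetch at the saved PC (BARB) within the new process's ASID (BASRB).
	 */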
	if (nproc->p_md.md_flags & MDP_STEP) {
		int pm_asid = nproc->p_vmspace->vm_map.pmap->pm_asid;

		_reg_write_2(SH_(BBRB), 0);
		_reg_write_4(SH_(BARB), nproc->p_md.md_regs->tf_spc);
		_reg_write_1(SH_(BASRB), pm_asid);
		_reg_write_1(SH_(BAMRB), 0);
		_reg_write_2(SH_(BRCR), 0x0040);
		_reg_write_2(SH_(BBRB), 0x0014);
	}

	curproc = nproc;
}
Example no. 11
/*
 * Create the resume-time page table. This table maps the image (pig) area,
 * the kernel text area, and various utility pages for use during resume,
 * since we cannot overwrite the resuming kernel's page table during inflate
 * and expect things to work properly.
 */
void
hibernate_populate_resume_pt(union hibernate_info *hib_info,
    paddr_t image_start, paddr_t image_end)
{
	int phys_page_number, i;
	paddr_t pa;
	vaddr_t kern_start_2m_va, kern_end_2m_va, page;
	vaddr_t piglet_start_va, piglet_end_va;
	pt_entry_t *pde, npde;

	/* Identity map MMU pages */
	pmap_kenter_pa(HIBERNATE_PML4T, HIBERNATE_PML4T, PROT_MASK);
	pmap_kenter_pa(HIBERNATE_PDPT_LOW, HIBERNATE_PDPT_LOW, PROT_MASK);
	pmap_kenter_pa(HIBERNATE_PDPT_HI, HIBERNATE_PDPT_HI, PROT_MASK);
	pmap_kenter_pa(HIBERNATE_PD_LOW, HIBERNATE_PD_LOW, PROT_MASK);
	pmap_kenter_pa(HIBERNATE_PD_LOW2, HIBERNATE_PD_LOW2, PROT_MASK);
	pmap_kenter_pa(HIBERNATE_PD_HI, HIBERNATE_PD_HI, PROT_MASK);
	pmap_kenter_pa(HIBERNATE_PT_LOW, HIBERNATE_PT_LOW, PROT_MASK);
	pmap_kenter_pa(HIBERNATE_PT_LOW2, HIBERNATE_PT_LOW2, PROT_MASK);
	pmap_kenter_pa(HIBERNATE_PT_HI, HIBERNATE_PT_HI, PROT_MASK);

	/* Identity map 3 pages for stack */
	pmap_kenter_pa(HIBERNATE_STACK_PAGE, HIBERNATE_STACK_PAGE, PROT_MASK);
	pmap_kenter_pa(HIBERNATE_STACK_PAGE - PAGE_SIZE,
		HIBERNATE_STACK_PAGE - PAGE_SIZE, PROT_MASK);
	pmap_kenter_pa(HIBERNATE_STACK_PAGE - 2*PAGE_SIZE,
		HIBERNATE_STACK_PAGE - 2*PAGE_SIZE, PROT_MASK);
	pmap_activate(curproc);

	bzero((caddr_t)HIBERNATE_PML4T, PAGE_SIZE);
	bzero((caddr_t)HIBERNATE_PDPT_LOW, PAGE_SIZE);
	bzero((caddr_t)HIBERNATE_PDPT_HI, PAGE_SIZE);
	bzero((caddr_t)HIBERNATE_PD_LOW, PAGE_SIZE);
	bzero((caddr_t)HIBERNATE_PD_LOW2, PAGE_SIZE);
	bzero((caddr_t)HIBERNATE_PD_HI, PAGE_SIZE);
	bzero((caddr_t)HIBERNATE_PT_LOW, PAGE_SIZE);
	bzero((caddr_t)HIBERNATE_PT_LOW2, PAGE_SIZE);
	bzero((caddr_t)HIBERNATE_PT_HI, PAGE_SIZE);
	bzero((caddr_t)(HIBERNATE_STACK_PAGE - 3*PAGE_SIZE), 3*PAGE_SIZE);

	/* First 512GB PML4E */
	pde = (pt_entry_t *)(HIBERNATE_PML4T +
		(pl4_pi(0) * sizeof(pt_entry_t)));
	npde = (HIBERNATE_PDPT_LOW) | PG_RW | PG_V;
	*pde = npde;

	/* First 1GB PDPTE */
	pde = (pt_entry_t *)(HIBERNATE_PDPT_LOW +
		(pl3_pi(0) * sizeof(pt_entry_t)));
	npde = (HIBERNATE_PD_LOW) | PG_RW | PG_V;
	*pde = npde;
	
	/* PD for first 2MB */
	pde = (pt_entry_t *)(HIBERNATE_PD_LOW +
		(pl2_pi(0) * sizeof(pt_entry_t)));
	npde = (HIBERNATE_PT_LOW) | PG_RW | PG_V;
	*pde = npde;

	/*
	 * Identity map low physical pages.
	 * See arch/amd64/include/hibernate_var.h for page ranges used here.
	 */
	for (i = ACPI_TRAMPOLINE; i <= HIBERNATE_HIBALLOC_PAGE; i += PAGE_SIZE)
		hibernate_enter_resume_mapping(i, i, 0);

	/*
	 * Map current kernel VA range using 2MB pages
	 */
	kern_start_2m_va = (paddr_t)&start & ~(PAGE_MASK_L2);
	kern_end_2m_va = (paddr_t)&end & ~(PAGE_MASK_L2);

	/* amd64 kernels load at 16MB phys (on the 8th 2MB page) */
	phys_page_number = 8;

	for (page = kern_start_2m_va; page <= kern_end_2m_va;
	    page += NBPD_L2, phys_page_number++) {
		pa = (paddr_t)(phys_page_number * NBPD_L2);
		hibernate_enter_resume_mapping(page, pa, 1);
	}

	/*
	 * Identity map the piglet using 2MB pages.
	 */
	phys_page_number = hib_info->piglet_pa / NBPD_L2;

	/* VA == PA */
	piglet_start_va = hib_info->piglet_pa;
	piglet_end_va = piglet_start_va + HIBERNATE_CHUNK_SIZE * 4;

	for (page = piglet_start_va; page <= piglet_end_va;
	    page += NBPD_L2, phys_page_number++) {
		pa = (paddr_t)(phys_page_number * NBPD_L2);
		hibernate_enter_resume_mapping(page, pa, 1);
	}

	/* Unmap MMU pages (stack remains mapped) */
	pmap_kremove(HIBERNATE_PML4T, PAGE_SIZE);
	pmap_kremove(HIBERNATE_PDPT_LOW, PAGE_SIZE);
	pmap_kremove(HIBERNATE_PDPT_HI, PAGE_SIZE);
	pmap_kremove(HIBERNATE_PD_LOW, PAGE_SIZE);
	pmap_kremove(HIBERNATE_PD_LOW2, PAGE_SIZE);
	pmap_kremove(HIBERNATE_PD_HI, PAGE_SIZE);
	pmap_kremove(HIBERNATE_PT_LOW, PAGE_SIZE);
	pmap_kremove(HIBERNATE_PT_LOW2, PAGE_SIZE);
	pmap_kremove(HIBERNATE_PT_HI, PAGE_SIZE);

	pmap_activate(curproc);
}
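
A minimal sketch of what hibernate_enter_resume_mapping() is assumed to do
on amd64 (the real helper lives in the MD hibernate code and is not shown
in these examples): depending on the size argument it writes either a 2MB
PDE with PG_PS set or a 4KB PTE into the resume page tables that were
identity mapped above. A full implementation would also select among the
LOW/LOW2/HI tables based on va; this sketch handles only the low ranges.

void
hibernate_enter_resume_mapping(vaddr_t va, paddr_t pa, int size)
{
	pt_entry_t *pte, npte;

	if (size) {
		/* 2MB large-page mapping via the PD page. */
		pte = (pt_entry_t *)(HIBERNATE_PD_LOW +
		    (pl2_pi(va) * sizeof(pt_entry_t)));
		npte = (pa & PG_LGFRAME) | PG_RW | PG_V | PG_PS;
	} else {
		/* 4KB mapping via the PT page. */
		pte = (pt_entry_t *)(HIBERNATE_PT_LOW +
		    (pl1_pi(va) * sizeof(pt_entry_t)));
		npte = (pa & PG_FRAME) | PG_RW | PG_V;
	}
	*pte = npte;
}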
Example no. 12
/*
 * Create the resume-time page table. This table maps the image (pig) area,
 * the kernel text area, and various utility pages for use during resume,
 * since we cannot overwrite the resuming kernel's page table during inflate
 * and expect things to work properly.
 */
void
hibernate_populate_resume_pt(union hibernate_info *hib_info,
    paddr_t image_start, paddr_t image_end)
{
	int phys_page_number, i;
	paddr_t pa, piglet_start, piglet_end;
	vaddr_t kern_start_4m_va, kern_end_4m_va, page;

	/* Identity map PD, PT, and stack pages */
	pmap_kenter_pa(HIBERNATE_PT_PAGE, HIBERNATE_PT_PAGE, VM_PROT_ALL);
	pmap_kenter_pa(HIBERNATE_PD_PAGE, HIBERNATE_PD_PAGE, VM_PROT_ALL);
	pmap_kenter_pa(HIBERNATE_STACK_PAGE, HIBERNATE_STACK_PAGE, VM_PROT_ALL);
	pmap_activate(curproc);

	bzero((caddr_t)HIBERNATE_PT_PAGE, PAGE_SIZE);
	bzero((caddr_t)HIBERNATE_PD_PAGE, PAGE_SIZE);
	bzero((caddr_t)HIBERNATE_STACK_PAGE, PAGE_SIZE);

	/* PDE for low pages */
	hibernate_enter_resume_4k_pde(0);

	/*
	 * Identity map first 640KB physical for tramps and special utility
	 * pages using 4KB mappings
	 */
	for (i = 0; i < 160; i++) {
		hibernate_enter_resume_mapping(i*PAGE_SIZE, i*PAGE_SIZE, 0);
	}

	/*
	 * Map current kernel VA range using 4M pages
	 */
	kern_start_4m_va = (paddr_t)&start & ~(PAGE_MASK_4M);
	kern_end_4m_va = (paddr_t)&end & ~(PAGE_MASK_4M);

	/* i386 kernels load at 2MB phys (on the 0th 4MB page) */
	phys_page_number = 0;

	for (page = kern_start_4m_va; page <= kern_end_4m_va;
	    page += NBPD, phys_page_number++) {
		pa = (paddr_t)(phys_page_number * NBPD);
		hibernate_enter_resume_mapping(page, pa, 1);
	}

	/*
	 * Identity map the image (pig) area
	 */
	phys_page_number = image_start / NBPD;
	image_start &= ~(PAGE_MASK_4M);
	image_end &= ~(PAGE_MASK_4M);
	for (page = image_start; page <= image_end;
	    page += NBPD, phys_page_number++) {
		pa = (paddr_t)(phys_page_number * NBPD);
		hibernate_enter_resume_mapping(page, pa, 1);
	}

	/*
	 * Map the piglet
	 */
	phys_page_number = hib_info->piglet_pa / NBPD;
	piglet_start = hib_info->piglet_va;
	piglet_end = piglet_start + HIBERNATE_CHUNK_SIZE * 3;
	piglet_start &= ~(PAGE_MASK_4M);
	piglet_end &= ~(PAGE_MASK_4M);
	for (page = piglet_start; page <= piglet_end;
	    page += NBPD, phys_page_number++) {
		pa = (paddr_t)(phys_page_number * NBPD);
		hibernate_enter_resume_mapping(page, pa, 1);
	}
}
Example no. 13
/*
 * Create the resume-time page table. This table maps the image (pig) area,
 * the kernel text area, and various utility pages for use during resume,
 * since we cannot overwrite the resuming kernel's page table during inflate
 * and expect things to work properly.
 */
void
hibernate_populate_resume_pt(union hibernate_info *hib_info,
    paddr_t image_start, paddr_t image_end)
{
	int phys_page_number, i;
	paddr_t pa, piglet_start, piglet_end;
	vaddr_t kern_start_2m_va, kern_end_2m_va, page;
	pt_entry_t *pde, npde;

	/* Identity map MMU pages */
	pmap_kenter_pa(HIBERNATE_PML4T, HIBERNATE_PML4T, VM_PROT_ALL);
	pmap_kenter_pa(HIBERNATE_PDPT_LOW, HIBERNATE_PDPT_LOW, VM_PROT_ALL);
	pmap_kenter_pa(HIBERNATE_PDPT_HI, HIBERNATE_PDPT_HI, VM_PROT_ALL);
	pmap_kenter_pa(HIBERNATE_PD_LOW, HIBERNATE_PD_LOW, VM_PROT_ALL);
	pmap_kenter_pa(HIBERNATE_PD_LOW2, HIBERNATE_PD_LOW2, VM_PROT_ALL);
	pmap_kenter_pa(HIBERNATE_PD_HI, HIBERNATE_PD_HI, VM_PROT_ALL);
	pmap_kenter_pa(HIBERNATE_PT_LOW, HIBERNATE_PT_LOW, VM_PROT_ALL);
	pmap_kenter_pa(HIBERNATE_PT_LOW2, HIBERNATE_PT_LOW2, VM_PROT_ALL);
	pmap_kenter_pa(HIBERNATE_PT_HI, HIBERNATE_PT_HI, VM_PROT_ALL);

	/* Identity map 3 pages for stack */
	pmap_kenter_pa(HIBERNATE_STACK_PAGE, HIBERNATE_STACK_PAGE, VM_PROT_ALL);
	pmap_kenter_pa(HIBERNATE_STACK_PAGE - PAGE_SIZE,
		HIBERNATE_STACK_PAGE - PAGE_SIZE, VM_PROT_ALL);
	pmap_kenter_pa(HIBERNATE_STACK_PAGE - 2*PAGE_SIZE,
		HIBERNATE_STACK_PAGE - 2*PAGE_SIZE, VM_PROT_ALL);
	pmap_activate(curproc);

	bzero((caddr_t)HIBERNATE_PML4T, PAGE_SIZE);
	bzero((caddr_t)HIBERNATE_PDPT_LOW, PAGE_SIZE);
	bzero((caddr_t)HIBERNATE_PDPT_HI, PAGE_SIZE);
	bzero((caddr_t)HIBERNATE_PD_LOW, PAGE_SIZE);
	bzero((caddr_t)HIBERNATE_PD_LOW2, PAGE_SIZE);
	bzero((caddr_t)HIBERNATE_PD_HI, PAGE_SIZE);
	bzero((caddr_t)HIBERNATE_PT_LOW, PAGE_SIZE);
	bzero((caddr_t)HIBERNATE_PT_LOW2, PAGE_SIZE);
	bzero((caddr_t)HIBERNATE_PT_HI, PAGE_SIZE);
	bzero((caddr_t)(HIBERNATE_STACK_PAGE - 3*PAGE_SIZE), 3*PAGE_SIZE);

	/* First 512GB PML4E */
	pde = (pt_entry_t *)(HIBERNATE_PML4T +
		(pl4_pi(0) * sizeof(pt_entry_t)));
	npde = (HIBERNATE_PDPT_LOW) | PG_RW | PG_V;
	*pde = npde;

	/* First 1GB PDPTE */
	pde = (pt_entry_t *)(HIBERNATE_PDPT_LOW +
		(pl3_pi(0) * sizeof(pt_entry_t)));
	npde = (HIBERNATE_PD_LOW) | PG_RW | PG_V;
	*pde = npde;
	
	/* PD for first 2MB */
	pde = (pt_entry_t *)(HIBERNATE_PD_LOW +
		(pl2_pi(0) * sizeof(pt_entry_t)));
	npde = (HIBERNATE_PT_LOW) | PG_RW | PG_V;
	*pde = npde;

	/*
	 * Identity map first 640KB physical for tramps and special utility
	 * pages using 4KB mappings
	 */
	for (i = 0; i < 160; i++) {
		hibernate_enter_resume_mapping(i*PAGE_SIZE, i*PAGE_SIZE, 0);
	}

	/*
	 * Map current kernel VA range using 2MB pages
	 */
	kern_start_2m_va = (paddr_t)&start & ~(PAGE_MASK_2M);
	kern_end_2m_va = (paddr_t)&end & ~(PAGE_MASK_2M);

	/* amd64 kernels load at 16MB phys (on the 8th 2MB page) */
	phys_page_number = 8;

	for (page = kern_start_2m_va; page <= kern_end_2m_va;
	    page += NBPD_L2, phys_page_number++) {
		pa = (paddr_t)(phys_page_number * NBPD_L2);
		hibernate_enter_resume_mapping(page, pa, 1);
	}

	/*
	 * Map the piglet
	 */
	phys_page_number = hib_info->piglet_pa / NBPD_L2;
	piglet_start = hib_info->piglet_va;
	piglet_end = piglet_start + HIBERNATE_CHUNK_SIZE * 3;
	piglet_start &= ~(PAGE_MASK_2M);
	piglet_end &= ~(PAGE_MASK_2M);

	for (page = piglet_start; page <= piglet_end;
	    page += NBPD_L2, phys_page_number++) {
		pa = (paddr_t)(phys_page_number * NBPD_L2);
		hibernate_enter_resume_mapping(page, pa, 1);
	}
}
Example no. 14
void
cpu_lwp_fork(struct lwp *l1, struct lwp *l2, void *stack, size_t stacksize,
    void (*func)(void *), void *arg)
{
	struct pcb *pcbp;
	struct trapframe *tf;
	register_t sp, osp;

#ifdef DIAGNOSTIC
	if (round_page(sizeof(struct user)) > PAGE_SIZE)
		panic("USPACE too small for user");
#endif

	/* Flush the parent process out of the FPU. */
	hppa_fpu_flush(l1);

	/* Now copy the parent PCB into the child. */
	pcbp = &l2->l_addr->u_pcb;
	bcopy(&l1->l_addr->u_pcb, pcbp, sizeof(*pcbp));

	sp = (register_t)l2->l_addr + PAGE_SIZE;
	l2->l_md.md_regs = tf = (struct trapframe *)sp;
	sp += sizeof(struct trapframe);
	bcopy(l1->l_md.md_regs, tf, sizeof(*tf));

	/*
	 * cpu_swapin() is supposed to fill out all the PAs
	 * we are going to need in locore.
	 */
	cpu_swapin(l2);

	/* Activate this process' pmap. */
	pmap_activate(l2);

	/*
	 * Theoretically these could be inherited from the parent,
	 * but set them explicitly just in case.
	 */
	tf->tf_sr7 = HPPA_SID_KERNEL;
	mfctl(CR_EIEM, tf->tf_eiem);
	tf->tf_ipsw = PSW_C | PSW_Q | PSW_P | PSW_D | PSW_I /* | PSW_L */;
	pcbp->pcb_fpregs[HPPA_NFPREGS] = 0;

	/*
	 * Set up return value registers as libc:fork() expects
	 */
	tf->tf_ret0 = l1->l_proc->p_pid;
	tf->tf_ret1 = 1;	/* ischild */
	tf->tf_t1 = 0;		/* errno */

	/*
	 * If specified, give the child a different stack.
	 */
	if (stack != NULL)
		tf->tf_sp = (register_t)stack;

	/*
	 * Build a stack frame for the cpu_switch & co.
	 */
	osp = sp;
	sp += HPPA_FRAME_SIZE + 16*4; /* std frame + callee-save registers */
	*HPPA_FRAME_CARG(0, sp) = tf->tf_sp;
	*HPPA_FRAME_CARG(1, sp) = KERNMODE(func);
	*HPPA_FRAME_CARG(2, sp) = (register_t)arg;
	*(register_t*)(sp + HPPA_FRAME_PSP) = osp;
	*(register_t*)(sp + HPPA_FRAME_CRP) =
		(register_t)switch_trampoline;
	tf->tf_sp = sp;
	fdcache(HPPA_SID_KERNEL, (vaddr_t)l2->l_addr, sp - (vaddr_t)l2->l_addr);
}
Example no. 15
/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the kernel stack and pcb, making the child
 * ready to run, and marking it so that it can return differently
 * than the parent.  Returns 1 in the child process, 0 in the parent.
 * We currently double-map the user area so that the stack is at the same
 * address in each process; in the future we will probably relocate
 * the frame pointers on the stack after copying.
 */
void
cpu_fork(struct proc *p1, struct proc *p2, void *stack, size_t stacksize,
    void (*func)(void *), void *arg)
{
	struct pcb *pcb = &p2->p_addr->u_pcb;
	struct trapframe *tf;
	struct switchframe *sf;

	/*
	 * If fpuproc != p1, then the fpu h/w state is irrelevant and the
	 * state had better already be in the pcb.  This is true for forks
	 * but not for dumps.
	 *
	 * If fpuproc == p1, then we have to save the fpu h/w state to
	 * p1's pcb so that we can copy it.
	 */
	if (p1->p_addr->u_pcb.pcb_fpcpu != NULL)
		fpusave_proc(p1, 1);

	p2->p_md.md_flags = p1->p_md.md_flags;
	syscall_intern(p2);

	/* Copy pcb from proc p1 to p2. */
	if (p1 == curproc) {
		/* Sync the PCB before we copy it. */
		savectx(curpcb);
	}
#ifdef DIAGNOSTIC
	else if (p1 != &proc0)
		panic("cpu_fork: curproc");
#endif
	*pcb = p1->p_addr->u_pcb;

	/*
	 * Preset these so that gdt_compact() doesn't get confused if called
	 * during the allocations below.
	 *
	 * Note: pcb_ldt_sel is handled in the pmap_activate() call when
	 * we run the new process.
	 */
	p2->p_md.md_tss_sel = GSEL(GNULL_SEL, SEL_KPL);

	/*
	 * Activate the address space.  Note this will refresh pcb_ldt_sel.
	 */
	pmap_activate(p2);

	/* Fix up the TSS. */
	pcb->pcb_tss.tss_rsp0 = (u_int64_t)p2->p_addr + USPACE - 16;
	pcb->pcb_tss.tss_ist[0] = (u_int64_t)p2->p_addr + PAGE_SIZE - 16;
	p2->p_md.md_tss_sel = tss_alloc(pcb);

	/*
	 * Copy the trapframe.
	 */
	p2->p_md.md_regs = tf = (struct trapframe *)pcb->pcb_tss.tss_rsp0 - 1;
	*tf = *p1->p_md.md_regs;

	setredzone(p2);

	/*
	 * If specified, give the child a different stack.
	 */
	if (stack != NULL)
		tf->tf_rsp = (u_int64_t)stack + stacksize;

	sf = (struct switchframe *)tf - 1;
	sf->sf_ppl = IPL_NONE;
	sf->sf_r12 = (u_int64_t)func;
	sf->sf_r13 = (u_int64_t)arg;
	if (func == child_return)
		sf->sf_rip = (u_int64_t)child_trampoline;
	else
		sf->sf_rip = (u_int64_t)proc_trampoline;
	pcb->pcb_rsp = (u_int64_t)sf;
	pcb->pcb_rbp = 0;
}
Example no. 16
void vm_map_activate(vm_map_t *map) {
  SCOPED_NO_PREEMPTION();

  PCPU_SET(uspace, map);
  pmap_activate(map ? map->pmap : NULL);
}
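
A hedged usage sketch for the function above (names other than
vm_map_activate() are hypothetical): switching the current CPU onto a user
address space, or detaching from user mappings entirely by passing NULL.

static void enter_user_space(vm_map_t *umap) {
  /* Preemption is blocked inside vm_map_activate() itself. */
  vm_map_activate(umap);
}

static void leave_user_space(void) {
  /* NULL means no user pmap on this CPU; kernel mappings only. */
  vm_map_activate(NULL);
}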