/*
 * vec_restore_from_mcontext:
 *
 *	Load the SPE state found in an mcontext back into the PCB and
 *	trapframe of the current lwp.  The mcontext interleaves the SPE
 *	GPR high halves into the low word of each vector slot (see
 *	vec_save_to_mcontext); only those high words belong in the PCB --
 *	the low words are the ordinary GPRs, restored elsewhere.
 */
void
vec_restore_from_mcontext(lwp_t *l, const mcontext_t *mcp)
{
	struct pcb * const pcb = lwp_getpcb(l);
	const union __vr * const vrs = mcp->__vrf.__vrs;

	KASSERT(l == curlwp);

	/* Flush any live SPE state before overwriting the PCB copy. */
	vec_save();

	/* The 64-bit accumulator is stashed in the upper half of vector 0. */
	pcb->pcb_vr.vreg[8][0] = vrs[0].__vr32[2];
	pcb->pcb_vr.vreg[8][1] = vrs[0].__vr32[3];

	/*
	 * The high halves of the 32 GPRs are packed four-per-row into the
	 * first 8 rows of pcb_vr.vreg; pull each one out of the low word
	 * of its vector slot.
	 */
	for (u_int row = 0; row < 8; row++) {
		const union __vr * const v = &vrs[4 * row];

		pcb->pcb_vr.vreg[row][0] = v[0].__vr32[0];
		pcb->pcb_vr.vreg[row][1] = v[1].__vr32[0];
		pcb->pcb_vr.vreg[row][2] = v[2].__vr32[0];
		pcb->pcb_vr.vreg[row][3] = v[3].__vr32[0];
	}

	/* SPEFSCR goes to both the PCB and the trapframe. */
	l->l_md.md_utf->tf_spefscr = pcb->pcb_vr.vscr = mcp->__vrf.__vscr;
	pcb->pcb_vr.vrsave = mcp->__vrf.__vrsave;
}
/*
 * vec_save_to_mcontext:
 *
 *	Copy the current lwp's SPE state (GPR high halves, accumulator,
 *	SPEFSCR, VRSAVE) into an mcontext and set _UC_POWERPC_SPE in
 *	*flagp.  Returns false, leaving the mcontext untouched, if the
 *	lwp never used SPE.
 */
bool
vec_save_to_mcontext(lwp_t *l, mcontext_t *mcp, unsigned int *flagp)
{
	struct pcb * const pcb = lwp_getpcb(l);

	KASSERT(l == curlwp);

	/* Nothing to copy if SPE was never used. */
	if (!vec_used_p(l))
		return false;

	/* Sync the PCB with the live register state. */
	vec_save();

	mcp->__gregs[_REG_MSR] |= PSL_SPV;

	/*
	 * Interleave the GPR high halves (kept four-per-row in the first
	 * 8 rows of pcb_vr.vreg -- see vec_restore_from_mcontext) with
	 * the GPR low halves from the trapframe, one 64-bit register per
	 * vector slot.  Note the loop bound: 8 rows * 4 words covers all
	 * 32 registers; iterating 32 times would advance vr and fixreg
	 * by 128 entries, overrunning both the mcontext vector array and
	 * the trapframe's fixreg[].
	 */
	union __vr *vr = mcp->__vrf.__vrs;
	const register_t *fixreg = l->l_md.md_utf->tf_fixreg;
	for (u_int i = 0; i < 8; i++, vr += 4, fixreg += 4) {
		vr[0].__vr32[0] = pcb->pcb_vr.vreg[i][0];
		vr[0].__vr32[1] = fixreg[0];
		vr[0].__vr32[2] = 0;
		vr[0].__vr32[3] = 0;
		vr[1].__vr32[0] = pcb->pcb_vr.vreg[i][1];
		vr[1].__vr32[1] = fixreg[1];
		vr[1].__vr32[2] = 0;
		vr[1].__vr32[3] = 0;
		vr[2].__vr32[0] = pcb->pcb_vr.vreg[i][2];
		vr[2].__vr32[1] = fixreg[2];
		vr[2].__vr32[2] = 0;
		vr[2].__vr32[3] = 0;
		vr[3].__vr32[0] = pcb->pcb_vr.vreg[i][3];
		vr[3].__vr32[1] = fixreg[3];
		vr[3].__vr32[2] = 0;
		vr[3].__vr32[3] = 0;
	}

	/* Stash the 64-bit accumulator in the upper half of vector 0. */
	mcp->__vrf.__vrs[0].__vr32[2] = pcb->pcb_vr.vreg[8][0];
	mcp->__vrf.__vrs[0].__vr32[3] = pcb->pcb_vr.vreg[8][1];

	mcp->__vrf.__vrsave = pcb->pcb_vr.vrsave;
	mcp->__vrf.__vscr = l->l_md.md_utf->tf_spefscr;

	*flagp |= _UC_POWERPC_SPE;

	return true;
}
/* * Routine: cpu_sleep * Function: */ void cpu_sleep( void) { struct per_proc_info *proc_info; unsigned int i; unsigned int wait_ncpus_sleep, ncpus_sleep; facility_context *fowner; proc_info = getPerProc(); proc_info->running = FALSE; fowner = proc_info->FPU_owner; /* Cache this */ if(fowner) /* If anyone owns FPU, save it */ fpu_save(fowner); proc_info->FPU_owner = NULL; /* Set no fpu owner now */ fowner = proc_info->VMX_owner; /* Cache this */ if(fowner) vec_save(fowner); /* If anyone owns vectors, save it */ proc_info->VMX_owner = NULL; /* Set no vector owner now */ if (proc_info->cpu_number == master_cpu) { proc_info->cpu_flags &= BootDone; proc_info->interrupts_enabled = 0; proc_info->pending_ast = AST_NONE; if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) { ml_phys_write((vm_offset_t)&ResetHandler + 0, RESET_HANDLER_START); ml_phys_write((vm_offset_t)&ResetHandler + 4, (vm_offset_t)_start_cpu); ml_phys_write((vm_offset_t)&ResetHandler + 8, (vm_offset_t)&PerProcTable[master_cpu]); __asm__ volatile("sync"); __asm__ volatile("isync"); }
/*
 * chudxnu_private_get_vec_regs:
 *
 *	Return the topmost VMX savearea (user or kernel) for the current
 *	activation's facility context, first saving any live vector state
 *	so the savearea contents are up to date.
 */
static savearea_vec *chudxnu_private_get_vec_regs(void)
{
	vec_save(current_act()->mact.curctx); // just in case it's live, save it

	return current_act()->mact.curctx->VMXsave; // take the top savearea (user or kernel)
}