/* Test driver: assumes return_1 (), return_arg (), return_sum (),
   return_named_foo (), foo_parm_returns_i (), foo_parm_returns_foo (),
   return_foo (), abort_because () and the class foo (with its static
   instance counter foo::si) are declared earlier in the test.  */
int
main ()
{
  int ii = return_1 ();
  if (ii != 1)
    abort_because ("wrong value returned");

  int j = return_arg (42);
  if (j != 42)
    abort_because ("wrong value returned");

  int k = return_sum (-69, 69);
  if (k != 0)
    abort_because ("wrong value returned");

  /* Each foo constructed bumps the instance counter foo::si.  */
  foo f1 = return_named_foo ();
  if (foo::si != 1)
    abort_because ("wrong number of foos");

  f1.i = 5;
  int l = foo_parm_returns_i (f1);
  if (l != 5)
    abort_because ("l != 5");

  foo f2 = foo_parm_returns_foo (f1);
  if (foo::si != 2)
    abort_because ("wrong number of foos");
  if (f2.i != 5)
    abort_because ("f2.i != 5");

  foo f3 = return_foo ();
  if (foo::si != 3)
    abort_because ("wrong number of foos");

  printf ("PASS\n");
  return 0;
}
sval
h_create_partition(struct cpu_thread *thread, uval donated_laddr,
		   uval laddr_size, uval pinfo_offset)
{
	struct os *os = os_create();
	uval rc;

	if (os == NULL) {
		return H_Parameter;
	}

	rc = resource_transfer(thread, MEM_ADDR, 0, donated_laddr,
			       laddr_size, os);
	if (rc == H_Success) {
		/* now that we have memory, we can arch_os_init() */
		rc = arch_os_init(os, pinfo_offset);
	}

	if (rc == H_Success) {
		/* hand the new partition's LPID back to the caller */
		return_arg(thread, 1, os->po_lpid);
		return H_Success;
	}

	os_free(os);
	return H_Parameter;
}
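/*
 * Illustrative sketch, not hypervisor source: a toy model of the
 * return_arg() convention used by these hcalls. Per the comments on
 * h_set_sched_params() below, the hcall status comes back in r3 and
 * extra output arguments land in r4, r5, ... The register layout and
 * all names here (h_toy, gprs) are assumptions made for the demo.
 */
#include <stdio.h>

typedef unsigned long uval;
typedef long sval;

enum { H_Success = 0 };			/* illustrative value */

struct cpu_thread {
	uval gprs[8];			/* gprs[4] models r4, etc. */
};

/* Output argument n of an hcall is stored in register r(3 + n). */
static void
return_arg(struct cpu_thread *thread, int argnum, uval value)
{
	thread->gprs[3 + argnum] = value;
}

/* A toy hcall: status in "r3" (the C return value), one output in r4. */
static sval
h_toy(struct cpu_thread *thread)
{
	return_arg(thread, 1, 42);
	return H_Success;
}

int
main(void)
{
	struct cpu_thread t = { { 0 } };

	if (h_toy(&t) == H_Success)
		printf("r4 = %lu\n", t.gprs[4]);	/* prints: r4 = 42 */
	return 0;
}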
sval
h_grant_logical(struct cpu_thread *thread, uval flags, uval logical_hi,
		uval logical_lo, uval length, uval unit_address)
{
	struct os *dest_os;

	if (!(flags & (MEM_ADDR | MMIO_ADDR | INTR_SRC))) {
		return H_Parameter;
	}

	if (unit_address == (uval)H_SELF_LPID) {
		dest_os = thread->cpu->os;
	} else {
		dest_os = os_lookup(unit_address);
	}
	if (!dest_os) {
		return H_Parameter;
	}

	struct sys_resource *res;
	sval err = grant_resource(&res, thread->cpu->os, flags,
				  logical_hi, logical_lo, length, dest_os);

	if (err >= 0) {
		return_arg(thread, 1, err);
		err = H_Success;
	}
	return err;
}
/*
 * h_set_sched_params(struct cpu_thread *thread, uval lpid,
 *		      uval phys_cpu_num, uval required, uval desired)
 *
 * Temporary interface for setting scheduling parameters.
 *
 * "required" and "desired" are bitmaps that specify which slots the
 * calling OS wants. Bits in "required" represent scheduling slots that
 * must be assignable to this OS (and locked down). Bits in "desired"
 * represent scheduling slots that the HV may set or unset at any time,
 * without guarantees; fulfilling "desired" requests therefore has no
 * bearing on the success or failure of this call.
 *
 * On return, r4 contains a bitmap identifying the locked-down slots
 * (which cannot be yielded to satisfy set_sched_params() calls of
 * other partitions). r5 contains a bitmap representing all in-use
 * scheduling slots; the caller can use this information to try again
 * if an error has occurred. r6 contains the bitmask actually assigned
 * (a rotation of "required").
 *
 * The return value, if positive (i.e. success), identifies the
 * leftward rotation of the input parameters that was required to
 * fulfill the request; see the worked example after this function.
 *
 * FIXME: Should have a mechanism to restrict the right to call this
 * function to the controlling OS only.
 *
 * FIXME: Re-implement using standard LPAR interfaces, if appropriate.
 *
 * Examples of usage in "test_sched.c".
 */
sval
h_set_sched_params(struct cpu_thread *thread, uval lpid,
		   uval phys_cpu_num, uval required, uval desired)
{
	/* The real per-cpu state to operate on is named by phys_cpu_num */
	uval err = H_Success;
	struct os *target_os = os_lookup(lpid);
	struct cpu *target_cpu;
	struct hype_per_cpu_s *hpc;

	/* Bounds/validity checks on lpid and phys_cpu_num */
	if ((!target_os && (lpid != (uval)H_SELF_LPID)) ||
	    (phys_cpu_num > MAX_CPU && phys_cpu_num != THIS_CPU)) {
		err = H_Parameter;
		goto bad_os;
	}
	if (!target_os) {
		target_os = thread->cpu->os;
	}

	write_lock_acquire(&target_os->po_mutex);

	if (phys_cpu_num == THIS_CPU) {
		/* TODO - fixme WHAT? */
		phys_cpu_num = thread->cpu->logical_cpu_num;
	}
	/* resolve the per-cpu slot only once phys_cpu_num is final */
	hpc = &hype_per_cpu[phys_cpu_num];

	/* TODO - fixme WHAT? */
	target_cpu = target_os->cpu[phys_cpu_num];
	if (!target_cpu) {
		err = H_Parameter;
		goto bad_cpu;
	}

	lock_acquire(&hpc->hpc_mutex);
	err = locked_set_sched_params(target_cpu, phys_cpu_num,
				      required, desired);

	/* Provide current settings to the OS, so it can compensate */
	return_arg(thread, 1, hpc->hpc_sched.locked_slots);
	return_arg(thread, 2, hpc->hpc_sched.used_slots);
	return_arg(thread, 3, target_cpu->sched.required);

	lock_release(&hpc->hpc_mutex);

/* *INDENT-OFF* */
bad_cpu:
/* *INDENT-ON* */
	write_lock_release(&target_os->po_mutex);

/* *INDENT-OFF* */
bad_os:
/* *INDENT-ON* */
	return err;
}
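/*
 * Worked example of the slot-rotation convention documented above;
 * not hypervisor source. NUM_SLOTS, the masks and the rotation value
 * are made-up numbers. A positive return value r means the request
 * was satisfied by rotating "required" left by r slots; the rotated
 * mask is what the caller gets back in r6.
 */
#include <stdio.h>

typedef unsigned long uval;
typedef long sval;

#define NUM_SLOTS 8		/* assumed number of scheduling slots */

/* Rotate an NUM_SLOTS-bit slot mask left by n positions. */
static uval
rotl_slots(uval mask, unsigned int n)
{
	n %= NUM_SLOTS;
	return ((mask << n) | (mask >> (NUM_SLOTS - n))) &
		((1UL << NUM_SLOTS) - 1);
}

int
main(void)
{
	uval required = 0x03;	/* caller wants two adjacent slots    */
	uval locked = 0x06;	/* slots 1-2 already locked (r4)      */
	sval rot = 3;		/* hypothetical positive return value */
	uval assigned = rotl_slots(required, (unsigned int)rot);

	/* Slots 3-4 (0x18) satisfy the request without touching the
	 * locked slots: 0x18 is "required" rotated left by rot, and
	 * is what would come back in r6. */
	printf("locked 0x%02lx, assigned 0x%02lx\n", locked, assigned);
	return 0;
}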
/*
 * Return the Root/PDE/PTE entry that is stored in the current shadow
 * page table or, if the shadow page table is empty, the one stored in
 * the partition page table.
 *
 * ``flags'' are defined as:
 *
 *	H_GET_ENTRY_ROOT	Return a pointer to the current page directory
 *	H_GET_ENTRY_PDE		Return the PDE for the specified ``vaddr''
 *	H_GET_ENTRY_PTE		Return the PTE for the specified ``vaddr''
 *
 * When H_GET_ENTRY_PHYSICAL is or-ed into the flags, the physical
 * address of the Root/PDE/PTE is returned rather than the logical
 * address.
 */
sval
h_get_pte(struct cpu_thread *thread, uval flags, uval vaddr)
{
	union pgframe *pgd = thread->pgd;

#ifdef PGE_DEBUG
	hprintf("H_GET_PTE: flags {");
	if (flags & H_GET_ENTRY_ROOT)
		hprintf(" ROOT");
	if (flags & H_GET_ENTRY_PDE)
		hprintf(" PDE");
	if (flags & H_GET_ENTRY_PTE)
		hprintf(" PTE");
	if (flags & H_GET_ENTRY_PHYSICAL)
		hprintf(" Physical");
	hprintf(" }, vaddr 0x%lx\n", vaddr);
#endif

	if (vaddr >= HV_VBASE)
		return H_NOT_FOUND;

	/* Read the root directory entry */
	if (flags & H_GET_ENTRY_ROOT) {
		if (flags & H_GET_ENTRY_PHYSICAL) {
			return_arg(thread, 1, pgd->pgdir.hv_paddr);
		} else {
			/* logical */
			return_arg(thread, 1, pgd->pgdir.lp_laddr);
		}
		return H_Success;
	}

	uval pdi = (vaddr & PDE_MASK) >> LOG_PDSIZE;

	/* Read a page directory entry */
	if (flags & H_GET_ENTRY_PDE) {
		uval lp_pde = pgd->pgdir.lp_vaddr[pdi];
		uval hv_pde = pgd->pgdir.hv_vaddr[pdi];

		/* no LPAR pde for this address */
		if ((lp_pde & PTE_P) == 0)
			return H_NOT_FOUND;

		if (flags & H_GET_ENTRY_PHYSICAL) {
			/* no HV pde for this address, create one */
			if ((hv_pde & PTE_P) == 0) {
				assert(0, "not yet");
			}
			/* hv_pde may have changed */
			return_arg(thread, 1, pgd->pgdir.hv_vaddr[pdi]);
		} else {
			/* logical */
			return_arg(thread, 1, combine(lp_pde, hv_pde));
		}
		return H_Success;
	}

	uval pti = (vaddr & PTE_MASK) >> LOG_PGSIZE;

	/* Read a page table entry */
	if (flags & H_GET_ENTRY_PTE) {
		uval lp_pde = pgd->pgdir.lp_vaddr[pdi];
		uval hv_pde = pgd->pgdir.hv_vaddr[pdi];

		/* no LPAR pde for this address */
		if ((lp_pde & PTE_P) == 0)
			return H_NOT_FOUND;

		if (lp_pde & PTE_PS) {
			/* large page: the PDE itself is the mapping */
			if (flags & H_GET_ENTRY_PHYSICAL) {
				/* no HV pde for this address, create one */
				if ((hv_pde & PTE_P) == 0) {
					assert(0, "not yet");
				}
				/* hv_pde may have changed */
				return_arg(thread, 1,
					   pgd->pgdir.hv_vaddr[pdi]);
			} else {
				/* logical */
				return_arg(thread, 1,
					   combine(lp_pde, hv_pde));
			}
			return H_Success;
		}

		union pgframe *pgt = pgd->pgdir.pgt[pdi];
		uval lp_pte = pgt->pgtab.lp_vaddr[pti];
		uval hv_pte = pgt->pgtab.hv_vaddr[pti];

		/* no LPAR pte for this address */
		if ((lp_pte & PTE_P) == 0)
			return H_NOT_FOUND;

		if (flags & H_GET_ENTRY_PHYSICAL) {
			/* no HV pte for this address, create one */
			if ((hv_pte & PTE_P) == 0) {
				if (!pgc_set_pte(thread, pgt, pti, lp_pte))
					return H_NOT_FOUND;
			}
			/* hv_pte may have changed */
			return_arg(thread, 1, pgt->pgtab.hv_vaddr[pti]);
		} else {
			/* logical */
			return_arg(thread, 1, combine(lp_pte, hv_pte));
		}
		return H_Success;
	}

	return H_Parameter;
}
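/*
 * Worked example of the vaddr decomposition used by h_get_pte() above;
 * not hypervisor source. The shift/mask values are assumptions that
 * match standard 32-bit x86 paging with 4 KB pages (the real
 * definitions live in the hypervisor headers).
 */
#include <stdio.h>

typedef unsigned long uval;

#define LOG_PGSIZE	12		/* 4 KB pages */
#define LOG_PDSIZE	22		/* 4 MB per page-directory slot */
#define PTE_MASK	0x003ff000UL	/* bits 12-21: page-table index */
#define PDE_MASK	0xffc00000UL	/* bits 22-31: page-dir index */

int
main(void)
{
	uval vaddr = 0x0804a123UL;	/* arbitrary example address */
	uval pdi = (vaddr & PDE_MASK) >> LOG_PDSIZE;	/* 0x20 */
	uval pti = (vaddr & PTE_MASK) >> LOG_PGSIZE;	/* 0x4a */
	uval off = vaddr & ((1UL << LOG_PGSIZE) - 1);	/* 0x123 */

	/* h_get_pte() uses pdi to pick the PDE and, for small pages,
	 * pti to pick the PTE within the page table that PDE names. */
	printf("pdi=%#lx pti=%#lx offset=%#lx\n", pdi, pti, off);
	return 0;
}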