/*
 * __remap_reserved_page - move one of the process's pre-reserved pages to
 * virtual address `va` with protection bits `pte_flags`.
 *
 * Scans the reserved-page region described by __eea for a slot whose PTE is
 * present, maps that slot's physical frame at `va`, and then removes the old
 * reserved-region mapping.  On success the UAREA reserved-page count is
 * decremented.
 *
 * Returns 0 on success, -1 if no reserved page is available or either
 * system call fails.
 */
int
__remap_reserved_page (u_int va, u_int pte_flags)
{
  u_int base = PGNO ((u_int) __eea->eea_reserved_first);
  u_int idx;

  for (idx = 0; idx < __eea->eea_reserved_pages; idx++) {
    if (!(vpt[base + idx] & PG_P)) {
      continue;			/* this reserved slot holds no present page */
    }

    /* Map the reserved frame at the requested address, then drop the old
       mapping in the reserved region.  Either failure aborts the remap. */
    if (_exos_self_insert_pte (CAP_ROOT,
			       ppnf2pte (PGNO (vpt[base + idx]), pte_flags),
			       va, 0, NULL) < 0 ||
	_exos_self_unmap_page (CAP_ROOT,
			       (u_int) __eea->eea_reserved_first +
			       idx * NBPG) < 0) {
      sys_cputs ("__remap_reserved_page: can't remap\n");
      return -1;
    }

    UAREA.u_reserved_pages--;
    return 0;
  }

  sys_cputs ("__remap_reserved_page: none left\n");
  return -1;
}
/*
 * exos_bufcache_alloc - allocate a fresh physical page for buffer-cache
 * block (dev, blk), optionally zero-filling it, and insert it into the
 * buffer cache.
 *
 * The allocation is done in two mapping steps:
 *   1. insert a writeable PTE at a scratch address just past the bufcache
 *      region (BUFCACHE_ADDR(si_nppages)), which causes the kernel to
 *      allocate a physical page;
 *   2. map that same physical page at its canonical bufcache address with
 *      the caller-requested protection, then remove the scratch mapping so
 *      a non-writeable bc entry is never left mapped writeable.
 *
 * Returns the canonical bufcache virtual address of the page, or NULL if
 * the initial allocation mapping fails.  Failures after the page exists
 * are treated as fatal (assert).
 */
void *
exos_bufcache_alloc (u32 dev, u32 blk, int zerofill, int writeable, int usexn)
{
  int ret;
  /* Scratch "allocation" address: one page past the last bufcache slot. */
  unsigned int vaddr = BUFCACHE_ADDR (__sysinfo.si_nppages);

  /* Normalize the boolean into the actual page-table write bit. */
  if (writeable) {
    writeable = PG_W;
  }

  /* This first call to insert_pte causes a physical page to be allocated. */
  /* Start with page mapped writeable, since might be zerofill. */
  if (((ret = _exos_self_insert_pte (CAP_ROOT, PG_W | PG_P | PG_U | PG_SHARED,
				     vaddr, ESIP_DONTPAGE, NULL)) < 0) ||
      (vpt[PGNO(vaddr)] == 0)) {
    kprintf ("exos_bufcache_alloc: _exos_self_insert_pte failed (ret %d)\n",
	     ret);
    return (NULL);
  }

  /* Zero the page through the (writeable) scratch mapping. */
  if (zerofill) {
    bzero ((char *)vaddr, NBPG);
  }

  /* do final-location mapping based on "writeable" variable */
  /* NOTE(review): the vpt check below re-tests the scratch mapping
     (PGNO(vaddr) is still the scratch page here), not the newly inserted
     canonical mapping — presumably intentional, but verify. */
  if (((ret = _exos_self_insert_pte (CAP_ROOT,
				     ppnf2pte(va2ppn(vaddr),
					      writeable | PG_P | PG_U |
					      PG_SHARED),
				     BUFCACHE_ADDR (va2ppn(vaddr)),
				     ESIP_DONTPAGE,
				     &__sysinfo.si_pxn[dev])) < 0) ||
      (vpt[PGNO(vaddr)] == 0)) {
    kprintf ("exos_bufcache_alloc: failed to add real mapping (ret %d)\n",
	     ret);
    assert (0);
  }

  /* Switch to the canonical bufcache address of the allocated frame. */
  vaddr = BUFCACHE_ADDR (va2ppn(vaddr));

  /* Unmap the allocation mapping before inserting into bc, to make sure */
  /* that we never have a non-writeable bc entry mapped writable... */
  if ((ret = _exos_self_unmap_page (CAP_ROOT,
				    BUFCACHE_ADDR(__sysinfo.si_nppages)))
      < 0) {
    kprintf ("exos_bufcache_alloc: failed to clobber fake mapping "
	     "(ret %d)\n", ret);
    assert (0);
  }

#if 1
  /* Register the page in the buffer cache; insert returns the canonical
     address, which must match what we computed above. */
  vaddr = (u_int) exos_bufcache_insert (dev, blk, (void *)vaddr, usexn);
  assert (vaddr == BUFCACHE_ADDR (va2ppn(vaddr)));
#else
  vaddr = BUFCACHE_ADDR (va2ppn(vaddr));
#endif

  return ((void *) vaddr);
}
/*
 * exos_bufcache_map64 - map the physical page backing buffer-cache entry
 * (dev, blk64) into this process at its canonical bufcache address.
 *
 * If bc_entry is NULL it is looked up first; a failed lookup returns NULL.
 * Devices beyond MAX_DISKS have no si_pxn slot, so a pseudo Xn_name is
 * built for them on the stack.  After the mapping is inserted, the entry
 * is re-validated; if it changed underneath us (lost race) the mapping is
 * torn down and NULL is returned.
 */
void *
exos_bufcache_map64 (struct bc_entry *bc_entry, u32 dev, u_quad_t blk64,
		     u_int writeable)
{
  struct Xn_name xn_nfs;
  struct Xn_name *xn;
  u_int vaddr;
  int ret;

  if (bc_entry == NULL) {
    bc_entry = __bc_lookup64 (dev, blk64);
    if (bc_entry == NULL) {
      return (NULL);
    }
  }

  vaddr = BUFCACHE_ADDR (bc_entry->buf_ppn);

  /* Translate the boolean into the PTE write-permission bit. */
  if (writeable) {
    writeable = PG_W;
  }

  /* Pick the protection name: a synthetic one for non-disk devices, the
     per-disk entry from sysinfo otherwise. */
  if (bc_entry->buf_dev > MAX_DISKS) {
    xn_nfs.xa_dev = bc_entry->buf_dev;
    xn_nfs.xa_name = 0;
    xn = &xn_nfs;
  } else {
    xn = &__sysinfo.si_pxn[bc_entry->buf_dev];
  }

  ret = _exos_self_insert_pte (CAP_ROOT,
			       ppnf2pte(bc_entry->buf_ppn,
					PG_P | PG_U | writeable | PG_SHARED),
			       (u_int)vaddr, ESIP_DONTPAGE, xn);

  /* Re-check the entry after mapping: if any identifying field moved, we
     raced with someone reclaiming/replacing the block. */
  if ((bc_entry->buf_ppn != BUFCACHE_PGNO(vaddr)) ||
      (bc_entry->buf_state == BC_EMPTY) ||
      (bc_entry->buf_blk64 != blk64) ||
      (bc_entry->buf_dev != dev) ||
      (bc_entry->buf_ppn != (va2ppn(vaddr)))) {
    kprintf ("buf_state %d, buf_blk %x:%x, diskBlock %x:%x, buf_dev %d, "
	     "dev %d\n", bc_entry->buf_state,
	     QUAD2INT_HIGH(bc_entry->buf_blk64),
	     QUAD2INT_LOW(bc_entry->buf_blk64),
	     QUAD2INT_HIGH(blk64), QUAD2INT_LOW(blk64),
	     bc_entry->buf_dev, dev);
    kprintf ("buf_ppn %d, expected %d\n", bc_entry->buf_ppn, va2ppn(vaddr));
    kprintf ("gotcha: lost race detected (and handled) in "
	     "exos_bufcache_map\n");
    exos_bufcache_unmap64 (dev, blk64, (void *)vaddr);
    vaddr = 0;
  } else if (ret != 0) {
    kprintf ("exos_bufcache_map: _exos_self_insert_pte failed (ret %d, "
	     "vaddr %x, ppn %d)\n", ret, vaddr, bc_entry->buf_ppn);
    assert (ret == 0);
  }

  return ((void *) vaddr);
}
/*
 * exos_bufcache_map - map the physical page backing buffer-cache entry
 * (dev, blk) into this process at its canonical bufcache address.
 *
 * If bc_entry is NULL it is looked up first; a failed lookup returns NULL.
 * After inserting the mapping, the entry is re-validated; if it changed
 * underneath us (lost race with reclamation) the mapping is removed and
 * NULL is returned.
 *
 * Fix: select the Xn_name the same way exos_bufcache_map64 does.  The
 * original indexed __sysinfo.si_pxn[buf_dev] unconditionally, which reads
 * out of bounds for devices beyond MAX_DISKS (e.g. NFS pseudo-devices);
 * those now get a stack-built pseudo Xn_name, matching the 64-bit variant.
 */
void *
exos_bufcache_map (struct bc_entry *bc_entry, u32 dev, u32 blk,
		   u_int writeable)
{
  int ret;
  u_int vaddr;
  struct Xn_name *xn;
  struct Xn_name xn_nfs;

  if (bc_entry == NULL) {
    bc_entry = __bc_lookup (dev, blk);
    if (bc_entry == NULL) {
      return (NULL);
    }
  }

  vaddr = BUFCACHE_ADDR (bc_entry->buf_ppn);

  /* Translate the boolean into the PTE write-permission bit. */
  if (writeable) {
    writeable = PG_W;
  }

  /* Non-disk devices have no si_pxn slot; build a pseudo name for them
     (consistent with exos_bufcache_map64). */
  if (bc_entry->buf_dev > MAX_DISKS) {
    xn_nfs.xa_dev = bc_entry->buf_dev;
    xn_nfs.xa_name = 0;
    xn = &xn_nfs;
  } else {
    xn = &__sysinfo.si_pxn[bc_entry->buf_dev];
  }

  ret = _exos_self_insert_pte (CAP_ROOT,
			       ppnf2pte(bc_entry->buf_ppn,
					PG_P | PG_U | writeable | PG_SHARED),
			       (u_int)vaddr, ESIP_DONTPAGE, xn);

  /* Re-check the entry after mapping: if any identifying field moved, we
     raced with someone reclaiming/replacing the block. */
  if ((bc_entry->buf_ppn != BUFCACHE_PGNO(vaddr)) ||
      (bc_entry->buf_state == BC_EMPTY) ||
      (bc_entry->buf_blk != blk) ||
      (bc_entry->buf_dev != dev) ||
      (bc_entry->buf_ppn != (va2ppn(vaddr)))) {
    kprintf ("buf_state %d, buf_blk %d, diskBlock %d, buf_dev %d, dev %d\n",
	     bc_entry->buf_state, bc_entry->buf_blk, blk,
	     bc_entry->buf_dev, dev);
    kprintf ("buf_ppn %d, expected %d\n", bc_entry->buf_ppn, va2ppn(vaddr));
    kprintf ("gotcha: lost race detected (and handled) in "
	     "exos_bufcache_map\n");
    exos_bufcache_unmap (dev, blk, (void *)vaddr);
    vaddr = 0;
  } else if (ret != 0) {
    kprintf ("exos_bufcache_map: _exos_self_insert_pte failed (ret %d, "
	     "vaddr %x, ppn %d)\n", ret, vaddr, bc_entry->buf_ppn);
    assert (ret == 0);
  }

  return ((void *) vaddr);
}