int
exos_bufcache_unmap (u32 dev, u32 blk, void *ptr)
{
   unsigned int vaddr = (unsigned int) ptr;
   struct bc_entry *bc_entry;
   int ret;

   //assert (size == NBPG);

   /* GROK -- this check should go away after awhile... */
   if (! (((bc_entry = __bc_lookup(dev, blk)) != NULL) &&
          (bc_entry->buf_ppn == BUFCACHE_PGNO(vaddr)))) {
      kprintf ("exos_bufcache_unmap: not actually mapped (dev %d, blk %d, "
               "ptr %p, bc_entry %p)\n", dev, blk, ptr, bc_entry);
      return (-1);
      //assert (0);
   }

   if ((vaddr < BUFCACHE_REGION_START) || (vaddr >= BUFCACHE_REGION_END)) {
      kprintf ("exos_bufcache_unmap: ptr (%p) out of range\n", ptr);
      assert (0);
   }

   if ((ret = _exos_self_unmap_page (CAP_ROOT, vaddr)) < 0) {
      kprintf ("exos_bufcache_unmap: _exos_self_unmap_page failed (ret %d)\n",
               ret);
      assert (0);
   }

   return (0);
}
void *
exos_bufcache_insert (u32 dev, u32 blk, void *ptr, int usexn)
{
   int ret;
   u_int vaddr;
   struct bc_entry *bc_entry;

   vaddr = BUFCACHE_ADDR (va2ppn(ptr));

   /* GROK -- this does not currently install mappings for new vaddr, so it */
   /* must be assuming that vaddr and ptr are the same!! */
   assert (vaddr == (uint) ptr);

   if (usexn == 0) {		/* don't use XN */
      /* kprintf ("vaddr %p, vpn %d, pte %x, ppn %d\n", entry->buffer,
                  ((u_int)entry->buffer >> PGSHIFT),
                  (u_int)vpt[(u_int)entry->buffer >> PGSHIFT],
                  vpt[(u_int)entry->buffer >> PGSHIFT] >> PGSHIFT); */
      pp_state.ps_readers = pp_state.ps_writers = PP_ACCESS_ALL;
      if ((ret = sys_bc_insert (&__sysinfo.si_pxn[dev], blk, 0, CAP_ROOT,
                                va2ppn(ptr), &pp_state)) != 0) {
         if (ret == -E_EXISTS) {
            kprintf ("gotcha: lost race detected (and handled) in "
                     "exos_bufcache_insert\n");
            return (NULL);
         }
         kprintf ("sys_bc_insert failed: ret %d (ppn %d, diskBlock %d, "
                  "buffer %p)\n", ret, va2ppn(ptr), blk, ptr);
         kprintf ("dev %d, blk %d\n", dev, blk);
         assert (0);
      }

   } else {			/* use XN */
#ifdef XN
      //kprintf ("xn_bind: diskBlock %d, inodeNum %d, block %d, zerofill %d\n",
      //         diskBlock, entry->header.inodeNum, entry->header.block,
      //         zerofill);
      //if ((ret = sys_xn_bind (blk, virt_to_ppn(ptr), (cap_t)CAP_ROOT,
      //                        ((zerofill) ? XN_ZERO_FILL : XN_BIND_CONTENTS),
      //                        0)) < 0) {
      if ((ret = sys_xn_bind (blk, va2ppn(ptr), (cap_t)CAP_ROOT, XN_ZERO_FILL,
                              0)) < 0) {
         kprintf ("sys_xn_bind failed: ret %d (ppn: %d, blk %d, buffer %p)\n",
                  ret, va2ppn(ptr), blk, ptr);
         kprintf ("dev %d, blk %d\n", dev, blk);
         assert (0);
      }
#else
      assert (0);
#endif
   }

   if ((bc_entry = __bc_lookup (dev, blk)) == NULL) {
      kprintf ("lookup failed after insert\n");
      assert (0);
   }
   assert (bc_entry->buf_ppn == BUFCACHE_PGNO(vaddr));

   return ((void *) vaddr);
}
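A minimal caller-side sketch (not part of the original source) of the non-XN insert path above. It assumes the caller already owns a physical page mapped at its canonical BUFCACHE_REGION address, as the assert on vaddr == ptr requires, and that dev/blk name the block the page holds. A NULL return from exos_bufcache_insert signals the -E_EXISTS lost race, in which case the caller can fall back to the entry the winner installed, using exos_bufcache_map (defined further below). The helper name is hypothetical.

/* Hypothetical helper: make the page mapped at "buf" the cached copy of
   (dev, blk), or, on a lost race, map the copy another process inserted. */
static void *
insert_or_map_existing (u32 dev, u32 blk, void *buf)
{
   void *ptr = exos_bufcache_insert (dev, blk, buf, 0);	/* usexn == 0 */

   if (ptr != NULL)
      return (ptr);		/* our page is now the cached copy */

   /* NULL means sys_bc_insert returned -E_EXISTS: another process
      inserted (dev, blk) first, so map that entry instead. */
   return (exos_bufcache_map (NULL, dev, blk, 1));
}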
void
main ()
{
   int ret;
   struct Xn_name xn;
   struct bc_entry *b;

   printf ("Hello\n");

   xn.xa_dev = PART;
   xn.xa_name = FSID;

   sys_bc_flush (PART, micropart_part_off (MICRO_PART), 1);

   ret = sys_micropart_init (PART, CAP_ROOT);
   printf ("sys_micropart_init returned %d\n", ret);

   ret = sys_micropart_bootfs (PART, CAP_ROOT, FSID);
   printf ("sys_micropart_bootfs returned %d\n", ret);

   ret = _exos_bc_read_and_insert (PART, micropart_part_off (MICRO_PART), 1,
                                   NULL);
   printf ("_exos_bc_read_and_insert returned %d\n", ret);

   b = __bc_lookup (PART, micropart_part_off (MICRO_PART));
   assert (b);

   ret = sys_self_bc_buffer_map (&xn, CAP_ROOT,
                                 (b->buf_ppn << PGSHIFT) | PG_U | PG_P, VA);
   printf ("sys_self_bc_buffer_map returned %d\n", ret);
   assert (sys_self_insert_pte (CAP_ROOT, 0, VA) == 0);

   ret = sys_micropart_alloc (PART, MICRO_PART, CAP_ROOT, FSID);
   printf ("sys_micropart_alloc returned %d\n", ret);

   ret = sys_self_bc_buffer_map (&xn, CAP_ROOT,
                                 (b->buf_ppn << PGSHIFT) | PG_U | PG_P, VA);
   printf ("sys_self_bc_buffer_map returned %d\n", ret);
   assert (sys_self_insert_pte (CAP_ROOT, 0, VA) == 0);

   ret = sys_micropart_dealloc (PART, MICRO_PART, CAP_ROOT, FSID);
   printf ("sys_micropart_dealloc returned %d\n", ret);

   ret = sys_self_bc_buffer_map (&xn, CAP_ROOT,
                                 (b->buf_ppn << PGSHIFT) | PG_U | PG_P, VA);
   printf ("sys_self_bc_buffer_map returned %d\n", ret);
   assert (sys_self_insert_pte (CAP_ROOT, 0, VA) == 0);

   ret = sys_micropart_alloc (PART, MICRO_PART, CAP_ROOT, FSID);
   printf ("sys_micropart_alloc returned %d\n", ret);

   ret = sys_self_bc_buffer_map (&xn, CAP_ROOT,
                                 (b->buf_ppn << PGSHIFT) | PG_U | PG_P, VA);
   printf ("sys_self_bc_buffer_map returned %d\n", ret);
   assert (sys_self_insert_pte (CAP_ROOT, 0, VA) == 0);
}
static void
msync_mark_bc (struct Mmap *m, caddr_t addr, size_t len)
{
   u_int va;

   /* XXX -- disabled: the early return below skips the dirty-marking loop */
   return;

   for (va = (u_int)addr; va < (u_int)addr + len; va += NBPG)
      if (vpt[PGNO(va)] & PG_D) {
         /* dirty block */
         struct bc_entry *b = __bc_lookup (m->mmap_dev,
                                           va - (u_int)m->mmap_addr +
                                           m->mmap_offset);
         assert (b);
         if (b->buf_dirty != BUF_DIRTY) {
            assert (sys_bc_set_dirty (b->buf_dev, b->buf_blk, 1) == 0);
         }
      }
}
void *
exos_bufcache_map (struct bc_entry *bc_entry, u32 dev, u32 blk,
                   u_int writeable)
{
   int ret;
   u_int vaddr;

   if (bc_entry == NULL) {
      bc_entry = __bc_lookup (dev, blk);
      if (bc_entry == NULL) {
         return (NULL);
      }
   }

   vaddr = BUFCACHE_ADDR (bc_entry->buf_ppn);

   if (writeable) {
      writeable = PG_W;
   }

   ret = _exos_self_insert_pte (CAP_ROOT,
                                ppnf2pte (bc_entry->buf_ppn,
                                          PG_P | PG_U | writeable | PG_SHARED),
                                (u_int)vaddr, ESIP_DONTPAGE,
                                &__sysinfo.si_pxn[bc_entry->buf_dev]);

   if ((bc_entry->buf_ppn != BUFCACHE_PGNO(vaddr)) ||
       (bc_entry->buf_state == BC_EMPTY) ||
       (bc_entry->buf_blk != blk) ||
       (bc_entry->buf_dev != dev) ||
       (bc_entry->buf_ppn != va2ppn(vaddr))) {
      kprintf ("buf_state %d, buf_blk %d, diskBlock %d, buf_dev %d, dev %d\n",
               bc_entry->buf_state, bc_entry->buf_blk, blk, bc_entry->buf_dev,
               dev);
      kprintf ("buf_ppn %d, expected %d\n", bc_entry->buf_ppn, va2ppn(vaddr));
      kprintf ("gotcha: lost race detected (and handled) in "
               "exos_bufcache_map\n");
      exos_bufcache_unmap (dev, blk, (void *)vaddr);
      vaddr = 0;

   } else if (ret != 0) {
      kprintf ("exos_bufcache_map: _exos_self_insert_pte failed (ret %d, "
               "vaddr %x, ppn %d)\n", ret, vaddr, bc_entry->buf_ppn);
      assert (ret == 0);
   }

   return ((void *) vaddr);
}
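A short usage sketch of the map/unmap pair above, not taken from the original code. It assumes (dev, blk) is already resident in the buffer cache (for example after _exos_bc_read_and_insert) and shows a caller mapping the block writeable at its canonical bufcache address, modifying it, marking it dirty the same way msync_mark_bc does, and dropping the mapping. The helper name is hypothetical.

/* Hypothetical read-modify path; assumes (dev, blk) is already cached. */
static int
touch_cached_block (u32 dev, u32 blk)
{
   struct bc_entry *b;
   char *ptr;

   if ((b = __bc_lookup (dev, blk)) == NULL)
      return (-1);			/* block not resident */

   /* Map the cached page writeable; a NULL return means
      exos_bufcache_map detected (and handled) a lost race. */
   if ((ptr = (char *) exos_bufcache_map (b, dev, blk, 1)) == NULL)
      return (-1);

   ptr[0] = 0;				/* ... modify the block ... */

   /* Tell the kernel the cached copy is now dirty, as msync_mark_bc does. */
   if (b->buf_dirty != BUF_DIRTY)
      assert (sys_bc_set_dirty (b->buf_dev, b->buf_blk, 1) == 0);

   return (exos_bufcache_unmap (dev, blk, ptr));
}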