/**
 * Creates a copy of a segment.
 *
 * Clones the segment descriptor, then duplicates every region of the
 * source segment: for each region a fresh region descriptor is
 * allocated and linked into the destination, physical pages are
 * reserved in the destination address space, and the contents are
 * copied through temporary mappings of both ranges.
 *
 * @param dst   descriptor of the destination segment.
 * @param space address space of the destination segment.
 * @param src   source segment.
 * @return always 0.
 */
int vm_seg_clone(vm_seg_t *dst, vm_space_t *space, vm_seg_t *src)
{
    /* Create the destination segment with the same geometry,
     * protection and flags as the source. */
    vm_seg_create(dst, space, src->base, src->size, src->limit,
        src->prot, src->flags);
    vm_region_t *reg = NULL;
    vm_region_t *clonereg;
    // TRACE_IN("dst=%p space=%p src=%p", dst, space, src);

    /* Walk the source region list; list_next() with NULL yields the
     * first element, so the loop visits every region once. */
    while ( (reg = list_next(&src->regions, reg)) ) {
        /* Duplicate the region descriptor and attach it to dst.
         * NOTE(review): the vm_lpool_alloc result is not checked —
         * confirm the pool cannot be exhausted on this path. */
        clonereg = vm_lpool_alloc(&vm_unused_regions);
        clonereg->begin = reg->begin;
        clonereg->size = reg->size;
        clonereg->end = reg->end;
        clonereg->segment = dst;
        list_insert_tail(&dst->regions, clonereg);
        // TRACE_IN("%p-%p", clonereg->begin, clonereg->end);

        /* Back the cloned region with physical pages in the
         * destination address space. */
        vm_pmap_fill(&space->pmap, clonereg->begin, clonereg->size,
            dst->prot);

        /* Temporarily map both ranges and copy the contents, then
         * drop the temporary mappings again. */
        vm_addr_t SRC,DST;
        vm_segmap(dst, clonereg->begin, clonereg->size, &DST);
        vm_segmap(src, reg->begin, reg->size, &SRC);
        mem_cpy((void*)DST, (void*)SRC, reg->size);
        vm_unmap(DST, reg->size);
        vm_unmap(SRC, reg->size);
    }
#if 0
    TRACE_IN("present %u %u",
        vm_pmap_is_avail(&dst->space->pmap, 0xbfffff00),
        vm_pmap_is_avail(&src->space->pmap, 0xbfffff00) );
#endif
    return 0;
}
/*===========================================================================*
 *				do_shmdt				     *
 *===========================================================================*/
/* Detach a shared-memory segment from the caller's address space.
 * The segment is located by the physical address the caller's virtual
 * address translates to; on a match the detach time and last pid are
 * recorded and the mapping removed.  Returns EINVAL if the address is
 * not mapped, OK otherwise. */
PUBLIC int do_shmdt(message *m)
{
	vir_bytes addr;
	phys_bytes paddr;
	int i;

	addr = m->SHMDT_ADDR;

	paddr = vm_getphys(who_e, (void *) addr);
	if (paddr == 0)
		return EINVAL;

	for (i = 0; i < shm_list_nr; i++) {
		struct shm_struct *shm = &shm_list[i];

		if (shm->phys != paddr)
			continue;

		shm->shmid_ds.shm_atime = time(NULL);
		shm->shmid_ds.shm_lpid = getnpid(who_e);
		/* nattch is updated lazily */
		vm_unmap(who_e, (void *) addr);
		break;
	}

	/* vm_getphys succeeded, so the segment must exist in our list. */
	if (i == shm_list_nr)
		fprintf(stderr, "IPC: do_shmdt impossible error!\n");

	update_refcount_and_destroy();

	return OK;
}
static void test_vm_unmap(void) { paddr_t paddr; vaddr_t vaddr; vm_as_t* as; as = vm_get_kernel_as(); serial_printl("[?] test unmap\n"); vaddr = (vaddr_t)0xb0000000; paddr = phys_alloc(); vm_map(as, vaddr, paddr); /* no page fault */ *(uint32_t*)vaddr = 0x2a; vm_unmap(as, vaddr); phys_free(paddr); /* page fault */ *(uint32_t*)vaddr = 0x2a; }
/*===========================================================================*
 *				do_shmdt				     *
 *===========================================================================*/
/* Detach a shared-memory segment from the caller.  The segment is
 * identified by the VM id that the caller's virtual address resolves
 * to; on a match the detach time and last pid are recorded and the
 * mapping is removed.  Returns EINVAL if the address is not mapped,
 * OK otherwise. */
int do_shmdt(message *m)
{
	phys_bytes vm_id;
	vir_bytes addr;
	int i;

	addr = (vir_bytes) m->m_lc_ipc_shmdt.addr;

	vm_id = vm_getphys(who_e, (void *) addr);
	if (vm_id == 0)
		return EINVAL;

	for (i = 0; i < shm_list_nr; i++) {
		struct shm_struct *shm;

		if (shm_list[i].vm_id != vm_id)
			continue;

		shm = &shm_list[i];
		shm->shmid_ds.shm_atime = time(NULL);
		shm->shmid_ds.shm_lpid = getnpid(who_e);
		/* nattch is updated lazily */
		vm_unmap(who_e, (void *) addr);
		break;
	}

	/* vm_getphys succeeded, so the id should always be found. */
	if (i == shm_list_nr)
		printf("IPC: do_shmdt impossible error! could not find id %lu to unmap\n",
			vm_id);

	update_refcount_and_destroy();

	return OK;
}
/**
 * Close a RAM disk minor device.
 *
 * Releases the memory backing the disk and marks it closed.
 *
 * @param dev   device descriptor (unused here).
 * @param minor minor number selecting the RAM disk.
 * @return 0 on success, -1 if the minor is out of range or the disk
 *         is not open.
 */
int ramdisk_close(dev_t *dev, dev_minor_t minor)
{
	/* BUG FIX: the bound check was `minor > RAM_DISKS`, which let
	 * minor == RAM_DISKS index one element past the end of the
	 * ramdisk[] array (assuming RAM_DISKS is its element count). */
	if (minor >= RAM_DISKS || !ramdisk[minor].open)
		return -1;

	vm_unmap(NULL, ramdisk[minor].data);
	ramdisk[minor].open = false;

	return 0;
}
/*
 * Unmap every present page covered by page table t in the current
 * address space.
 *
 * @param t index of the page table; each table covers 1024 pages.
 * @return always 0.
 */
int self_free_table(int t)
{
	/* first virtual address covered by table t */
	unsigned virt = t*1024*PAGE_SIZE;
	int i;
	/* Walk the 1024 page slots of this table; unmap a page only if
	 * its entry is non-zero (i.e. something is mapped there).
	 * NOTE(review): the index (virt&PAGE_MASK)/PAGE_SIZE only yields
	 * a meaningful page_tables[] slot if PAGE_MASK here selects the
	 * page-number bits; under the Linux-style convention
	 * (PAGE_MASK == ~(PAGE_SIZE-1)) this would be a global page
	 * index instead — confirm PAGE_MASK's definition. */
	for(i=0;i<1024;++i)
	{
		if(page_tables[(virt&PAGE_MASK)/PAGE_SIZE])
			vm_unmap(virt);
		virt += PAGE_SIZE;
	}
	return 0;
}
/*
 * Terminate the current task.
 *
 * Marks the task as exiting, records the exit reason, releases
 * per-thread and (if this is the last sharer) thread-shared resources,
 * notifies the parent, fixes up children/waiters, and finally marks
 * the task dead and yields forever — this function never returns.
 *
 * @param code exit status passed back via exit_reason.ret.
 */
void exit(int code)
{
	/* The idle/kernel task (pid 0) must never exit. */
	if(!current_task || current_task->pid == 0)
		panic(PANIC_NOSYNC, "kernel tried to exit");
	task_t *t = (task_t *)current_task;
	/* Get ready to exit */
	assert(t->thread->magic == THREAD_MAGIC);
	ll_insert(kill_queue, (void *)t);
	raise_flag(TF_EXITING);
	/* NOTE(review): -9 appears to be a sentinel for a signal-caused
	 * exit (cause pre-set elsewhere); any other code resets the
	 * cause to 0 — confirm against the signal-delivery path. */
	if(code != -9)
		t->exit_reason.cause = 0;
	t->exit_reason.ret = code;
	t->exit_reason.pid = t->pid;
	/* Clear out system resources */
	free_thread_specific_directory();
	/* tell our parent that we're dead */
	/* NOTE(review): SIGCHILD (sic) is this project's name; the
	 * conventional POSIX spelling is SIGCHLD. */
	if(t->parent)
		do_send_signal(t->parent->pid, SIGCHILD, 1);
	if(!sub_atomic(&t->thread->count, 1)) {
		/* we're the last thread to share this data. Clean it up */
		close_all_files(t);
		if(t->thread->root)iput(t->thread->root);
		if(t->thread->pwd) iput(t->thread->pwd);
		mutex_destroy(&t->thread->files_lock);
		/* clear the pointer before freeing so no one can follow
		 * it into freed memory */
		void *addr = t->thread;
		t->thread = 0;
		kfree(addr);
	}
	/* don't do this while the state is dead, as we may step on the toes of waitpid.
	 * this fixes all tasks that are children of current_task, or are waiting
	 * on current_task. For those waiting, it signals the task. For those that
	 * are children, it fixes the 'parent' pointer. */
	search_tqueue(primary_queue, TSEARCH_EXIT_PARENT | TSEARCH_EXIT_WAITING, 0, 0, 0, 0);
	char flag_last_page_dir_task;
	/* is this the last task to use this pd_info? */
	flag_last_page_dir_task = (sub_atomic(&pd_cur_data->count, 1) == 0) ? 1 : 0;
	if(flag_last_page_dir_task) {
		/* no one else is referencing this directory. Clean it up... */
		free_thread_shared_directory();
		vm_unmap(PDIR_DATA);
		raise_flag(TF_LAST_PDIR);
	}
	set_as_dead(t);
	/* never return: spin in the scheduler until reaped */
	for(;;) schedule();
}
static int vm_region_shrink( struct vm_region *vmr, unsigned npages ) { unsigned i; struct vm_page *vmp; for( i = npages; i < vm_page_array_num( vmr->vmr_pages ); ++i ) { vmp = vm_page_array_get( vmr->vmr_pages, i ); if( vmp == NULL ) { swap_unreserve( 1 ); continue; } //unmap tlb entries. vm_unmap( vmr->vmr_base + PAGE_SIZE * i ); //destroy the page. vm_page_destroy( vmp ); } return vm_page_array_setsize( vmr->vmr_pages, npages ); }
/**
 * mem_free_pages - frees pages of memory
 * @addr: a pointer to the start of the pages
 * @nr: the number of pages
 * @size: the page size (4KB, 2MB, or 1GB)
 *
 * Removes the virtual mapping for the range, then returns the backing
 * memory to the OS with munmap().
 */
void mem_free_pages(void *addr, int nr, int size)
{
	vm_unmap(addr, nr, size);
	/* BUG FIX: nr * size was computed in int arithmetic, which is
	 * signed overflow (UB) once nr * size exceeds INT_MAX — e.g.
	 * nr >= 2 with 1GB pages, or nr >= 1024 with 2MB pages.
	 * Widen to size_t before multiplying. */
	munmap(addr, (size_t)nr * (size_t)size);
}