/*
 * uvm_loan_init: one-time boot setup for the page-loanout subsystem.
 *
 * Initializes the shared zero-fill loan object and the loan
 * history ring buffer.
 */
void
uvm_loan_init(void)
{

	/* Shared object backing all zero-fill loans. */
	UVM_OBJ_INIT(&uvm_loanzero_object, &ulz_pager, 0);

	/* History buffer for UVMHIST tracing of loan events. */
	UVMHIST_INIT(loanhist, 300);
}
/*
 * uvm_loan_init: one-time boot setup for the page-loanout subsystem.
 *
 * Sets up the mutex protecting the shared zero-fill loan object,
 * the object itself, and the loan history ring buffer.
 */
void
uvm_loan_init(void)
{

	/* The lock must exist before it can be attached to the object. */
	mutex_init(&uvm_loanzero_lock, MUTEX_DEFAULT, IPL_NONE);
	uvm_obj_init(&uvm_loanzero_object, &ulz_pager, false, 0);
	uvm_obj_setlock(&uvm_loanzero_object, &uvm_loanzero_lock);

	/* History buffer for UVMHIST tracing of loan events. */
	UVMHIST_INIT(loanhist, 300);
}
void ubc_init(void) { struct ubc_map *umap; vaddr_t va; int i; /* * init ubc_object. * alloc and init ubc_map's. * init inactive queues. * alloc and init hashtable. * map in ubc_object. */ simple_lock_init(&ubc_object.uobj.vmobjlock); ubc_object.uobj.pgops = &ubc_pager; TAILQ_INIT(&ubc_object.uobj.memq); ubc_object.uobj.uo_npages = 0; ubc_object.uobj.uo_refs = UVM_OBJ_KERN; ubc_object.umap = malloc(ubc_nwins * sizeof(struct ubc_map), M_TEMP, M_NOWAIT); bzero(ubc_object.umap, ubc_nwins * sizeof(struct ubc_map)); va = (vaddr_t)1L; #ifdef PMAP_PREFER PMAP_PREFER(0, &va); if (va < UBC_WINSIZE) { va = UBC_WINSIZE; } ubc_nqueues = va / UBC_WINSIZE; if (ubc_nqueues != 1) { ubc_release_unmap = TRUE; } #endif ubc_object.inactive = malloc(UBC_NQUEUES * sizeof(struct ubc_inactive_head), M_TEMP, M_NOWAIT); for (i = 0; i < UBC_NQUEUES; i++) { TAILQ_INIT(&ubc_object.inactive[i]); } for (i = 0; i < ubc_nwins; i++) { umap = &ubc_object.umap[i]; TAILQ_INSERT_TAIL(&ubc_object.inactive[i & (UBC_NQUEUES - 1)], umap, inactive); } ubc_object.hash = hashinit(ubc_nwins, HASH_LIST, M_TEMP, M_NOWAIT, &ubc_object.hashmask); for (i = 0; i <= ubc_object.hashmask; i++) { LIST_INIT(&ubc_object.hash[i]); } if (uvm_map(kernel_map, (vaddr_t *)&ubc_object.kva, ubc_nwins * UBC_WINSIZE, &ubc_object.uobj, 0, (vsize_t)va, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE, UVM_ADV_RANDOM, UVM_FLAG_NOMERGE)) != KERN_SUCCESS) { panic("ubc_init: failed to map ubc_object\n"); } UVMHIST_INIT(ubchist, 300); }
void ubc_init(void) { struct ubc_map *umap; vaddr_t va; int i; /* * Make sure ubc_winshift is sane. */ if (ubc_winshift < PAGE_SHIFT) ubc_winshift = PAGE_SHIFT; /* * init ubc_object. * alloc and init ubc_map's. * init inactive queues. * alloc and init hashtable. * map in ubc_object. */ simple_lock_init(&ubc_object.uobj.vmobjlock); ubc_object.uobj.pgops = &ubc_pager; TAILQ_INIT(&ubc_object.uobj.memq); ubc_object.uobj.uo_npages = 0; ubc_object.uobj.uo_refs = UVM_OBJ_KERN; ubc_object.umap = malloc(ubc_nwins * sizeof(struct ubc_map), M_TEMP, M_NOWAIT); if (ubc_object.umap == NULL) panic("ubc_init: failed to allocate ubc_map"); memset(ubc_object.umap, 0, ubc_nwins * sizeof(struct ubc_map)); if (ubc_winshift < PAGE_SHIFT) { ubc_winshift = PAGE_SHIFT; } va = (vaddr_t)1L; #ifdef PMAP_PREFER PMAP_PREFER(0, &va); ubc_nqueues = va >> ubc_winshift; if (ubc_nqueues == 0) { ubc_nqueues = 1; } if (ubc_nqueues != 1) { ubc_release_unmap = TRUE; } #endif ubc_winsize = 1 << ubc_winshift; ubc_object.inactive = malloc(UBC_NQUEUES * sizeof(struct ubc_inactive_head), M_TEMP, M_NOWAIT); if (ubc_object.inactive == NULL) panic("ubc_init: failed to allocate inactive queue heads"); for (i = 0; i < UBC_NQUEUES; i++) { TAILQ_INIT(&ubc_object.inactive[i]); } for (i = 0; i < ubc_nwins; i++) { umap = &ubc_object.umap[i]; TAILQ_INSERT_TAIL(&ubc_object.inactive[i & (UBC_NQUEUES - 1)], umap, inactive); } ubc_object.hash = hashinit(ubc_nwins, HASH_LIST, M_TEMP, M_NOWAIT, &ubc_object.hashmask); for (i = 0; i <= ubc_object.hashmask; i++) { LIST_INIT(&ubc_object.hash[i]); } if (uvm_map(kernel_map, (vaddr_t *)&ubc_object.kva, ubc_nwins << ubc_winshift, &ubc_object.uobj, 0, (vsize_t)va, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE, UVM_ADV_RANDOM, UVM_FLAG_NOMERGE)) != 0) { panic("ubc_init: failed to map ubc_object\n"); } UVMHIST_INIT(ubchist, 300); }