int omap4430_enable_emu(void)
{
    int ret = -1;
    void *base_cm_emu, *base_l3instr_l3;
    int timeout = GLOBAL_TIMEOUT;

    base_cm_emu = map_page(OMAP4430_CM_EMU);
    if (base_cm_emu == NULL)
        goto end;
    base_l3instr_l3 = map_page(OMAP4430_CM_L3INSTR_L3);
    if (base_l3instr_l3 == NULL)
        goto unmap_cm_emu;

    // Enable clocks
    __writel(0x2, base_cm_emu + 0xA00);
    __writel(0x1, base_l3instr_l3 + 0xE20);
    __writel(0x1, base_l3instr_l3 + 0xE28);

    // Check if it worked
    while (--timeout) {
        if (((__readl(base_cm_emu + 0xA00) & 0xf00) == 0x300) &&
            (__readl(base_cm_emu + 0xA20) & 0x40000)) {
            ret = 0;
            break;
        }
    }

    unmap_page(base_l3instr_l3);
unmap_cm_emu:
    unmap_page(base_cm_emu);
end:
    return ret;
}
static int fetch_file_page(struct filemap *fm, void *addr)
{
    struct file *filp;
    unsigned long pfn;
    unsigned long pos;
    int rc;

    filp = (struct file *) olock(fm->file, OBJECT_FILE);
    if (!filp)
        return -EBADF;

    pfn = alloc_pageframe('FMAP');
    if (pfn == 0xFFFFFFFF) {
        orel(filp);
        return -ENOMEM;
    }

    map_page(addr, pfn, PT_WRITABLE | PT_PRESENT);
    pos = (char *) addr - fm->addr;
    rc = pread(filp, addr, PAGESIZE, fm->offset + pos);
    if (rc < 0) {
        orel(filp);
        unmap_page(addr);
        free_pageframe(pfn);
        return rc;
    }

    pfdb[pfn].owner = fm->self;
    map_page(addr, pfn, fm->protect | PT_PRESENT);

    orel(filp);
    return 0;
}
/*
 * Inject data into the given vspace.
 * TODO: Don't keep these pages mapped in
 */
static int load_segment_into_vspace(seL4_RISCV_PageDirectory dest_as,
                                    char *src, unsigned long segment_size,
                                    unsigned long file_size, unsigned long dst,
                                    unsigned long permissions)
{
    assert(file_size <= segment_size);

    unsigned long pos;

    /* We work a page at a time in the destination vspace. */
    pos = 0;
    while (pos < segment_size) {
        seL4_Word paddr;
        seL4_CPtr sos_cap, tty_cap;
        seL4_Word vpage, kvpage;
        unsigned long kdst;
        int nbytes;
        int err;

        kdst = dst + PROCESS_SCRATCH;
        vpage = PAGE_ALIGN(dst);
        kvpage = PAGE_ALIGN(kdst);

        /* First we need to create a frame */
        paddr = ut_alloc(seL4_PageBits);
        conditional_panic(!paddr, "Out of memory - could not allocate frame");
        err = cspace_ut_retype_addr(paddr, seL4_RISCV_4K, seL4_PageBits,
                                    cur_cspace, &tty_cap);
        conditional_panic(err, "Failed to retype to a frame object");

        /* Copy the frame cap as we need to map it into 2 address spaces */
        sos_cap = cspace_copy_cap(cur_cspace, cur_cspace, tty_cap, seL4_AllRights);
        conditional_panic(sos_cap == 0, "Failed to copy frame cap");

        /* Map the frame into the tty_test address space */
        err = map_page(tty_cap, dest_as, vpage, permissions,
                       seL4_RISCV_Default_VMAttributes);
        conditional_panic(err, "Failed to map to tty address space");

        /* Map the frame into the sos address space */
        err = map_page(sos_cap, seL4_CapInitThreadPD, kvpage, seL4_AllRights,
                       seL4_RISCV_Default_VMAttributes);
        conditional_panic(err, "Failed to map sos address space");

        /* Now copy our data into the destination vspace. */
        nbytes = PAGESIZE - (dst & PAGEMASK);
        if (pos < file_size) {
            memcpy((void *) kdst, (void *) src, MIN(nbytes, file_size - pos));
        }

        /* Not observable to I-cache yet so flush the frame */
        // seL4_ARM_Page_Unify_Instruction(sos_cap, 0, PAGESIZE);

        pos += nbytes;
        dst += nbytes;
        src += nbytes;
    }
    return 0;
}
enum status physical_page_remove(phys_addr address)
{
    enum status status = Error_Absent;

    assert(is_aligned(address, Page_Small));

    phys_addr original = Physical_Page;
    phys_addr current = original;

    lock_acquire_writer(&physical_allocator_lock);
    while (current != invalid_phys_addr) {
        /* Map the current node so its next pointer can be read. */
        unmap_page(Physical_Page_Stack, false);
        status = map_page(Physical_Page_Stack, current, Memory_Writable);
        assert_ok(status);

        if (Physical_Page_Stack->next == address) {
            /* Map the node being removed to fetch its successor. */
            unmap_page(Physical_Page_Stack, false);
            assert_ok(map_page(Physical_Page_Stack, address, Memory_Writable));
            phys_addr next_next = Physical_Page_Stack->next;

            /* Remap the predecessor (still in 'current') and splice the
             * removed node out of the list. */
            unmap_page(Physical_Page_Stack, false);
            assert_ok(map_page(Physical_Page_Stack, current, Memory_Writable));
            Physical_Page_Stack->next = next_next;
            status = Ok;
            break;
        }
        current = Physical_Page_Stack->next;
    }

    /* Restore the mapping of the stack head. */
    unmap_page(Physical_Page_Stack, false);
    assert_ok(map_page(Physical_Page_Stack, original, Memory_Writable));
    lock_release_writer(&physical_allocator_lock);
    return status;
}
/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
    unsigned long total_ram = lmb_phys_mem_size();
    unsigned long top_of_ram = lmb_end_of_DRAM();
    unsigned long max_zone_pfns[MAX_NR_ZONES];

#ifdef CONFIG_HIGHMEM
    map_page(PKMAP_BASE, 0, 0);	/* XXX gross */
    pkmap_page_table = pte_offset_kernel(
        pmd_offset(pgd_offset_k(PKMAP_BASE), PKMAP_BASE), PKMAP_BASE);
    map_page(KMAP_FIX_BEGIN, 0, 0);	/* XXX gross */
    kmap_pte = pte_offset_kernel(
        pmd_offset(pgd_offset_k(KMAP_FIX_BEGIN), KMAP_FIX_BEGIN), KMAP_FIX_BEGIN);
    kmap_prot = PAGE_KERNEL;
#endif /* CONFIG_HIGHMEM */

    printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
           top_of_ram, total_ram);
    printk(KERN_DEBUG "Memory hole size: %ldMB\n",
           (top_of_ram - total_ram) >> 20);

    memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
#ifdef CONFIG_HIGHMEM
    max_zone_pfns[ZONE_DMA] = total_lowmem >> PAGE_SHIFT;
    max_zone_pfns[ZONE_HIGHMEM] = top_of_ram >> PAGE_SHIFT;
#else
    max_zone_pfns[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
#endif
    free_area_init_nodes(max_zone_pfns);
}
void kmain(struct multiboot_info *mbt)
{
    vga_init();
    gdt_install();
    idt_install();
    isr_install();
    irq_install();
    syscalls_install();

    puts_c(__kernel_name " kernel v" __kernel_version_str "\n\n",
           COLOR_LIGHT_BLUE, COLOR_DEFAULT_BG);

    uint64_t mem;
    get_multiboot_info(mbt, &mem);

    extern uint32_t _kernel_memory_end[];
    kprintf("End of kernel's memory: 0x%x\n",
            (uint64_t) (uint32_t) _kernel_memory_end);
    kprintf("Memory:\n%l B\n%l KB\n%l MB\n%l GB\n",
            mem, mem / 1024, mem / 1024 / 1024, mem / 1024 / 1024 / 1024);

    init_paging();

    map_page(0xFD7FF000, 0x60000, 3);
    int *p = (int *) 0xFD7FF000;
    *p = 12;
    kprintf("*(0x%x) = %i\n", (uint64_t) (uint32_t) p, *p);

    map_page(0x10000000, 0x60000, 3);
    int *p2 = (int *) 0x10000000;
    kprintf("*(0x%x) = %i\n", (uint64_t) (uint32_t) p2, *p2);

    print_next_available_page();

    uint32_t ap = allocate_page(203);
    map_page(ap, 0x60000, 3);
    int *p3 = (int *) ap;
    kprintf("*(0x%x) = %i\n", (uint64_t) ap, *p3);

    print_next_available_page();

    ap = allocate_page(203);
    kprintf("ap = 0x%x\n", (uint32_t) ap);

    struct kthread thread;
    create_kthread(thread_test, &thread);
    start_kthread(&thread);
    kprintf("Returned from thread.\n");

    _asm_print_test();

    return;
}
// #PF (page fault) handler
void pf_handler(uint address, uint errcode)
{
    // printf_color(0x04, "Page Fault: addr=0x%x, errcode=0x%x\n", address, errcode);
    ulong ok = 0;
    TaskStruct *task = Task[Current];

    // The low bit of errcode indicates whether the exception was caused by a
    // non-present page (0) or by a privilege violation (1)
    if ((errcode & 1) == 0) {
        if (BinFormats[task->BinFormat].load_page)
            // The function returns the physical address of the new page,
            // AND ALSO THE FLAGS for its page-table entry
            ok = BinFormats[task->BinFormat].load_page(address);
    } else {
        printf_color(0x04, "Strange #PF errcode=0x%x (pid=%d)\n",
                     errcode, Task[Current]->pid);
    }

    if (!ok) {
        printf_color(0x04, "Process requested an invalid page (req addr=0x%x)! Killing it...\n",
                     address);
        scheduler_kill_current();
    } else {
        map_page(ok, Task[Current], address, ok & PA_MASK);
    }
}
static void *shm_map(shm_node_t *node)
{
    void *p = (void *) get_free_pages(node->size / PAGE_SIZE, 0, 0);
    KASSERT(p);

    map_page((virtaddr_t) p, node->physaddr, node->size);
    return p;
}
uint32_t *vm_copy_kernel_pdir()
{
    uint32_t *new_pdir = pm_alloc();
    if (!new_pdir)
        return NULL;

    map_page(new_pdir, new_pdir, 0);
    memset(new_pdir, 0x0, PAGE_SIZE);

    for (unsigned i = 0; i < PAGE_SIZE / sizeof(uint32_t); i++) {
        if (kernel_pdir[i] & PDE_P) {
            if (!(kernel_pdir[i] & PDE_U)) {
                /* page directory entry present and meant for the kernel, let's copy it */
                new_pdir[i] = kernel_pdir[i];
            }
        }
    }

    /* map the page directory into itself at the last page directory entry, so
     * that when this page directory is loaded into cr3 it will be mapped and
     * easily modifiable */
    /* basically, it's going to replace the kernel's page directory in virtual
     * memory */
    new_pdir[1023] = (uint32_t) new_pdir | PDE_P | PDE_W;

    return new_pdir;
}
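/*
 * Illustrative sketch, not part of the original source: what the recursive
 * entry above buys us once the directory is loaded into cr3. With
 * new_pdir[1023] pointing at the directory itself, the hardware page walk
 * resolves fixed virtual addresses to the paging structures. The macro and
 * function names here are invented; only PDE_P/PDE_W come from this codebase.
 */
#define RECURSIVE_PDIR    ((uint32_t *) 0xFFFFF000)                    /* the page directory itself */
#define RECURSIVE_PTBL(i) ((uint32_t *) (0xFFC00000 + ((i) << 12)))    /* page table for PDE i */

/* Example: clear the write bit on the PTE mapping virtual address v,
 * assuming the page table covering v is present. A real version would also
 * flush the TLB entry (invlpg) afterwards. */
static void example_make_readonly(uint32_t v)
{
    uint32_t pde_idx = v >> 22;
    uint32_t pte_idx = (v >> 12) & 0x3FF;

    if (RECURSIVE_PDIR[pde_idx] & PDE_P)
        RECURSIVE_PTBL(pde_idx)[pte_idx] &= ~PDE_W;
}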
phys_addr physical_alloc(void)
{
    phys_addr phys;
    phys_addr next;

    lock_acquire_writer(&physical_allocator_lock);
    assert(is_aligned(Physical_Page_Stack->length, Page_Small));

    Physical_Page_Stack->length -= Page_Small;
    if (Physical_Page_Stack->length == 0) {
        /* This stack node is exhausted: hand out the node's own page and
         * advance the stack head to the next node. */
        phys = Physical_Page;
        next = Physical_Page_Stack->next;
        unmap_page(Physical_Page_Stack, false);
        if (next != invalid_phys_addr) {
            if (map_page(Physical_Page_Stack, next, Memory_Writable) != Ok) {
                phys = invalid_phys_addr;
                goto out;
            }
        }
        Physical_Page = next;
        assert(phys != next);
    } else {
        phys = Physical_Page + Physical_Page_Stack->length;
    }

out:
    lock_release_writer(&physical_allocator_lock);
    assert((phys & 0xfff) == 0);
    assert((phys & 0xffff000000000000) == 0);
    assert(phys != Physical_Page);
    return phys;
}
static void *obj_cache_add_slab(struct obj_cache *cache)
{
    struct slab_meta *meta;
    void *slab = map_page(cache->slab_size);

    if (!slab) {
        return NULL;
    }

    meta = find_slab_meta(slab, cache->slab_size);
    meta->refcount = 0;

    /* Push the new slab onto the cache's slab list (meta->next becomes NULL
     * when the list was empty). */
    meta->next = cache->slabs;
    cache->slabs = meta;

    obj_cache_init_freelist(cache, slab);
    cache->slab_count++;

    return slab;
}
void physical_stack_debug(void)
{
    phys_addr original = Physical_Page;
    phys_addr current = Physical_Page;

    logf(Log_Debug, "physical_stack_debug\n");

    lock_acquire_writer(&physical_allocator_lock);
    while (current != invalid_phys_addr) {
        unmap_page(Physical_Page_Stack, false);
        assert_ok(map_page(Physical_Page_Stack, current, Memory_Writable));
        logf(Log_Debug, "%lx %zu\n", current, Physical_Page_Stack->length);
        current = Physical_Page_Stack->next;
    }
    logf(Log_Debug, "%lx\n", current);

    unmap_page(Physical_Page_Stack, false);
    assert_ok(map_page(Physical_Page_Stack, original, Memory_Writable));
    lock_release_writer(&physical_allocator_lock);
}
int MPTKern_test1()
{
    unsigned int vaddr = 4096 * 1024 * 300;

    container_split(0, 100);
    if (get_ptbl_entry_by_va(1, vaddr) != 0) {
        dprintf("test 1.1 failed.\n");
        return 1;
    }
    if (get_pdir_entry_by_va(1, vaddr) != 0) {
        dprintf("test 1.2 failed.\n");
        return 1;
    }

    map_page(1, vaddr, 100, 7);
    if (get_ptbl_entry_by_va(1, vaddr) == 0) {
        dprintf("test 1.3 failed.\n");
        return 1;
    }
    if (get_pdir_entry_by_va(1, vaddr) == 0) {
        dprintf("test 1.4 failed.\n");
        return 1;
    }

    unmap_page(1, vaddr);
    if (get_ptbl_entry_by_va(1, vaddr) != 0) {
        dprintf("test 1.5 failed.\n");
        return 1;
    }

    dprintf("test 1 passed.\n");
    return 0;
}
/* Map a physical address to a virtual one */
void *AcpiOsMapMemory(ACPI_PHYSICAL_ADDRESS PhysicalAddress, ACPI_SIZE Length)
{
    // The first megabyte is already mapped at 0xC0000000
    if (PhysicalAddress < 0x100000 && (PhysicalAddress + Length) < 0x100000) {
        return (void *) (PhysicalAddress + 0xC0000000);
    }

    // Local variables
    u32 vaddr = 0xF0000000;   // virtual address
    page_t *page = NULL;      // page for the virtual address
    u32 physical_page ATTR((unused)) = PhysicalAddress & 0xFFFFF000; // physical page address
    u32 page_count = Length + (PhysicalAddress & 0xFFF); // bytes to map, including the offset into the first page

    // Round up to a page boundary
    if ((page_count & 0xFFF) != 0)
        page_count = (page_count & 0xFFFFF000) + 0x1000;
    // Calculate the number of pages required
    page_count /= 0x1000;

    // Look for an open virtual address
    for (; vaddr != 0; vaddr += 0x1000) {
        // Check for an empty page
        page = get_page((void *) vaddr, 1, current->t_dir);
        if (page == NULL)
            continue;
        if (page->present == 1)
            continue;

        // Check for enough consecutive blocks!
        u32 count = 1;
        for (u32 incr = 0x1000; count < page_count && (vaddr + incr) != 0;
             count++, incr += 0x1000) {
            page = get_page((void *) (vaddr + incr), 1, current->t_dir);
            if (page == NULL || page->present == 1)
                break;
        }

        // we didn't find enough consecutive pages
        if (count != page_count)
            continue;

        // We found an appropriate block! Allocate!
        break;
    }

    // No free memory!
    if (page == NULL || page->present == 1) {
        syslog(KERN_ERR, "AcpiOsMapMemory: no more free virtual addresses!");
        return NULL;
    }

    // Map the pages
    for (u32 incr = 0; incr < (page_count * 0x1000); incr += 0x1000) {
        map_page(current->t_dir, (void *) (vaddr + incr), PhysicalAddress + incr, 0, 1);
    }

    // return the virtual address
    return (void *) (vaddr + (PhysicalAddress & 0xFFF));
}
/*
 * Add multiple pages to a mapping
 * @map   -- the mapping
 * @virt  -- virtual address inside the mapping (page aligned)
 * @phys  -- physical address of the first page
 * @n     -- number of pages to be mapped
 * @flags -- read/write markers
 */
void map_pages(struct mapping *map, void *virt, u32 phys, u32 n, u16 flags)
{
    u32 i;

    for (i = 0; i < n; i++)
        map_page(map, (uintptr_t) virt + PAGE_SIZE * i,
                 phys + PAGE_SIZE * i, flags);
}
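/*
 * Usage sketch (not from the original source): map four contiguous frames of
 * a device buffer into a mapping. The addresses and the MAP_WRITABLE flag
 * name are hypothetical stand-ins for this codebase's real constants.
 */
static void example_map_device_buffer(struct mapping *map)
{
    map_pages(map,
              (void *) 0xE0000000,   /* virt, page aligned (hypothetical) */
              0xFD000000,            /* phys (hypothetical) */
              4,                     /* pages */
              MAP_WRITABLE);         /* hypothetical flag name */
}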
/*
 * This function will allocate the requested contiguous pages and
 * map them into the kernel's vmalloc() space. This is done so we
 * get a unique mapping for these pages, outside of the kernel's 1:1
 * virtual:physical mapping. This is necessary so we can cover large
 * portions of the kernel with single large page TLB entries, and
 * still get unique uncached pages for consistent DMA.
 */
void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle)
{
    struct vm_struct *area;
    unsigned long page, va, pa;
    void *ret;
    int order, err, i;

    if (in_interrupt())
        BUG();

    /* only allocate page size areas */
    size = PAGE_ALIGN(size);
    order = get_order(size);

    page = __get_free_pages(gfp, order);
    if (!page) {
        BUG();
        return NULL;
    }

    /* allocate some common virtual space to map the new pages */
    area = get_vm_area(size, VM_ALLOC);
    if (area == NULL) {
        free_pages(page, order);
        return NULL;
    }
    va = VMALLOC_VMADDR(area->addr);
    ret = (void *) va;

    /* this gives us the real physical address of the first page */
    *dma_handle = pa = virt_to_bus((void *) page);

    /* set refcount=1 on all pages in an order>0 allocation so that vfree()
     * will actually free all pages that were allocated. */
    if (order > 0) {
        struct page *rpage = virt_to_page(page);

        for (i = 1; i < (1 << order); i++)
            set_page_count(rpage + i, 1);
    }

    err = 0;
    for (i = 0; i < size && err == 0; i += PAGE_SIZE)
        err = map_page(va + i, pa + i, PAGE_KERNEL_NOCACHE);

    if (err) {
        vfree((void *) va);
        return NULL;
    }

    /* we need to ensure that there are no cachelines in use, or worse dirty
     * in this area - can't do until after virtual address mappings are
     * created */
    frv_cache_invalidate(va, va + size);

    return ret;
}
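/*
 * Usage sketch (illustrative, not from the original source): allocate an
 * uncached buffer for a DMA descriptor ring. 'struct example_desc' and the
 * ring length are invented for the example; a driver would program the device
 * with the returned bus address while the CPU accesses the uncached vmalloc
 * alias returned by consistent_alloc().
 */
struct example_desc {
    u32 addr;
    u32 len;
};

static void *example_alloc_ring(dma_addr_t *ring_dma)
{
    /* 16 descriptors; consistent_alloc() rounds the size up to a page */
    return consistent_alloc(GFP_KERNEL, 16 * sizeof(struct example_desc), ring_dma);
}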
/**
 * This function will be called when there's no mapping found in the page
 * structure for the given virtual address [vaddr], e.g., by the page fault
 * handler when a page fault happened because the user process accessed a
 * virtual address that is not mapped yet.
 * The task of this function is to allocate a physical page and use it to
 * register the mapping for the virtual address with the given permission.
 * It should return the physical page index registered in the page directory,
 * i.e., the return value from map_page.
 * In the case of error, it should return the MagicNumber.
 */
unsigned int alloc_page(unsigned int proc_index, unsigned int vaddr,
                        unsigned int perm)
{
    unsigned int page_id;

    if ((page_id = container_alloc(proc_index)) != 0) {
        return map_page(proc_index, vaddr, page_id, perm);
    }
    return MagicNumber;
}
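/*
 * Usage sketch (hypothetical caller, not part of the original source): a
 * demand-paging fault path that backs the faulting address with a fresh page.
 * The permission value 7 (present | writable | user) matches the one used by
 * MPTKern_test1 above; 'example_pgflt_handler' is an invented name.
 */
static void example_pgflt_handler(unsigned int proc_index, unsigned int fault_va)
{
    if (alloc_page(proc_index, fault_va, 7) == MagicNumber) {
        /* out of physical pages; a real handler would kill or suspend the process */
        dprintf("alloc_page failed for va 0x%x\n", fault_va);
    }
}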
static void __init highmem_init(void)
{
    pr_debug("%x\n", (u32) PKMAP_BASE);
    map_page(PKMAP_BASE, 0, 0);	/* XXX gross */
    pkmap_page_table = virt_to_kpte(PKMAP_BASE);

    kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
    kmap_prot = PAGE_KERNEL;
}
void *vmmap(void *addr, unsigned long size, int protect, struct file *filp,
            off64_t offset, int *rc)
{
    int pages = PAGES(size);
    unsigned long flags = pte_flags_from_protect(protect);
    struct filemap *fm;
    int i;
    char *vaddr;

    if (rc) *rc = 0;
    if (size == 0 || flags == 0xFFFFFFFF) {
        if (rc) *rc = -EINVAL;
        return NULL;
    }

    addr = (void *) PAGEADDR(addr);
    if (addr == NULL) {
        addr = (void *) PTOB(rmap_alloc(vmap, pages));
        if (addr == NULL) {
            if (rc) *rc = -ENOMEM;
            return NULL;
        }
    } else {
        if (rmap_reserve(vmap, BTOP(addr), pages)) {
            if (rc) *rc = -ENOMEM;
            return NULL;
        }
    }

    fm = (struct filemap *) kmalloc(sizeof(struct filemap));
    if (!fm) {
        rmap_free(vmap, BTOP(addr), pages);
        if (rc) *rc = -ENOMEM;
        return NULL;
    }

    init_object(&fm->object, OBJECT_FILEMAP);
    fm->self = halloc(&fm->object);
    fm->file = halloc(&filp->iob.object);
    if (fm->self < 0 || fm->file < 0) {
        if (rc) *rc = -ENFILE;
        return NULL;
    }
    hprotect(fm->self);
    hprotect(fm->file);

    fm->offset = offset;
    fm->pages = pages;
    fm->object.signaled = 1;
    fm->addr = addr;
    fm->size = size;
    fm->protect = flags | PT_FILE;

    vaddr = (char *) addr;
    flags = (flags & ~PT_USER) | PT_FILE;
    for (i = 0; i < pages; i++) {
        map_page(vaddr, fm->self, flags);
        vaddr += PAGESIZE;
    }

    return addr;
}
/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
    unsigned long zones_size[MAX_NR_ZONES];
    unsigned long zholes_size[MAX_NR_ZONES];
    unsigned long total_ram = lmb_phys_mem_size();
    unsigned long top_of_ram = lmb_end_of_DRAM();

#ifdef CONFIG_HIGHMEM
    map_page(PKMAP_BASE, 0, 0);	/* XXX gross */
    pkmap_page_table = pte_offset_kernel(
        pmd_offset(pgd_offset_k(PKMAP_BASE), PKMAP_BASE), PKMAP_BASE);
    map_page(KMAP_FIX_BEGIN, 0, 0);	/* XXX gross */
    kmap_pte = pte_offset_kernel(
        pmd_offset(pgd_offset_k(KMAP_FIX_BEGIN), KMAP_FIX_BEGIN), KMAP_FIX_BEGIN);
    kmap_prot = PAGE_KERNEL;
#endif /* CONFIG_HIGHMEM */

    printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
           top_of_ram, total_ram);
    printk(KERN_DEBUG "Memory hole size: %ldMB\n",
           (top_of_ram - total_ram) >> 20);

    /*
     * All pages are DMA-able so we put them all in the DMA zone.
     */
    memset(zones_size, 0, sizeof(zones_size));
    memset(zholes_size, 0, sizeof(zholes_size));

#ifdef CONFIG_HIGHMEM
    zones_size[ZONE_DMA] = total_lowmem >> PAGE_SHIFT;
    zones_size[ZONE_HIGHMEM] = (total_memory - total_lowmem) >> PAGE_SHIFT;
    zholes_size[ZONE_DMA] = (top_of_ram - total_ram) >> PAGE_SHIFT;
    zholes_size[ZONE_HIGHMEM] = (top_of_ram - total_ram) >> PAGE_SHIFT;
#else
    zones_size[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
    zholes_size[ZONE_DMA] = (top_of_ram - total_ram) >> PAGE_SHIFT;
#endif /* CONFIG_HIGHMEM */

    free_area_init_node(0, NODE_DATA(0), zones_size,
                        __pa(PAGE_OFFSET) >> PAGE_SHIFT, zholes_size);
}
#ifdef CONFIG_BGA /* guard name assumed; the opening conditional for the #else/#endif below was missing from the listing */
int bochs_init(void)
{
    outw(VBE_DISPI_IOPORT_INDEX, 0);
    int n = inw(VBE_DISPI_IOPORT_DATA);
    if (!(CHECK_BGA(n)))
        return E_ERR;

    __lfbptr = 0;

    /* nested function (GCC extension) used as the pci_scan() callback */
    void pci_func(uint32_t device, uint16_t vendor_id, uint16_t device_id, void *arg) {
        if (likely(!((vendor_id == 0x1234) && (device_id == 0x1111))))
            return;
        __lfbptr = (uintptr_t) pci_read_field(device, PCI_BAR0, 4);
    }

    int i;
    for (i = 0; i < 65536 && !__lfbptr; i++)
        pci_scan(&pci_func, i, NULL);

    if (!__lfbptr)
        return E_ERR;

#define ALIGN(x) \
    (((x) + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1))

    /* identity-map the linear framebuffer, starting from the page that
     * contains __lfbptr (align down, not up) */
    uintptr_t frame = __lfbptr & ~((uintptr_t) PAGE_SIZE - 1);
    uintptr_t end = ALIGN(frame + BGA_VIDEORAM_SIZE);
    for (; frame < end; frame += PAGE_SIZE)
        map_page(frame, frame, 1);

    fbdev->name = "Bochs VBE Extensions";
    fbdev->setvideomode = bga_setvideomode;

    return E_OK;
}
#else
int bga_init(void)
{
    return E_ERR;
}
#endif
void *miomap(unsigned long addr, int size, int protect)
{
    char *vaddr;
    int i;
    unsigned long flags = pte_flags_from_protect(protect);
    int pages = PAGES(size);

    vaddr = (char *) PTOB(rmap_alloc(vmap, pages));
    if (vaddr == NULL)
        return NULL;

    for (i = 0; i < pages; i++) {
        map_page(vaddr + PTOB(i), BTOP(addr) + i, flags | PT_PRESENT);
    }

    return vaddr;
}
void sos_map_page_dir_cb(int pid, seL4_CPtr reply_cap, void *args, int err)
{
    if (SOS_DEBUG) printf("sos_map_page_dir_cb\n");

    frame_alloc_args *alloc_args = (frame_alloc_args *) args;
    sos_map_page_args *map_args = alloc_args->cb_args;

    if (err || !alloc_args->index) {
        eprintf("Error caught in sos_map_page_dir_cb\n");
        free(alloc_args);
        map_args->cb(pid, reply_cap, map_args->cb_args, -1);
        free(map_args);
        return;
    }

    seL4_Word dir_index = PT_TOP(map_args->vaddr);
    printf("directory index %p, pagetable addr %p\n",
           (void *) dir_index, (void *) alloc_args->vaddr);
    printf("index %d\n", alloc_args->index);
    proc_table[pid]->page_directory[dir_index] = (seL4_Word *) alloc_args->vaddr;

    seL4_ARM_Page_Unmap(frametable[alloc_args->index].frame_cap);
    err = map_page(frametable[alloc_args->index].frame_cap,
                   seL4_CapInitThreadPD,
                   alloc_args->vaddr,
                   seL4_AllRights,
                   seL4_ARM_Default_VMAttributes);
    if (err) {
        eprintf("Error caught in sos_map_page_dir_cb\n");
        free(alloc_args);
        map_args->cb(pid, reply_cap, map_args->cb_args, -1);
        free(map_args);
        return;
    }

    memset((void *) alloc_args->vaddr, 0, PAGE_SIZE);

    frametable[alloc_args->index].vaddr = -1;
    printf("Setting index %p to don't swap\n", (void *) alloc_args->index);
    frametable[alloc_args->index].frame_status |= FRAME_DONT_SWAP;

    free(alloc_args);
    sos_map_page_cb(pid, reply_cap, map_args, 0);

    if (SOS_DEBUG) printf("sos_map_page_dir_cb ended\n");
}
static int expand_htab()
{
    unsigned long pfn;
    handle_t h;

    if (htabsize == HTABSIZE / sizeof(struct object *))
        return -ENFILE;

    pfn = alloc_pageframe('HTAB');
    if (pfn == 0xFFFFFFFF)
        return -ENOMEM;
    map_page(htab + htabsize, pfn, PT_WRITABLE | PT_PRESENT);

    for (h = htabsize + HANDLES_PER_PAGE - 1; h >= htabsize; h--) {
        htab[h] = hfreelist;
        hfreelist = h;
    }

    htabsize += HANDLES_PER_PAGE;
    return 0;
}
static int map_pages_at_vaddr(vspace_t *vspace, seL4_CPtr caps[], uintptr_t cookies[],
                              void *vaddr, size_t num_pages,
                              size_t size_bits, seL4_CapRights rights, int cacheable)
{
    int error = seL4_NoError;

    for (int i = 0; i < num_pages && error == seL4_NoError; i++) {
        error = map_page(vspace, caps[i], vaddr, rights, cacheable, size_bits);
        if (error == seL4_NoError) {
            uintptr_t cookie = cookies == NULL ? 0 : cookies[i];
            error = update_entries(vspace, (uintptr_t) vaddr, caps[i], size_bits, cookie);
            vaddr = (void *) ((uintptr_t) vaddr + (1 << size_bits));
        }
    }
    return error;
}
int guard_page_handler(void *addr)
{
    unsigned long pfn;
    struct thread *t = self();

    if (!t->tib)
        return -EFAULT;
    if (addr < t->tib->stacklimit || addr >= t->tib->stacktop)
        return -EFAULT;
    if (t->tib->stacklimit <= t->tib->stackbase)
        return -EFAULT;

    pfn = alloc_pageframe('STK');
    if (pfn == 0xFFFFFFFF)
        return -ENOMEM;

    t->tib->stacklimit = (char *) t->tib->stacklimit - PAGESIZE;
    map_page(t->tib->stacklimit, pfn, PT_GUARD | PT_WRITABLE | PT_PRESENT);
    memset(t->tib->stacklimit, 0, PAGESIZE);

    return 0;
}
/*
 * Allocate <sz> worth of physical pages, and map them contiguously into the
 * current virtual address space, starting at address <vaddr>
 */
void vm_alloc_pages_at(void *vaddr, unsigned flags, unsigned sz)
{
    void *physical_page;

    if (sz == 0)
        return;

    vaddr = PALIGNDOWN(vaddr);

    /* the last bit is to map a sufficient number of pages */
    unsigned npages = sz / PAGE_SIZE + (sz % PAGE_SIZE > 0);

    for (int i = 0; i < npages; i++) {
        physical_page = pm_alloc();
        map_page(physical_page, vaddr, flags);
        vaddr += PAGE_SIZE;
    }
}
/*
 * Map <sz> bytes of contiguous pages
 */
void map_pages(void *paddr, void *vaddr, unsigned flags, unsigned sz)
{
    if (sz == 0) {
        kprintf("[vm] Warning: attempting to map 0 bytes (0x%x -> 0x%x)!\n",
                paddr, vaddr);
        return;
    }

    paddr = PALIGNDOWN(paddr);
    vaddr = PALIGNDOWN(vaddr);

    /* the last bit is to map a sufficient number of pages */
    unsigned npages = sz / PAGE_SIZE + (sz % PAGE_SIZE > 0);

    for (int i = 0; i < npages; i++) {
        map_page(paddr, vaddr, flags);
        paddr += PAGE_SIZE;
        vaddr += PAGE_SIZE;
    }
}
/* Add DELTA to the kernel's break address, returning the old break
   address. Note that DELTA may be negative if you want. Returns -1
   if no more memory is available. */
void *kernel_sbrk(long delta)
{
#ifndef SBRK_DOESNT_ALLOC
    u_char *old = kernel_brk, *ptr;
    u_long flags;

    save_flags(flags);
    cli();

    kernel_brk += round_to(delta, 4);
    if ((u_long) kernel_brk < (PHYS_MAP_ADDR - KERNEL_BASE_ADDR)) {
        ptr = (u_char *) round_to((u_long) old, PAGE_SIZE);
        while (ptr < kernel_brk) {
            page *p = alloc_page();
            if (p == NULL)
                goto error;
            map_page(logical_kernel_pd, p, TO_LINEAR(ptr), PTE_PRESENT);
            ptr += PAGE_SIZE;
        }
        load_flags(flags);
        return old;
    }

error:
    kernel_brk = old;
    load_flags(flags);
    return (void *) -1;
#else
    /* Don't need to map in any pages or anything; let the page-fault-
       handler do that. Should really release any unneeded pages if
       DELTA is negative. */
    register void *ptr = kernel_brk;

    kernel_brk += round_to(delta, 4);
    if ((u_long) kernel_brk < (PHYS_MAP_ADDR - KERNEL_BASE_ADDR))
        return ptr;

    kernel_brk = ptr;
    return (void *) -1;
#endif
}
/*
 * map_and_copy
 *
 * Maps a series of physical pages into a process's address space, copying the
 * data from the corresponding pages in another process.
 *
 * This operates similarly to map_new_pages. The difference is that instead of
 * the pages being empty, their contents are copied from existing pages that
 * are already mapped by another process. In order to obtain the latter, we
 * perform a lookup on the source process's page directory to get the physical
 * address of each page, and then use that to obtain the data to store in the
 * newly-allocated physical pages of the destination process.
 *
 * This is used by the fork system call, which needs to duplicate all aspects
 * of a process's state. It uses this function to copy the text, data, and
 * stack segments of the parent process.
 */
static void map_and_copy(page_dir src_dir, page_dir dest_dir,
                         unsigned int start, unsigned int end)
{
    assert(0 == start % PAGE_SIZE);
    assert(0 == end % PAGE_SIZE);

    unsigned int addr;
    for (addr = start; addr < end; addr += PAGE_SIZE) {
        /*
         * Map new page
         */
        unsigned int page = (unsigned int) alloc_page();
        map_page(dest_dir, addr, page, PAGE_USER, PAGE_READ_WRITE);

        /*
         * Copy from source
         */
        unsigned int src_phys;
        int sl = lookup_page(src_dir, addr, &src_phys);
        assert(sl);
        memmove((void *) page, (void *) src_phys, PAGE_SIZE);
    }
}
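/*
 * Usage sketch (illustrative, not from the original source): how a fork-style
 * path might call map_and_copy to duplicate the parent's user segments. The
 * segment-boundary parameter names are hypothetical; the real ones would come
 * from this kernel's process structure.
 */
static void example_fork_copy_segments(page_dir parent_dir, page_dir child_dir,
                                       unsigned int text_start, unsigned int data_end,
                                       unsigned int stack_start, unsigned int stack_end)
{
    /* text + data segments (assumed contiguous here for illustration) */
    map_and_copy(parent_dir, child_dir, text_start, data_end);
    /* user stack */
    map_and_copy(parent_dir, child_dir, stack_start, stack_end);
}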