pg_tbl_t* proc_create(char* virt_start, char* virt_end, uint32_t stack_size) { pg_tbl_t* tbl = malloc(0x5000); if(!tbl) return (pg_tbl_t*)0; uint32_t stepsize = __plat_pg_tbl_maxentry(); //Note: If this is changed, physical address calculations for setting up the pagetable have to be adjusted void* entry_loc = mem_phys_find_free(stepsize)+(uint32_t)PLATFORM_KERNEL_BASE; if(!entry_loc) return (pg_tbl_t*)0; mem_phys_set(entry_loc - (uint32_t)PLATFORM_KERNEL_BASE, stepsize); pg_create(tbl, entry_loc, (size_t)PLATFORM_PROC_MAX_MEM); uint32_t start_virt = ((uint32_t)virt_start)/stepsize; uint32_t end_virt = ((uint32_t)virt_end-1)/stepsize; uint32_t fail = 0; for(uint32_t i = start_virt; i<=end_virt && !fail; i++) { void* phys_addr = mem_phys_find_free(stepsize); if(phys_addr) { printf("proc: mapping %x to %x\r\n", (char*)(i*stepsize), phys_addr); pg_map(tbl, (char*)(i*stepsize), phys_addr , stepsize, 0, PERM_PRW_URW, 0, 0, 0); mem_phys_set(phys_addr , stepsize); } else { fail = 1; break; } } uint32_t stack_end = (uint32_t)virt_start-stack_size; stack_end /= stepsize; for(uint32_t i=stack_end; i<start_virt && !fail; i++) { void* phys_addr = mem_phys_find_free(stepsize); if(phys_addr) { printf("stack: mapping %x to %x\r\n", (char*)(i*stepsize), phys_addr); pg_map(tbl, (char*)(i*stepsize), phys_addr , stepsize, 0, PERM_PRW_URW, 0, 0, 0); mem_phys_set(phys_addr , stepsize); } else { fail = 1; break; } } //TODO free memory if(fail) return (pg_tbl_t*)0; return tbl; }
//#define SBRK_DBG void* sbrk() { int extra = (int)__plat_thread_getparam(curr_thread, 1); //align extra with pagesize extra = (extra+(PAGE_SIZE-1))&~(PAGE_SIZE-1); #ifdef SBRK_DBG printf("sbrk called with parameter: %x and pg_tbl: %x\r\n", extra, curr_thread->proc->pg_tbl); #endif void* obrk = (void*)curr_thread->proc->brk; //If we don't need to get or remove memory from the process if(!extra) return obrk; int32_t stepsize = __plat_pg_tbl_maxentry(); uint32_t offset = 0; if(extra > 0) { while(extra>stepsize) { p_addr_t start = mem_phys_find_free(stepsize); if(!start) return (void*)-1; mem_phys_set(start, stepsize); #ifdef SBRK_DBG printf("sbrk: Mapping %x bytes from %x to %x in pg_tbl at %x\n", stepsize, (obrk+offset), start, curr_thread->proc->pg_tbl); #endif pg_map(curr_thread->proc->pg_tbl, (void*)(obrk+offset), start, stepsize, 0, PERM_PRW_URW, 0, 0, 0); extra-=stepsize; offset+=stepsize; } p_addr_t start = mem_phys_find_free(extra); if(!start) return (void*)-1; mem_phys_set(start, stepsize); #ifdef SBRK_DBG printf("sbrk: Mapping %x bytes from %x to %x in pg_tbl at %x\n", extra, (obrk+offset), start, curr_thread->proc->pg_tbl); #endif pg_map(curr_thread->proc->pg_tbl, (void*)(obrk+offset), start, extra, 0, PERM_PRW_URW, 0, 0, 0); curr_thread->proc->brk += offset + extra; } else { while(extra<-stepsize) { pg_unmap(curr_thread->proc->pg_tbl, (void*)(obrk-offset-stepsize), stepsize); offset+=stepsize; extra+=stepsize; } pg_unmap(curr_thread->proc->pg_tbl,(void*)(obrk-offset+extra), -extra); curr_thread->proc->brk += extra - offset; } return obrk; }
/*
 * Back the virtual range [vaddr, vaddr + len) with freshly allocated,
 * zero-filled pages for the executable loader. `execi` is accepted for
 * the callback signature but not used here. Always returns OK.
 */
static int libexec_pg_alloc(struct exec_info *execi, vir_bytes vaddr, size_t len)
{
    vir_bytes region_end = vaddr + len;

    /* Request fresh frames for the range and activate the new mappings. */
    pg_map(PG_ALLOCATEME, vaddr, region_end, &kinfo);
    pg_load();

    /* Newly mapped pages must read back as zeroes. */
    memset((char *) vaddr, 0, len);

    alloc_for_vm += len;
    return OK;
}
/*
 * Maps the shared page of IPC port `port` into the current process's
 * address space at `target_addr`.
 *
 * Returns -1 if the port index is out of range or the port has no backing
 * page; otherwise returns the result of pg_map.
 */
int kipc_map(v_addr_t target_addr, uint32_t port)
{
    /* Validate the index before touching the table (short-circuit). */
    if(port >= IPC_PORTS_MAX || !ipc_port_tbl.ports[port].loc)
        return -1;

    //mmap the appropriate processes page table
    return pg_map(curr_thread->proc->pg_tbl, target_addr,
                  ipc_port_tbl.ports[port].loc,
                  0x1000, 0, PERM_PRW_URW, CACHE_TYPE_OI_WB_NW, 1, 0);
}
/**
 * Allocates another page of memory for use on the heap.
 *
 * Grabs one physical page, maps it at the current top of the kernel heap,
 * initializes a free-block header covering the whole page, and hands the
 * page to the allocator via kfree().
 *
 * @return 1 on success, 0 if no physical page was available
 */
int kbrk()
{
    uint32_t frame = mm_page_allocate();
    if(frame == 0)
        return 0;

    /* Map the new frame at the next unused heap slot. */
    pg_map(pg_kernel, kernel_heap_start + kernel_heap_alloc * 0x1000, frame, 1);

    /* Describe the whole page as a single free block. */
    struct heap_block_hdr_t *hdr =
        (struct heap_block_hdr_t *) (kernel_heap_start + kernel_heap_alloc * 0x1000);
    hdr->block_size = 0x1000 / sizeof(struct heap_block_hdr_t);
    hdr->next_block = 0;

    kernel_heap_alloc++;

    /* kfree expects a pointer just past the header. */
    kfree(hdr + 1);
    return 1;
}
/** * Allocates a page on the heap but does not insert it into the "free" list */ void *kalloc_page() { ptr_t address, aligned_address; size_t alignment, overhead, nbytes; struct heap_block_hdr_t *p, *prevp, *r, *q; alignment = 0x1000; // Align to page boundary if((prevp = kernel_heap_free) == NULL) { kernel_heap_free_base.block_size = 0; kernel_heap_free = prevp = &kernel_heap_free_base; kernel_heap_free_base.next_block = kernel_heap_free; } for(p = prevp->next_block; ; prevp = p, p = p->next_block) { address = (ptr_t) p; aligned_address = (address / alignment) * alignment; // Align to _next_ page boundary if(address > aligned_address) aligned_address += 0x1000; // Compute space wasted by alignment overhead = aligned_address - address; // Compute amount of space after point of alignment (zero if negative) if((p->block_size * sizeof(struct heap_block_hdr_t)) > overhead) { nbytes = p->block_size * sizeof(struct heap_block_hdr_t) - overhead; } else { nbytes = 0; } if(nbytes >= 0x1000) { q = p->next_block; // If units before block: change amount of free units if(overhead > 0) { p->block_size = overhead / sizeof(struct heap_block_hdr_t); } else { p = prevp; } // If units after block: add another segment if(nbytes > 0x1000) { r = (struct heap_block_hdr_t*) (aligned_address + 0x1000); r->block_size = (nbytes - 0x1000) / sizeof(struct heap_block_hdr_t); r->next_block = q; p->next_block = r; } else { p->next_block = p->next_block->next_block; } return (void *) aligned_address; } // Couldn't find page, allocate new block if(p == kernel_heap_free) { break; } } uint32_t allocated_page = mm_page_allocate(); if(allocated_page == 0) return 0; pg_map(pg_kernel, kernel_heap_start + kernel_heap_alloc * 0x1000, allocated_page, 1); void *block = (void *) (kernel_heap_start + kernel_heap_alloc * 0x1000); kernel_heap_alloc++; return block; }