/*
 * Create a new thread in the given parent process.
 *
 * Allocates the thread descriptor, its CPU context and a stack from the
 * parent's stack allocator, and initializes the context so the thread
 * begins execution in thr_trampoline(thr, entry) with interrupts enabled.
 *
 * Returns the new thread in Runnable state, or NULL if any allocation
 * fails (all partially-acquired resources are released first).
 *
 * NOTE(review): on a failure after prc_next_tid() the id is not returned
 * to the parent — confirm whether tids may be wasted. The allocated stack
 * is not released on the (currently impossible) later failure paths;
 * a stka_free-style call is not visible from here.
 */
thread_t* thr_create(process_t* parent, thread_start_t entry) {
    thread_t* thr = kheap_alloc(sizeof(thread_t));
    if(!thr)
        return NULL;
    memset(thr, 0, sizeof(thread_t));

    thr->id = prc_next_tid(parent);
    thr->parent = parent;
    thr->priority = parent->priority;

    thr->context = kheap_alloc(sizeof(thr_context_t));
    if(!thr->context) {
        kheap_free(thr);
        return NULL;
    }
    memset(thr->context, 0, sizeof(thr_context_t));

    thr->stack = stka_alloc(parent->stka);
    if(!thr->stack) {
        kheap_free(thr->context);
        kheap_free(thr);
        return NULL;
    }

    // TODO: user mode trampoline!
    thr->context->state.rip = (uintptr_t)thr_trampoline;
    thr->context->state.rdi = (uintptr_t)thr;      // 1st trampoline argument: the thread
    thr->context->state.rsi = (uintptr_t)entry;    // 2nd trampoline argument: user entry point
    thr->context->state.rflags = FL_IF; // enable interrupts when starting thread.
    // reserve two slots below the stack top (presumably return address /
    // alignment for the trampoline — confirm against thr_trampoline)
    thr->context->state.rsp = thr->stack->top - (sizeof(uintptr_t) * 2);
    thr->context->thread = thr;

    if(parent->ring == 0) {
        thr->context->state.ss = GDT_KDATA64;
        thr->context->state.cs = GDT_KCODE64;
    }
    // TODO: else — user-mode (ring 3) segment selectors

    thr->state = Runnable;
    return thr;
}
pmap_t* pmap_create() { pmap_t *pmap = (pmap_t*)kheap_alloc(sizeof(pmap_t)); memset(pmap, 0, sizeof(pmap_t)); // Create pgd // TODO: This will not work! We need to allocate 16 KiB of contiguous memory aligned to a 16 KiB address boundary pmap->pgd = (pgd_t*)kheap_alloc(sizeof(pgd_t)); memset(pmap->pgd, 0, sizeof(pgd_t)); // Get the physical address of the pgd pmap->pgd_pa = TRUNC_PAGE(KERNEL_PGTS_BASE[PGD_GET_INDEX((vaddr_t)pmap->pgd)-KERNEL_PGD_PGT_INDEX_BASE].pte[PGT_GET_INDEX((vaddr_t)pmap->pgd)]); pmap_reference(pmap); return pmap; }
static struct mblock *kheap_get_block(size_t size) { size = size + (size % 8); struct mblock *i = mlist; struct mblock *last = i; struct mblock *ret = NULL; uint32_t smallest = -1; while (i) { if (i->size >= size && i->size < smallest && i->state == MBLOCK_FREE) { smallest = i->size; ret = i; } last = i; i = i->next_block; } if (ret) { return ret; } else { struct mblock *new_block = (struct mblock *)kheap_alloc(sizeof(struct mblock)); new_block->size = size; new_block->magic = MAGIC; new_block->state = MBLOCK_ALLOCATED; new_block->next_block = NULL; if (mlist == NULL) mlist = new_block; else last->next_block = new_block; return new_block; } }
/*
 * Register a range of physical memory [start, start + length) with the
 * physical memory manager.
 *
 * Any part of the range already covered by an existing region is clipped
 * away; after each clip the scan restarts from the list head (the
 * `recheck` label) so the adjusted range is re-validated against every
 * region. If nothing is left the function returns without changes.
 * Otherwise a new region with a fresh page bitmap is appended to the
 * region list.
 *
 * NOTE(review): a range overlapping an existing region on BOTH ends
 * (straddling it completely) is only clipped at the front here — confirm
 * whether such input can occur.
 */
void pmem_add(phys_addr_t start, size_t length) {
    pmem_region_t* reg;

recheck:
    reg = pmem_region_head;
    while(reg) {
        if(start >= reg->start && start < (reg->start + reg->length)) {
            /* start lies within this region */
            if((start + length) <= (reg->start + reg->length)) {
                /* end within this region too, so completely covered already */
                return;
            }
            /* clip the overlapping front part off and re-scan from the head */
            length -= (reg->start + reg->length) - start;
            start = reg->start + reg->length;
            goto recheck;
        }
        if((start + length) > reg->start && (start + length) < (reg->start + reg->length)) {
            /* end lies within this region */
            if(start >= reg->start) {
                /* start too, so no more left over. */
                return;
            }
            /* clip the overlapping tail part off and re-scan from the head */
            length = (reg->start - start);
            goto recheck;
        }
        reg = reg->next;
    }

    /* we have a checked region here, with correct start and end.
     * now allocate the required management structures, etc. */
    pmem_region_t* region = (pmem_region_t*)kheap_alloc(sizeof(pmem_region_t));
    bitmap_t* bmap = bmap_new(PMEM_PAGES(length));

    if(!region || !bmap) {
        error("not enough memory to allocate memory management structures!\n");
        /* NOTE(review): bmap comes from bmap_new() but is released with
         * kheap_free() — confirm bmap_new has no internal allocations
         * that this leaks. */
        if(region) kheap_free(region);
        if(bmap) kheap_free(bmap);
        return;
    }

    region->start = start;
    region->length = length;
    region->bmap = bmap;
    region->next = NULL;

    /* NOTE(review): assumes pmem_region_tail is non-NULL, i.e. at least
     * one region already exists — confirm initialization elsewhere. */
    pmem_region_tail->next = region;
    pmem_region_tail = region;
}
/*
 * Allocate `size` bytes from the kernel heap via the block-descriptor
 * allocator.
 *
 * A descriptor returned by kheap_get_block() in the ALLOCATED state is
 * freshly created and needs backing memory attached; one returned in the
 * FREE state is reused together with its existing memory and is simply
 * flipped to ALLOCATED.
 *
 * Returns a pointer to the usable memory, or NULL if no descriptor or
 * backing memory could be allocated.
 */
void *kalloc(size_t size) {
    struct mblock *block = kheap_get_block(size);
    if (!block) /* descriptor allocation failed */
        return NULL;

    if (block->state == MBLOCK_ALLOCATED) {
        /* fresh descriptor: attach backing memory. On failure this
         * returns NULL; the descriptor stays in the list for reuse by a
         * later kfree/kalloc cycle. */
        block->memory = kheap_alloc(size);
    } else {
        /* reusing a previously freed block and its memory */
        block->state = MBLOCK_ALLOCATED;
    }
    return block->memory;
}
/*
 * nf_malloc - network-facing allocation entry point.
 *
 * Thin forwarding wrapper around the kernel heap allocator; the return
 * value (including any NULL on failure) is passed through unchanged.
 */
void *
nf_malloc(size_t size)
{
    void *mem = kheap_alloc(size);
    return mem;
}