uint32_t brk(uint32_t addr) {
    /* Change the value of the break address. This increases
     * or decreases the data segment size.
     */
    uint32_t first_page, last_page, i;

    /* Get the umem structure of the current process: */
    umem_t *umem = &(curproc->umem);

    /* Check limits: */
    if (addr >= umem->heap_end || addr < umem->heap_start)
        /* no memory :( */
        return umem->brk_addr; /* just return current break. */

    /* Compare addr with the current break: */
    if (addr > umem->brk_addr) {
        /* Move forwards (map): */
        first_page = (umem->brk_addr+PAGE_SIZE-1) & PAGE_BASE_MASK;
        last_page  = (addr-1) & PAGE_BASE_MASK;
        for (i = first_page; i <= last_page; i+=PAGE_SIZE)
            arch_vmpage_map(umem, i, 1 /* user mode */);
    } else if (addr < umem->brk_addr) {
        /* Move backwards (unmap): */
        first_page = (addr+PAGE_SIZE-1) & PAGE_BASE_MASK;
        last_page  = (umem->brk_addr-1) & PAGE_BASE_MASK;
        for (i = first_page; i <= last_page; i+=PAGE_SIZE)
            arch_vmpage_unmap(umem, i);
    }

    /* Set & return the new break: */
    return umem->brk_addr = addr;
}
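/* A worked example of the rounding above, assuming PAGE_SIZE is 0x1000 and
 * PAGE_BASE_MASK is 0xFFFFF000 (values not shown in this listing): growing
 * the break from 0x08049234 to 0x0804B000 gives first_page = 0x0804A000 and
 * last_page = 0x0804A000, so exactly one new page is mapped; the page
 * containing the old break (0x08049000) is already mapped and is skipped.
 *
 * Because brk() returns the unchanged break on failure and the new break on
 * success, a minimal sbrk()-style wrapper could be sketched as follows
 * (sbrk_sketch is illustrative, not part of the kernel source):
 */
void *sbrk_sketch(int32_t increment) {
    uint32_t old_brk = curproc->umem.brk_addr;
    if (brk(old_brk + increment) == old_brk && increment != 0)
        return (void *) -1;  /* brk() refused; break is unchanged. */
    return (void *) old_brk; /* classic sbrk returns the old break. */
}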
void *kmalloc(uint32_t size) {
    /* Local vars: */
    linknode *ptr;
    uint32_t base, i;

    /* Get a free hole: */
    base = get_hole(log2(nextpow2(size)));
    if (!base)
        return NULL; /* kernel heap is exhausted. */

    /* Allocate physical pages: */
    for (i = base & PAGE_BASE_MASK; i < base+size; i+=PAGE_SIZE)
        arch_vmpage_map(NULL, i, 0);

    /* Store info about this allocated space: */
    ptr = (linknode *) get_hole(log2(16));
    arch_vmpage_map(NULL, (uint32_t) ptr, 0);
    linkedlist_add(&usedlist, ptr);
    ptr->datum[0] = base;
    ptr->datum[1] = size;

    return (void *) base;
}
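/* kmalloc() rounds every request up to the next power of two through
 * nextpow2(), so a 3000-byte request consumes a 4096-byte hole, and the
 * (base, size) pair is recorded on usedlist. A typical call site might look
 * like the sketch below; it assumes kfree() (seen in umem_copy() later)
 * releases an allocation by looking it up in usedlist, which this listing
 * does not itself show:
 */
void kmalloc_usage_sketch(void) {
    uint32_t *arr = (uint32_t *) kmalloc(64*sizeof(uint32_t));
    if (arr == NULL)
        return;  /* allocation failed. */
    arr[0] = 0xDEADBEEF;
    kfree(arr);  /* assumed to consult the usedlist bookkeeping above. */
}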
void kmem_init() {
    /* initialize free lists: */
    /* ----------------------- */
    uint32_t i;
    for (i = 0; i < 32; i++)
        linkedlist_init(&freelist[i]);
    linkedlist_init(&usedlist);

    /* The whole kernel heap starts out as one free hole of 2^U bytes: */
    arch_vmpage_map(NULL, KERNEL_MEMORY_BASE, 0);
    linkedlist_add(&freelist[U], (linknode *)((uint32_t) KERNEL_MEMORY_BASE));
    kmem_initialized = 1;
}
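/* freelist[i] holds free holes of exactly 2^i bytes, with i clamped to the
 * range [L, U] by get_hole() below. With the hypothetical values L = 4 and
 * U = 22 (a 4 MB kernel heap; the real constants are not in this listing),
 * the first 16-byte kmalloc() after kmem_init() would split the single
 * 2^U hole all the way down, leaving one free buddy on each level:
 *
 *   freelist[22]: {}                (the 4 MB hole was split)
 *   freelist[21]: {base+2MB}
 *   freelist[20]: {base+1MB}
 *   ...
 *   freelist[4] : {base+16}
 *   returned    : base
 */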
int32_t umem_copy(umem_t *src, umem_t *dest) {
    /* used by fork() to copy src into dest. */
    uint32_t i, j;
    uint8_t *buf1, *buf2;

    buf1 = (uint8_t *) kmalloc(PAGE_SIZE);
    if (buf1 == NULL)
        return ENOMEM;
    buf2 = (uint8_t *) kmalloc(PAGE_SIZE);
    if (buf2 == NULL) {
        kfree(buf1);
        return ENOMEM;
    }

    /* copy umem: */
    for (i = USER_MEMORY_BASE; i < KERNEL_MEMORY_BASE; i+=PAGE_DIR_SIZE) {
        if (!arch_vmdir_isMapped(src, i))
            continue;
        for (j = i; j < i + PAGE_DIR_SIZE; j+=PAGE_SIZE) {
            if (!arch_vmpage_isMapped(src, j))
                continue;
            arch_vmpage_map(dest, (int32_t) j, 1 /* user mode */);
            arch_vmpage_copy(src, j, dest, j, buf1, buf2);
        }
    }

    /* free the buffers: */
    kfree(buf1);
    kfree(buf2);

    /* copy heap parameters: */
    dest->heap_start = src->heap_start;
    dest->brk_addr   = src->brk_addr;
    dest->heap_end   = src->heap_end;

    /* done: */
    return ESUCCESS;
}
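/* umem_copy() is the address-space half of fork(): it walks the source page
 * directory, skips unmapped directories and pages, and duplicates each
 * mapped page through the two bounce buffers. A simplified caller could
 * look like the sketch below; it assumes child_umem already has a live page
 * directory (the routine that creates one is not part of this listing):
 */
int32_t fork_mem_sketch(umem_t *child_umem) {
    int32_t err = umem_copy(&(curproc->umem), child_umem);
    if (err != ESUCCESS)
        return err; /* ENOMEM: the bounce buffers couldn't be allocated. */
    return ESUCCESS;
}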
uint32_t get_hole(uint32_t i) {
    if (i > U)
        return 0; /* no hole can be that large. */
    if (i < L)
        i = L;    /* don't serve holes smaller than 2^L bytes. */
    if (freelist[i].count) {
        /* Remove the first element from the free list: */
        linknode *ptr = freelist[i].first;
        linkedlist_remove(&freelist[i], ptr, NULL);
        /* Page-sized (and larger) holes are handed out unmapped; the
         * caller maps physical pages itself: */
        if (i > 11)
            arch_vmpage_unmap(NULL, (uint32_t) ptr);
        return (uint32_t) ptr;
    } else {
        /* No free hole of 2^i bytes; split a 2^(i+1) hole in two: */
        uint32_t ret = get_hole(i+1);
        if (ret == 0)
            return 0;
        /* One half becomes a free hole, the other is returned: */
        arch_vmpage_map(NULL, ret + pow2(i), 0);
        linkedlist_add(&freelist[i], (linknode *) (ret + pow2(i)));
        return ret;
    }
}
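/* A worked example of the buddy-style split, assuming L = 4 and an empty
 * freelist[4] and freelist[5], but one hole h on freelist[6] (i.e. sizes
 * 16 B, 32 B and 64 B respectively):
 *
 *   get_hole(4)                       -> freelist[4] empty, recurse
 *     get_hole(5)                     -> freelist[5] empty, recurse
 *       get_hole(6) returns h         (64 B, removed from freelist[6])
 *     adds h+32 to freelist[5], returns h
 *   adds h+16 to freelist[4], returns h
 *
 * The caller receives a 16-byte chunk at h, while the buddies at h+16 and
 * h+32 stay free for later requests.
 */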
uint32_t mmap(uint32_t base, uint32_t size, uint32_t type,
              uint32_t flags, uint32_t fd, uint64_t off) {
    int32_t pages;
    uint32_t addr;
    umem_t *umem;

    /* make sure the system is initialized: */
    if (curproc == NULL)
        return 0; /* system is not initialized. */
    umem = &(curproc->umem); /* current process umem image. */

    /* make sure fd is valid if a file is to be used: */
    if (type & MMAP_TYPE_FILE) {
        if (fd >= FD_MAX || curproc->file[fd] == NULL)
            return 0;
    }

    if (!base) {
        /* no base given; allocate space below the heap end: */
        base = umem->heap_end - size;
    }

    /* page-align base and rectify "size": */
    size += base & (~PAGE_BASE_MASK);
    base  = base & PAGE_BASE_MASK;
    pages = (size+PAGE_SIZE-1)/PAGE_SIZE; /* pages to be allocated. */
    size  = pages*PAGE_SIZE;              /* actual size. */

    /* check whether the given range is valid or not: */
    for (addr = base; addr < base + size; addr+=PAGE_SIZE) {
        if (addr < USER_MEMORY_BASE || addr >= KERNEL_MEMORY_BASE)
            return 0; /* invalid. */
    }

    /* update heap end: */
    if (base < umem->heap_end)
        umem->heap_end = base;

    /* now allocate: */
    for (addr = base; addr < base + size; addr+=PAGE_SIZE) {
        arch_vmpage_map(umem, (int32_t) addr, 1 /* user mode */);

        /* mapping a file? */
        if (type & MMAP_TYPE_FILE) {
            /* read file information: */
            file_t *file = curproc->file[fd];
            inode_t *inode = file->inode;
            file_mem_t *region = inode->sma.first;

            /* shared? */
            if (flags & MMAP_FLAGS_SHARED) {
                /* search for the wanted region: */
                while (region != NULL) {
                    if (region->pos == off) {
                        /* found: */
                        region->ref++;
                        break;
                    }
                    region = region->next;
                }
                /* region found or not? */
                if (!region) {
                    /* region not found, create it: */
                    region = kmalloc(sizeof(file_mem_t));
                    if (!region)
                        return 0;
                    if (file_reopen(file, &(region->file))) {
                        kfree(region);
                        return 0;
                    }
                    region->pos = off;
                    region->paddr = 0;
                    region->ref = 1;
                    /* add to the inode: */
                    linkedlist_add(&(inode->sma), region);
                }
            } else {
                /* not shared, allocate a new one: */
                region = kmalloc(sizeof(file_mem_t));
                if (!region)
                    return 0;
                if (file_reopen(file, &(region->file))) {
                    kfree(region);
                    return 0;
                }
                region->pos = off;
                region->paddr = 0;
                region->ref = 1;
            }

            /* attach the virtual page to the mapping: */
            arch_vmpage_attach_file(umem, (int32_t) addr, region);

            /* update the file offset for the next page: */
            off += PAGE_SIZE;
        }
    }

    /* return the base address. */
    return base;
}
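/* Two usage sketches for mmap(). MMAP_TYPE_ANON is an assumed name for the
 * non-file type bit (this listing only shows MMAP_TYPE_FILE), and fd 3 is
 * assumed to reference an open file in curproc->file:
 */
void mmap_usage_sketch(void) {
    /* Anonymous mapping: base 0 lets the kernel pick an address just
     * below the current heap_end. */
    uint32_t anon = mmap(0, 0x4000, MMAP_TYPE_ANON, 0, 0, 0);

    /* Shared file mapping of the first two pages of fd 3: pages with the
     * same (inode, offset) are shared through the inode's sma list, each
     * mapping bumping the region's reference count. */
    uint32_t shared = mmap(0, 0x2000, MMAP_TYPE_FILE,
                           MMAP_FLAGS_SHARED, 3, 0);

    if (!anon || !shared)
        return; /* mmap() returns 0 on failure. */
}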