/*
 * Initializes the file system manager.
 */
PUBLIC void fs_init(void)
{
    binit();
    inode_init();
    superblock_init();

    /* Sanity check. */
    CHKSIZE(sizeof(struct d_dirent), sizeof(struct dirent));

    rootdev = superblock_read(ROOT_DEV);

    /* Failed to read root super block. */
    if (rootdev == NULL)
        kpanic("failed to mount root file system");

    superblock_unlock(rootdev);

    root = inode_get(ROOT_DEV, 1);

    /* Failed to read root inode. */
    if (root == NULL)
        kpanic("failed to read root inode");

    kprintf("fs: root file system mounted");

    /* Hand craft idle process. */
    IDLE->pwd = root;
    IDLE->root = root;
    root->count += 2;

    inode_unlock(root);
}
/**
 * Do the final cleanup of a task struct.
 * @return: the exit code of the task
 */
int cleanup_child(struct task_struct *task)
{
    int rv;

    if (!task)
        kpanic("Waiting on NULL task\n");

    /* Remove the child from list of all tasks. */
    if (task->next_task)
        task->next_task->prev_task = task->prev_task;
    if (task->prev_task)
        task->prev_task->next_task = task->next_task;

    if (!task->parent)
        kpanic("Reaping a child that has no parent!\n");

    /* Remove task from list of children in parent. */
    if (task->parent->chld == task) {
        /* task was first child */
        task->parent->chld = task->sib;
    } else {
        struct task_struct *prev = task->parent->chld;
        for (; prev->sib != task; prev = prev->sib)
            ;
        prev->sib = task->sib; /* remove task */
    }

    rv = task->exit_code;
    kfree(task);
    return rv;
}
static int unix_file_bd_write_block(BD_t * object, bdesc_t * block, uint32_t number)
{
    struct unix_file_info * info = (struct unix_file_info *) object;
    int r, revision_back;
    off_t seeked;

    /* make sure it's a valid block */
    assert(block->length && number + block->length / object->blocksize <= object->numblocks);

    seeked = lseek(info->fd, number * object->blocksize, SEEK_SET);
    if(seeked != number * object->blocksize)
    {
        perror("lseek");
        assert(0);
    }

#if REVISION_TAIL_INPLACE
    revision_back = revision_tail_prepare(block, object);
    if(revision_back < 0)
    {
        kpanic("revision_tail_prepare gave: %d\n", revision_back);
        return revision_back;
    }
    if(write(info->fd, block->data, block->length) != block->length)
    {
        perror("write");
        assert(0);
    }
#else
    static uint8_t buffer[4096];
    assert(block->length <= 4096);
    revision_back = revision_tail_prepare(block, object, buffer);
    if(revision_back < 0)
    {
        kpanic("revision_tail_prepare gave: %d\n", revision_back);
        return revision_back;
    }
    if(write(info->fd, buffer, block->length) != block->length)
    {
        perror("write");
        assert(0);
    }
#endif

    if(block_log)
        fprintf(block_log, "%d write %u %d\n", info->user_name, number, block->flags);

    r = revision_tail_acknowledge(block, object);
    if(r < 0)
    {
        kpanic("revision_tail_acknowledge gave error: %i\n", r);
        return r;
    }

    if(revision_back != r)
        printf("%s(): block %u: revision_back (%d) != revision_forward (%d)\n",
               __FUNCTION__, number, revision_back, r);

    return 0;
}
void vbeinitcons(int w, int h)
{
    struct cons *cons = constab;
    conschar_t **buf;
    void *ptr;
    long bufsz = CONSNTEXTROW * sizeof(conschar_t *);
    long rowsz = (w + 1) * sizeof(conschar_t);
    long l;
    long row;
    long n = 0;

    for (l = 0 ; l < NCONS ; l++) {
        cons->puts = vbeputs;
        cons->putchar = vbeputchar;
        cons->fg = GFX_WHITE;
        cons->bg = GFX_BLACK;
        cons->buf = vbescreen.fbuf;
        cons->col = 0;
        cons->row = 0;
        cons->ncol = w;
        cons->nrow = h;
        cons->ntextrow = CONSNTEXTROW;
#if 0 /* TODO: allocate scrollback buffer */
        buf = kcalloc(bufsz);
        if (!buf) {
            kprintf("CONS: failed to allocate console row buffer\n");
            kpanic();
        }
        n++;
        cons->textbuf = buf;
        for (row = 0 ; row < CONSNTEXTROW ; row++) {
            /* allocate NUL-terminated row */
            ptr = kcalloc(rowsz);
            if (!ptr) {
                kprintf("CONS %l: failed to allocate console row %l (%l)\n", l, row, n);
                kpanic();
            }
            n++;
            buf[row] = ptr;
        }
#endif
        cons++;
    }
    conscur = 0;

    return;
}
void exec_preemptuser(void)
{
    long err;

    err = task_files_init(curr_task);
    if (err) {
        kpanic("task_files_init failed: %s\n", strerror(-err));
    }

    err = do_execve("/bin/preemptuser", NULL, NULL, 0);
    if (err) {
        kpanic("do_execve failed: %s\n", strerror(-err));
    }

    kill_curr_task(0);
}
/**
 * Init stdin, stdout, stderr in the specified task
 */
int task_files_init(struct task_struct *task)
{
    struct file *fp;

    if (!task)
        kpanic("task is NULL!\n");

    if (task->files[0] || task->files[1] || task->files[2])
        kpanic("A file is open during init!!\n");

    fp = term_open();
    if (!fp)
        return -ENOMEM;

    fp->f_count += 2; /* we make 2 "copies" */
    task->files[0] = fp;
    task->files[1] = fp;
    task->files[2] = fp;

    return 0;
}
/*
 * This is the main routine for the NativeOS Kernel. It will start the
 * system and jump to user mode so that the init process can run.
 * At the moment no information is gathered from multiboot but I expect
 * this to change in the near future. Multiboot will provide two arguments
 * here: one is the magic number, which must be 0x2BADB002, and the other
 * one is the data structure with information that might be required for
 * some things.
 */
void kmain(unsigned int magic_number, multiboot_info_t *multiboot_ptr)
{
    gdt_init();
    idt_init();

    int i;
    for (i = 0; i < 16; i++)
        idt_set_handler(i, &bsod);

    /* Set up the core drivers. */
    VGACon_Init();
    keyboard_init();
    timer_init();

    /* Check that the magic code is valid. */
    if (magic_number != 0x2BADB002) {
        kpanic(0x88, "Wrong magic number");
    }

    unsigned int memory_amount = count_memory(multiboot_ptr);
    frames_init(memory_amount);

    printk("Starting NativeOS...\n");
    for (;;);
}
static void tsc_calibration (void)
{
    u64 hz, mhz, loops;
    u8 i;

    sys.cpu[0].hz = (u64) -1;
    loops_per_jiffy = (u64) -1;

    /* Best of four :) */
    i = 4;
    while (i--) {
        hz = tsc_calibration_withpit (&loops);
        if (hz < sys.cpu[0].hz)
            sys.cpu[0].hz = hz;
        if (loops < loops_per_jiffy)
            loops_per_jiffy = loops;
    }

    mhz = sys.cpu[0].hz / 1000000;

    /* We require a 1 mhz CPU :) */
    if (mhz == 0)
        kpanic("Can't calibrate the cpu!!");

    kprintf ("Cpu speed calibrated to: %ldMhz, %ld BogoMIPS\n", mhz, loops_per_jiffy);
}
/*
 * free file table entry:
 *  -- puts it into free list head, O(1);
 *  -- sets references to 0;
 */
void fput(struct fl *fp)
{
    struct ind *in;

    if (--fp->rc) /* other fd's remain */
        return;

    /* no fd's remain */

    /* -> free list head */
    crin();
    fp->nxt = ftb.fhd;
    ftb.fhd = fp;
    crout();

    /* determine type */
    switch (fp->t) { /* normally faster than if */
    case FPP:
        rmpp(fp);
        break;
    case FIND:
        in = fp->in;
        ilck(in);
        /* close device if needed */
        if (in->t == FCHR)
            devtb[in->mj].close(in->mn);
        iput(in); /* unlck */
        break;
    default:
        kpanic("fput: fp->t == ?\n");
    }
}
static int unix_file_bd_destroy(BD_t * bd)
{
    struct unix_file_info * info = (struct unix_file_info *) bd;
    int r;

    r = modman_rem_bd(bd);
    if(r < 0)
        return r;

    blockman_destroy(&info->blockman);

    close(info->fd);

    memset(info, 0, sizeof(*info));
    free(info);

    if(block_log)
    {
        block_log_users--;
        /* close the shared log once the last user is gone */
        if(!block_log_users)
        {
            r = fclose(block_log);
            if(r == EOF)
            {
                perror("fclose(block_log)");
                kpanic("unable to close block log\n");
            }
            block_log = NULL;
        }
    }

    return 0;
}
/**
 * @brief Frees a disk block.
 *
 * @details Frees a disk block by freeing all underlying disk blocks.
 *
 * @param sb  Superblock in which the disk block should be freed.
 * @param num Number of the disk block that shall be freed.
 * @param lvl Level of indirection to be parsed: zero for direct blocks, one
 *            for single indirect blocks, and two for doubly indirect blocks.
 *
 * @note The superblock must be locked.
 */
PUBLIC void block_free(struct superblock *sb, block_t num, int lvl)
{
    /* Free disk block. */
    switch (lvl)
    {
        /* Direct block. */
        case 0:
            block_free_direct(sb, num);
            break;

        /* Single indirect block. */
        case 1:
            block_free_indirect(sb, num);
            break;

        /* Doubly indirect block. */
        case 2:
            block_free_dindirect(sb, num);
            break;

        /* Should not happen. */
        default:
            kpanic("fs: bad indirection level");
    }
}
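/*
 * Hedged usage sketch (not from the original source): how an inode-truncate
 * path might call block_free() above once per block, assuming a Minix-like
 * layout with NR_DIRECT direct blocks followed by one single indirect and
 * one double indirect block. Every name other than block_free() is
 * illustrative.
 */
static void inode_free_blocks(struct superblock *sb, struct my_inode *ip)
{
    int i;

    /* Direct blocks: indirection level 0 (skip holes, assuming 0 marks one). */
    for (i = 0; i < NR_DIRECT; i++)
    {
        if (ip->blocks[i] != 0)
            block_free(sb, ip->blocks[i], 0);
    }

    /* Single indirect block: level 1 also frees the blocks it points to. */
    if (ip->blocks[NR_DIRECT] != 0)
        block_free(sb, ip->blocks[NR_DIRECT], 1);

    /* Double indirect block: level 2. */
    if (ip->blocks[NR_DIRECT + 1] != 0)
        block_free(sb, ip->blocks[NR_DIRECT + 1], 2);
}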
void kfree(void *addr)
{
    csection_lock(&kmem_mtx);

    if (memblock_free(&kernel_heap, addr) < 0)
        kpanic(0x0, "double free at %p\n", addr);

    csection_unlock(&kmem_mtx);
}
void btable_ins(struct buf *bufp)
{
    int i;

    for (i = 0; i < NR_BUF_BLKS; i++) {
        if (!table_bufs[i]) {
            table_bufs[i] = bufp;
            return; /* placed in the first free slot */
        }
    }

    kpanic("btable_ins(): no space to place a buffer");
}
void cdev_register_dev (struct cdevsw *cdev, u32 major)
{
    if (major > CMAJORMAX)
        kpanic ("CDEV: too big major number registered.");

    cdevsw[major] = cdev;
}
static void serve_readdir(fuse_req_t req, fuse_ino_t fuse_ino, size_t size,
                          off_t foff, struct fuse_file_info * fi)
{
    fdesc_t * fdesc = fi_get_fdesc(fi);
    uint32_t off = foff;
    uint32_t total_size = 0;
    char * buf = NULL;
    int r;
    Dprintf("%s(ino = %lu, size = %u, off = %lld)\n", __FUNCTION__, fuse_ino, size, foff);

    while (1)
    {
        dirent_t dirent;
        int nbytes;
        struct stat stbuf;
        inode_t entry_cfs_ino;
        size_t oldsize = total_size;

        nbytes = CALL(reqcfs(req), get_dirent, fdesc, &dirent, sizeof(dirent), &off);
        if (nbytes == -1)
            break;
        else if (nbytes < 0)
        {
            fprintf(stderr, "%s:%s(): CALL(cfs, get_dirent, fdesc = %p, off = %d) = %d (%s)\n",
                    __FILE__, __FUNCTION__, fdesc, off, nbytes, strerror(nbytes));
            assert(nbytes >= 0);
        }

        if (total_size + fuse_dirent_size(dirent.d_namelen) > size)
            break;

        Dprintf("%s: \"%s\"\n", __FUNCTION__, dirent.d_name);

        total_size += fuse_dirent_size(dirent.d_namelen);
        buf = (char *) realloc(buf, total_size);
        if (!buf)
            kpanic("realloc() failed");

        memset(&stbuf, 0, sizeof(stbuf));
        // Generate "." and ".." here rather than in the base file system
        // because they are not able to find ".."'s inode from just
        // "."'s inode
        if (!strcmp(dirent.d_name, "."))
            entry_cfs_ino = fusecfsino(req, fuse_ino);
        else if (!strcmp(dirent.d_name, ".."))
            entry_cfs_ino = fdesc->common->parent;
        else
        {
            r = CALL(reqcfs(req), lookup, fusecfsino(req, fuse_ino), dirent.d_name, &entry_cfs_ino);
            assert(r >= 0);
        }
        stbuf.st_ino = cfsfuseino(req, entry_cfs_ino);
        fuse_add_dirent(buf + oldsize, dirent.d_name, &stbuf, off);
    }

    r = fuse_reply_buf(req, buf, total_size);
    fuse_reply_assert(!r);
    free(buf);
}
void * mm_malloc(size_t size)
{
    _next_alloc = ((char *) _next_alloc) + size;

    if (_next_alloc - _memory_start > _memory_length)
        kpanic("@kmalloc - Out of memory");

    return (((char *) _next_alloc) - size);
}
struct buf * blk_read(blk_nr_t blk_nr)
{
    struct buf *bufp;

    bufp = blk_get(blk_nr);
    if (!bufp)
        kpanic("blk_read(): Null buffer block pointer");

    if (!bufp->valid)
        ramfs_read(bufp);

    return bufp;
}
/**
 * C side of page fault handler.
 *
 * @param errcode error code pushed on the stack by the fault
 * @param cr2     value of the cr2 register (faulting address)
 * @param eip     instruction pointer at the time of the fault
 * @param ebp     frame pointer, used to produce a stack trace
 */
void handle_page_fault(u32 errcode, u32 cr2, u32 eip, u32 *ebp)
{
    u32 *cr3 = get_pagedir();

    kerror(ERR_MEDERR, "Page fault at 0x%08X --> 0x%08X (%s%s%s%s%s)", cr2,
           pgdir_get_page_entry(cr3, (void *)cr2) & 0xFFFFF000,
           ((errcode & 0x01) ? "present" : "non-present"),
           ((errcode & 0x02) ? ", write" : ", read"),
           ((errcode & 0x04) ? ", user-mode" : ", kernel-mode"),
           ((errcode & 0x08) ? ", modified reserved field" : ""),
           ((errcode & 0x10) ? ", instruction fetch" : ""));
    kerror(ERR_MEDERR, " -> EIP: %08X", eip);

    if(cr2 >= (u32)firstframe)
    {
        int frame = (cr2 - (u32)firstframe) / 0x1000;
        kerror(ERR_MEDERR, " -> On frame %08X(%d)", frame, frame);
    }
    else
        kerror(ERR_MEDERR, " -> Occurred in kernel-space, not in the page frames");

    kerror(ERR_MEDERR, " -> Page flags: 0x%03X", pgdir_get_page_entry(cr3, (void *)cr2) & 0xFFF);
    kerror(ERR_MEDERR, " -> Page Directory: 0x%08X", cr3);
    kerror(ERR_MEDERR, " -> Kernel pagedir: 0x%08X", kernel_cr3);

    if(tasking)
    {
        int pid = current_pid;
        int p = proc_by_pid(pid);
        if(p == -1)
        {
            kerror(ERR_MEDERR, "Failed to get process index from pid (%d)", pid);
            for(;;);
        }

        kerror(ERR_MEDERR, " -> Caused by process %d [%s]", pid, procs[p].name);

        if(((cr2 < procs[p].stack_beg) && (cr2 > procs[p].stack_end - STACK_SIZE)) || // Remember, the x86 stack is upside-down
           ((cr2 < procs[p].stack_beg + STACK_SIZE) && (cr2 > procs[p].stack_end)))
        {
            kerror(ERR_MEDERR, " -> Caused a stack overflow and is being dealt with");
        }

        if(ebp != NULL)
            stack_trace(5, ebp, eip);

        exit(1);
    }

    if(ebp != NULL)
        stack_trace(5, ebp, eip);

    kpanic("Page fault, multitasking not enabled, nothing to do to fix this.");
    for(;;);
}
void check_block(malloc_block_t* block)
{
    if ((block->magic != MALLOC_MAGIC) ||
        (BLOCK_END(block)->magic != MALLOC_MAGIC) ||
        (BLOCK_END(block)->backlink != block)) {
        kpanic("corrupted malloc data");
    }

    if ((block->flags & MALLOC_LAST) == 0) {
        malloc_block_t* next = BLOCK_NEXT(block);
        if ((next->magic != MALLOC_MAGIC) || (next->flags & MALLOC_FIRST)) {
            kpanic("corrupted malloc chain");
        }
    }

    if ((block->flags & MALLOC_FIRST) == 0) {
        malloc_block_t* prev = BLOCK_PREVIOUS(block);
        if ((prev->magic != MALLOC_MAGIC) || (prev->flags & MALLOC_LAST)) {
            kpanic("corrupted malloc chain");
        }
    }
}
void *kmalloc (u16 owner, u16 size)
{
    u16 real_size = get_real_size(size);

    free_mem -= real_size;
    if (size > free_mem) {
        kpanic ("Out of memory");
    }

    Kmalloc_header *chunk = MEMORY_START;
    while (true) {
        if ((void *) chunk >= MEMORY_END) {
            kpanic ("Out of memory");
        }
        if (chunk->user == MEMORY_OWNER_FREE && chunk->size == real_size) {
            chunk->user = owner;
            break;
        } else if (chunk->user == MEMORY_OWNER_FREE && chunk->size > real_size) {
            // We split the chunk in two
            chunk->data[size] = MEMORY_OWNER_FREE;
            chunk->data[size + 1] = chunk->size - real_size;
            // And now it's ours
            chunk->user = owner;
            chunk->size = size;
            break;
        }
        chunk = next(chunk);
    }

    for (u16 i = 0; i < size; ++i) {
        chunk->data[i] = 0;
    }

    return chunk->data;
}
void kernel_start(void)
{
    vga_init();

    if (magic != 0x2badb002) {
        kpanic("We are not booting off a Multiboot bootloader.");
    }

    kprintf("muskios 0.1 (git %s)\nCopyright 2013 Maxthon Chan\n\n", MUSKIOS_VERSION);
    kprintf("Multiboot 0x%x", magic);
}
/**
 * Add task to the correct queue.
 */
void queue_add_by_state(struct task_struct *task)
{
    // debug("Adding task: %s\n", task->cmdline);
    if(task->state == TASK_RUNNABLE) {
        rr_queue_add(&just_ran_queue, task);
    } else if(task->state == TASK_BLOCKED) {
        rr_queue_add(&block_queue, task);
    } else if(task->state == TASK_SLEEPING) {
        rr_queue_add(&sleep_queue, task);
    } else if(task->state == TASK_WAITING) {
        rr_queue_add(&wait_queue, task);
    } else {
        kpanic("Don't know which queue to put task into: state=%d\n", task->state);
    }
}
bool poll_port(uint16_t port, uint8_t pattern, uint16_t timeout)
{
    uint16_t counter = timeout;

    while((inb(port) & pattern) != pattern) {
        counter--;
        if(counter == 0) {
            kpanic("timeout on poll operation");
            break;
        }
    }

    return counter != 0;
}
static int mem_bd_write_block(BD_t * object, bdesc_t * block, uint32_t number)
{
    struct mem_info * info = (struct mem_info *) object;
    int r;

    assert(block->length == object->blocksize);
    assert(number < object->numblocks);

#if REVISION_TAIL_INPLACE
    r = revision_tail_prepare(block, object);
    if(r < 0)
    {
        kpanic("revision_tail_prepare gave: %d\n", r);
        return r;
    }
    memcpy(&info->blocks[number * object->blocksize], block->data, object->blocksize);
#else
    r = revision_tail_prepare(block, object, &info->blocks[number * object->blocksize]);
    if(r < 0)
    {
        kpanic("revision_tail_prepare gave: %d\n", r);
        return r;
    }
#endif

    r = revision_tail_acknowledge(block, object);
    if(r < 0)
    {
        kpanic("revision_tail_acknowledge gave error: %i\n", r);
        return r;
    }

    return 0;
}
/**
 * Remove task from the queue it resides in.
 */
void queue_remove_by_state(struct task_struct *task)
{
    if(task->state == TASK_RUNNABLE) {
        /* Could be in either queue */
        rr_queue_remove(&run_queue, task);
        rr_queue_remove(&just_ran_queue, task);
    } else if(task->state == TASK_BLOCKED) {
        rr_queue_remove(&block_queue, task);
    } else if(task->state == TASK_SLEEPING) {
        rr_queue_remove(&sleep_queue, task);
    } else if(task->state == TASK_WAITING) {
        rr_queue_remove(&wait_queue, task);
    } else {
        kpanic("Don't know which queue to remove task from: state=%d\n", task->state);
    }
}
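/*
 * Hedged usage sketch (illustrative, not from the original source): the two
 * queue helpers above are naturally used together when a task changes state,
 * so a state transition might look like this. task_set_state() is an assumed
 * helper name.
 */
static void task_set_state(struct task_struct *task, int new_state)
{
    /* Pull the task off the queue that matches its current state... */
    queue_remove_by_state(task);

    /* ...update the state... */
    task->state = new_state;

    /* ...and enqueue it on the queue that matches the new state. */
    queue_add_by_state(task);
}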
/**
 * @brief Get log_level from a buffer
 *
 * @return log_level or 0 if default log_level
 */
PUBLIC char get_code(const char *buffer)
{
    if ((buffer[0] == KERN_SOH_ASCII) && (buffer[1] != '\0'))
    {
        if (buffer[1] >= '0' && buffer[1] <= '7')
            return buffer[1];
        else
        {
            kpanic("log level error: invalid log level");
            return -1;
        }
    }

    return 0;
}
/*
 * Initializes the RAM disk device driver.
 */
PUBLIC void ramdisk_init(void)
{
    int err;

    kprintf("dev: initializing ramdisk device driver");

    /* ramdisk[0] = INITRD. */
    ramdisks[0].start = INITRD_VIRT;
    ramdisks[0].end = INITRD_VIRT + INITRD_SIZE;
    ramdisks[0].size = INITRD_SIZE;

    err = bdev_register(RAMDISK_MAJOR, &ramdisk_driver);

    /* Failed to register ramdisk device driver. */
    if (err)
        kpanic("failed to register RAM disk device driver. ");
}
void mm_setup(multiboot_info_t * mbi)
{
    /* 1 MB safe distance */
#define SAFE_DISTANCE (1024*1024)
#define MINIMUM_LEN (1024*1024)

    unsigned long int largest_len = 0;
    unsigned long int largest_addr;
    unsigned long int safe_addr = (unsigned long int)kmain + SAFE_DISTANCE + 1;

    multiboot_memory_map_t *mmap = (multiboot_memory_map_t *)mbi->mmap_addr;

    while(mmap < (multiboot_memory_map_t *)(mbi->mmap_addr + mbi->mmap_length)) {
        if (mmap->len > largest_len) {
            /* Heuristic to ensure a safe distance from kernel code. */
            if (mmap->addr > safe_addr) {
                /* Region lies entirely above the safe boundary: use all of it. */
                largest_len = mmap->len;
                largest_addr = mmap->addr;
            } else if ((safe_addr - mmap->addr) < mmap->len) {
                /* Region straddles the boundary: use only the part above it. */
                largest_len = mmap->len - ((unsigned long int)safe_addr - mmap->addr);
                largest_addr = safe_addr;
            }
        }
        mmap = (multiboot_memory_map_t*) ((unsigned int)mmap + mmap->size + sizeof(unsigned int));
    }

    if (largest_len < MINIMUM_LEN)
        kpanic("mm_setup - Could not find any suitable memory map");

    mm_init((void *)largest_addr, largest_len);
}
//As the Kernel End has been aligned, everything here is already aligned to a 4kb boundary!
void* kalloc_block(uint32_t size)
{
    //How many blocks are we allocating according to the memory size requested???
    uint32_t nblocks = ceil(size, BLOCK_SIZE);
    block_t* block = head;

    //Whoa... Something's wrong..
    if(head == NULL) {
        kpanic("memmmngr: Head is NULL", NULL);
    }

    while(block != NULL) {
        if(block->next == NULL) {
            //This is a bit of a clusterfuck...
            block->next = (block + ((nblocks * BLOCK_SIZE) / sizeof(block_t))) + sizeof(block_t); //This ONLY works because sizeof(block_t) = 0x10!! Fixme please!
            block->next->prev = block;
            block->prev->next = block;
            break;
        }
        if(block->next != NULL || block->flags & BLOCK_USED || block->flags & BLOCK_RESVD)
            block = (block + ((nblocks * BLOCK_SIZE) / sizeof(block_t))) + sizeof(block_t);
        if(block >= blocks_avail)
            //Looks like we're at the end of the block list, we should really be doing some
            //defragging here, but let's just return NULL for simplicity's sake now.
            return NULL;
    }

    block->size = nblocks * BLOCK_SIZE; //How many bytes does this block take up??
    block->flags |= BLOCK_USED;

    //kprintc("mem: ", 0x0A);
    //kprintf("Allocating one block of size %dkb\n", block->size);
    //kprintf("This block is at 0x%p\n", block);
    //kprintf("The previous block is at 0x%p\n", block->prev);
    //kprintf("The block flags are %p\n", block->flags);

    return (void*)block;
}
void* kmalloc(size_t payload)
{
    malloc_block_t* block;
    malloc_block_t* ptr;

    kdebug("< kmalloc %d", payload);

    // best-fit search: walk the chain for the smallest free block that fits
    block = 0;
    ptr = first_block;
    while (1) {
        check_block(ptr);
        // free and big enough ?
        if (((ptr->flags & MALLOC_USED) == 0) && (PAYLOAD_SIZE(ptr) >= payload)) {
            // better than the current fit (if any) ?
            if ((block == 0) || (ptr->size < block->size)) {
                block = ptr;
            }
        }
        if (ptr->flags & MALLOC_LAST) {
            break;
        }
        ptr = BLOCK_NEXT(ptr);
    }

    if (block == 0) {
        kpanic("could not allocate %d", payload);
        return 0;
    }

    int split_block = (block->size > (payload + BLOCK_OVERHEAD));
    if (split_block) {
        int last = block->flags & MALLOC_LAST;
        size_t size = payload + BLOCK_OVERHEAD;
        size_t rem_size = block->size - size;
        setup_block(block, (block->flags & ~MALLOC_LAST) | MALLOC_USED, size);
        setup_block(BLOCK_NEXT(block), last, rem_size);
    } else {
        block->flags |= MALLOC_USED;
    }

    kmalloc_print();
    kdebug(">");
    return (void*)block->data;
}
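/*
 * Hedged sketch (an assumption, not the project's actual definitions): one
 * block layout that would be consistent with check_block() and kmalloc()
 * above. Each block carries a header with a magic value, flags and total
 * size, and a footer holding a second magic plus a backlink to the header,
 * which is what check_block() verifies at both ends. All type and macro
 * definitions below are illustrative.
 */
#include <stddef.h>
#include <stdint.h>

typedef struct malloc_block {
    uint32_t magic;                /* MALLOC_MAGIC, checked at the header */
    uint32_t flags;                /* MALLOC_USED | MALLOC_FIRST | MALLOC_LAST */
    size_t   size;                 /* total block size: header + payload + footer */
    uint8_t  data[];               /* payload returned to the caller */
} malloc_block_t;

typedef struct malloc_footer {
    uint32_t magic;                /* MALLOC_MAGIC, checked at the tail */
    struct malloc_block *backlink; /* points back at the owning header */
} malloc_footer_t;

/* Under this layout, the helpers used above could reduce to: */
#define BLOCK_OVERHEAD     (sizeof(malloc_block_t) + sizeof(malloc_footer_t))
#define PAYLOAD_SIZE(b)    ((b)->size - BLOCK_OVERHEAD)
#define BLOCK_END(b)       ((malloc_footer_t *)((uint8_t *)(b) + (b)->size - sizeof(malloc_footer_t)))
#define BLOCK_NEXT(b)      ((malloc_block_t *)((uint8_t *)(b) + (b)->size))
#define BLOCK_PREVIOUS(b)  (((malloc_footer_t *)(b))[-1].backlink) /* footer of the block just before b */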