/**
 * @brief Allocate a chunk of memory of size <size> aligned to
 *        <alignment>.
 *
 * @param alignment The requested alignment. kmemalign will align your
 *        allocation to the lowest power of 2 greater than or equal to
 *        <alignment>.
 *
 * @param size The number of bytes to allocate
 *
 * @return
 *   NULL if the allocation failed
 *   otherwise return the address of the chunk
 */
void *kmemalign(size_t alignment, size_t size)
{
        unsigned long flags;
        unsigned shift;
        void *chunk;

#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wsign-compare"
        /* Find the alignment shift in bits.  XXX use proc_ops.h */
        for (shift = 0; (1 << shift) < alignment; shift++)
                ;
#pragma GCC diagnostic pop

        spin_lock_irq(&kmalloc_lock, &flags);

        /*
         * Allocate a chunk of LMM memory with the specified alignment shift
         * and an offset such that the memory block we return will be aligned.
         */
        chunk = lmm_alloc_aligned(&kheap_lmm, size, 0, shift, 0);
        if (!chunk)
                goto out;

        kheap_used += size;

 out:
        spin_unlock_irq(&kmalloc_lock, flags);
        return chunk;
}
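/*
 * Illustrative sketch (not part of the kernel): the shift loop above
 * computes ceil(log2(alignment)), so a non-power-of-2 request is rounded
 * up to the next power of 2.  This standalone program just prints the
 * shift the loop would pick for a few sample alignments.
 */
#include <stdio.h>
#include <stddef.h>

static unsigned align_shift(size_t alignment)
{
        unsigned shift;

        /* Same loop as in kmemalign()/_smemalign() above. */
        for (shift = 0; ((size_t)1 << shift) < alignment; shift++)
                ;
        return shift;
}

int main(void)
{
        size_t samples[] = { 1, 8, 48, 4096 };
        size_t i;

        for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                printf("alignment %4zu -> shift %2u (aligned to %zu)\n",
                       samples[i], align_shift(samples[i]),
                       (size_t)1 << align_shift(samples[i]));
        return 0;
}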
/*
 * Init the VM code.
 */
void oskit_uvm_redzone_init(void)
{
        oskit_addr_t addr;

        /*
         * We use a task gate to catch page faults, since a stack overflow
         * will try and dump more stuff on the stack.  This is the easiest
         * way to deal with it.
         */
        if ((addr = (oskit_addr_t)
             lmm_alloc_aligned(&malloc_lmm, STACKSIZE, 0, 12, 0)) == 0)
                panic("%s: Could not allocate stack\n", __FUNCTION__);

        task_tss.ss0  = KERNEL_DS;
        task_tss.esp0 = addr + STACKSIZE - sizeof(double);
        task_tss.esp  = task_tss.esp0;
        task_tss.ss   = KERNEL_DS;
        task_tss.ds   = KERNEL_DS;
        task_tss.es   = KERNEL_DS;
        task_tss.fs   = KERNEL_DS;
        task_tss.gs   = KERNEL_DS;
        task_tss.cs   = KERNEL_CS;
        task_tss.io_bit_map_offset = sizeof(task_tss);
        task_tss.eip  = (int) double_fault_handler;

        /* Make sure the task is started with interrupts disabled */
        osenv_intr_disable();
        task_tss.eflags = (int) get_eflags();
        osenv_intr_enable();

        /* Both TSSs have to know about the page tables */
        task_tss.cr3 = get_cr3();
        base_tss.cr3 = get_cr3();

        /* Initialize the base TSS descriptor. */
        fill_descriptor(&base_gdt[KERNEL_TRAP_TSS / 8],
                        kvtolin(&task_tss),
                        sizeof(task_tss) - 1,
                        ACC_PL_K|ACC_TSS|ACC_P, 0);

        /*
         * NOTE: The task switch will include an extra word on the stack,
         * pushed by the CPU.  The handler will need to be in assembly code
         * if we care about that value.  As it is, the handler routine
         * stack is going to be slightly messed up, but since the handler
         * calls panic, it is not a problem right now.
         */
        fill_gate(&base_idt[T_DOUBLE_FAULT], 0, KERNEL_TRAP_TSS,
                  ACC_TASK_GATE|ACC_P|ACC_PL_K, 0);

        base_idt_load();
        base_gdt_load();
}
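/*
 * Illustrative sketch (not OSKit code): on x86 the stack grows downward,
 * so esp0 above is set to the *top* of the freshly allocated region,
 * backed off by one word so the first push lands inside the allocation.
 * The STACKSIZE value here is a stand-in; OSKit defines its own.
 */
#include <stdio.h>
#include <stdlib.h>

#define STACKSIZE 4096  /* assumed for illustration only */

int main(void)
{
        char *stack = malloc(STACKSIZE);

        if (!stack)
                return 1;

        /* Same computation as task_tss.esp0 above. */
        char *esp0 = stack + STACKSIZE - sizeof(double);

        printf("stack base %p, initial esp0 %p, %zu usable bytes\n",
               (void *)stack, (void *)esp0, (size_t)(esp0 - stack));
        free(stack);
        return 0;
}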
void *_smemalign(size_t alignment, size_t size)
{
        unsigned shift;
        void *chunk;

        /* Find the alignment shift in bits.  XXX use proc_ops.h */
        for (shift = 0; (1 << shift) < alignment; shift++)
                ;

        /*
         * Allocate a chunk of LMM memory with the specified alignment shift
         * and an offset such that the memory block we return will be aligned.
         */
        if (!(chunk = lmm_alloc_aligned(&malloc_lmm, size, 0, shift, 0)))
                return NULL;

        return chunk;
}
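/*
 * Possible replacement for the shift loop flagged "XXX use proc_ops.h"
 * above -- a sketch only, assuming a GCC-style compiler (this code
 * already relies on GCC pragmas).  __builtin_clzl counts leading zero
 * bits, so the expression below computes ceil(log2(alignment)) without
 * looping.
 */
#include <limits.h>

static unsigned align_shift_clz(unsigned long alignment)
{
        if (alignment <= 1)
                return 0;
        /* Position of the highest set bit of (alignment - 1), plus one. */
        return CHAR_BIT * sizeof(unsigned long) -
               __builtin_clzl(alignment - 1);
}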