/**
 * Set up the disk driver for one YAMS disk device: allocates the
 * device, generic-block-device and driver-internal structures, wires
 * up the block device operations and registers the disk interrupt
 * handler.
 *
 * @param desc Pointer to the YAMS IO device descriptor of the disk
 *
 * @return Pointer to the device structure of the disk
 */
device_t *disk_init(io_descriptor_t *desc)
{
    device_t *device;
    gbd_t *block_dev;
    disk_real_device_t *internal;
    uint32_t mask;

    device    = (device_t*)stalloc(sizeof(device_t));
    block_dev = (gbd_t*)stalloc(sizeof(gbd_t));
    internal  = (disk_real_device_t*)stalloc(sizeof(disk_real_device_t));
    if (device == NULL || block_dev == NULL || internal == NULL)
        KERNEL_PANIC("Could not allocate memory for disk driver.");

    /* Cross-link the generic and real device structures. */
    device->generic_device = block_dev;
    device->real_device    = internal;
    device->descriptor     = desc;
    device->io_address     = desc->io_area_base;
    device->type           = desc->type;

    /* Generic block device interface. */
    block_dev->device       = device;
    block_dev->read_block   = disk_read_block;
    block_dev->write_block  = disk_write_block;
    block_dev->block_size   = disk_block_size;
    block_dev->total_blocks = disk_total_blocks;

    /* Driver-internal state: nothing queued, nothing in service. */
    spinlock_reset(&internal->slock);
    internal->request_queue  = NULL;
    internal->request_served = NULL;

    /* Device IRQ lines are offset by 10 bits in the interrupt mask. */
    mask = 1 << (desc->irq + 10);
    interrupt_register(mask, disk_interrupt_handle, device);

    return device;
}
/**
 * Allocate and initialize one semaphore from the static semaphore
 * table.
 *
 * @param value Initial counter value; must be non-negative
 *              (asserted).
 *
 * @return Pointer to the initialized semaphore, or NULL if the table
 *         has no free entries.
 */
semaphore_t *semaphore_create(int value)
{
    interrupt_status_t intr_status;
    /* Rotating search start so slots are handed out round-robin
       across calls. */
    static int next = 0;
    int i;
    int sem_id;

    KERNEL_ASSERT(value >= 0);

    intr_status = _interrupt_disable();
    spinlock_acquire(&semaphore_table_slock);

    /* Find free semaphore from semaphore table. */
    for(i = 0; i < CONFIG_MAX_SEMAPHORES; i++) {
        sem_id = next;
        next = (next + 1) % CONFIG_MAX_SEMAPHORES;
        if (semaphore_table[sem_id].creator == -1) {
            /* Claiming the slot here, under the table lock, is what
               makes the initialization below safe to do after the
               lock is released. */
            semaphore_table[sem_id].creator = thread_get_current_thread();
            break;
        }
    }

    spinlock_release(&semaphore_table_slock);
    _interrupt_set_state(intr_status);

    if (i == CONFIG_MAX_SEMAPHORES) {
        /* Semaphore table does not have any free semaphores;
           creation fails. */
        return NULL;
    }

    /* Safe without the table lock: this slot was claimed above and
       no other thread will hand it out until creator is reset. */
    semaphore_table[sem_id].value = value;
    spinlock_reset(&semaphore_table[sem_id].slock);

    return &semaphore_table[sem_id];
}
/** * Physical memory initialization. Finds out number of physical pages and * number of staticly reserved physical pages. Marks reserved pages * reserved in physmem_free_pages. */ void physmem_init(void *bootinfo) { int num_res_pages; int i; /* We dont use this */ bootinfo = bootinfo; physmem_num_pages = physmem_get_size(); physmem_free_pages = (uint32_t *)stalloc(bitmap_sizeof(physmem_num_pages)); bitmap_init(physmem_free_pages, physmem_num_pages); /* Note that number of reserved pages must be get after we have (staticly) reserved memory for bitmap. */ num_res_pages = physmem_get_reserved_size(); physmem_num_free_pages = physmem_num_pages - num_res_pages; physmem_static_end = num_res_pages; for (i = 0; i < num_res_pages; i++) bitmap_set(physmem_free_pages, i, 1); spinlock_reset(&physmem_slock); kprintf("Physmem: Found %d pages of size %d\n", physmem_num_pages, PAGE_SIZE); kprintf("Physmem: Static allocation for kernel: %d pages\n", num_res_pages); }
/**
 * Initialize the process table and its spinlock.  Every slot is
 * reset to its initial (free) state via process_reset().
 *
 * BUG FIX: the loop previously used `i <= PROCESS_MAX_PROCESSES`,
 * which resets one slot past the end of a table with
 * PROCESS_MAX_PROCESSES entries.  NOTE(review): if process_table is
 * actually declared with PROCESS_MAX_PROCESSES + 1 entries, revert
 * this -- confirm against the table definition.
 */
void process_init()
{
    int i;

    spinlock_reset(&process_table_slock);

    for (i = 0; i < PROCESS_MAX_PROCESSES; ++i)
        process_reset(i);
}
/**
 * Prepare the semaphore table for use: reset the table spinlock and
 * mark every entry unowned (creator == -1 denotes a free slot).
 */
void semaphore_init(void)
{
    int slot;

    spinlock_reset(&semaphore_table_slock);

    for (slot = 0; slot < CONFIG_MAX_SEMAPHORES; slot++)
        semaphore_table[slot].creator = -1;
}
/**
 * Initialize the process table: reset the table spinlock and mark
 * every slot free with an empty executable name.
 *
 * BUG FIX: the loop bound was `i <= MAX_PROCESSES`, writing one
 * entry past the end of a MAX_PROCESSES-sized table.  NOTE(review):
 * the other process_init variant in this source uses `<` with the
 * same constant, supporting this fix -- confirm against the
 * process_table declaration.
 */
void process_init()
{
    int i;

    spinlock_reset(&process_table_slock);

    for (i = 0; i < MAX_PROCESSES; i++) {
        process_table[i].state = PROCESS_FREE;
        process_table[i].executable[0] = 0;
    }
}
/** * Initializes the process table for use. */ void process_init() { // Initialize our spinlock. spinlock_reset(&process_table_slock); // Mark all process slots as available. int i; for (i = 0; i < MAX_PROCESSES; i++) { process_table[i].state = PROCESS_SLOT_AVAILABLE; } }
/**
 * Physical memory manager initialization from multiboot information.
 * Computes the total memory size, allocates the page bitmap, marks
 * all memory as used, frees the regions the multiboot memory map
 * reports as available, and finally re-reserves every block below
 * the static allocation watermark.
 *
 * NOTE(review): memory_high/memory_low appear to be in KiB (the
 * `* 1024` converts to bytes) -- confirm against the multiboot
 * header definition.
 *
 * @param boot_info Pointer to the multiboot information structure.
 */
void physmem_init(void *boot_info)
{
    multiboot_info_t *mb_info = (multiboot_info_t*)boot_info;
    uint64_t *mem_ptr = (uint64_t*)(uint64_t)mb_info->memory_map_addr;
    uint64_t Itr = 0, last_address = 0;

    /* Compute memory size and bitmap bookkeeping. */
    highest_page = 0;
    memory_size = mb_info->memory_high;
    memory_size += mb_info->memory_low;
    total_blocks = (memory_size * 1024) / PAGE_SIZE;
    used_blocks = total_blocks;
    bitmap_size = total_blocks / PMM_BLOCKS_PER_BYTE;
    _mem_bitmap = (uint64_t*)stalloc(bitmap_size);
    physmem_lock = (spinlock_t*)stalloc(sizeof(spinlock_t));
    spinlock_reset(physmem_lock);

    /* Set all memory as used; the memory map below frees the
       available regions. */
    memoryset(_mem_bitmap, 0xF, bitmap_size);

    kprintf("Memory size: %u Kb\n", (uint32_t)memory_size);

    /* Walk the multiboot memory map region by region. */
    for(Itr = (uint64_t)mem_ptr;
        Itr < ((uint64_t)mem_ptr + mb_info->memory_map_length); )
    {
        mem_region_t *mem_region = (mem_region_t*)Itr;

        /* Only regions explicitly typed as free are released. */
        if(mem_region->type == MEMTYPE_FREE)
            physmem_freeregion(mem_region->base_address,
                               mem_region->length);

        /* Advance by one structure.  NOTE(review): this assumes
           fixed-size map entries; multiboot maps can carry a
           per-entry size field -- verify mem_region_t matches the
           boot loader's layout. */
        Itr += sizeof(mem_region_t);
    }

    /* Mark all memory up to the static allocation point as used:
       stalloc(1) yields the current watermark, then blocks are
       allocated one by one until the allocator passes it. */
    last_address = (physaddr_t)stalloc(1);
    stalloc_disable();
    for(Itr = physmem_allocblock(); Itr < last_address;)
        Itr = physmem_allocblock();

    kprintf("New memory allocation starts at 0x%xl\n", Itr);
}
/**
 * Initializes the interrupt driven tty driver.  Memory is reserved
 * for the data structures and the tty interrupt handler is
 * registered.
 *
 * @param desc Pointer to a YAMS device descriptor data structure.
 *
 * @return Pointer to tty's device_t structure.
 */
device_t *tty_init(io_descriptor_t *desc)
{
    device_t *device;
    gcd_t *char_dev;
    tty_real_device_t *internal;
    uint32_t mask;
    /* Counts initialized ttys across calls; the first one shares
       kprintf's spinlock (see below). */
    static int num_of_inits = 0;

    device = (device_t*)stalloc(sizeof(device_t));
    if (device == NULL)
        KERNEL_PANIC("Could not reserve memory for tty driver.");

    char_dev = (gcd_t*)stalloc(sizeof(gcd_t));
    if (char_dev == NULL)
        KERNEL_PANIC("Could not reserve memory for tty driver.");

    device->generic_device = char_dev;
    device->io_address = desc->io_area_base;
    device->type = desc->type;

    /* Generic character device interface. */
    char_dev->device = device;
    char_dev->write = tty_write;
    char_dev->read = tty_read;

    internal = (tty_real_device_t*)stalloc(sizeof(tty_real_device_t));
    if (internal == NULL)
        KERNEL_PANIC("Could not reserve memory for tty driver.");
    device->real_device = internal;

    if (num_of_inits == 0) {
        /* The first tty driver shares the device with the polling
           TTY, so it must also share kprintf's spinlock -- that is
           the only proper way to access the polling tty. */
        internal->slock = &kprintf_slock;
    } else {
        internal->slock = (spinlock_t*)stalloc(sizeof(spinlock_t));
        if (internal->slock == NULL)
            KERNEL_PANIC("Could not reserve memory for tty driver spinlock.");
        spinlock_reset(internal->slock);
    }
    num_of_inits++;

    /* Both ring buffers start out empty. */
    internal->write_head = 0;
    internal->write_count = 0;
    internal->read_head = 0;
    internal->read_count = 0;

    /* Device IRQ lines are offset by 10 bits in the interrupt mask. */
    mask = 1 << (desc->irq + 10);
    interrupt_register(mask, tty_interrupt_handle, device);

    return device;
}
void usr_semaphore_init(void) { int i; interrupt_status_t intr_status; intr_status = _interrupt_disable(); spinlock_reset(&usr_semaphore_table_slock); spinlock_acquire(&usr_semaphore_table_slock); // Initiale all userland semaphores to NULL for(i = 0; i < MAX_USR_SEMAPHORES; i++) { usr_semaphore_table[i].sem = NULL; } spinlock_release(&usr_semaphore_table_slock); _interrupt_set_state(intr_status); }
/**
 * Wait for process `pid` to finish and reap its table slot.
 *
 * Fixes over the original:
 *  - The parent check used `=` (assignment) instead of a comparison,
 *    which clobbered parent_id and only rejected the call when the
 *    current pid happened to be 0.
 *  - Sleeping was guarded by a spinlock local to this stack frame,
 *    which excludes nobody; the shared process_table_slock is used
 *    instead, so the wait condition is checked under the same lock
 *    other process code holds.
 *  - The saved interrupt state is restored with _interrupt_set_state
 *    instead of unconditionally enabling interrupts.
 *  - The joined process' retval is read (before its slot is freed)
 *    rather than the caller's own retval field.  NOTE(review):
 *    confirm process_finish stores the exit value in the finishing
 *    process' own table entry.
 *
 * @param pid Process to join; the caller must be its parent.
 *
 * @return The joined process' retval, or PROCESS_ILLEGAL_JOIN if the
 *         caller is not the parent of `pid`.
 */
int process_join(process_id_t pid)
{
    interrupt_status_t intr_status;
    int retval;

    /* Only the parent of `pid` may join it. */
    if (process_table[pid].parent_id != process_get_current_process())
        return PROCESS_ILLEGAL_JOIN;

    intr_status = _interrupt_disable();
    spinlock_acquire(&process_table_slock);

    process_table[process_get_current_process()].state = WAITING;

    /* Standard sleep-queue pattern: queue up, drop the lock, switch
       away, then re-check the condition with the lock re-held. */
    while (process_table[pid].state != ZOMBIE) {
        sleepq_add(&process_table[pid]);
        spinlock_release(&process_table_slock);
        thread_switch();
        spinlock_acquire(&process_table_slock);
    }

    /* Grab the exit value before recycling the slot. */
    retval = process_table[pid].retval;
    process_table[pid].state = FREE;

    process_table[process_get_current_process()].state = RUNNING;

    spinlock_release(&process_table_slock);
    _interrupt_set_state(intr_status);

    return retval;
}
/**
 * Initializes a CPU status device.  These devices are currently used
 * for detecting the total number of CPUs in the system; they also
 * support a mechanism for generating interrupts on the CPU.
 *
 * @param desc Pointer to the YAMS IO device descriptor of the CPU
 *             status device
 *
 * @return Pointer to the device structure of the CPU status device
 */
device_t *cpustatus_init(io_descriptor_t *desc)
{
    device_t *device;
    cpu_real_device_t *internal;
    uint32_t mask;

    /* No generic device interface for this device type. */
    device = fill_device_t(desc, NULL);

    internal = kmalloc(sizeof(cpu_real_device_t));
    if (internal == NULL)
        KERNEL_PANIC("Could not reserve memory for CPU status device driver.");

    spinlock_reset(&internal->slock);
    device->real_device = internal;

    /* Device IRQ lines are offset by 10 bits in the interrupt mask. */
    mask = 1 << (desc->irq + 10);
    interrupt_register(mask, cpustatus_interrupt_handle, device);

    return device;
}
/*
 * Reset an already allocated lock structure if possible.
 * Returns 0 if successful and -1 otherwise (lock == NULL).
 *
 * BUG FIX: the original called spinlock_reset(&lock->spinlock)
 * BEFORE checking lock against NULL, so the NULL check could never
 * prevent the dereference.
 */
int lock_reset(lock_t *lock)
{
    interrupt_status_t intr_status;
    int rtn = -1;   /* assume error */

    intr_status = _interrupt_disable();

    /* Only touch the lock if it is actually allocated. */
    if (lock != NULL) {
        spinlock_reset(&lock->spinlock);
        lock->state = LOCK_OPEN;
        rtn = 0;
    }

    _interrupt_set_state(intr_status);
    return rtn;
}
/**
 * Initializes the threading system.  Sets every thread table entry
 * to THREAD_FREE with its context pointer placed at the top of that
 * thread's stack area, then sets up the idle thread.  Called only
 * once, before any threads are created.
 */
void thread_table_init(void)
{
    int i;

    /* Thread table entry _must_ be 64 bytes long, because the
       context switching code in kernel/cswitch.S expects that.
       Let's make sure it is.  If you hit this error, you have
       changed either context_t or thread_table_t, but have not
       changed padding in the end of thread_table_t definition in
       kernel/thread.h */
    KERNEL_ASSERT(sizeof(thread_table_t) == 64);

    spinlock_reset(&thread_table_slock);

    /* Init all entries to 'NULL' */
    for (i=0; i<CONFIG_MAX_THREADS; i++) {
        /* Set context pointers to the top of the stack: each thread
           gets a CONFIG_THREAD_STACKSIZE slice of thread_stack_areas,
           and the saved context lives at the very top of it. */
        thread_table[i].context = (context_t *)
            (thread_stack_areas + CONFIG_THREAD_STACKSIZE*i +
             CONFIG_THREAD_STACKSIZE - sizeof(context_t));
        thread_table[i].user_context = NULL;
        thread_table[i].state = THREAD_FREE;
        thread_table[i].sleeps_on = 0;
        thread_table[i].pagetable = NULL;
        thread_table[i].attribs = 0;
        thread_table[i].process_id = -1;
        thread_table[i].next = -1;
    }

    /* Setup Idle Thread: it runs the wait loop in slot
       IDLE_THREAD_TID (thread 0's stack slice) with interrupts
       enabled, and is marked READY so the scheduler can always pick
       it.  NOTE(review): the extra `- 4` in the stack pointer is
       presumably ABI-required slack -- confirm against cswitch.S. */
    _context_set_ip(thread_table[IDLE_THREAD_TID].context,
                    (virtaddr_t)_idle_thread_wait_loop);
    _context_set_sp(thread_table[IDLE_THREAD_TID].context,
                    (virtaddr_t) thread_stack_areas +
                    CONFIG_THREAD_STACKSIZE - 4 - sizeof(context_t));
    _context_enable_ints(thread_table[IDLE_THREAD_TID].context);
    thread_table[IDLE_THREAD_TID].state = THREAD_READY;
    /* The idle thread's previous context is itself. */
    thread_table[IDLE_THREAD_TID].context->prev_context =
        thread_table[IDLE_THREAD_TID].context;
}
/* Reset a condition variable by resetting its internal spinlock.
   Always reports success (returns 1). */
int condition_reset(cond_t *cond)
{
    spinlock_t *guard = &(cond->spinlock);

    spinlock_reset(guard);

    return 1;
}
/* Re-initialize an already allocated lock: reset its spinlock and
   mark it unlocked.  Always returns 0 (success). */
int lock_reset(lock_t *lock)
{
    lock->locked = LOCK_UNLOCKED;
    spinlock_reset(&lock->slock);

    return 0;
}