bool k_heap_init() { int i; k_memset(&memPool, 0, sizeof(MemPool_t)); /* find first block of phisical memory what we wish */ for(i = 0; i < k_phisical_memory_map_size; i++) { if(k_phisical_memory_map[i].type != MEMORY_USE_NORMAL) continue; if(k_phisical_memory_map[i].base == KERNEL_HEAP_BEGIN && k_phisical_memory_map[i].length >= KERNEL_HEAP_MIN_SIZE) { memPool.phisicalMemBeginPtr = memPool.phisicalMemPtr = (ptr_t)LOPART(k_phisical_memory_map[i].base); memPool.phisicalMemPtrMax = memPool.phisicalMemBeginPtr + LOPART(k_phisical_memory_map[i].length); break; } } /* if not found.. mean low memory - panic */ if(memPool.phisicalMemBeginPtr == NULL) return false; return true; }
bool k_vfs_mkdir(const char *path, const char *name) { node_t dirNode; k_memset(&dirNode, 0, sizeof(dirNode)); k_strcpy(dirNode.name, name); return k_vfs_mknode(path, &dirNode, FILE_IS_FOLDER); }
/*
 * Initialise the VFS root node as an empty folder named "/".
 * Always succeeds and returns true.
 */
bool k_vfs_init()
{
    k_memset(&vfs_root, 0, sizeof(fsnode_t));

    /* the name buffer was just zeroed, so "/" is NUL-terminated */
    vfs_root.node.name[0] = '/';
    vfs_root.flags = FILE_IS_FOLDER;

    tree_link_init(&vfs_root.link);
    return true;
}
/*
 * Initialise the kernel heap allocator state from a host allocation.
 *
 * Reserves KERNEL_HEAP_MIN_SIZE bytes via malloc() and records the
 * region bounds in the global memPool.
 *
 * Returns true on success, false when the allocation fails
 * (low memory — the caller is expected to panic).
 */
bool k_heap_init()
{
    k_memset(&memPool, 0, sizeof(MemPool_t));

    memPool.phisicalMemBeginPtr = memPool.phisicalMemPtr = malloc(KERNEL_HEAP_MIN_SIZE);

    /* Check the allocation BEFORE deriving the end pointer: the original
     * computed phisicalMemPtrMax from a possibly-NULL base first, and
     * NULL + n is undefined behaviour. */
    if (memPool.phisicalMemBeginPtr == NULL)
        return false;

    memPool.phisicalMemPtrMax = memPool.phisicalMemBeginPtr + KERNEL_HEAP_MIN_SIZE;
    return true;
}
/*
 * Fill *info with a snapshot of the kernel heap statistics held in the
 * global memPool: total size, base address, cached (bump-allocated)
 * bytes, and total bytes currently allocated across all slice orders.
 */
void get_memory_info(memInfo_t *info)
{
    int order;

    k_memset(info, 0, sizeof(memInfo_t));

    info->heapAddress = (size_t)memPool.phisicalMemBeginPtr;
    info->totalSize   = (size_t)(memPool.phisicalMemPtrMax - memPool.phisicalMemBeginPtr);
    info->heapCached  = (size_t)(memPool.phisicalMemPtr - memPool.phisicalMemBeginPtr);

    /* each order-i block is 2^i bytes; adding a zero count is a no-op,
     * so no guard is needed */
    for (order = 0; order < MEMORY_SLICES_MAX_COUNT; order++)
        info->memoryUsed += memPool.numAllocatedBlocks[order] * (1 << order);
}
/*
 * Build and load the Interrupt Descriptor Table.
 *
 * Zeroes the descriptor table, installs handlers for the 32 CPU
 * exception vectors (0-31) — each with code selector 0x08 and
 * type/attribute byte 0x8E (present, DPL 0, 32-bit interrupt gate) —
 * then passes the IDTR image to load_idt().
 *
 * NOTE(review): the limit is sizeof(idt_descriptor_t) * 255 - 1, which
 * describes 255 descriptors; a full x86 IDT has 256 entries, so this
 * looks off-by-one — confirm against the declared size of
 * idt_descriptors. The k_memset below also clears only `limit` bytes,
 * one byte short of even those 255 descriptors.
 */
void init_idt(void) {
    idt_address.base = (uint32_t) &idt_descriptors;
    idt_address.limit = sizeof (idt_descriptor_t) * 255 - 1;
    k_memset(&idt_descriptors, 0, idt_address.limit);
    /* exception vectors 0-31; all use the same kernel-code gate */
    idt_set_descriptor(0, (uint32_t) &isr0, 0x08, 0x08E);
    idt_set_descriptor(1, (uint32_t) &isr1, 0x08, 0x08E);
    idt_set_descriptor(2, (uint32_t) &isr2, 0x08, 0x08E);
    idt_set_descriptor(3, (uint32_t) &isr3, 0x08, 0x08E);
    idt_set_descriptor(4, (uint32_t) &isr4, 0x08, 0x08E);
    idt_set_descriptor(5, (uint32_t) &isr5, 0x08, 0x08E);
    idt_set_descriptor(6, (uint32_t) &isr6, 0x08, 0x08E);
    idt_set_descriptor(7, (uint32_t) &isr7, 0x08, 0x08E);
    idt_set_descriptor(8, (uint32_t) &isr8, 0x08, 0x08E);
    idt_set_descriptor(9, (uint32_t) &isr9, 0x08, 0x08E);
    idt_set_descriptor(10, (uint32_t) &isr10, 0x08, 0x08E);
    idt_set_descriptor(11, (uint32_t) &isr11, 0x08, 0x08E);
    idt_set_descriptor(12, (uint32_t) &isr12, 0x08, 0x08E);
    idt_set_descriptor(13, (uint32_t) &isr13, 0x08, 0x08E);
    idt_set_descriptor(14, (uint32_t) &isr14, 0x08, 0x08E);
    idt_set_descriptor(15, (uint32_t) &isr15, 0x08, 0x08E);
    idt_set_descriptor(16, (uint32_t) &isr16, 0x08, 0x08E);
    idt_set_descriptor(17, (uint32_t) &isr17, 0x08, 0x08E);
    idt_set_descriptor(18, (uint32_t) &isr18, 0x08, 0x08E);
    idt_set_descriptor(19, (uint32_t) &isr19, 0x08, 0x08E);
    idt_set_descriptor(20, (uint32_t) &isr20, 0x08, 0x08E);
    idt_set_descriptor(21, (uint32_t) &isr21, 0x08, 0x08E);
    idt_set_descriptor(22, (uint32_t) &isr22, 0x08, 0x08E);
    idt_set_descriptor(23, (uint32_t) &isr23, 0x08, 0x08E);
    idt_set_descriptor(24, (uint32_t) &isr24, 0x08, 0x08E);
    idt_set_descriptor(25, (uint32_t) &isr25, 0x08, 0x08E);
    idt_set_descriptor(26, (uint32_t) &isr26, 0x08, 0x08E);
    idt_set_descriptor(27, (uint32_t) &isr27, 0x08, 0x08E);
    idt_set_descriptor(28, (uint32_t) &isr28, 0x08, 0x08E);
    idt_set_descriptor(29, (uint32_t) &isr29, 0x08, 0x08E);
    idt_set_descriptor(30, (uint32_t) &isr30, 0x08, 0x08E);
    idt_set_descriptor(31, (uint32_t) &isr31, 0x08, 0x08E);
    load_idt((uint32_t) &idt_address);
}
/*
 * Create a new VFS node (copy of *node, with the given flags) under the
 * directory identified by `path`.
 *
 * Returns true on success; false when the path or node name is empty,
 * the destination does not exist or is not a folder, the target name
 * already exists, or allocation fails.
 */
bool k_vfs_mknode(const char *path, const node_t *node, uint32_t flags)
{
    fsnode_t *fsNode, *dstNode;
    char fullName[FILE_FULL_PATH] = { '\0' };

    if (!k_strlen(path) || !k_strlen(node->name))
        return false;

    /* Validate the destination BEFORE allocating: the original malloc'd
     * first and then freed on every validation failure, churning the
     * allocator for nothing. */
    if ((dstNode = k_vfs_find_node_by_full_path(path)) == NULL)
        return false;

    /* it must be a directory */
    if (!(dstNode->flags & FILE_IS_FOLDER))
        return false;

    /* build "<path>/<name>" and refuse to shadow an existing entry */
    k_strncat(fullName, path, FILE_FULL_PATH);
    if (path[k_strlen(path) - 1] != '/')
        k_strncat(fullName, "/", FILE_FULL_PATH);
    k_strncat(fullName, node->name, FILE_FULL_PATH);
    if (k_vfs_path_is_exist(fullName))
        return false;

    fsNode = k_malloc(sizeof(fsnode_t));
    if (!fsNode)
        return false;

    k_memset(fsNode, 0, sizeof(fsnode_t));
    tree_link_init(&fsNode->link);
    list_init(&fsNode->openFiles);
    fsNode->node = *node;
    fsNode->flags = flags;

    /* add to vfs tree */
    tree_add_link(&dstNode->link, &fsNode->link);
    return true;
}
uint32_t k_fopen(const char *path, uint32_t mode) { file_t *file = NULL; file = k_malloc(sizeof(file_t)); k_memset(file, 0, sizeof(file_t)); if(!file) return 0; if(mode & FILE_OPEN_IN_VFS) { /* get file desc in vfs */ if(!k_vfs_open_file(file, path, mode)) goto failed; } if(!file->open) goto failed; if(!file->open(path, mode, file)) goto failed; file->flags = mode; /* check next file descriptor is present */ /* if not - fd array is full, and relocation failed */ if(!fdcheck()) goto failed; /* setup next file descriptor */ fdt.fdarray[fdt.fdcounter] = file; file->fd = fdt.fdcounter; return fdt.fdcounter; failed: k_free(file); return 0; }
/*
 * Initialise a new execution context (task or fiber) on the supplied
 * stack so that it can be scheduled in via _Swap().
 *
 * Builds the initial frame at the aligned top ("base") of the stack:
 * the three entry-point parameters, the entry function pointer, an
 * initial EFLAGS image, and the return address _Swap() will pop into
 * EIP. Finally delegates remaining bookkeeping to _NewContextInternal().
 */
void _NewContext(
    char *pStackMem,      /* pointer to aligned stack memory */
    unsigned stackSize,   /* size of stack in bytes */
    _ContextEntry pEntry, /* context entry point function */
    void *parameter1,     /* first parameter to context entry point function */
    void *parameter2,     /* second parameter to context entry point function */
    void *parameter3,     /* third parameter to context entry point function */
    int priority,         /* context priority */
    unsigned options      /* context options: USE_FP, USE_SSE */
    )
{
    unsigned long *pInitialContext;

#ifdef CONFIG_INIT_STACKS
    /* fill the stack with a known pattern to aid stack-usage debugging */
    k_memset(pStackMem, 0xaa, stackSize);
#endif

    /* carve the context entry struct from the "base" of the stack */
    pInitialContext = (unsigned long *)STACK_ROUND_DOWN(pStackMem + stackSize);

    /*
     * Create an initial context on the stack expected by the _Swap()
     * primitive.
     * Given that both task and fiber contexts execute at privilege 0,
     * the setup for both contexts are equivalent.
     */

    /* push arguments required by _context_entry() */
    *--pInitialContext = (unsigned long)parameter3;
    *--pInitialContext = (unsigned long)parameter2;
    *--pInitialContext = (unsigned long)parameter1;
    *--pInitialContext = (unsigned long)pEntry;

    /* push initial EFLAGS; only modify IF and IOPL bits */
    *--pInitialContext = (EflagsGet() & ~EFLAGS_MASK) | EFLAGS_INITIAL;

#ifdef CONFIG_GDB_INFO
    /*
     * Arrange for the _ContextEntryWrapper() function to be called
     * to adjust the stack before _context_entry() is invoked.
     */
    *--pInitialContext = (unsigned long)_ContextEntryWrapper;
#else /* CONFIG_GDB_INFO */
    *--pInitialContext = (unsigned long)_context_entry;
#endif /* CONFIG_GDB_INFO */

    /*
     * note: stack area for edi, esi, ebx, ebp, and eax registers can be
     * left uninitialized, since _context_entry() doesn't care about the
     * values of these registers when it begins execution
     */

    /*
     * For kernel tasks and fibers the context control struct (CCS)
     * is located at the "low end" of memory set aside for the
     * context's stack
     */
    _NewContextInternal(pStackMem, stackSize, priority, options);
}