static char *test_linkedlist_insert_two() { linkedlist_t list; linkedlist_init(&list); int x = 1; linkedlist_insert(&list, &x); mu_assert_equals_int("Error: Invalid head value", x, *(int *)linkedlist_gethead(&list)); int y = 2; linkedlist_insert(&list, &y); mu_assert_equals_int("Error: Invalid head value", x, *(int *)linkedlist_gethead(&list)); mu_assert_equals_int("Error: Invalid tail value", y, *(int *)linkedlist_gettail(&list)); mu_assert_equals_int("Error: incorrect size.", 2, linkedlist_getsize(&list)); linkedlist_destroy(&list); return 0; }
/* Register a memory reclaimer callback on the global reclaimers list.
 * fn is invoked to reclaim memory; size presumably describes how much it
 * can free per call — confirm against the reclaim loop. */
void mm_reclaim_register(size_t (*fn)(void), size_t size)
{
	struct reclaimer *rec = kmalloc(sizeof(*rec));
	rec->fn = fn;
	rec->size = size;
	linkedlist_insert(&reclaimers, &rec->node, rec);
}
/* indicates that the inode needs to be written back to the filesystem */
void vfs_inode_set_dirty(struct inode *node)
{
	/* an inode that still needs its initial read has no data worth flushing */
	assert(!(node->flags & INODE_NEEDREAD));
	/* atomically set INODE_DIRTY; only the caller that transitions the flag
	 * from clear to set enqueues the inode, so it lands on the dirty list
	 * exactly once even under concurrent calls */
	if(!(atomic_fetch_or(&node->flags, INODE_DIRTY) & INODE_DIRTY)) {
		linkedlist_insert(ic_dirty, &node->dirty_item, node);
	}
}
/* Initialize the filesystem-manager state: the driver and filesystem
 * registries, the driver-name hash, and the always-present devfs entry. */
void fs_fsm_init(void)
{
	linkedlist_create(&fsdriverslist, LINKEDLIST_MUTEX);
	linkedlist_create(&fslist, LINKEDLIST_MUTEX);
	hash_create(&fsdrivershash, 0, 10);

	/* devfs exists from boot; register its single instance up front */
	devfs = fs_filesystem_create();
	linkedlist_insert(&fslist, &devfs->listnode, devfs);
}
/* Mount filesystem fs on the mount-point inode pt. "devfs" mounts are
 * redirected to the shared devfs instance instead of registering a new
 * filesystem. Always returns 0. */
int fs_mount(struct inode *pt, struct filesystem *fs)
{
	if(strcmp(fs->type, "devfs") == 0) {
		/* single global devfs — don't add a duplicate to fslist */
		fs = devfs;
	} else {
		linkedlist_insert(&fslist, &fs->listnode, fs);
	}
	vfs_inode_mount(pt, fs);
	return 0;
}
static char *test_linkedlist_insert_one() { linkedlist_t list; linkedlist_init(&list); int x = 5; linkedlist_insert(&list, &x); mu_assert_equals_int("Error: insert error.", x, *(int *)linkedlist_gethead(&list)); mu_assert_equals_int("Error: incorrect size.", 1, linkedlist_getsize(&list)); linkedlist_destroy(&list); return 0; }
static char *test_linkedlist_remove() { linkedlist_t list; linkedlist_init(&list); int x = 1, y = 2, z = 3; linkedlist_insert(&list, &x); linkedlist_insert(&list, &y); linkedlist_insert(&list, &z); mu_assert_equals_int("Error: head value", x, *(int *)linkedlist_gethead(&list)); mu_assert_equals_int("Error: tail value", z, *(int *)linkedlist_gettail(&list)); mu_assert_equals_int("Error: size value before remove", 3, linkedlist_getsize(&list)); linkedlist_remove(&list, &y); mu_assert_equals_int("Error: head value", x, *(int *)linkedlist_gethead(&list)); mu_assert_equals_int("Error: tail value", z, *(int *)linkedlist_gettail(&list)); mu_assert_equals_int("Error: size value after remove", 2, linkedlist_getsize(&list)); linkedlist_destroy(&list); return 0; }
struct inode *vfs_inode_create (void) { struct inode *node = kmalloc(sizeof(struct inode)); rwlock_create(&node->lock); rwlock_create(&node->metalock); mutex_create(&node->mappings_lock, 0); hash_create(&node->dirents, 0, 1000); node->flags = INODE_INUSE; linkedlist_insert(ic_inuse, &node->inuse_item, node); blocklist_create(&node->readblock, 0, "inode-read"); blocklist_create(&node->writeblock, 0, "inode-write"); return node; }
/* Insert (key, data) into hash table h, using the caller-provided elem
 * for bookkeeping (elem must live as long as the entry is in the table).
 * Returns 0 on success, -EEXIST if an equal key is already present.
 * The table-wide lock is held for the entire operation. */
int hash_insert(struct hash *h, const void *key, size_t keylen, struct hashelem *elem, void *data)
{
	__lock(h);
	size_t index = __hashfn(key, keylen, h->length);
	elem->ptr = data;
	elem->key = key;
	elem->keylen = keylen;
	if(h->table[index] == NULL) {
		/* lazy-init the buckets */
		/* bucket list can be lockless: it is protected by the table lock */
		h->table[index] = linkedlist_create(0, LINKEDLIST_LOCKLESS);
	} else {
		/* bucket exists — reject duplicate keys before linking */
		struct linkedentry *ent = linkedlist_find(h->table[index], __ll_check_exist, elem);
		if(ent) {
			__unlock(h);
			return -EEXIST;
		}
	}
	linkedlist_insert(h->table[index], &elem->entry, elem);
	h->count++;
	__unlock(h);
	return 0;
}
/* read in an inode from the inode cache, OR pull it in from the FS */
struct inode *vfs_icache_get(struct filesystem *fs, uint32_t num)
{
	/* create if it doesn't exist */
	struct inode *node;
	assert(fs);
	int newly_created = 0;
	/* cache key is the pair (filesystem id, inode number) */
	uint32_t key[2] = {fs->id, num};
	mutex_acquire(ic_lock);
	if((node = hash_lookup(icache, key, sizeof(key))) == NULL) {
		/* didn't find it. Okay, create one */
		node = vfs_inode_create();
		node->filesystem = fs;
		node->flags = INODE_NEEDREAD; /* contents are pulled from the FS below */
		node->id = num;
		node->key[0] = fs->id;
		node->key[1] = num;
		hash_insert(icache, node->key, sizeof(node->key), &node->hash_elem, node);
		newly_created = 1;
	}
	assert(node->filesystem == fs);
	/* take a reference for the caller */
	atomic_fetch_add(&node->count, 1);
	/* move to in-use */
	/* only the clear->set transition of INODE_INUSE does the move; a freshly
	 * created inode was never on the LRU, so it must not be removed from it */
	if(!(atomic_fetch_or(&node->flags, INODE_INUSE) & INODE_INUSE)) {
		atomic_fetch_add(&fs->usecount, 1);
		if(!newly_created) {
			queue_remove(ic_lru, &node->lru_item);
			linkedlist_insert(ic_inuse, &node->inuse_item, node);
		}
	}
	if(fs_inode_pull(node) != 0) {
		/* pull failed. Probably EIO. */
		vfs_icache_put(node);
		node = 0;
	}
	mutex_release(ic_lock);
	return node;
}
/* Register a filesystem driver under its name.
 * Returns 0 on success or -EEXIST if a driver with this name already
 * exists. The driver is linked onto fsdriverslist only after the hash
 * insertion succeeds: the original code linked it first and never rolled
 * back on failure, leaving a stale list entry for duplicate names. */
int fs_filesystem_register(struct fsdriver *fd)
{
	int ret = hash_insert(&fsdrivershash, fd->name, strlen(fd->name),
			&fd->hash_elem, fd);
	if(ret == 0) {
		linkedlist_insert(&fsdriverslist, &fd->listnode, fd);
	}
	return ret;
}
int LinkedlistExercise(int verbose, struct cfg *cfg, char *args[]) { int rate, i, n = 0, idx; char *str; struct linkedlist *l = linkedlist_new(EXERCISE_MED_COUNT, NULL); cfg = NULL; args[0] = NULL; if (l == NULL) { AMSG(""); return -1; } rate = EXERCISE_R0; for (i = 0; i < EXERCISE_MED_COUNT; i++) { if (i == EXERCISE_MED_P1) { rate = EXERCISE_R1; } else if (i == EXERCISE_MED_P2) { rate = EXERCISE_R2; } else if (i == EXERCISE_MED_P3) { rate = EXERCISE_R3; } if (rand() % 10 < rate) { idx = 0; str = malloc(8); sprintf(str, "%07d", n++); if (rand() % 5) { idx = linkedlist_size(l); if (idx) { idx = rand() % idx; } } if (linkedlist_insert(l, idx, str) == -1) { PMNO(errno); return -1; } tcase_printf(verbose, "INSERT: %s size now %d\n", str, linkedlist_size(l)); } else { if (linkedlist_is_empty(l)) { tcase_printf(verbose, "EMPTY\n"); } else { idx = rand() % linkedlist_size(l); str = linkedlist_get(l, idx); if (linkedlist_remove_data(l, str) == NULL) { PMNO(errno); return -1; } if ((idx % 10) == 0) { unsigned int count = 0; iter_t iter; linkedlist_iterate(l, &iter); while (linkedlist_next(l, &iter)) { count++; } if (count != linkedlist_size(l)) { PMSG("count=%u,linkedlist_size=%u\n", count, linkedlist_size(l)); return -1; } } if (str) { tcase_printf(verbose, "REMOVE: %s %d\n", str, linkedlist_size(l)); free(str); } else { PMSG("remove failure"); return -1; } } } } linkedlist_del(l, allocator_free, NULL); return 0; }
/* Append val at the end of the list.
 * NOTE(review): linkedlist_prepend() inserts at index 0, so insert
 * positions appear 0-based with valid range 0..length (length == append).
 * The original passed length+1, one past the last valid position —
 * confirm against linkedlist_insert()'s bounds handling. */
int linkedlist_append(p_linkedlist_t p_head, int val)
{
	return linkedlist_insert(p_head, linkedlist_length(p_head), val);
}
/* Insert val at the front of the list. */
int linkedlist_prepend(p_linkedlist_t p_head, int val)
{
	/* index 0 is the head position */
	return linkedlist_insert(p_head, 0, val);
}
void tm_init_multitasking(void) { printk(KERN_DEBUG, "[sched]: Starting multitasking system...\n"); sysgate_page = mm_physical_allocate(PAGE_SIZE, true); mm_physical_memcpy((void *)sysgate_page, (void *)signal_return_injector, MEMMAP_SYSGATE_ADDRESS_SIZE, PHYS_MEMCPY_MODE_DEST); process_table = hash_create(0, 0, 128); process_list = linkedlist_create(0, LINKEDLIST_MUTEX); mutex_create(&process_refs_lock, 0); mutex_create(&thread_refs_lock, 0); thread_table = hash_create(0, 0, 128); struct thread *thread = kmalloc(sizeof(struct thread)); struct process *proc = kernel_process = kmalloc(sizeof(struct process)); proc->refs = 2; thread->refs = 1; hash_insert(process_table, &proc->pid, sizeof(proc->pid), &proc->hash_elem, proc); hash_insert(thread_table, &thread->tid, sizeof(thread->tid), &thread->hash_elem, thread); linkedlist_insert(process_list, &proc->listnode, proc); valloc_create(&proc->mmf_valloc, MEMMAP_MMAP_BEGIN, MEMMAP_MMAP_END, PAGE_SIZE, 0); linkedlist_create(&proc->threadlist, 0); mutex_create(&proc->map_lock, 0); mutex_create(&proc->stacks_lock, 0); mutex_create(&proc->fdlock, 0); hash_create(&proc->files, HASH_LOCKLESS, 64); proc->magic = PROCESS_MAGIC; blocklist_create(&proc->waitlist, 0, "process-waitlist"); mutex_create(&proc->fdlock, 0); memcpy(&proc->vmm_context, &kernel_context, sizeof(kernel_context)); thread->process = proc; /* we have to do this early, so that the vmm system can use the lock... 
*/ thread->state = THREADSTATE_RUNNING; thread->magic = THREAD_MAGIC; workqueue_create(&thread->resume_work, 0); thread->kernel_stack = (addr_t)&initial_kernel_stack; spinlock_create(&thread->status_lock); primary_cpu->active_queue = tqueue_create(0, 0); primary_cpu->idle_thread = thread; primary_cpu->numtasks=1; ticker_create(&primary_cpu->ticker, 0); workqueue_create(&primary_cpu->work, 0); tm_thread_add_to_process(thread, proc); tm_thread_add_to_cpu(thread, primary_cpu); atomic_fetch_add_explicit(&running_processes, 1, memory_order_relaxed); atomic_fetch_add_explicit(&running_threads, 1, memory_order_relaxed); set_ksf(KSF_THREADING); *(struct thread **)(thread->kernel_stack) = thread; primary_cpu->flags |= CPU_RUNNING; #if CONFIG_MODULES loader_add_kernel_symbol(tm_thread_delay_sleep); loader_add_kernel_symbol(tm_thread_delay); loader_add_kernel_symbol(tm_timing_get_microseconds); loader_add_kernel_symbol(tm_thread_set_state); loader_add_kernel_symbol(tm_thread_exit); loader_add_kernel_symbol(tm_thread_poke); loader_add_kernel_symbol(tm_thread_block); loader_add_kernel_symbol(tm_thread_got_signal); loader_add_kernel_symbol(tm_thread_unblock); loader_add_kernel_symbol(tm_blocklist_wakeall); loader_add_kernel_symbol(kthread_create); loader_add_kernel_symbol(kthread_wait); loader_add_kernel_symbol(kthread_join); loader_add_kernel_symbol(kthread_kill); loader_add_kernel_symbol(tm_schedule); loader_add_kernel_symbol(arch_tm_get_current_thread); #endif }