struct vfs_dirent_s* vfs_inode_lookup(struct vfs_inode_s *inode, char *name) { struct metafs_s *child; cpu_trace_write(current_cpu, vfs_inode_lookup); if( (((name[0] == '/') || (name[0] == '.')) && (name[1] == '\0')) || ((name[0] == '.') && (name[1] == '.')) ) { printk(ERROR, "%s: This should not happen\n", __FUNCTION__); while(1); } if((child = metafs_lookup(&inode->i_meta, name)) == NULL) { vfs_dmsg(1,"[ thread: %x || GID: %x ] metafs_lookup (inode %d, dirent %s) not found\n", current_thread, current_thread->lcpu->gid, inode->i_number, name); return NULL; } vfs_dmsg(1,"[ thread: %x || GID: %x ] metafs_lookup (inode %d, dirent %s) found\n", current_thread, current_thread->lcpu->gid, inode->i_number, name); return metafs_container(child, struct vfs_dirent_s, d_meta); }
/*
 * Periodic timer interrupt handler.
 *
 * Accounts one clock tick on the local CPU, then acknowledges the
 * interrupt on the timer device and re-arms it for the next period.
 */
void timer_irq_handler (struct irq_action_s *action)
{
	struct cpu_s    *local_cpu = current_cpu;
	struct device_s *tick_dev  = action->dev;

	cpu_trace_write(local_cpu, timer_irq_handler);

	/* Charge the elapsed tick to this CPU's time accounting. */
	cpu_clock(local_cpu);

	/* Ack the pending IRQ, then restart the device for one more period. */
	timer_reset_irq(tick_dev);
	timer_set_period(tick_dev, CPU_CLOCK_TICK);
	timer_run(tick_dev, 1);
}
//TODO //if no dirent avialable set this as a negatif dirent //This set dirent->inode! //dirent->d_parent = parent; error_t vfs_real_lookup(struct vfs_inode_s *parent, uint32_t flags, uint32_t isLast, struct vfs_dirent_s *dirent) { error_t err = 0; struct thread_s *this; this = current_thread; cpu_trace_write(current_cpu, vfs_real_lookup); err = parent->i_op->lookup(parent, dirent); vfs_dmsg(1,"[ thread: %x || GID: %x ] i_op->lookup (parent %d, dirent %s) : err %d\n", this, this->lcpu->gid, parent->i_number, dirent->d_name, err); if((err != VFS_NOT_FOUND) && (err != VFS_FOUND)) return err; if((err == VFS_FOUND) && (flags & VFS_O_EXCL) && (flags & VFS_O_CREATE) && (isLast)) if((err == VFS_FOUND) && (flags & VFS_O_EXCL) && (flags & VFS_O_CREATE) && (isLast)) return EEXIST; vfs_dmsg(1,"[ %x :: %d ] node %s, found ? %d, isLast ? %d, VFS_O_CREATE ? %d, VFS_FIFO? %d\n", this, this->lcpu->gid, dirent->d_name,err, isLast,flags & VFS_O_CREATE, dirent->d_attr & VFS_FIFO); if((err == VFS_NOT_FOUND) && (flags & VFS_O_CREATE) && (isLast)) { if((err=parent->i_op->create(parent, dirent))) return err; } return err; }
/*
 * Per-CPU idle thread entry point.
 *
 * Performs one-time per-CPU boot finalization (TLB flush on the cluster's
 * bootstrap CPU, birth-time stamping, stats reset, boot barrier), releases
 * the reserved boot pages on the designated CPU, spawns the per-CPU event
 * manager thread — plus, on the bootstrap CPU of the IO cluster, the kvfsd
 * thread — then enters the idle loop forever.
 *
 * @arg    kthread_args_t*: val[0]/val[1] delimit the reserved physical
 *         page range to free, val[2] selects the CPU that frees it
 *         (assumed from the usage below — TODO confirm against caller).
 *
 * Never returns in practice (infinite idle loop); the trailing
 * `return NULL` only satisfies the kthread signature.
 */
void* thread_idle(void *arg)
{
	extern uint_t __ktext_start;
	register uint_t id;
	register uint_t cpu_nr;
	register struct thread_s *this;
	register struct cpu_s *cpu;
	struct thread_s *thread;
	register struct page_s *reserved_pg;
	register uint_t reserved;
	kthread_args_t *args;
	bool_t isBSCPU;
	uint_t tm_now;
	uint_t count;
	error_t err;

	this    = current_thread;
	cpu     = current_cpu;
	id      = cpu->gid;
	cpu_nr  = arch_onln_cpu_nr();	/* NOTE(review): computed but unused below */
	args    = (kthread_args_t*) arg;
	isBSCPU = (cpu == cpu->cluster->bscpu);	/* bootstrap CPU of this cluster */

	cpu_trace_write(cpu, thread_idle_func);

	/* Bootstrap CPU drops the stale boot-time mapping of the kernel text. */
	if(isBSCPU)
		pmm_tlb_flush_vaddr((vma_t)&__ktext_start, PMM_UNKNOWN);

	cpu_set_state(cpu, CPU_ACTIVE);

	/* Stamp this thread's birth time from the real-time timer. */
	rt_timer_read(&tm_now);
	this->info.tm_born = tm_now;
	this->info.tm_tmp  = tm_now;
	//// Reset stats ///
	cpu_time_reset(cpu);
	////////////////////

	/* Rendezvous with all other CPUs before announcing ourselves. */
	mcs_barrier_wait(&boot_sync);
	printk(INFO, "INFO: Starting Thread Idle On Core %d\tOK\n", cpu->gid);

	/* Exactly one CPU (args->val[2]) returns the reserved boot pages
	 * [val[0], val[1]) to its cluster's page allocator. */
	if(isBSCPU && (id == args->val[2]))
	{
		for(reserved = args->val[0]; reserved < args->val[1]; reserved += PMM_PAGE_SIZE)
		{
			reserved_pg = ppm_ppn2page(&cpu->cluster->ppm, reserved >> PMM_PAGE_SHIFT);
			page_state_set(reserved_pg, PGINIT);
			ppm_free_pages(reserved_pg);
		}
	}

	/* Spawn this CPU's event manager thread and register it with the
	 * scheduler; the idle loop below wakes it when events are pending. */
	thread = kthread_create(this->task, &thread_event_manager, NULL, cpu->cluster->id, cpu->lid);
	if(thread == NULL)
		PANIC("Failed to create default events handler Thread for CPU %d\n", id);

	thread->task   = this->task;
	cpu->event_mgr = thread;
	wait_queue_init(&thread->info.wait_queue, "Events");

	err = sched_register(thread);
	assert(err == 0);
	sched_add_created(thread);

	if(isBSCPU)
	{
		dqdt_update();
#if 0
		/* Disabled: per-cluster manager thread (kept for reference). */
		thread = kthread_create(this->task, &cluster_manager_thread, cpu->cluster, cpu->cluster->id, cpu->lid);
		if(thread == NULL)
		{
			PANIC("Failed to create cluster manager thread, cid %d, cpu %d\n", cpu->cluster->id, cpu->gid);
		}
		thread->task = this->task;
		cpu->cluster->manager = thread;
		wait_queue_init(&thread->info.wait_queue, "Cluster-Mgr");
		err = sched_register(thread);
		assert(err == 0);
		sched_add_created(thread);
#endif
		/* The IO cluster's bootstrap CPU also hosts the VFS daemon. */
		if(clusters_tbl[cpu->cluster->id].flags & CLUSTER_IO)
		{
			thread = kthread_create(this->task, &kvfsd, NULL, cpu->cluster->id, cpu->lid);
			if(thread == NULL)
			{
				PANIC("Failed to create KVFSD on cluster %d, cpu %d\n", cpu->cluster->id, cpu->gid);
			}
			thread->task = this->task;
			wait_queue_init(&thread->info.wait_queue, "KVFSD");
			err = sched_register(thread);
			assert(err == 0);
			sched_add_created(thread);
			printk(INFO,"INFO: kvfsd has been created\n");
		}
	}

	cpu_set_state(cpu,CPU_IDLE);

	/* Idle loop: with IRQs masked, kick the event manager if any local or
	 * remote events are pending, idle-account, then re-enable IRQs and
	 * yield whenever runnable threads exist. */
	while (true)
	{
		cpu_disable_all_irq(NULL);

		if((event_is_pending(&cpu->re_listner)) || (event_is_pending(&cpu->le_listner)))
		{
			wakeup_one(&cpu->event_mgr->info.wait_queue, WAIT_ANY);
		}

		sched_idle(this);

		count = sched_runnable_count(&cpu->scheduler);

		cpu_enable_all_irq(NULL);

		if(count != 0)
			sched_yield(this);

		//arch_set_power_state(cpu, ARCH_PWR_IDLE);
	}

	return NULL;	/* unreachable; satisfies the kthread signature */
}