Example No. 1
BD_t * wb_cache_bd(BD_t * disk, uint32_t blocks)
{
	uint32_t i;
	BD_t *bd;
	struct cache_info * info;
	
	/* reject a zero-slot cache: slot 0 is reserved as a sentinel below */
	if(!blocks)
		return NULL;
	info = malloc(sizeof(*info));
	if(!info)
		return NULL;
	bd = &info->my_bd;
	
	/* allocate an extra cache slot: hash maps return NULL on failure, so we
	 * can't have 0 be a valid index... besides, we need pointers to the
	 * head and tail of the LRU block queue */
	info->blocks = smalloc((blocks + 1) * sizeof(*info->blocks));
	if(!info->blocks)
	{
		free(info);
		return NULL;
	}
	/* set up the block cache pointers... this could all be in
	 * the loop, but it is unwound a bit for clarity here */
	/* slot 0 is the sentinel: free_index heads the free list, and its
	 * lru/mru pointers head and tail the (initially empty) LRU queue */
	info->blocks[0].free_index = 1;
	info->blocks[0].lru = &info->blocks[0];
	info->blocks[0].mru = &info->blocks[0];
	info->blocks[1].block = NULL;
	if(blocks > 1)
	{
		info->blocks[1].next_index = 2;
		info->blocks[1].next = &info->blocks[2];
		info->blocks[blocks].block = NULL;
		info->blocks[blocks].next_index = 0;
		info->blocks[blocks].next = NULL;
	}
	else
	{
		info->blocks[1].next_index = 0;
		info->blocks[1].next = NULL;
	}
	/* thread the remaining slots onto the free list */
	for(i = 2; i < blocks; i++)
	{
		info->blocks[i].block = NULL;
		info->blocks[i].next_index = i + 1;
		info->blocks[i].next = &info->blocks[i + 1];
	}
	
	info->block_map = hash_map_create();
	if(!info->block_map)
	{
		sfree(info->blocks, (blocks + 1) * sizeof(*info->blocks));
		free(info);
		return NULL;
	}
	
	BD_INIT(bd, wb_cache_bd);
	OBJMAGIC(bd) = WB_CACHE_MAGIC;
	
	info->bd = disk;
	info->size = blocks;
	bd->numblocks = disk->numblocks;
	bd->blocksize = disk->blocksize;
	bd->atomicsize = disk->atomicsize;
	
	/* we generally delay blocks, so our level goes up */
	bd->level = disk->level + 1;
	bd->graph_index = disk->graph_index + 1;
	if(bd->graph_index >= NBDINDEX)
	{
		DESTROY(bd);
		return NULL;
	}
	
	/* set up the callback */
	if(sched_register(wb_cache_bd_callback, bd, FLUSH_PERIOD) < 0)
	{
		DESTROY(bd);
		return NULL;
	}
	
	if(modman_add_anon_bd(bd, __FUNCTION__))
	{
		DESTROY(bd);
		return NULL;
	}
	if(modman_inc_bd(disk, bd, NULL) < 0)
	{
		modman_rem_bd(bd);
		DESTROY(bd);
		return NULL;
	}
	
	return bd;
}
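
The constructor returns NULL on any failure and unwinds whatever it had
allocated, so a caller only needs a single check. A minimal usage sketch,
assuming a hypothetical open_disk_bd() factory for the underlying BD_t
(not part of this example):

BD_t * make_cached_disk(void)
{
	BD_t * disk = open_disk_bd();   /* hypothetical raw-device factory */
	if(!disk)
		return NULL;
	/* wrap the disk in a 128-block write-back cache */
	return wb_cache_bd(disk, 128);  /* NULL on failure; disk is untouched */
}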
Example No. 2
void* kvfsd(void *arg)
{
	uint_t tm_now, cntr;
	struct task_s *task;
	struct thread_s *this;
	struct cpu_s *cpu;
	struct alarm_info_s info;
	struct event_s event;
	uint_t fs_type;
	error_t err;
	
	cpu_enable_all_irq(NULL);

	printk(INFO, "INFO: Starting KVFSD on CPU %d [ %d ]\n", cpu_get_id(), cpu_time_stamp());

	task    = current_task;
	/* sentinel value: no root filesystem type selected yet */
	fs_type = VFS_TYPES_NR;

#if CONFIG_ROOTFS_IS_EXT2
	fs_type = VFS_EXT2_TYPE;
#endif
 
#if CONFIG_ROOTFS_IS_VFAT

#if CONFIG_ROOTFS_IS_EXT2
#error More than one root fs has been selected
#endif

	fs_type = VFS_VFAT_TYPE;
#endif  /* CONFIG_ROOTFS_IS_VFAT */
  
	err = vfs_init(__sys_blk,
		       fs_type,
		       VFS_MAX_NODE_NUMBER,
		       VFS_MAX_FILE_NUMBER,
		       &task->vfs_root);

	task->vfs_cwd = task->vfs_root;

	printk(INFO, "INFO: Virtual File System (VFS) Is Ready\n");

	sysconf_init();

	if(err == 0)
	{
		if((err = task_load_init(task)))
		{
			printk(WARNING, "WARNING: failed to load user process, err %d [%u]\n", 
			       err,
			       cpu_time_stamp());
		}
	}

#if CONFIG_DEV_VERSION
	if(err != 0)
	{
		struct thread_s *thread;

		printk(INFO, "INFO: Creating kernel level terminal\n"); 

		thread = kthread_create(task, 
					&kMiniShelld, 
					NULL, 
					current_cluster->id,
					current_cpu->lid);
		/* kthread_create can fail; the other call sites assert or panic */
		assert(thread != NULL);
		thread->task = task;
		list_add_last(&task->th_root, &thread->rope);
		err = sched_register(thread);
		assert(err == 0);
		sched_add_created(thread);
	}
#endif

	this = current_thread;
	cpu  = current_cpu;

	event_set_senderId(&event, this);
	event_set_priority(&event, E_FUNC);
	event_set_handler(&event, &kvfsd_alarm_event_handler);
  
	info.event = &event;
	cntr       = 0;

	while(1)
	{
		alarm_wait(&info, 10);
		sched_sleep(this);
		tm_now = cpu_time_stamp();
		printk(INFO, "INFO: System Current TimeStamp %u\n", tm_now);
		sync_all_pages();

		if((cntr % 4) == 0)
			dqdt_print_summary(dqdt_root);

		cntr++;
	}
	return NULL;
}
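
The nested preprocessor test above is worth noting: selecting two root
filesystems at once is rejected at compile time instead of surfacing as a
boot failure. The same guard pattern in isolation (OPT_EXT2 and OPT_VFAT
are placeholder names, not the kernel's real config symbols):

/* sketch: reject mutually exclusive configuration options at compile time */
#if defined(OPT_EXT2) && defined(OPT_VFAT)
#error more than one root fs has been selected
#endif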
Example No. 3
error_t do_fork(fork_info_t *info)
{
	kmem_req_t req;
	struct dqdt_attr_s attr;
	struct thread_s *child_thread;
	struct task_s *child_task;
	struct page_s *page;
	uint_t cid;
	error_t err;
	sint_t order;
  
	fork_dmsg(1, "%s: cpu %d, started [%d]\n", 
		  __FUNCTION__, 
		  cpu_get_id(),
		  cpu_time_stamp());
  
	child_thread = NULL;
	child_task   = NULL;
	page         = NULL;
	cid           = info->cpu->cluster->id;
	attr.cid      = cid;
	attr.cpu_id   = 0;
	attr.cid_exec = info->cid_exec;

	dqdt_update_threads_number(cid, attr.cpu_id, 1);

	err = task_create(&child_task, &attr, CPU_USR_MODE);

	if(err) goto fail_task;

	fork_dmsg(1, "%s: cpu %d, ppid %d, task @0x%x, pid %d, task @0x%x [%d]\n",
		  __FUNCTION__, 
		  cpu_get_id(), 
		  info->this_task->pid, 
		  info->this_task,
		  child_task->pid,
		  child_task,
		  cpu_time_stamp());
  
	req.type  = KMEM_PAGE;
	req.size  = ARCH_THREAD_PAGE_ORDER;
	req.flags = AF_KERNEL | AF_REMOTE;
	req.ptr   = info->current_clstr; /* allocate the thread page on the current cluster */

	page = kmem_alloc(&req);

	if(page == NULL) 
		goto fail_mem;

	fork_dmsg(1, "%s: child pid will be %d on cluster %d, cpu %d [%d]\n", 
		  __FUNCTION__, 
		  child_task->pid, 
		  child_task->cpu->cluster->id, 
		  child_task->cpu->gid,
		  cpu_time_stamp());

	err = task_dup(child_task, info->this_task);
  
	if(err) goto fail_task_dup;

	signal_manager_destroy(child_task);
	signal_manager_init(child_task);
  
	fork_dmsg(1, "%s: parent task has been duplicated [%d]\n", 
		  __FUNCTION__, 
		  cpu_time_stamp());

	child_task->current_clstr = info->current_clstr;

	err = vmm_dup(&child_task->vmm, &info->this_task->vmm);

	if(err) goto fail_vmm_dup;
  
	fork_dmsg(1, "%s: parent vmm has been duplicated [%d]\n", 
		  __FUNCTION__, 
		  cpu_time_stamp());

	child_thread = (struct thread_s*) ppm_page2addr(page);

	/* Set the child page before calling thread_dup */
	child_thread->info.page = page;

	err = thread_dup(child_task,
			 child_thread,
			 info->cpu,
			 info->cpu->cluster,
			 info->this_thread);

	if(err) goto fail_thread_dup;

	/* Adjust child_thread attributes */
	if(info->flags & PT_FORK_USE_AFFINITY)
	{
		/* copy the caller's attribute bits, then strip any inherited
		 * bits the caller did not explicitly request */
		child_thread->info.attr.flags |= (info->flags & ~(PT_ATTR_LEGACY_MASK));

		if(!(info->flags & PT_ATTR_MEM_PRIO))
			child_thread->info.attr.flags &= ~(PT_ATTR_MEM_PRIO);

		if(!(info->flags & PT_ATTR_AUTO_MGRT))
			child_thread->info.attr.flags &= ~(PT_ATTR_AUTO_MGRT);

		if(!(info->flags & PT_ATTR_AUTO_NXTT))
			child_thread->info.attr.flags &= ~(PT_ATTR_AUTO_NXTT);
	}

	fork_dmsg(1, "%s: parent current thread has been duplicated, tid %x [%d]\n",
		  __FUNCTION__, 
		  child_thread, 
		  cpu_time_stamp());
	
	if(info->isPinned)
		thread_migration_disabled(child_thread);
	else
		thread_migration_enabled(child_thread);
	
	list_add_last(&child_task->th_root, &child_thread->rope);
	child_task->threads_count = 1;
	child_task->threads_nr++;
	child_task->state = TASK_READY;

	order = bitmap_ffs2(child_task->bitmap, 0, sizeof(child_task->bitmap));

	if(order == -1)
	{
		err = -1; /* err would otherwise still be 0 on this failure path */
		goto fail_order;
	}

	bitmap_clear(child_task->bitmap, order);
	child_thread->info.attr.key = order;
	child_thread->info.order = order;
	child_task->next_order = order + 1;
	child_task->max_order = order;
	child_task->uid = info->this_task->uid;
	child_task->parent = info->this_task->pid;

	err = sched_register(child_thread);
  
	assert(err == 0);
    
	cpu_context_set_tid(&child_thread->info.pss, (reg_t)child_thread);
	cpu_context_set_pmm(&child_thread->info.pss, &child_task->vmm.pmm);
	cpu_context_dup_finlize(&child_thread->pws, &child_thread->info.pss);
  
	child_thread->info.retval = 0;
	child_thread->info.errno = 0;

	info->child_thread = child_thread;
	info->child_task = child_task;
	return 0;

fail_order:
fail_thread_dup:
fail_vmm_dup:
fail_task_dup:
	printk(WARNING, "WARNING: %s: destroy child thread\n", __FUNCTION__);
	req.ptr = page;
	kmem_free(&req);

fail_mem:
fail_task:
	//FIXME
	//dqdt_update_threads_number(attr.cluster->levels_tbl[0], attr.cpu->lid, -1);
	dqdt_update_threads_number(attr.cid, attr.cpu_id, -1);

	printk(WARNING, "WARNING: %s: destroy child task\n", __FUNCTION__);

	if(child_task != NULL)
		task_destroy(child_task);

	printk(WARNING, "WARNING: %s: fork err %d [%d]\n", 
	       __FUNCTION__, 
	       err, 
	       cpu_time_stamp());

	return err;
}
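
do_fork acquires its resources in a fixed order (task, thread page,
duplicated task state, VMM, thread) and releases them through fall-through
labels in reverse order, so each failure path frees exactly what was
acquired before it. The idiom reduced to its core, as a standalone sketch
with hypothetical resources:

#include <stdlib.h>

int setup_pair(void **out_a, void **out_b)
{
	void *a, *b;

	a = malloc(16);
	if(!a) goto fail_a;

	b = malloc(32);
	if(!b) goto fail_b;

	*out_a = a;
	*out_b = b;
	return 0;

fail_b:
	free(a); /* each label frees what the steps above it acquired */
fail_a:
	return -1;
}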
Example No. 4
void* thread_idle(void *arg)
{
	extern uint_t __ktext_start;
	register uint_t id;
	register uint_t cpu_nr;
	register struct thread_s *this;
	register struct cpu_s *cpu;
	struct thread_s *thread;
	register struct page_s *reserved_pg;
	register uint_t reserved;
	kthread_args_t *args;
	bool_t isBSCPU;
	uint_t tm_now;
	uint_t count;
	error_t err;

	this    = current_thread;
	cpu     = current_cpu;
	id      = cpu->gid;
	cpu_nr  = arch_onln_cpu_nr();
	args    = (kthread_args_t*) arg;
	isBSCPU = (cpu == cpu->cluster->bscpu);

	cpu_trace_write(cpu, thread_idle_func);

	if(isBSCPU)
		pmm_tlb_flush_vaddr((vma_t)&__ktext_start, PMM_UNKNOWN);

	cpu_set_state(cpu, CPU_ACTIVE);
	rt_timer_read(&tm_now);
	this->info.tm_born = tm_now;      
	this->info.tm_tmp  = tm_now;
	/* reset per-CPU time statistics */
	cpu_time_reset(cpu);

	mcs_barrier_wait(&boot_sync);

	printk(INFO, "INFO: Starting Thread Idle On Core %d\tOK\n", cpu->gid);

	/* the designated bootstrap CPU (args->val[2]) returns the boot-reserved
	 * physical range [val[0], val[1]) to the page allocator */
	if(isBSCPU && (id == args->val[2]))
	{
		for(reserved = args->val[0]; reserved < args->val[1]; reserved += PMM_PAGE_SIZE)
		{
			reserved_pg = ppm_ppn2page(&cpu->cluster->ppm, reserved >> PMM_PAGE_SHIFT);
			page_state_set(reserved_pg, PGINIT);       
			ppm_free_pages(reserved_pg);
		}
	}

	thread = kthread_create(this->task, 
				&thread_event_manager, 
				NULL, 
				cpu->cluster->id, 
				cpu->lid);

	if(thread == NULL)
		PANIC("Failed to create default events handler Thread for CPU %d\n", id);

	thread->task   = this->task;
	cpu->event_mgr = thread;
	wait_queue_init(&thread->info.wait_queue, "Events");

	err = sched_register(thread);
	assert(err == 0);

	sched_add_created(thread);

	if(isBSCPU)
	{
		dqdt_update();
#if 0
		thread = kthread_create(this->task, 
					&cluster_manager_thread,
					cpu->cluster, 
					cpu->cluster->id, 
					cpu->lid);

		if(thread == NULL)
		{
			PANIC("Failed to create cluster manager thread, cid %d, cpu %d\n", 
			      cpu->cluster->id, 
			      cpu->gid);
		}

		thread->task          = this->task;
		cpu->cluster->manager = thread;
		wait_queue_init(&thread->info.wait_queue, "Cluster-Mgr");

		err = sched_register(thread);
		assert(err == 0);

		sched_add_created(thread);

#endif

		if(clusters_tbl[cpu->cluster->id].flags & CLUSTER_IO)
		{
			thread = kthread_create(this->task, 
						&kvfsd, 
						NULL, 
						cpu->cluster->id, 
						cpu->lid);
       
			if(thread == NULL)
			{
				PANIC("Failed to create KVFSD on cluster %d, cpu %d\n", 
				      cpu->cluster->id, 
				      cpu->gid);
			}

			thread->task  = this->task;
			wait_queue_init(&thread->info.wait_queue, "KVFSD");
			err           = sched_register(thread);
			assert(err == 0);
			sched_add_created(thread);
			printk(INFO,"INFO: kvfsd has been created\n");
		}
	}

	cpu_set_state(cpu, CPU_IDLE);

	while (true)
	{
		/* test for pending events with IRQs masked so none can slip in
		 * between the check and the wakeup */
		cpu_disable_all_irq(NULL);

		if(event_is_pending(&cpu->re_listner) || event_is_pending(&cpu->le_listner))
		{
			wakeup_one(&cpu->event_mgr->info.wait_queue, WAIT_ANY);
		}
 
		sched_idle(this);

		count = sched_runnable_count(&cpu->scheduler);

		cpu_enable_all_irq(NULL);

		if(count != 0)
			sched_yield(this);
     
		//arch_set_power_state(cpu, ARCH_PWR_IDLE);
	}

	return NULL;
}
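
The reserved-range loop above derives a page frame number from each
physical address with a single shift. A self-contained sketch of that
address arithmetic (PAGE_SHIFT and the range bounds are placeholders for
PMM_PAGE_SHIFT and args->val[0..1]):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12                 /* placeholder for PMM_PAGE_SHIFT */
#define PAGE_SIZE  (1u << PAGE_SHIFT) /* placeholder for PMM_PAGE_SIZE */

int main(void)
{
	uint32_t start = 0x00100000; /* stands in for args->val[0] */
	uint32_t end   = 0x00104000; /* stands in for args->val[1] */
	uint32_t pa;

	for(pa = start; pa < end; pa += PAGE_SIZE)
		printf("would free frame ppn %u\n", (unsigned)(pa >> PAGE_SHIFT));
	return 0;
}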