Example #1
static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
{
	struct task_struct *t;
	unsigned long flags;
	cputime_t utime, stime;

	memset((char *) r, 0, sizeof *r);
	utime = stime = cputime_zero;

	rcu_read_lock();
	if (!lock_task_sighand(p, &flags)) {
		rcu_read_unlock();
		return;
	}

	switch (who) {
		case RUSAGE_BOTH:
		case RUSAGE_CHILDREN:
			utime = p->signal->cutime;
			stime = p->signal->cstime;
			r->ru_nvcsw = p->signal->cnvcsw;
			r->ru_nivcsw = p->signal->cnivcsw;
			r->ru_minflt = p->signal->cmin_flt;
			r->ru_majflt = p->signal->cmaj_flt;
			r->ru_inblock = p->signal->cinblock;
			r->ru_oublock = p->signal->coublock;

			if (who == RUSAGE_CHILDREN)
				break;

		case RUSAGE_SELF:
			utime = cputime_add(utime, p->signal->utime);
			stime = cputime_add(stime, p->signal->stime);
			r->ru_nvcsw += p->signal->nvcsw;
			r->ru_nivcsw += p->signal->nivcsw;
			r->ru_minflt += p->signal->min_flt;
			r->ru_majflt += p->signal->maj_flt;
			r->ru_inblock += p->signal->inblock;
			r->ru_oublock += p->signal->oublock;
			t = p;
			do {
				utime = cputime_add(utime, t->utime);
				stime = cputime_add(stime, t->stime);
				r->ru_nvcsw += t->nvcsw;
				r->ru_nivcsw += t->nivcsw;
				r->ru_minflt += t->min_flt;
				r->ru_majflt += t->maj_flt;
				r->ru_inblock += task_io_get_inblock(t);
				r->ru_oublock += task_io_get_oublock(t);
				t = next_thread(t);
			} while (t != p);
			break;

		default:
			BUG();
	}

	unlock_task_sighand(p, &flags);
	rcu_read_unlock();

	cputime_to_timeval(utime, &r->ru_utime);
	cputime_to_timeval(stime, &r->ru_stime);
}
Example #2
void k_getrusage(struct task_struct *p, int who, struct rusage *r)
{
	struct task_struct *t;
	unsigned long flags;
	cputime_t utime, stime;

	memset((char *) r, 0, sizeof *r);

	if (unlikely(!p->signal))
		return;

	switch (who) {
		case RUSAGE_CHILDREN:
			spin_lock_irqsave(&p->sighand->siglock, flags);
			utime = p->signal->cutime;
			stime = p->signal->cstime;
			r->ru_nvcsw = p->signal->cnvcsw;
			r->ru_nivcsw = p->signal->cnivcsw;
			r->ru_minflt = p->signal->cmin_flt;
			r->ru_majflt = p->signal->cmaj_flt;
			spin_unlock_irqrestore(&p->sighand->siglock, flags);
			cputime_to_timeval(utime, &r->ru_utime);
			cputime_to_timeval(stime, &r->ru_stime);
			break;
		case RUSAGE_SELF:
			spin_lock_irqsave(&p->sighand->siglock, flags);
			utime = stime = cputime_zero;
			goto sum_group;
		case RUSAGE_BOTH:
			spin_lock_irqsave(&p->sighand->siglock, flags);
			utime = p->signal->cutime;
			stime = p->signal->cstime;
			r->ru_nvcsw = p->signal->cnvcsw;
			r->ru_nivcsw = p->signal->cnivcsw;
			r->ru_minflt = p->signal->cmin_flt;
			r->ru_majflt = p->signal->cmaj_flt;
		sum_group:
			utime = cputime_add(utime, p->signal->utime);
			stime = cputime_add(stime, p->signal->stime);
			r->ru_nvcsw += p->signal->nvcsw;
			r->ru_nivcsw += p->signal->nivcsw;
			r->ru_minflt += p->signal->min_flt;
			r->ru_majflt += p->signal->maj_flt;
			t = p;
			do {
				utime = cputime_add(utime, t->utime);
				stime = cputime_add(stime, t->stime);
				r->ru_nvcsw += t->nvcsw;
				r->ru_nivcsw += t->nivcsw;
				r->ru_minflt += t->min_flt;
				r->ru_majflt += t->maj_flt;
				t = next_thread(t);
			} while (t != p);
			spin_unlock_irqrestore(&p->sighand->siglock, flags);
			cputime_to_timeval(utime, &r->ru_utime);
			cputime_to_timeval(stime, &r->ru_stime);
			break;
		default:
			BUG();
	}
}
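Both k_getrusage() variants above are the kernel backend of the getrusage(2) syscall; the do/while loop over next_thread() is what folds every thread of the group into the RUSAGE_SELF totals. For orientation, a minimal user-space sketch of the call they serve (plain POSIX C, not taken from either project):

#include <stdio.h>
#include <sys/resource.h>
#include <sys/time.h>

int main(void)
{
	struct rusage ru;

	/* RUSAGE_SELF covers the whole thread group, i.e. the
	 * per-thread summation loop in k_getrusage() above. */
	if (getrusage(RUSAGE_SELF, &ru) != 0) {
		perror("getrusage");
		return 1;
	}
	printf("user %ld.%06lds  system %ld.%06lds  vcsw/ivcsw %ld/%ld\n",
	       (long)ru.ru_utime.tv_sec, (long)ru.ru_utime.tv_usec,
	       (long)ru.ru_stime.tv_sec, (long)ru.ru_stime.tv_usec,
	       ru.ru_nvcsw, ru.ru_nivcsw);
	return 0;
}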
Example #3
void htc_kernel_top(void)
{
	struct task_struct *p;
	int top_loading[NUM_BUSY_THREAD_CHECK], i;
	unsigned long user_time, system_time, io_time;
	unsigned long irq_time, idle_time, delta_time;
	ulong flags;
	struct task_cputime cputime;
	int dump_top_stack = 0;

	if (task_ptr_array == NULL ||
			curr_proc_delta == NULL ||
			prev_proc_stat == NULL)
		return;

	spin_lock_irqsave(&lock, flags);
	get_all_cpu_stat(&new_cpu_stat);

	/* calculate the cpu time of each process */
	for_each_process(p) {
		thread_group_cputime(p, &cputime);

		if (p->pid < MAX_PID) {
			curr_proc_delta[p->pid] =
				(cputime.utime + cputime.stime)
				- (prev_proc_stat[p->pid]);
			task_ptr_array[p->pid] = p;
		}
	}

	/* sorting to get the top cpu consumers */
	sorting(curr_proc_delta, top_loading);

	/* calculate the total delta time */
	user_time = (unsigned long)((new_cpu_stat.user + new_cpu_stat.nice)
			- (old_cpu_stat.user + old_cpu_stat.nice));
	system_time = (unsigned long)(new_cpu_stat.system - old_cpu_stat.system);
	io_time = (unsigned long)(new_cpu_stat.iowait - old_cpu_stat.iowait);
	irq_time = (unsigned long)((new_cpu_stat.irq + new_cpu_stat.softirq)
			- (old_cpu_stat.irq + old_cpu_stat.softirq));
	idle_time = (unsigned long)
	((new_cpu_stat.idle + new_cpu_stat.steal + new_cpu_stat.guest)
	 - (old_cpu_stat.idle + old_cpu_stat.steal + old_cpu_stat.guest));
	delta_time = user_time + system_time + io_time + irq_time + idle_time;

	/*
	 * Check if we need to dump the call stack of top CPU consumers
	 * If CPU usage keeps 100% for 90 secs
	 */
	if ((full_loading_counter >= 9) && (full_loading_counter % 3 == 0))
		 dump_top_stack = 1;

	/* print most time consuming processes */
	printk(KERN_INFO "[K] CPU Usage\tPID\t\tName\n");
	for (i = 0 ; i < NUM_BUSY_THREAD_CHECK ; i++) {
		printk(KERN_INFO "[K] %lu%%\t\t%d\t\t%s\t\t\t%d\n",
				curr_proc_delta[top_loading[i]] * 100 / delta_time,
				top_loading[i],
				task_ptr_array[top_loading[i]]->comm,
				curr_proc_delta[top_loading[i]]);
	}

	/* check if dump busy thread stack */
	if (dump_top_stack) {
	   struct task_struct *t;
	   for (i = 0 ; i < NUM_BUSY_THREAD_CHECK ; i++) {
		if (task_ptr_array[top_loading[i]] != NULL && task_ptr_array[top_loading[i]]->stime > 0) {
			t = task_ptr_array[top_loading[i]];
			/* dump all the thread stack of this process */
			do {
				printk(KERN_INFO "\n[K] ###pid:%d name:%s state:%lu ppid:%d stime:%lu utime:%lu\n",
				t->pid, t->comm, t->state, t->real_parent->pid, t->stime, t->utime);
				show_stack(t, t->stack);
				t = next_thread(t);
			} while (t != task_ptr_array[top_loading[i]]);
		}
	   }
	}
	/* save old values */
	for_each_process(p) {
		if (p->pid < MAX_PID) {
			thread_group_cputime(p, &cputime);
			prev_proc_stat[p->pid] = cputime.stime + cputime.utime;
		}
	}

	old_cpu_stat = new_cpu_stat;

	memset(curr_proc_delta, 0, sizeof(int) * MAX_PID);
	memset(task_ptr_array, 0, sizeof(struct task_struct *) * MAX_PID);	/* array of pointers, so clear pointer-sized slots */

	spin_unlock_irqrestore(&lock, flags);

}
Example #4
static inline int ltt_enumerate_process_states(void)
{
	struct task_struct *t = &init_task;
	struct task_struct *p = t;
	enum lttng_process_status status;
	enum lttng_thread_type type;
	enum lttng_execution_mode mode;
	enum lttng_execution_submode submode;
	
	do {
		mode = LTTNG_MODE_UNKNOWN;
		submode = LTTNG_UNKNOWN;

		read_lock(&tasklist_lock);
		if(t != &init_task) {
			atomic_dec(&t->usage);
			t = next_thread(t);
		}
		if(t == p) {
			t = p = next_task(t);
		}
		atomic_inc(&t->usage);
		read_unlock(&tasklist_lock);
		
		task_lock(t);
		
		if(t->exit_state == EXIT_ZOMBIE)
			status = LTTNG_ZOMBIE;
		else if(t->exit_state == EXIT_DEAD)
			status = LTTNG_DEAD;
		else if(t->state == TASK_RUNNING) {
			/* Is this a forked child that has not run yet? */
			if( list_empty(&t->run_list) )
				status = LTTNG_WAIT_FORK;
			else
				/* All tasks are considered as wait_cpu;
				 * the viewer will sort out if the task was
				 * really running at this time. */
				status = LTTNG_WAIT_CPU;
		}
		else if(t->state & 
			(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)) {
			/* Task is waiting for something to complete */
			status = LTTNG_WAIT;
		}
		else status = LTTNG_UNNAMED;
		submode = LTTNG_NONE;

		/* Verification of t->mm is to filter out kernel threads;
		 * Viewer will further filter out if a user-space thread was
		 * in syscall mode or not */
		if(t->mm) type = LTTNG_USER_THREAD;
		else type = LTTNG_KERNEL_THREAD;
			
		trace_statedump_enumerate_process_state(
				t->pid, t->parent->pid, t->comm,
				type, mode, submode, status, t->tgid);
		task_unlock(t);
	} while( t != &init_task );

	return 0;
}
Example #5
File: exit.c Project: nhanh0/hah
asmlinkage long sys_wait4(pid_t pid,unsigned int * stat_addr, int options, struct rusage * ru)
{
	int flag, retval;
	DECLARE_WAITQUEUE(wait, current);
	struct task_struct *tsk;

	if (options & ~(WNOHANG|WUNTRACED|__WNOTHREAD|__WCLONE|__WALL))
		return -EINVAL;

	add_wait_queue(&current->wait_chldexit,&wait);
repeat:
	flag = 0;
	current->state = TASK_INTERRUPTIBLE;
	read_lock(&tasklist_lock);
	tsk = current;
	do {
		struct task_struct *p;
	 	for (p = tsk->p_cptr ; p ; p = p->p_osptr) {
			if (pid>0) {
				if (p->pid != pid)
					continue;
			} else if (!pid) {
				if (p->pgrp != current->pgrp)
					continue;
			} else if (pid != -1) {
				if (p->pgrp != -pid)
					continue;
			}
			/* Wait for all children (clone and not) if __WALL is set;
			 * otherwise, wait for clone children *only* if __WCLONE is
			 * set; otherwise, wait for non-clone children *only*.  (Note:
			 * A "clone" child here is one that reports to its parent
			 * using a signal other than SIGCHLD.) */
			if (((p->exit_signal != SIGCHLD) ^ ((options & __WCLONE) != 0))
			    && !(options & __WALL))
				continue;
			flag = 1;
			switch (p->state) {
			case TASK_STOPPED:
				if (!p->exit_code)
					continue;
				if (!(options & WUNTRACED) && !(p->ptrace & PT_PTRACED))
					continue;
				read_unlock(&tasklist_lock);
				retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0; 
				if (!retval && stat_addr) 
					retval = put_user((p->exit_code << 8) | 0x7f, stat_addr);
				if (!retval) {
					p->exit_code = 0;
					retval = p->pid;
				}
				goto end_wait4;
			case TASK_ZOMBIE:
				current->times.tms_cutime += p->times.tms_utime + p->times.tms_cutime;
				current->times.tms_cstime += p->times.tms_stime + p->times.tms_cstime;
				read_unlock(&tasklist_lock);
				retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
				if (!retval && stat_addr)
					retval = put_user(p->exit_code, stat_addr);
				if (retval)
					goto end_wait4; 
				retval = p->pid;
				if (p->p_opptr != p->p_pptr) {
					write_lock_irq(&tasklist_lock);
					REMOVE_LINKS(p);
					p->p_pptr = p->p_opptr;
					SET_LINKS(p);
					do_notify_parent(p, SIGCHLD);
					write_unlock_irq(&tasklist_lock);
				} else
					release_task(p);
				goto end_wait4;
			default:
				continue;
			}
		}
		if (options & __WNOTHREAD)
			break;
		tsk = next_thread(tsk);
	} while (tsk != current);
	read_unlock(&tasklist_lock);
	if (flag) {
		retval = 0;
		if (options & WNOHANG)
			goto end_wait4;
		retval = -ERESTARTSYS;
		if (signal_pending(current))
			goto end_wait4;
		schedule();
		goto repeat;
	}
	retval = -ECHILD;
end_wait4:
	current->state = TASK_RUNNING;
	remove_wait_queue(&current->wait_chldexit,&wait);
	return retval;
}
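This sys_wait4() comes from an older (2.4-era) kernel tree, judging by TASK_ZOMBIE and the p_cptr/p_osptr sibling links; the next_thread(tsk) loop repeats the child scan for every thread of the caller unless __WNOTHREAD suppresses it. A minimal user-space sketch of the corresponding call (plain Linux/POSIX C, illustrative only):

#include <stdio.h>
#include <sys/resource.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t pid = fork();
	if (pid < 0) {
		perror("fork");
		return 1;
	}
	if (pid == 0)
		_exit(42);		/* child */

	int status;
	struct rusage ru;
	/* wait4() also hands back the child's rusage; the kernel fills
	 * it via getrusage(p, RUSAGE_BOTH, ru) as in the listing above. */
	if (wait4(pid, &status, 0, &ru) < 0) {
		perror("wait4");
		return 1;
	}
	if (WIFEXITED(status))
		printf("child %d exited with %d, user CPU %ld.%06lds\n",
		       (int)pid, WEXITSTATUS(status),
		       (long)ru.ru_utime.tv_sec, (long)ru.ru_utime.tv_usec);
	return 0;
}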
Example #6
int do_getitimer(int which, struct itimerval *value)
{
	struct task_struct *tsk = current;
	cputime_t cinterval, cval;

	switch (which) {
	case ITIMER_REAL:
		spin_lock_irq(&tsk->sighand->siglock);
		value->it_value = itimer_get_remtime(&tsk->signal->real_timer);
		value->it_interval =
			ktime_to_timeval(tsk->signal->it_real_incr);
		spin_unlock_irq(&tsk->sighand->siglock);
		break;
	case ITIMER_VIRTUAL:
		read_lock(&tasklist_lock);
		spin_lock_irq(&tsk->sighand->siglock);
		cval = tsk->signal->it_virt_expires;
		cinterval = tsk->signal->it_virt_incr;
		if (!cputime_eq(cval, cputime_zero)) {
			struct task_struct *t = tsk;
			cputime_t utime = tsk->signal->utime;
			do {
				utime = cputime_add(utime, t->utime);
				t = next_thread(t);
			} while (t != tsk);
			if (cputime_le(cval, utime)) { /* about to fire */
				cval = jiffies_to_cputime(1);
			} else {
				cval = cputime_sub(cval, utime);
			}
		}
		spin_unlock_irq(&tsk->sighand->siglock);
		read_unlock(&tasklist_lock);
		cputime_to_timeval(cval, &value->it_value);
		cputime_to_timeval(cinterval, &value->it_interval);
		break;
	case ITIMER_PROF:
		read_lock(&tasklist_lock);
		spin_lock_irq(&tsk->sighand->siglock);
		cval = tsk->signal->it_prof_expires;
		cinterval = tsk->signal->it_prof_incr;
		if (!cputime_eq(cval, cputime_zero)) {
			struct task_struct *t = tsk;
			cputime_t ptime = cputime_add(tsk->signal->utime,
						      tsk->signal->stime);
			do {
				ptime = cputime_add(ptime,
						    cputime_add(t->utime,
								t->stime));
				t = next_thread(t);
			} while (t != tsk);
			if (cputime_le(cval, ptime)) { /* about to fire */
				cval = jiffies_to_cputime(1);
			} else {
				cval = cputime_sub(cval, ptime);
			}
		}
		spin_unlock_irq(&tsk->sighand->siglock);
		read_unlock(&tasklist_lock);
		cputime_to_timeval(cval, &value->it_value);
		cputime_to_timeval(cinterval, &value->it_interval);
		break;
	default:
		return(-EINVAL);
	}
	return 0;
}
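do_getitimer() backs the getitimer(2) syscall; for ITIMER_VIRTUAL and ITIMER_PROF it walks the thread group with next_thread() to compare the armed expiry against the group's accumulated CPU time. A minimal user-space sketch (plain POSIX C; the timer values are illustrative):

#include <stdio.h>
#include <sys/time.h>

int main(void)
{
	struct itimerval it = {
		/* first expiry after 2s of process CPU time, then every 1s */
		.it_value    = { .tv_sec = 2, .tv_usec = 0 },
		.it_interval = { .tv_sec = 1, .tv_usec = 0 },
	};

	if (setitimer(ITIMER_PROF, &it, NULL) != 0) {
		perror("setitimer");
		return 1;
	}
	/* Reading the timer back lands in do_getitimer() above. */
	if (getitimer(ITIMER_PROF, &it) != 0) {
		perror("getitimer");
		return 1;
	}
	printf("remaining %ld.%06lds  interval %ld.%06lds\n",
	       (long)it.it_value.tv_sec, (long)it.it_value.tv_usec,
	       (long)it.it_interval.tv_sec, (long)it.it_interval.tv_usec);
	return 0;
}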
Example #7
void htc_kernel_top(void)
{
	struct task_struct *p;
	int top_loading[NUM_BUSY_THREAD_CHECK], i;
	unsigned long user_time, system_time, io_time;
	unsigned long irq_time, idle_time, delta_time;
	ulong flags;
	struct task_cputime cputime;
	int dump_top_stack = 0;

	if (task_ptr_array == NULL ||
			curr_proc_delta == NULL ||
			prev_proc_stat == NULL)
		return;

	spin_lock_irqsave(&lock, flags);
	get_all_cpu_stat(&new_cpu_stat);

	/* calculate the cpu time of each process */
	for_each_process(p) {
		thread_group_cputime(p, &cputime);

		if (p->pid < MAX_PID) {
			curr_proc_delta[p->pid] =
				(cputime.utime + cputime.stime)
				- (prev_proc_stat[p->pid]);
			task_ptr_array[p->pid] = p;
		}
	}

	/* sorting to get the top cpu consumers */
	sorting(curr_proc_delta, top_loading);

	/* calculate the total delta time */
	user_time = (unsigned long)((new_cpu_stat.cpustat[CPUTIME_USER] + new_cpu_stat.cpustat[CPUTIME_NICE])
			- (old_cpu_stat.cpustat[CPUTIME_USER] + old_cpu_stat.cpustat[CPUTIME_NICE]));
	system_time = (unsigned long)(new_cpu_stat.cpustat[CPUTIME_SYSTEM] - old_cpu_stat.cpustat[CPUTIME_SYSTEM]);
	io_time = (unsigned long)(new_cpu_stat.cpustat[CPUTIME_IOWAIT] - old_cpu_stat.cpustat[CPUTIME_IOWAIT]);
	irq_time = (unsigned long)((new_cpu_stat.cpustat[CPUTIME_IRQ] + new_cpu_stat.cpustat[CPUTIME_SOFTIRQ])
			- (old_cpu_stat.cpustat[CPUTIME_IRQ] + old_cpu_stat.cpustat[CPUTIME_SOFTIRQ]));
	idle_time = (unsigned long)
	((new_cpu_stat.cpustat[CPUTIME_IDLE] + new_cpu_stat.cpustat[CPUTIME_STEAL] + new_cpu_stat.cpustat[CPUTIME_GUEST])
	 - (old_cpu_stat.cpustat[CPUTIME_IDLE] + old_cpu_stat.cpustat[CPUTIME_STEAL] + old_cpu_stat.cpustat[CPUTIME_GUEST]));
	delta_time = user_time + system_time + io_time + irq_time + idle_time;

	/*
	 * Check if we need to dump the call stack of top CPU consumers
	 * If CPU usage keeps 100% for 90 secs
	 */
	if ((full_loading_counter >= 9) && (full_loading_counter % 3 == 0))
		 dump_top_stack = 1;

	/* print most time consuming processes */
	printk(KERN_INFO "[K] CPU Usage\t\tPID\t\tName\n");
	for (i = 0 ; i < NUM_BUSY_THREAD_CHECK ; i++) {
		printk(KERN_INFO "[K] %8lu%%\t\t%d\t\t%s\t\t%d\n",
				curr_proc_delta[top_loading[i]] * 100 / delta_time,
				top_loading[i],
				task_ptr_array[top_loading[i]]->comm,
				curr_proc_delta[top_loading[i]]);
	}

	/* check if dump busy thread stack */
	if (dump_top_stack) {
	   struct task_struct *t;
	   for (i = 0 ; i < NUM_BUSY_THREAD_CHECK ; i++) {
		if (task_ptr_array[top_loading[i]] != NULL && task_ptr_array[top_loading[i]]->stime > 0) {
			t = task_ptr_array[top_loading[i]];
			/* dump all the thread stack of this process */
			do {
				printk(KERN_INFO "\n[K] ###pid:%d name:%s state:%lu ppid:%d stime:%lu utime:%lu\n",
				t->pid, t->comm, t->state, t->real_parent->pid, t->stime, t->utime);
				show_stack(t, t->stack);
				t = next_thread(t);
			} while (t != task_ptr_array[top_loading[i]]);
		}
	   }
	}
	/* save old values */
	for_each_process(p) {
		if (p->pid < MAX_PID) {
			thread_group_cputime(p, &cputime);
			prev_proc_stat[p->pid] = cputime.stime + cputime.utime;
		}
	}

	old_cpu_stat = new_cpu_stat;

	memset(curr_proc_delta, 0, sizeof(int) * MAX_PID);
	memset(task_ptr_array, 0, sizeof(struct task_struct *) * MAX_PID);	/* array of pointers, so clear pointer-sized slots */

	spin_unlock_irqrestore(&lock, flags);
}
Example #8
int do_linux_waitpid(int pid, int *code_store)
{
	struct mm_struct *mm = current->mm;
	if (code_store != NULL) {
		if (!user_mem_check(mm, (uintptr_t) code_store, sizeof(int), 1)) {
			return -E_INVAL;
		}
	}

	struct proc_struct *proc, *cproc;
	bool intr_flag, haskid;
repeat:
	cproc = current;
	haskid = 0;
	if (pid > 0) {
		proc = find_proc(pid);
		if (proc != NULL) {
			do {
				if (proc->parent == cproc) {
					haskid = 1;
					if (proc->state == PROC_ZOMBIE) {
						goto found;
					}
					break;
				}
				cproc = next_thread(cproc);
			} while (cproc != current);
		}
	}
	/* we do NOT have group id, so.. */
	else if (pid == 0 || pid == -1) {	/* pid == 0 */
		do {
			proc = cproc->cptr;
			for (; proc != NULL; proc = proc->optr) {
				haskid = 1;
				if (proc->state == PROC_ZOMBIE) {
					goto found;
				}
			}
			cproc = next_thread(cproc);
		} while (cproc != current);
	} else {		/* pid < -1 */
		/* TODO */
		return -E_INVAL;
	}
	if (haskid) {
		current->state = PROC_SLEEPING;
		current->wait_state = WT_CHILD;
		schedule();
		may_killed();
		goto repeat;
	}
	return -E_BAD_PROC;

found:
	if (proc == idleproc || proc == initproc) {
		panic("wait idleproc or initproc.\n");
	}
	int exit_code = proc->exit_code;
	int return_pid = proc->pid;
	local_intr_save(intr_flag);
	{
		unhash_proc(proc);
		remove_links(proc);
	}
	local_intr_restore(intr_flag);
	put_kstack(proc);
	kfree(proc);

	int ret = 0;
	if (code_store != NULL) {
		lock_mm(mm);
		{
			int status = exit_code << 8;
			if (!copy_to_user(mm, code_store, &status, sizeof(int))) {
				ret = -E_INVAL;
			}
		}
		unlock_mm(mm);
	}
	return (ret == 0) ? return_pid : ret;
}
Example #9
// __do_exit - cause a thread exit (use do_exit, do_exit_thread instead)
//   1. call exit_mmap & put_pgdir & mm_destroy to free the almost all memory space of process
//   2. set process' state as PROC_ZOMBIE, then call wakeup_proc(parent) to ask parent reclaim itself.
//   3. call scheduler to switch to other process
static int __do_exit(void)
{
	if (current == idleproc) {
		panic("idleproc exit.\n");
	}
	if (current == initproc) {
		panic("initproc exit.\n");
	}

	struct mm_struct *mm = current->mm;
	if (mm != NULL) {
		mm->cpuid = -1;
		mp_set_mm_pagetable(NULL);
		if (mm_count_dec(mm) == 0) {
			exit_mmap(mm);
			put_pgdir(mm);
			bool intr_flag;
			local_intr_save(intr_flag);
			{
				list_del(&(mm->proc_mm_link));
			}
			local_intr_restore(intr_flag);
			mm_destroy(mm);
		}
		current->mm = NULL;
	}
	put_sighand(current);
	put_signal(current);
	put_fs(current);
	put_sem_queue(current);
	current->state = PROC_ZOMBIE;

	bool intr_flag;
	struct proc_struct *proc, *parent;
	local_intr_save(intr_flag);
	{
		proc = parent = current->parent;
		do {
			if (proc->wait_state == WT_CHILD) {
				wakeup_proc(proc);
			}
			proc = next_thread(proc);
		} while (proc != parent);

		if ((parent = next_thread(current)) == current) {
			parent = initproc;
		}
		de_thread(current);
		while (current->cptr != NULL) {
			proc = current->cptr;
			current->cptr = proc->optr;

			proc->yptr = NULL;
			if ((proc->optr = parent->cptr) != NULL) {
				parent->cptr->yptr = proc;
			}
			proc->parent = parent;
			parent->cptr = proc;
			if (proc->state == PROC_ZOMBIE) {
				if (parent->wait_state == WT_CHILD) {
					wakeup_proc(parent);
				}
			}
		}
	}

	wakeup_queue(&(current->event_box.wait_queue), WT_INTERRUPTED, 1);

	local_intr_restore(intr_flag);

	schedule();
	panic("__do_exit will not return!! %d %d.\n", current->pid,
	      current->exit_code);
}
Example #10
    HeapAjust(0);
    heapLenght++;
  }
  return readyHeap[heapLenght - 1];
}

void schedule()
{

  if (!heapLenght)
  {
    terminal_writestring("No more tasks!");
    __asm__ volatile("jmp loop\n\t");
  }

  TCB *next = next_thread();

  TCB *temp = curThread;
  curThread = next;
  __asm__ volatile("call switch_to\n\t"::"S"(temp), "D"(next));
}


void updatePrioriy()
{
  if (FIFO)
    curThread->comeT += N;
}


void yield()
Example #11
/***********************************************************************
 *		Thread32Next    (KERNEL32.@)
 *
 * Return info about the next thread in a toolhelp32 snapshot
 */
BOOL WINAPI Thread32Next( HANDLE hSnapShot, LPTHREADENTRY32 lpte )
{
    return next_thread( hSnapShot, lpte, FALSE );
}
Example #12
/***********************************************************************
 *		Thread32First    (KERNEL32.@)
 *
 * Return info about the first thread in a toolhelp32 snapshot
 */
BOOL WINAPI Thread32First( HANDLE hSnapShot, LPTHREADENTRY32 lpte )
{
    return next_thread( hSnapShot, lpte, TRUE );
}
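Examples #11 and #12 are Wine's Toolhelp32 thread iterators; next_thread() here is Wine's internal helper (unrelated to the kernel macro of the same name), and the TRUE/FALSE flag selects whether enumeration restarts from the beginning of the snapshot. Typical usage from the Windows API side (illustrative, standard Win32 calls only):

#include <stdio.h>
#include <windows.h>
#include <tlhelp32.h>

int main(void)
{
    HANDLE snap = CreateToolhelp32Snapshot(TH32CS_SNAPTHREAD, 0);
    if (snap == INVALID_HANDLE_VALUE)
        return 1;

    THREADENTRY32 te;
    te.dwSize = sizeof(te);        /* must be set before the first call */
    if (Thread32First(snap, &te)) {
        do {
            printf("pid %lu owns thread %lu\n",
                   (unsigned long)te.th32OwnerProcessID,
                   (unsigned long)te.th32ThreadID);
        } while (Thread32Next(snap, &te));
    }
    CloseHandle(snap);
    return 0;
}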
Example #13
asmlinkage long sys_times(struct tms __user * tbuf)
{
    /*
     *	In the SMP world we might just be unlucky and have one of
     *	the times increment as we use it. Since the value is an
     *	atomically safe type this is just fine. Conceptually it's
     *	as if the syscall took an instant longer to occur.
     */
    if (tbuf) {
        struct tms tmp;
        cputime_t utime, stime, cutime, cstime;

#ifdef CONFIG_SMP
        if (thread_group_empty(current)) {
            /*
             * Single thread case without the use of any locks.
             *
             * We may race with release_task if two threads are
             * executing. However, release task first adds up the
             * counters (__exit_signal) before  removing the task
             * from the process tasklist (__unhash_process).
             * __exit_signal also acquires and releases the
             * siglock which results in the proper memory ordering
             * so that the list modifications are always visible
             * after the counters have been updated.
             *
             * If the counters have been updated by the second thread
             * but the thread has not yet been removed from the list
             * then the other branch will be executing which will
             * block on tasklist_lock until the exit handling of the
             * other task is finished.
             *
             * This also implies that the sighand->siglock cannot
             * be held by another processor. So we can also
             * skip acquiring that lock.
             */
            utime = cputime_add(current->signal->utime, current->utime);
            stime = cputime_add(current->signal->stime, current->stime);
            cutime = current->signal->cutime;
            cstime = current->signal->cstime;
        } else
#endif
        {

            /* Process with multiple threads */
            struct task_struct *tsk = current;
            struct task_struct *t;

            read_lock(&tasklist_lock);
            utime = tsk->signal->utime;
            stime = tsk->signal->stime;
            t = tsk;
            do {
                utime = cputime_add(utime, t->utime);
                stime = cputime_add(stime, t->stime);
                t = next_thread(t);
            } while (t != tsk);

            /*
             * While we have tasklist_lock read-locked, no dying thread
             * can be updating current->signal->[us]time.  Instead,
             * we got their counts included in the live thread loop.
             * However, another thread can come in right now and
             * do a wait call that updates current->signal->c[us]time.
             * To make sure we always see that pair updated atomically,
             * we take the siglock around fetching them.
             */
            spin_lock_irq(&tsk->sighand->siglock);
            cutime = tsk->signal->cutime;
            cstime = tsk->signal->cstime;
            spin_unlock_irq(&tsk->sighand->siglock);
            read_unlock(&tasklist_lock);
        }
        tmp.tms_utime = cputime_to_clock_t(utime);
        tmp.tms_stime = cputime_to_clock_t(stime);
        tmp.tms_cutime = cputime_to_clock_t(cutime);
        tmp.tms_cstime = cputime_to_clock_t(cstime);
        if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
            return -EFAULT;
    }
    return (long) jiffies_64_to_clock_t(get_jiffies_64());
}
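sys_times() implements times(2); in the multi-threaded branch the next_thread() loop adds each live thread's counters to the totals already folded into signal->utime/stime by exited threads. A minimal user-space sketch (plain POSIX C):

#include <stdio.h>
#include <sys/times.h>
#include <unistd.h>

int main(void)
{
	struct tms t;
	clock_t elapsed = times(&t);	/* also returns elapsed clock ticks */
	long hz = sysconf(_SC_CLK_TCK);	/* ticks per second */

	if (elapsed == (clock_t)-1) {
		perror("times");
		return 1;
	}
	printf("utime %.2fs stime %.2fs cutime %.2fs cstime %.2fs\n",
	       (double)t.tms_utime / hz, (double)t.tms_stime / hz,
	       (double)t.tms_cutime / hz, (double)t.tms_cstime / hz);
	return 0;
}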
Example #14
// do_wait - wait one OR any children with PROC_ZOMBIE state, and free memory space of kernel stack
//         - proc struct of this child.
// NOTE: only after do_wait function, all resources of the child proces are free.
int do_wait(int pid, int *code_store)
{
	struct mm_struct *mm = current->mm;
	if (code_store != NULL) {
		if (!user_mem_check(mm, (uintptr_t) code_store, sizeof(int), 1)) {
			return -E_INVAL;
		}
	}

	struct proc_struct *proc, *cproc;
	bool intr_flag, haskid;
repeat:
	cproc = current;
	haskid = 0;
	if (pid != 0) {
		proc = find_proc(pid);
		if (proc != NULL) {
			do {
				if (proc->parent == cproc) {
					haskid = 1;
					if (proc->state == PROC_ZOMBIE) {
						goto found;
					}
					break;
				}
				cproc = next_thread(cproc);
			} while (cproc != current);
		}
	} else {
		do {
			proc = cproc->cptr;
			for (; proc != NULL; proc = proc->optr) {
				haskid = 1;
				if (proc->state == PROC_ZOMBIE) {
					goto found;
				}
			}
			cproc = next_thread(cproc);
		} while (cproc != current);
	}
	if (haskid) {
		current->state = PROC_SLEEPING;
		current->wait_state = WT_CHILD;
		schedule();
		may_killed();
		goto repeat;
	}
	return -E_BAD_PROC;

found:
	if (proc == idleproc || proc == initproc) {
		panic("wait idleproc or initproc.\n");
	}
	int exit_code = proc->exit_code;
	spin_lock_irqsave(&proc_lock, intr_flag);
	{
		unhash_proc(proc);
		remove_links(proc);
	}
	spin_unlock_irqrestore(&proc_lock, intr_flag);
	put_kstack(proc);
	kfree(proc);

	int ret = 0;
	if (code_store != NULL) {
		lock_mm(mm);
		{
			if (!copy_to_user
			    (mm, code_store, &exit_code, sizeof(int))) {
				ret = -E_INVAL;
			}
		}
		unlock_mm(mm);
	}
	return ret;
}