Code Example #1
File: spin.c Project: AjeyBohare/minix
int spin_check(spin_t *s)
{
	/* Check whether a timeout has taken place. Return TRUE if the caller
	 * should continue spinning, and FALSE if a timeout has occurred. The
	 * implementation assumes that it is okay to spin a little bit too long
	 * (up to a full clock tick extra).
	 */
	u64_t cur_tsc, tsc_delta;
	clock_t now, micro_delta;

	switch (s->s_state) {
	case STATE_INIT:
		s->s_state = STATE_BASE_TS;
		break;

	case STATE_BASE_TS:
		s->s_state = STATE_TS;
		read_tsc_64(&s->s_base_tsc);
		break;

	case STATE_TS:
		read_tsc_64(&cur_tsc);

		tsc_delta = sub64(cur_tsc, s->s_base_tsc);

		micro_delta = tsc_64_to_micros(tsc_delta);

		if (micro_delta >= s->s_usecs) {
			s->s_timeout = TRUE;
			return FALSE;
		}

		if (micro_delta >= TSC_SPIN) {
			s->s_usecs -= micro_delta;
			getticks(&s->s_base_uptime);
			s->s_state = STATE_UPTIME;
		}

		break;

	case STATE_UPTIME:
		getticks(&now);

		/* We assume that sys_hz() caches its return value. */
		micro_delta = ((now - s->s_base_uptime) * 1000 / sys_hz()) *
			1000;

		if (micro_delta >= s->s_usecs) {
			s->s_timeout = TRUE;
			return FALSE;
		}

		break;

	default:
		panic("spin_check: invalid state %d", s->s_state);
	}

	return TRUE;
}
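Callers rarely drive spin_check() by hand; libsys pairs it with spin_init() behind a SPIN_FOR macro. A minimal usage sketch, assuming the spin_init(spin_t *, u32_t usecs) companion from spin.h and a hypothetical device_ready() poll:

/* Sketch: poll for up to 1000 microseconds. SPIN_FOR expands to
 * for (spin_init(&spin, 1000); spin_check(&spin); ). */
spin_t spin;
int ready = 0;

SPIN_FOR(&spin, 1000) {
	if ((ready = device_ready()))	/* hypothetical readiness check */
		break;
}
if (!ready)
	printf("device not ready within 1000 us\n");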
Code Example #2
File: arch_clock.c Project: CesarBallardini/minix3
PRIVATE void estimate_cpu_freq(void)
{
	u64_t tsc_delta;
	u64_t cpu_freq;

	irq_hook_t calib_cpu;

	/* set the probe, we use the legacy timer, IRQ 0 */
	put_irq_handler(&calib_cpu, CLOCK_IRQ, calib_cpu_handler);

	/* just in case we are in an SMP single cpu fallback mode */
	BKL_UNLOCK();
	/* set the PIC timer to get some time */
	intr_enable();

	/* loop for some time to get a sample */
	while(probe_ticks < PROBE_TICKS) {
		intr_enable();
	}

	intr_disable();
	/* just in case we are in an SMP single cpu fallback mode */
	BKL_LOCK();

	/* remove the probe */
	rm_irq_handler(&calib_cpu);

	tsc_delta = sub64(tsc1, tsc0);

	cpu_freq = mul64(div64u64(tsc_delta, PROBE_TICKS - 1), make64(system_hz, 0));
	cpu_set_freq(cpuid, cpu_freq);
	cpu_info[cpuid].freq = div64u(cpu_freq, 1000000);
	BOOT_VERBOSE(cpu_print_freq(cpuid));
}
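The probe handler installed above is not part of this excerpt. A minimal sketch consistent with the globals this function consumes (tsc0, tsc1 and probe_ticks are assumed to be file-scope variables; this is an inference from the code, not the verbatim MINIX handler):

static int calib_cpu_handler(irq_hook_t *hook)
{
	u64_t tsc;

	probe_ticks++;
	read_tsc_64(&tsc);

	if (probe_ticks == 1)
		tsc0 = tsc;		/* first tick: base sample */
	else if (probe_ticks == PROBE_TICKS)
		tsc1 = tsc;		/* final tick: end sample */

	return 1;			/* keep the IRQ hook active */
}

The frequency then follows as tsc1 - tsc0 cycles over PROBE_TICKS - 1 tick intervals, multiplied by system_hz ticks per second, which is exactly the computation above.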
Code Example #3
File: proc.c Project: egall/OS-code
/*===========================================================================*
 *				idle					     * 
 *===========================================================================*/
PRIVATE void idle()
{
	/* This function is called whenever there is no work to do.
	 * Halt the CPU, and measure how many timestamp counter ticks are
	 * spent not doing anything. This allows test setups to measure
	 * the CPU utilization of certain workloads with high precision.
	 */
#ifdef CONFIG_IDLE_TSC
	u64_t idle_start;

	read_tsc_64(&idle_start);
	idle_active = 1;
#endif

	halt_cpu();

#ifdef CONFIG_IDLE_TSC
	if (idle_active) {
		IDLE_STOP;
		printf("Kernel: idle active after resuming CPU\n");
	}

	idle_tsc = add64(idle_tsc, sub64(idle_stop, idle_start));
#endif
}
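IDLE_STOP, idle_active, idle_stop and idle_tsc live outside this excerpt. A plausible definition of the macro, consistent with its use above (an assumption, not the verbatim kernel header):

/* Take the end-of-idle TSC sample exactly once per idle period. */
#define IDLE_STOP if (idle_active) { read_tsc_64(&idle_stop); idle_active = 0; }

The interrupt that wakes the CPU normally runs IDLE_STOP on its way into the kernel, so the branch after halt_cpu() is only a fallback, hence the warning printf.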
Code Example #4
File: getidle.c Project: biswajit1983/minix-nbsd
double getidle(void)
{
  u64_t stop, idle2;
  u64_t idelta, tdelta;
  double ifp, tfp, rfp;
  int r;

  if (!running) {
	if ((r = sys_getidletsc(&idle)) != OK)
		return -1.0;

	running = 1;

	read_tsc_64(&start);

	return 0.0;
  }
  else {
	read_tsc_64(&stop);

	running = 0;

	if ((r = sys_getidletsc(&idle2)) != OK)
		return -1.0;

	idelta = sub64(idle2, idle);
	tdelta = sub64(stop, start);

	if (cmp64(idelta, tdelta) >= 0)
		return 100.0;

	ifp = make_double(idelta);
	tfp = make_double(tdelta);

	rfp = ifp / tfp * 100.0;

	if (rfp < 0.0) rfp = 0.0;
	else if (rfp > 100.0) rfp = 100.0;

	return rfp;
  }

}
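getidle() is stateful: the first call arms the measurement and returns 0.0, the second reports and disarms. A usage sketch (do_workload() is a hypothetical placeholder):

double idleperc;

getidle();			/* arm: samples the idle TSC and the start time */
do_workload();			/* hypothetical workload to measure */
idleperc = getidle();		/* report: 0.0 .. 100.0, or -1.0 on error */
if (idleperc >= 0.0)
	printf("system was %.2f%% idle during the workload\n", idleperc);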
Code Example #5
File: action.c Project: AgamAgarwal/minix
/*===========================================================================*
 *				get_range				     *
 *===========================================================================*/
static size_t get_range(struct fbd_rule *rule, u64_t pos, size_t *size,
	u64_t *skip)
{
	/* Compute the range within the given request range that is affected
	 * by the given rule, and optionally the number of bytes preceding
	 * the range that are also affected by the rule.
	 */
	u64_t delta;
	size_t off;
	int to_eof;

	to_eof = cmp64(rule->start, rule->end) >= 0;

	if (cmp64(pos, rule->start) > 0) {
		if (skip != NULL) *skip = sub64(pos, rule->start);

		off = 0;
	}
	else {
		if (skip != NULL) *skip = cvu64(0);

		delta = sub64(rule->start, pos);

		assert(ex64hi(delta) == 0);

		off = ex64lo(delta);
	}

	if (!to_eof) {
		assert(cmp64(pos, rule->end) < 0);

		delta = sub64(rule->end, pos);

		if (cmp64u(delta, *size) < 0)
			*size = ex64lo(delta);
	}

	assert(*size > off);

	*size -= off;

	return off;
}
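A worked example: for a rule covering [1000, 2000) and a request at pos = 500 with *size = 1000, pos lies before rule->start, so *skip becomes 0 and off = 1000 - 500 = 500. The rule is not to-EOF and rule->end - pos = 1500 is not below *size, so *size is left at 1000; after *size -= off, the caller gets off = 500 and *size = 500, i.e. only the last 500 bytes of the request fall under the rule.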
Code Example #6
File: cpu.c Project: kbarsalo/ASG2
/* returns 0 on exit */
int main(int argc, char *argv[]) {
    /* argument 1 is the number of iterations to run */
    /* argument 2 is the argument to nice() */

    /* PI = 4 * (1/1 - 1/3 + 1/5 - 1/7 + 1/9 - 1/11 ...) */
    int i;
    double sum = 0;
    int denom, numer;
    time_t current;
    time_t start = time(NULL);
    int seconds = 0;
    pid_t process_id = getpid();
    int retval, nice_val, iters;
    u64_t s,e,diff;
    double elapsed;
    unsigned long max;

    if (argc < 3) {
        printf("usage: cpu iterations tickets\n");
        return 1;
    }

    iters = atoi(argv[1]);
    nice_val = atoi(argv[2]);
    retval = nice(nice_val);
    if (retval == -1) {
        printf("Error calling nice()\n");
        return 1;
    }
    printf("Process %d at nice %d\n", process_id, nice_val);

    read_tsc_64(&s);
    for (i = 1; i<iters; ++i) {
        denom = 2 * i - 1;
        /* odd terms are summed as negative and even terms as positive;
         * the -4 factor in the final printf corrects the inverted sign */
        numer = (i % 2) ? -1 : 1;
        sum += ((double)numer / (double)denom);
        current = time(NULL);
        if (current - start > seconds) {
            seconds = current - start;
            printf("Process %d has been running for %d seconds.\n", process_id, seconds);
            /*if (seconds == 10)
                break;*/
        }
    }
    read_tsc_64(&e);
    diff = sub64(e, s);
    max = -1;	/* unsigned wrap: ULONG_MAX, the full range of the low word */
    /* rough elapsed figure: the high word of the TSC delta plus the low
     * word as a fraction of its range (both scaled down by 100000) */
    elapsed = (double)diff.hi + (double)(diff.lo/100000) / (double)(max/100000);
    printf("CPU process %d (nice %d) calculated pi as %f at %f time units\n", process_id, nice_val, -4 * sum, elapsed);
    return 0;
}
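Invocation sketch (argument values are arbitrary): cpu 50000000 10 runs fifty million Leibniz terms at nice 10. Note that elapsed is only a coarse figure, the high word of the TSC delta plus the low word expressed as a fraction of its range, not a calibrated time unit.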
Code Example #7
File: arch_clock.c Project: CesarBallardini/minix3
PUBLIC short cpu_load(void)
{
	u64_t current_tsc, *current_idle;
	u64_t tsc_delta, idle_delta, busy;
	struct proc *idle;
	short load;
#ifdef CONFIG_SMP
	unsigned cpu = cpuid;
#endif

	u64_t *last_tsc, *last_idle;

	last_tsc = get_cpu_var_ptr(cpu, cpu_last_tsc);
	last_idle = get_cpu_var_ptr(cpu, cpu_last_idle);

	idle = get_cpu_var_ptr(cpu, idle_proc);
	read_tsc_64(&current_tsc);
	current_idle = &idle->p_cycles; /* cycle counter of the idle process */

	/* calculate load since last cpu_load invocation */
	if (!is_zero64(*last_tsc)) {
		tsc_delta = sub64(current_tsc, *last_tsc);
		idle_delta = sub64(*current_idle, *last_idle);

		busy = sub64(tsc_delta, idle_delta);
		busy = mul64(busy, make64(100, 0));
		load = ex64lo(div64(busy, tsc_delta));

		if (load > 100)
			load = 100;
	} else
		load = 0;
	
	*last_tsc = current_tsc;
	*last_idle = *current_idle;
	return load;
}
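Worked arithmetic: if tsc_delta is 1,000,000,000 cycles since the previous call and the idle process accumulated idle_delta = 750,000,000 of them, then busy = 250,000,000 and load = 100 * 250,000,000 / 1,000,000,000 = 25. The clamp to 100 guards against idle accounting lagging slightly behind the TSC sample.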
Code Example #8
File: action.c Project: AgamAgarwal/minix
/*===========================================================================*
 *				action_pre_misdir			     *
 *===========================================================================*/
static void action_pre_misdir(struct fbd_rule *rule, iovec_t *UNUSED(iov),
	unsigned *UNUSED(count), size_t *UNUSED(size), u64_t *pos)
{
	/* Randomize the request position to fall within the range (and have
	 * the alignment) given by the rule.
	 */
	u32_t range, choice;

	/* Unfortunately, we cannot interpret an end value of 0 as "up to the
	 * end of the disk" here, because we have no idea about the actual
	 * disk size, and the resulting address must of course be valid.
	 */
	range = div64u(add64u(sub64(rule->params.misdir.end,
		rule->params.misdir.start), 1), rule->params.misdir.align);

	if (range > 0)
		choice = get_rand(range - 1);
	else
		choice = 0;

	*pos = add64(rule->params.misdir.start,
		mul64u(choice, rule->params.misdir.align));
}
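A worked example, assuming get_rand(n) yields a value in [0, n]: with start = 0, end = 4095 and align = 512, range = (4095 - 0 + 1) / 512 = 8, choice is drawn from [0, 7], and *pos becomes one of the eight 512-byte-aligned offsets 0, 512, ..., 3584 inside the rule's window.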
Code Example #9
File: com.c Project: KevinEmery/csci442project3
int startsim(void){

message m;
int i, j = 0, piReady = -1;	/* piReady is advanced asynchronously by PM */
u64_t cpuTimeDiff = cvu64(0);	/* table 0 has no predecessor to diff against */
FILE *fp;

fp = fopen("/home/out","w");
if (fp == NULL)
	return(-1);

for(i=0;i<HISTORY;i++){
	pInfoPtrs[i] = (struct pi *) &pInfo[i][0]; 
}
for(i=0;i<HISTORY;i++){
	pQhPtrs[i] = (struct qh*) &pQh[i][0];
}

/* Copy the pointer arrays so that you can go backward and forward through the history */
struct pi *pInfoPtrsCopy[HISTORY];
struct qh *pQhPtrsCopy[HISTORY];
for(i=0;i<HISTORY;i++){
	pInfoPtrsCopy[i] = pInfoPtrs[i];
	pQhPtrsCopy[i] = pQhPtrs[i];
}
m.m1_p1 = (char *) &pInfoPtrs;
m.m1_p2 = (char *) &piReady;
m.m1_i2 = SELF;
m.m1_i3 = HISTORY;
m.m2_p1 = (char *) &pQhPtrs;
m.m3_p1 = (char *) &cpuFreq;

int error = _syscall(PM_PROC_NR,STARTRECORD,&m);
procs();
for (; j < HISTORY; j++) {
	/* busy-wait until PM has published snapshot j (piReady is
	 * advanced asynchronously through the pointer in m.m1_p2) */
	while (piReady < j) {
	}
	if (j == 0) {
		fprintf(fp,"CPU frequency is: %lu Mhz\n",div64u(cpuFreq, 1000000));
	}
	printf("Simulation is %d%% complete.\n",(j*2)+2);
	fprintf(fp,"Proc Table %d\n\n",j);
	fprintf(fp,"Queue heads: ");
	for (i = 0; i < NR_SCHED_QUEUES; i++) {
		if (pQhPtrsCopy[j]->p_endpoint != -1) {
			fprintf(fp,"Queue: %d %d ",i,pQhPtrsCopy[j]->p_endpoint);
		}
		pQhPtrsCopy[j]++;
	}
	fprintf(fp,"\n\n");
	pQhPtrsCopy[j] = pQhPtrs[j];

	/* Write out the runnable queues in order */
	for (i = 0; i < NR_SCHED_QUEUES; i++) {
		fprintf(fp,"Priority Queue %d: ",i);
		if (pQhPtrsCopy[j]->p_endpoint != -1) {
			printQ(pInfoPtrsCopy[j],pQhPtrsCopy[j]->p_endpoint,fp);
		} else {
			fprintf(fp,"\n");
		}
		pQhPtrsCopy[j]++;
	}
	pQhPtrsCopy[j] = pQhPtrs[j]; /* Reset the Qh pointers */

	for (i = 0; i < ALL_PROCS; i++) {
		if (!(pInfoPtrsCopy[j]->p_rts_flags == RTS_SLOT_FREE)) {
			if (j > 0) {
				cpuTimeDiff = sub64(pInfoPtrsCopy[j]->p_cycles,pInfoPtrsCopy[j-1]->p_cycles);
			}
			fprintf(fp,"Process: %s, Endpoint: %d, Enter queue: %lu%lu, Time in Queue: %lu%lu, Dequeues: %lu, IPC Sync: %lu, IPC Async: %lu,Preempted: %lu,RTS: %x\n\t\t, Priority: %d, Next: %s Endpoint: %d, User time: %d, Sys Time: %d, CPU Cycles Elaps: %lu%lu\n",
				pInfoPtrsCopy[j]->p_name,pInfoPtrsCopy[j]->p_endpoint,ex64hi(pInfoPtrsCopy[j]->p_times.enter_queue),ex64lo(pInfoPtrsCopy[j]->p_times.enter_queue),
				ex64hi(pInfoPtrsCopy[j]->p_times.time_in_queue),ex64lo(pInfoPtrsCopy[j]->p_times.time_in_queue),
				pInfoPtrsCopy[j]->p_times.dequeues,pInfoPtrsCopy[j]->p_times.ipc_sync,pInfoPtrsCopy[j]->p_times.ipc_async,
				pInfoPtrsCopy[j]->p_times.preempted,pInfoPtrsCopy[j]->p_rts_flags,pInfoPtrsCopy[j]->p_priority, pInfoPtrsCopy[j]->p_nextready,
				pInfoPtrsCopy[j]->p_nextready_endpoint,pInfoPtrsCopy[j]->p_user_time,pInfoPtrsCopy[j]->p_sys_time,ex64hi(cpuTimeDiff),
				ex64lo(cpuTimeDiff));
		}
		pInfoPtrsCopy[j]++;
		if (j > 0) {
			pInfoPtrsCopy[j-1]++;
		}
	}
	pInfoPtrsCopy[j] = pInfoPtrs[j];
}

m.m1_i3 = -1;
_syscall(PM_PROC_NR,STARTRECORD,&m);
return(0);
}
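Because PM updates piReady asynchronously through the pointer passed in m.m1_p2, the empty busy-wait above depends on the compiler rereading the variable from memory each iteration. A more defensive variant (a sketch, not the original author's code) would qualify the flag volatile:

volatile int piReady = -1;	/* PM advances this as each snapshot lands */

/* ... later, in the per-snapshot loop: */
while (piReady < j) {
	/* spin until snapshot j has been published */
}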
Code Example #10
File: requestor.c Project: DragonQuan/minix3
/*===========================================================================*
 *				    main				     *
 *===========================================================================*/
int main(int argc, char **argv)
{
	endpoint_t ep_self, ep_child;
	size_t size = BUF_SIZE;
	int i, r, pid;
	int status;
	u64_t start, end, diff;
	double micros;
	char nr_pages_str[10], is_map_str[2], is_write_str[2];
	int nr_pages, is_map, is_write;

	/* SEF local startup. */
	env_setargs(argc, argv);
	sef_local_startup();

	/* Parse the command line. */
	r = env_get_param("pages", nr_pages_str, sizeof(nr_pages_str));
	errno = 0;
	nr_pages = atoi(nr_pages_str);
	if (r != OK || errno || nr_pages <=0) {
		exit_usage();
	}
	if(nr_pages > TEST_PAGE_NUM) {
		printf("REQUESTOR: too many pages. Max allowed: %d\n",
			TEST_PAGE_NUM);
		exit_usage();
	}
	r = env_get_param("map", is_map_str, sizeof(is_map_str));
	errno = 0;
	is_map = atoi(is_map_str);
	if (r != OK || errno || (is_map!=0 && is_map!=1)) {
		exit_usage();
	}
	r = env_get_param("write", is_write_str, sizeof(is_write_str));
	errno = 0;
	is_write = atoi(is_write_str);
	if (r != OK || errno || (is_write!=0 && is_write!=1)) {
		exit_usage();
	}
	printf("REQUESTOR: Running tests with pages=%d map=%d write=%d...\n",
		nr_pages, is_map, is_write);

	/* Prepare work. */
	buf = (char*) CLICK_CEIL(buf_buf);
	fid_get = open(FIFO_GRANTOR, O_RDONLY);
	fid_send = open(FIFO_REQUESTOR, O_WRONLY);
	if(fid_get < 0 || fid_send < 0) {
		printf("REQUESTOR: can't open fifo files.\n");
		return 1;
	}

	/* Send our endpoint to the granter, so that it can create
	 * the grant.
	 */
	ep_self = getprocnr();
	write(fid_send, &ep_self, sizeof(ep_self));
	dprint("REQUESTOR: sending my endpoint: %d\n", ep_self);

	/* Get the granter's endpoint and gid. */
	read(fid_get, &ep_granter, sizeof(ep_granter));
	read(fid_get, &gid, sizeof(gid));
	dprint("REQUESTOR: getting granter's endpoint %d and gid %d\n",
		ep_granter, gid);

	FIFO_WAIT(fid_get);
	diff = make64(0, 0);

	if(is_map) {
		/* Test safemap. */
		for(i=0;i<NR_TEST_ITERATIONS;i++) {
			read_tsc_64(&start);
			r = sys_safemap(ep_granter, gid, 0, (long)buf,
				nr_pages*CLICK_SIZE, D, 1);
			if(r != OK) {
				printf("REQUESTOR: safemap error: %d\n", r);
				return 1;
			}
			read_write_buff(buf, nr_pages*CLICK_SIZE, is_write);
			read_tsc_64(&end);
			diff = add64(diff, (sub64(end, start)));
			r = sys_safeunmap(D, (long)buf);
			if(r != OK) {
				printf("REQUESTOR: safeunmap error: %d\n", r);
				return 1;
			}
		}
		micros = ((double)tsc_64_to_micros(diff))
			/ (NR_TEST_ITERATIONS*nr_pages);
		REPORT_TEST("REQUESTOR", "SAFEMAP", micros);
	}
	else {
		/* Test safecopy. */
		for(i=0;i<NR_TEST_ITERATIONS;i++) {
			read_tsc_64(&start);
			r = sys_safecopyfrom(ep_granter, gid, 0, (long)buf,
				nr_pages*CLICK_SIZE, D);
			if(r != OK) {
				printf("REQUESTOR: safecopy error: %d\n", r);
				return 1;
			}
			read_write_buff(buf, nr_pages*CLICK_SIZE, is_write);
			read_tsc_64(&end);
			diff = add64(diff, (sub64(end, start)));
		}
		micros = ((double)tsc_64_to_micros(diff))
			/ (NR_TEST_ITERATIONS*nr_pages);
		REPORT_TEST("REQUESTOR", "SAFECOPY", micros);
	}

	FIFO_NOTIFY(fid_send);

	return 0;
}
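read_write_buff() is referenced but not shown. A minimal sketch consistent with its call sites (hypothetical; the real helper may differ):

static void read_write_buff(char *buf, size_t size, int is_write)
{
	size_t i;
	char c = 0;

	for (i = 0; i < size; i++) {
		if (is_write)
			buf[i] = (char) i;	/* touch the page with a write */
		else
			c += buf[i];		/* touch the page with a read */
	}
	(void) c;				/* defeat unused-value warnings */
}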
Code Example #11
File: arch_clock.c Project: CesarBallardini/minix3
PUBLIC void context_stop(struct proc * p)
{
	u64_t tsc, tsc_delta;
	u64_t * __tsc_ctr_switch = get_cpulocal_var_ptr(tsc_ctr_switch);
#ifdef CONFIG_SMP
	unsigned cpu = cpuid;

	/*
	 * This function is called only if we switch from kernel to user or idle
	 * or back. Therefore this is a perfect location to place the big kernel
	 * lock which will hopefully disappear soon.
	 *
	 * If we stop accounting for KERNEL we must unlock the BKL. If we
	 * account for IDLE we must not hold the lock.
	 */
	if (p == proc_addr(KERNEL)) {
		u64_t tmp;

		read_tsc_64(&tsc);
		tmp = sub64(tsc, *__tsc_ctr_switch);
		kernel_ticks[cpu] = add64(kernel_ticks[cpu], tmp);
		p->p_cycles = add64(p->p_cycles, tmp);
		BKL_UNLOCK();
	} else {
		u64_t bkl_tsc;
		atomic_t succ;
		
		read_tsc_64(&bkl_tsc);
		/* this only gives a good estimate */
		succ = big_kernel_lock.val;
		
		BKL_LOCK();
		
		read_tsc_64(&tsc);

		bkl_ticks[cpu] = add64(bkl_ticks[cpu], sub64(tsc, bkl_tsc));
		bkl_tries[cpu]++;
		bkl_succ[cpu] += (succ == 0);	/* count acquisitions that found the lock free */

		p->p_cycles = add64(p->p_cycles, sub64(tsc, *__tsc_ctr_switch));

#ifdef CONFIG_SMP
		/*
		 * Since at the time we got a scheduling IPI we might have been
		 * waiting for BKL already, we may miss it due to a similar IPI to
		 * the cpu which is already waiting for us to handle its. This
		 * results in a live-lock of these two cpus.
		 *
		 * Therefore we always check if there is one pending and if so,
		 * we handle it straight away so the other cpu can continue and
		 * we do not deadlock.
		 */
		smp_sched_handler();
#endif
	}
#else
	read_tsc_64(&tsc);
	p->p_cycles = add64(p->p_cycles, sub64(tsc, *__tsc_ctr_switch));
#endif
	
	tsc_delta = sub64(tsc, *__tsc_ctr_switch);

	if(kbill_ipc) {
		kbill_ipc->p_kipc_cycles =
			add64(kbill_ipc->p_kipc_cycles, tsc_delta);
		kbill_ipc = NULL;
	}

	if(kbill_kcall) {
		kbill_kcall->p_kcall_cycles =
			add64(kbill_kcall->p_kcall_cycles, tsc_delta);
		kbill_kcall = NULL;
	}

	/*
	 * deduct the just consumed cpu cycles from the cpu time left for this
	 * process during its current quantum. Skip IDLE and other pseudo kernel
	 * tasks
	 */
	if (p->p_endpoint >= 0) {
#if DEBUG_RACE
		make_zero64(p->p_cpu_time_left);
#else
		/* if (tsc_delta < p->p_cpu_time_left) in 64bit */
		if (ex64hi(tsc_delta) < ex64hi(p->p_cpu_time_left) ||
				(ex64hi(tsc_delta) == ex64hi(p->p_cpu_time_left) &&
				 ex64lo(tsc_delta) < ex64lo(p->p_cpu_time_left)))
			p->p_cpu_time_left = sub64(p->p_cpu_time_left, tsc_delta);
		else {
			make_zero64(p->p_cpu_time_left);
		}
#endif
	}

	*__tsc_ctr_switch = tsc;
}
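The open-coded hi/lo comparison near the end is simply a 64-bit less-than; with the cmp64 helper used elsewhere in these examples it would read as follows (equivalent, presumably open-coded to keep this hot path free of an extra call):

	if (cmp64(tsc_delta, p->p_cpu_time_left) < 0)
		p->p_cpu_time_left = sub64(p->p_cpu_time_left, tsc_delta);
	else
		make_zero64(p->p_cpu_time_left);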
Code Example #12
void print_procs(int maxlines,
	struct proc *proc1, struct proc *proc2,
	struct mproc *mproc)
{
	int p, nprocs;
	u64_t idleticks = cvu64(0);
	u64_t kernelticks = cvu64(0);
	u64_t systemticks = cvu64(0);
	u64_t userticks = cvu64(0);
	u64_t total_ticks = cvu64(0);
	unsigned long tcyc;
	unsigned long tmp;
	int blockedseen = 0;
	struct tp tick_procs[PROCS];

	for(p = nprocs = 0; p < PROCS; p++) {
		if(isemptyp(&proc2[p]))
			continue;
		tick_procs[nprocs].p = proc2 + p;
		if(proc1[p].p_endpoint == proc2[p].p_endpoint) {
			tick_procs[nprocs].ticks =
				sub64(proc2[p].p_cycles, proc1[p].p_cycles);
		} else {
			tick_procs[nprocs].ticks =
				proc2[p].p_cycles;
		}
		total_ticks = add64(total_ticks, tick_procs[nprocs].ticks);
		if(p-NR_TASKS == IDLE) {
			idleticks = tick_procs[nprocs].ticks;
			continue;
		}
		if(p-NR_TASKS == KERNEL) {
			kernelticks = tick_procs[nprocs].ticks;
			continue;
		}
		if(mproc[proc2[p].p_nr].mp_procgrp == 0)
			systemticks = add64(systemticks, tick_procs[nprocs].ticks);
		else if (p > NR_TASKS)
			userticks = add64(userticks, tick_procs[nprocs].ticks);

		nprocs++;
	}

	if (!cmp64u(total_ticks, 0))
		return;		/* no cycles recorded; avoid dividing by zero below */

	qsort(tick_procs, nprocs, sizeof(tick_procs[0]), cmp_ticks);

	tcyc = div64u(total_ticks, SCALE);

	tmp = div64u(userticks, SCALE);
	printf("CPU states: %6.2f%% user, ", 100.0*(tmp)/tcyc);

	tmp = div64u(systemticks, SCALE);
	printf("%6.2f%% system, ", 100.0*tmp/tcyc);

	tmp = div64u(kernelticks, SCALE);
	printf("%6.2f%% kernel, ", 100.0*tmp/tcyc);

	tmp = div64u(idleticks, SCALE);
	printf("%6.2f%% idle", 100.0*tmp/tcyc);

#define NEWLINE do { printf("\n"); if(--maxlines <= 0) { return; } } while(0) 
	NEWLINE;
	NEWLINE;

	printf("  PID USERNAME PRI NICE   SIZE STATE   TIME     CPU COMMAND");
	NEWLINE;
	for(p = 0; p < nprocs; p++) {
		struct proc *pr;
		int pnr;
		int level = 0;

		pnr = tick_procs[p].p->p_nr;

		if(pnr < 0) {
			/* skip old kernel tasks as they don't run anymore */
			continue;
		}

		pr = tick_procs[p].p;

		/* If we're in blocked verbose mode, indicate start of
		 * blocked processes.
		 */
		if(blockedverbose && pr->p_rts_flags && !blockedseen) {
			NEWLINE;
			printf("Blocked processes:");
			NEWLINE;
			blockedseen = 1;
		}

		print_proc(&tick_procs[p], &mproc[pnr], tcyc);
		NEWLINE;

		if(!blockedverbose)
			continue;

		/* Traverse dependency chain if blocked. */
		while(pr->p_rts_flags) {
			endpoint_t dep = NONE;
			struct tp *tpdep;
			level += 5;

			if((dep = P_BLOCKEDON(pr)) == NONE) {
				printf("not blocked on a process");
				NEWLINE;
				break;
			}

			if(dep == ANY)
				break;

			tpdep = lookup(dep, tick_procs, nprocs);
			pr = tpdep->p;
			printf("%*s> ", level, "");
			print_proc(tpdep, &mproc[pr->p_nr], tcyc);
			NEWLINE;
		}
	}
}
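A note on the percentages above: tmp and tcyc are both divided by the same SCALE constant, so the factor cancels in 100.0 * tmp / tcyc; the division's purpose is to shrink the 64-bit cycle counts into unsigned long range before the floating-point math. For instance, if scaling reduces userticks to 250 and total_ticks to 1000, the user share prints as 100.0 * 250 / 1000 = 25.00%.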
Code Example #13
File: arch_clock.c Project: pikpik/Shlib4MINIX3
PUBLIC void context_stop(struct proc * p)
{
	u64_t tsc, tsc_delta;
	u64_t * __tsc_ctr_switch = get_cpulocal_var_ptr(tsc_ctr_switch);
#ifdef CONFIG_SMP
	unsigned cpu = cpuid;

	/*
	 * This function is called only if we switch from kernel to user or idle
	 * or back. Therefore this is a perfect location to place the big kernel
	 * lock which will hopefully disappear soon.
	 *
	 * If we stop accounting for KERNEL we must unlock the BKL. If we
	 * account for IDLE we must not hold the lock.
	 */
	if (p == proc_addr(KERNEL)) {
		u64_t tmp;

		read_tsc_64(&tsc);
		tmp = sub64(tsc, *__tsc_ctr_switch);
		kernel_ticks[cpu] = add64(kernel_ticks[cpu], tmp);
		p->p_cycles = add64(p->p_cycles, tmp);
		BKL_UNLOCK();
	} else {
		u64_t bkl_tsc;
		atomic_t succ;
		
		read_tsc_64(&bkl_tsc);
		/* this only gives a good estimate */
		succ = big_kernel_lock.val;
		
		BKL_LOCK();
		
		read_tsc_64(&tsc);

		bkl_ticks[cpu] = add64(bkl_ticks[cpu], sub64(tsc, bkl_tsc));
		bkl_tries[cpu]++;
		bkl_succ[cpu] += (succ == 0);	/* count acquisitions that found the lock free */

		p->p_cycles = add64(p->p_cycles, sub64(tsc, *__tsc_ctr_switch));
	}
#else
	read_tsc_64(&tsc);
	p->p_cycles = add64(p->p_cycles, sub64(tsc, *__tsc_ctr_switch));
#endif
	
	tsc_delta = sub64(tsc, *__tsc_ctr_switch);

	if(kbill_ipc) {
		kbill_ipc->p_kipc_cycles =
			add64(kbill_ipc->p_kipc_cycles, tsc_delta);
		kbill_ipc = NULL;
	}

	if(kbill_kcall) {
		kbill_kcall->p_kcall_cycles =
			add64(kbill_kcall->p_kcall_cycles, tsc_delta);
		kbill_kcall = NULL;
	}

	/*
	 * deduct the just consumed cpu cycles from the cpu time left for this
	 * process during its current quantum. Skip IDLE and other pseudo kernel
	 * tasks
	 */
	if (p->p_endpoint >= 0) {
#if DEBUG_RACE
		make_zero64(p->p_cpu_time_left);
#else
		/* if (tsc_delta < p->p_cpu_time_left) in 64bit */
		if (ex64hi(tsc_delta) < ex64hi(p->p_cpu_time_left) ||
				(ex64hi(tsc_delta) == ex64hi(p->p_cpu_time_left) &&
				 ex64lo(tsc_delta) < ex64lo(p->p_cpu_time_left)))
			p->p_cpu_time_left = sub64(p->p_cpu_time_left, tsc_delta);
		else {
			make_zero64(p->p_cpu_time_left);
		}
#endif
	}

	*__tsc_ctr_switch = tsc;
}