Example #1
void JIT::compileLoadVarargs(Instruction* instruction)
{
    int thisValue = instruction[3].u.operand;
    int arguments = instruction[4].u.operand;
    int firstFreeRegister = instruction[5].u.operand;

    JumpList slowCase;
    JumpList end;
    bool canOptimize = m_codeBlock->usesArguments()
        && arguments == m_codeBlock->argumentsRegister().offset()
        && !m_codeBlock->symbolTable()->slowArguments();

    if (canOptimize) {
        emitGetVirtualRegister(arguments, regT0);
        slowCase.append(branch64(NotEqual, regT0, TrustedImm64(JSValue::encode(JSValue()))));

        emitGetFromCallFrameHeader32(JSStack::ArgumentCount, regT0);
        slowCase.append(branch32(Above, regT0, TrustedImm32(Arguments::MaxArguments + 1)));
        // regT0: argumentCountIncludingThis

        move(regT0, regT1);
        neg64(regT1);
        add64(TrustedImm32(firstFreeRegister - JSStack::CallFrameHeaderSize), regT1);
        lshift64(TrustedImm32(3), regT1);
        addPtr(callFrameRegister, regT1);
        // regT1: newCallFrame

        slowCase.append(branchPtr(Above, AbsoluteAddress(m_vm->addressOfJSStackLimit()), regT1));

        // Initialize ArgumentCount.
        store32(regT0, Address(regT1, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));

        // Initialize 'this'.
        emitGetVirtualRegister(thisValue, regT2);
        store64(regT2, Address(regT1, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))));

        // Copy arguments.
        signExtend32ToPtr(regT0, regT0);
        end.append(branchSub64(Zero, TrustedImm32(1), regT0));
        // regT0: argumentCount

        Label copyLoop = label();
        load64(BaseIndex(callFrameRegister, regT0, TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))), regT2);
        store64(regT2, BaseIndex(regT1, regT0, TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))));
        branchSub64(NonZero, TrustedImm32(1), regT0).linkTo(copyLoop, this);

        end.append(jump());
    }

    if (canOptimize)
        slowCase.link(this);

    emitGetVirtualRegister(thisValue, regT0);
    emitGetVirtualRegister(arguments, regT1);
    callOperation(operationLoadVarargs, regT0, regT1, firstFreeRegister);
    move(returnValueGPR, regT1);

    if (canOptimize)
        end.link(this);
}
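The fast path above sizes and places the new call frame with inline arithmetic instead of calling into the runtime. A minimal C sketch of that address computation, with names mirroring the JIT registers (the sign conventions and concrete operand values are assumptions about JSC's downward-growing stack):

/* newCallFrame = callFrame + (firstFreeRegister - headerSize - argCount)
 * in Register (8-byte) units; the JIT's lshift64(3) is the same scaling. */
#include <stdint.h>

typedef uint64_t Register;              /* one JSC register slot, 8 bytes */

static Register *new_call_frame(Register *callFrame,
                                int firstFreeRegister,    /* typically negative */
                                int callFrameHeaderSize,
                                int argumentCountIncludingThis)
{
    intptr_t offset = (intptr_t)firstFreeRegister - callFrameHeaderSize
                      - argumentCountIncludingThis;
    return callFrame + offset;          /* pointer arithmetic scales by 8 */
}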
Example #2
/*===========================================================================*
 *				idle					     * 
 *===========================================================================*/
PRIVATE void idle()
{
	/* This function is called whenever there is no work to do.
	 * Halt the CPU, and measure how many timestamp counter ticks are
	 * spent not doing anything. This allows test setups to measure
	 * the CPU utilization of certain workloads with high precision.
	 */
#ifdef CONFIG_IDLE_TSC
	u64_t idle_start;

	read_tsc_64(&idle_start);
	idle_active = 1;
#endif

	halt_cpu();

#ifdef CONFIG_IDLE_TSC
	if (idle_active) {
		IDLE_STOP;
		printf("Kernel: idle active after resuming CPU\n");
	}

	idle_tsc = add64(idle_tsc, sub64(idle_stop, idle_start));
#endif
}
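For context, the same measurement pattern can be sketched at user level with the compiler's TSC intrinsic (an illustrative sketch assuming x86 and GCC/Clang's __rdtsc(); the kernel above uses read_tsc_64() and the IDLE_STOP machinery instead):

#include <stdint.h>
#include <x86intrin.h>

static uint64_t idle_tsc_total;		/* accumulated idle cycles */

static void measured_idle(void (*halt)(void))
{
	uint64_t start = __rdtsc();
	halt();				/* stand-in for halt_cpu() */
	idle_tsc_total += __rdtsc() - start;
}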
Example #3
static void testdiv(void)
{
	u64_t q, r;
#if TIMED
	struct timeval tvstart, tvend;

	printf("i=0x%.8x%.8x; j=0x%.8x%.8x\n", 
		ex64hi(i), ex64lo(i), 
		ex64hi(j), ex64lo(j));
	fflush(stdout);
	if (gettimeofday(&tvstart, NULL) < 0) ERR;
#endif

	/* division by zero has a separate test */
	if (cmp64u(j, 0) == 0) {
		testdiv0();
		return;
	}

	/* perform division, store q in k to make ERR more informative */
	q = div64(i, j);
	r = rem64(i, j);
	k = q;

#if TIMED
	if (gettimeofday(&tvend, NULL) < 0) ERR;
	tvend.tv_sec -= tvstart.tv_sec;
	tvend.tv_usec -= tvstart.tv_usec;
	if (tvend.tv_usec < 0) {
		tvend.tv_sec -= 1;
		tvend.tv_usec += 1000000;
	}
	printf("q=0x%.8x%.8x; r=0x%.8x%.8x; time=%d.%.6d\n", 
		ex64hi(q), ex64lo(q), 
		ex64hi(r), ex64lo(r), 
		tvend.tv_sec, tvend.tv_usec);
	fflush(stdout);
#endif

	/* compare to 64/32-bit division if possible */
	if (!ex64hi(j)) {
		if (cmp64(q, div64u64(i, ex64lo(j))) != 0) ERR;
		if (!ex64hi(q)) {
			if (cmp64u(q, div64u(i, ex64lo(j))) != 0) ERR;
		}
		if (cmp64u(r, rem64u(i, ex64lo(j))) != 0) ERR;

		/* compare to 32-bit division if possible */
		if (!ex64hi(i)) {
			if (cmp64u(q, ex64lo(i) / ex64lo(j)) != 0) ERR;
			if (cmp64u(r, ex64lo(i) % ex64lo(j)) != 0) ERR;
		}
	}

	/* check results using i = q * j + r and r < j */
	if (cmp64(i, add64(mul64(q, j), r)) != 0) ERR;
	if (cmp64(r, j) >= 0) ERR;
}
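The invariants the test verifies are the standard Euclidean division laws; with native 64-bit types they read as follows (a sketch, not part of the test itself):

#include <assert.h>
#include <stdint.h>

static void check_div(uint64_t i, uint64_t j)
{
	uint64_t q, r;

	assert(j != 0);			/* division by zero is tested separately */
	q = i / j;
	r = i % j;
	assert(i == q * j + r);		/* i = q * j + r */
	assert(r < j);			/* remainder strictly below the divisor */
}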
Example #4
static void testmul(void)
{
	int kdone, kidx;
	u32_t ilo = ex64lo(i), jlo = ex64lo(j);
	u64_t prod = mul64(i, j);
	int prodbits;
		
	/* compute maximum index of highest-order bit */
	prodbits = bsr64(i) + bsr64(j) + 1;
	if (cmp64u(i, 0) == 0 || cmp64u(j, 0) == 0) prodbits = -1;
	if (bsr64(prod) > prodbits) ERR;

	/* compare to 32-bit multiplication if possible */	
	if (ex64hi(i) == 0 && ex64hi(j) == 0) {
		if (cmp64(prod, mul64u(ilo, jlo)) != 0) ERR;
		
		/* if there is no overflow we can check against pure 32-bit */
		if (prodbits < 32 && cmp64u(prod, ilo * jlo) != 0) ERR;
	}

	/* in 32-bit arith low-order DWORD matches regardless of overflow */
	if (ex64lo(prod) != ilo * jlo) ERR;

	/* multiplication by zero yields zero */
	if (prodbits < 0 && cmp64u(prod, 0) != 0) ERR;

	/* if there is no overflow, check absence of zero divisors */
	if (prodbits >= 0 && prodbits < 64 && cmp64u(prod, 0) == 0) ERR;

	/* commutativity */
	if (cmp64(prod, mul64(j, i)) != 0) ERR;

	/* loop through all argument value combinations for third argument */
	for (kdone = 0, kidx = 0; k = getargval(kidx, &kdone), !kdone; kidx++) {
		/* associativity */
		if (cmp64(mul64(mul64(i, j), k), mul64(i, mul64(j, k))) != 0) ERR;

		/* left and right distributivity */
		if (cmp64(mul64(add64(i, j), k), add64(mul64(i, k), mul64(j, k))) != 0) ERR;
		if (cmp64(mul64(i, add64(j, k)), add64(mul64(i, j), mul64(i, k))) != 0) ERR;
	}
}
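The "low-order DWORD matches regardless of overflow" check rests on the fact that machine multiplication is performed modulo a power of two, so truncating to 32 bits commutes with multiplying. As a self-contained sketch:

#include <assert.h>
#include <stdint.h>

static void check_low_dword(uint64_t i, uint64_t j)
{
	uint32_t ilo = (uint32_t)i, jlo = (uint32_t)j;

	/* (i * j) mod 2^32 == ((i mod 2^32) * (j mod 2^32)) mod 2^32 */
	assert((uint32_t)(i * j) == (uint32_t)(ilo * jlo));
}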
Example #5
/*===========================================================================*
 *				do_llseek				     *
 *===========================================================================*/
int do_llseek()
{
/* Perform the llseek(ls_fd, offset, whence) system call. */
  register struct filp *rfilp;
  u64_t pos, newpos;
  int r = OK, seekfd, seekwhence;
  long off_hi, off_lo;

  seekfd = job_m_in.ls_fd;
  seekwhence = job_m_in.whence;
  off_hi = job_m_in.offset_high;
  off_lo = job_m_in.offset_lo;

  /* Check to see if the file descriptor is valid. */
  if ( (rfilp = get_filp(seekfd, VNODE_READ)) == NULL) return(err_code);

  /* No lseek on pipes. */
  if (S_ISFIFO(rfilp->filp_vno->v_mode)) {
	unlock_filp(rfilp);
	return(ESPIPE);
  }

  /* The value of 'whence' determines the start position to use. */
  switch(seekwhence) {
    case SEEK_SET: pos = cvu64(0);	break;
    case SEEK_CUR: pos = rfilp->filp_pos;	break;
    case SEEK_END: pos = cvul64(rfilp->filp_vno->v_size);	break;
    default: unlock_filp(rfilp); return(EINVAL);
  }

  newpos = add64(pos, make64(off_lo, off_hi));

  /* Check for overflow. */
  if ((off_hi > 0) && cmp64(newpos, pos) < 0)
      r = EINVAL;
  else if ((off_hi < 0) && cmp64(newpos, pos) > 0)
      r = EINVAL;
  else {
	/* insert the new position into the output message */
	m_out.reply_l1 = ex64lo(newpos);
	m_out.reply_l2 = ex64hi(newpos);

	if (cmp64(newpos, rfilp->filp_pos) != 0) {
		rfilp->filp_pos = newpos;

		/* Inhibit read ahead request */
		r = req_inhibread(rfilp->filp_vno->v_fs_e,
				  rfilp->filp_vno->v_inode_nr);
	}
  }

  unlock_filp(rfilp);
  return(r);
}
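The overflow test above relies on unsigned wraparound: adding a positive offset must not move the position backwards, and adding a negative one must not move it forwards. A sketch of a slightly more general form (the code above only inspects the sign of the high word, off_hi):

#include <stdint.h>

static int seek_overflows(uint64_t pos, int64_t off)
{
	uint64_t newpos = pos + (uint64_t)off;	/* wraps modulo 2^64 */

	if (off > 0 && newpos < pos) return 1;	/* wrapped past the top */
	if (off < 0 && newpos > pos) return 1;	/* wrapped below zero */
	return 0;
}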
Example #6
/*===========================================================================*
 *			        update_idle_time			     *
 *===========================================================================*/
static void update_idle_time(void)
{
	int i;
	struct proc * idl = proc_addr(IDLE);

	idl->p_cycles = make64(0, 0);

	for (i = 0; i < CONFIG_MAX_CPUS ; i++) {
		idl->p_cycles = add64(idl->p_cycles,
				get_cpu_var(i, idle_proc).p_cycles);
	}
}
Example #7
void context_stop(struct proc * p)
{
	u64_t tsc;
	u32_t tsc_delta;
	u64_t * __tsc_ctr_switch = get_cpulocal_var_ptr(tsc_ctr_switch);

	read_tsc_64(&tsc);
	assert(tsc >= *__tsc_ctr_switch);
	tsc_delta = tsc - *__tsc_ctr_switch;
	p->p_cycles += tsc_delta;

	if(kbill_ipc) {
		kbill_ipc->p_kipc_cycles =
			add64(kbill_ipc->p_kipc_cycles, tsc_delta);
		kbill_ipc = NULL;
	}

	if(kbill_kcall) {
		kbill_kcall->p_kcall_cycles =
			add64(kbill_kcall->p_kcall_cycles, tsc_delta);
		kbill_kcall = NULL;
	}

	/*
	 * Deduct the just-consumed cpu cycles from the cpu time left for this
	 * process during its current quantum. Skip IDLE and other pseudo
	 * kernel tasks.
	 */
	if (p->p_endpoint >= 0) {
#if DEBUG_RACE
		p->p_cpu_time_left = 0;
#else
		if (tsc_delta < p->p_cpu_time_left) {
			p->p_cpu_time_left -= tsc_delta;
		} else p->p_cpu_time_left = 0;
#endif
	}

	*__tsc_ctr_switch = tsc;
}
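The quantum update at the end is a saturating subtraction; extracted as a standalone helper it would look like this (a sketch, not kernel code):

#include <stdint.h>

/* e.g. p->p_cpu_time_left = saturating_sub(p->p_cpu_time_left, tsc_delta) */
static uint64_t saturating_sub(uint64_t left, uint64_t delta)
{
	return delta < left ? left - delta : 0;
}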
Example #8
/*===========================================================================*
 *				do_llseek				     *
 *===========================================================================*/
PUBLIC int do_llseek()
{
/* Perform the llseek(ls_fd, offset, whence) system call. */
  register struct filp *rfilp;
  u64_t pos, newpos;
  int r;

  /* Check to see if the file descriptor is valid. */
  if ( (rfilp = get_filp(m_in.ls_fd)) == NULL) return(err_code);

  /* No lseek on pipes. */
  if (rfilp->filp_vno->v_pipe == I_PIPE) return(ESPIPE);

  /* The value of 'whence' determines the start position to use. */
  switch(m_in.whence) {
      case SEEK_SET: pos = cvu64(0);	break;
      case SEEK_CUR: pos = rfilp->filp_pos;	break;
      case SEEK_END: pos = cvul64(rfilp->filp_vno->v_size);	break;
      default: return(EINVAL);
  }

  newpos = add64(pos, make64(m_in.offset_lo, m_in.offset_high));

  /* Check for overflow. */
  if (((long)m_in.offset_high > 0) && cmp64(newpos, pos) < 0) 
      return(EINVAL);
  if (((long)m_in.offset_high < 0) && cmp64(newpos, pos) > 0) 
      return(EINVAL);

  if (cmp64(newpos, rfilp->filp_pos) != 0) { /* Inhibit read ahead request */
      r = req_inhibread(rfilp->filp_vno->v_fs_e, rfilp->filp_vno->v_inode_nr);
      if (r != OK) return(r);
  }

  rfilp->filp_pos = newpos;
  m_out.reply_l1 = ex64lo(newpos);
  m_out.reply_l2 = ex64hi(newpos);
  return(OK);
}
Example #9
/*===========================================================================*
 *				action_pre_misdir			     *
 *===========================================================================*/
static void action_pre_misdir(struct fbd_rule *rule, iovec_t *UNUSED(iov),
	unsigned *UNUSED(count), size_t *UNUSED(size), u64_t *pos)
{
	/* Randomize the request position to fall within the range (and have
	 * the alignment) given by the rule.
	 */
	u32_t range, choice;

	/* Unfortunately, we cannot interpret an end value of 0 as "up to the
	 * end of the disk" here, because we have no idea about the actual
	 * disk size, and the resulting address must of course be valid.
	 */
	range = div64u(add64u(sub64(rule->params.misdir.end,
		rule->params.misdir.start), 1), rule->params.misdir.align);

	if (range > 0)
		choice = get_rand(range - 1);
	else
		choice = 0;

	*pos = add64(rule->params.misdir.start,
		mul64u(choice, rule->params.misdir.align));
}
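In native 64-bit arithmetic, the randomization above reduces to picking one of the aligned slots in [start, end]. A sketch assuming get_rand(max) returns a uniform value in [0, max], as the driver code implies:

#include <stdint.h>

extern uint32_t get_rand(uint32_t max);		/* assumed: uniform in [0, max] */

static uint64_t random_aligned_pos(uint64_t start, uint64_t end, uint32_t align)
{
	uint32_t range = (uint32_t)((end - start + 1) / align);
	uint32_t choice = range > 0 ? get_rand(range - 1) : 0;

	return start + (uint64_t)choice * align; /* aligned, within [start, end] */
}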
Example #10
/*===========================================================================*
 *				    main				     *
 *===========================================================================*/
int main(int argc, char **argv)
{
	endpoint_t ep_self, ep_child;
	size_t size = BUF_SIZE;
	int i, r, pid;
	int status;
	u64_t start, end, diff;
	double micros;
	char nr_pages_str[10], is_map_str[2], is_write_str[2];
	int nr_pages, is_map, is_write;

	/* SEF local startup. */
	env_setargs(argc, argv);
	sef_local_startup();

	/* Parse the command line. */
	r = env_get_param("pages", nr_pages_str, sizeof(nr_pages_str));
	errno = 0;
	nr_pages = atoi(nr_pages_str);
	if (r != OK || errno || nr_pages <=0) {
		exit_usage();
	}
	if(nr_pages > TEST_PAGE_NUM) {
		printf("REQUESTOR: too many pages. Max allowed: %d\n",
			TEST_PAGE_NUM);
		exit_usage();
	}
	r = env_get_param("map", is_map_str, sizeof(is_map_str));
	errno = 0;
	is_map = atoi(is_map_str);
	if (r != OK || errno || (is_map!=0 && is_map!=1)) {
		exit_usage();
	}
	r = env_get_param("write", is_write_str, sizeof(is_write_str));
	errno = 0;
	is_write = atoi(is_write_str);
	if (r != OK || errno || (is_write!=0 && is_write!=1)) {
		exit_usage();
	}
	printf("REQUESTOR: Running tests with pages=%d map=%d write=%d...\n",
		nr_pages, is_map, is_write);

	/* Prepare work. */
	buf = (char*) CLICK_CEIL(buf_buf);
	fid_get = open(FIFO_GRANTOR, O_RDONLY);
	fid_send = open(FIFO_REQUESTOR, O_WRONLY);
	if(fid_get < 0 || fid_send < 0) {
		printf("REQUESTOR: can't open fifo files.\n");
		return 1;
	}

	/* Send our endpoint to the granter so that it can create
	 * the grant.
	 */
	ep_self = getprocnr();
	write(fid_send, &ep_self, sizeof(ep_self));
	dprint("REQUESTOR: sending my endpoint: %d\n", ep_self);

	/* Get the granter's endpoint and gid. */
	read(fid_get, &ep_granter, sizeof(ep_granter));
	read(fid_get, &gid, sizeof(gid));
	dprint("REQUESTOR: getting granter's endpoint %d and gid %d\n",
		ep_granter, gid);

	FIFO_WAIT(fid_get);
	diff = make64(0, 0);

	if(is_map) {
		/* Test safemap. */
		for(i=0;i<NR_TEST_ITERATIONS;i++) {
			read_tsc_64(&start);
			r = sys_safemap(ep_granter, gid, 0, (long)buf,
				nr_pages*CLICK_SIZE, D, 1);
			if(r != OK) {
				printf("REQUESTOR: safemap error: %d\n", r);
				return 1;
			}
			read_write_buff(buf, nr_pages*CLICK_SIZE, is_write);
			read_tsc_64(&end);
			diff = add64(diff, (sub64(end, start)));
			r = sys_safeunmap(D, (long)buf);
			if(r != OK) {
				printf("REQUESTOR: safeunmap error: %d\n", r);
				return 1;
			}
		}
		micros = ((double)tsc_64_to_micros(diff))
			/ (NR_TEST_ITERATIONS*nr_pages);
		REPORT_TEST("REQUESTOR", "SAFEMAP", micros);
	}
	else {
		/* Test safecopy. */
		for(i=0;i<NR_TEST_ITERATIONS;i++) {
			read_tsc_64(&start);
			r = sys_safecopyfrom(ep_granter, gid, 0, (long)buf,
				nr_pages*CLICK_SIZE, D);
			if(r != OK) {
				printf("REQUESTOR: safecopy error: %d\n", r);
				return 1;
			}
			read_write_buff(buf, nr_pages*CLICK_SIZE, is_write);
			read_tsc_64(&end);
			diff = add64(diff, (sub64(end, start)));
		}
		micros = ((double)tsc_64_to_micros(diff))
			/ (NR_TEST_ITERATIONS*nr_pages);
		REPORT_TEST("REQUESTOR", "SAFECOPY", micros);
	}

	FIFO_NOTIFY(fid_send);

	return 0;
}
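The timing pattern in both branches is the same: accumulate TSC deltas across iterations and convert to microseconds once at the end. A condensed sketch (tsc_to_micros() is a stand-in for the tsc_64_to_micros() used above):

#include <stdint.h>

extern uint64_t tsc_to_micros(uint64_t ticks);	/* assumed conversion helper */

static double avg_micros_per_page(const uint64_t *deltas, int n, int pages)
{
	uint64_t total = 0;
	int i;

	for (i = 0; i < n; i++)
		total += deltas[i];
	return (double)tsc_to_micros(total) / ((double)n * pages);
}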
Example #11
PUBLIC void context_stop(struct proc * p)
{
	u64_t tsc, tsc_delta;
	u64_t * __tsc_ctr_switch = get_cpulocal_var_ptr(tsc_ctr_switch);
#ifdef CONFIG_SMP
	unsigned cpu = cpuid;

	/*
	 * This function is called only if we switch from kernel to user or idle
	 * or back. Therefore this is a perfect location to place the big kernel
	 * lock which will hopefully disappear soon.
	 *
	 * If we stop accounting for KERNEL we must unlock the BKL. If we
	 * account for IDLE we must not hold the lock.
	 */
	if (p == proc_addr(KERNEL)) {
		u64_t tmp;

		read_tsc_64(&tsc);
		tmp = sub64(tsc, *__tsc_ctr_switch);
		kernel_ticks[cpu] = add64(kernel_ticks[cpu], tmp);
		p->p_cycles = add64(p->p_cycles, tmp);
		BKL_UNLOCK();
	} else {
		u64_t bkl_tsc;
		atomic_t succ;
		
		read_tsc_64(&bkl_tsc);
		/* this only gives a good estimate */
		succ = big_kernel_lock.val;
		
		BKL_LOCK();
		
		read_tsc_64(&tsc);

		bkl_ticks[cpu] = add64(bkl_ticks[cpu], sub64(tsc, bkl_tsc));
		bkl_tries[cpu]++;
		bkl_succ[cpu] += (succ == 0);

		p->p_cycles = add64(p->p_cycles, sub64(tsc, *__tsc_ctr_switch));

#ifdef CONFIG_SMP
		/*
		 * Since we might have already been waiting for the BKL when we
		 * received a scheduling IPI, we may miss it due to a similar IPI
		 * sent to a cpu which is in turn waiting for us to handle its
		 * own. This results in a live-lock of these two cpus.
		 *
		 * Therefore we always check if there is one pending and if so,
		 * we handle it straight away so the other cpu can continue and
		 * we do not deadlock.
		 */
		smp_sched_handler();
#endif
	}
#else
	read_tsc_64(&tsc);
	p->p_cycles = add64(p->p_cycles, sub64(tsc, *__tsc_ctr_switch));
#endif
	
	tsc_delta = sub64(tsc, *__tsc_ctr_switch);

	if(kbill_ipc) {
		kbill_ipc->p_kipc_cycles =
			add64(kbill_ipc->p_kipc_cycles, tsc_delta);
		kbill_ipc = NULL;
	}

	if(kbill_kcall) {
		kbill_kcall->p_kcall_cycles =
			add64(kbill_kcall->p_kcall_cycles, tsc_delta);
		kbill_kcall = NULL;
	}

	/*
	 * Deduct the just-consumed cpu cycles from the cpu time left for this
	 * process during its current quantum. Skip IDLE and other pseudo
	 * kernel tasks.
	 */
	if (p->p_endpoint >= 0) {
#if DEBUG_RACE
		make_zero64(p->p_cpu_time_left);
#else
		/* if (tsc_delta < p->p_cpu_time_left) in 64bit */
		if (ex64hi(tsc_delta) < ex64hi(p->p_cpu_time_left) ||
				(ex64hi(tsc_delta) == ex64hi(p->p_cpu_time_left) &&
				 ex64lo(tsc_delta) < ex64lo(p->p_cpu_time_left)))
			p->p_cpu_time_left = sub64(p->p_cpu_time_left, tsc_delta);
		else {
			make_zero64(p->p_cpu_time_left);
		}
#endif
	}

	*__tsc_ctr_switch = tsc;
}
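The commented "in 64bit" comparison is emulated word by word: compare high words first, and fall back to the low words on a tie. A sketch showing this matches a native 64-bit compare:

#include <assert.h>
#include <stdint.h>

static int lt64(uint32_t ahi, uint32_t alo, uint32_t bhi, uint32_t blo)
{
	return ahi < bhi || (ahi == bhi && alo < blo);
}

static void check_lt64(uint64_t a, uint64_t b)
{
	assert(lt64(a >> 32, (uint32_t)a, b >> 32, (uint32_t)b) == (a < b));
}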
Example #12
void print_procs(int maxlines,
	struct proc *proc1, struct proc *proc2,
	struct mproc *mproc)
{
	int p, nprocs, tot=0;
	u64_t idleticks = cvu64(0);
	u64_t kernelticks = cvu64(0);
	u64_t systemticks = cvu64(0);
	u64_t userticks = cvu64(0);
	u64_t total_ticks = cvu64(0);
	unsigned long tcyc;
	unsigned long tmp;
	int blockedseen = 0;
	struct tp tick_procs[PROCS];

	for(p = nprocs = 0; p < PROCS; p++) {
		if(isemptyp(&proc2[p]))
			continue;
		tick_procs[nprocs].p = proc2 + p;
		if(proc1[p].p_endpoint == proc2[p].p_endpoint) {
			tick_procs[nprocs].ticks =
				sub64(proc2[p].p_cycles, proc1[p].p_cycles);
		} else {
			tick_procs[nprocs].ticks =
				proc2[p].p_cycles;
		}
		total_ticks = add64(total_ticks, tick_procs[nprocs].ticks);
		if(p-NR_TASKS == IDLE) {
			idleticks = tick_procs[nprocs].ticks;
			continue;
		}
		if(p-NR_TASKS == KERNEL) {
			kernelticks = tick_procs[nprocs].ticks;
			continue;
		}
		if(mproc[proc2[p].p_nr].mp_procgrp == 0)
			systemticks = add64(systemticks, tick_procs[nprocs].ticks);
		else if (p > NR_TASKS)
			userticks = add64(userticks, tick_procs[nprocs].ticks);

		nprocs++;
	}

	if (!cmp64u(total_ticks, 0))
		return;

	qsort(tick_procs, nprocs, sizeof(tick_procs[0]), cmp_ticks);

	tcyc = div64u(total_ticks, SCALE);

	tmp = div64u(userticks, SCALE);
	printf("CPU states: %6.2f%% user, ", 100.0*(tmp)/tcyc);

	tmp = div64u(systemticks, SCALE);
	printf("%6.2f%% system, ", 100.0*tmp/tcyc);

	tmp = div64u(kernelticks, SCALE);
	printf("%6.2f%% kernel, ", 100.0*tmp/tcyc);

	tmp = div64u(idleticks, SCALE);
	printf("%6.2f%% idle", 100.0*tmp/tcyc);

#define NEWLINE do { printf("\n"); if(--maxlines <= 0) { return; } } while(0) 
	NEWLINE;
	NEWLINE;

	printf("  PID USERNAME PRI NICE   SIZE STATE   TIME     CPU COMMAND");
	NEWLINE;
	for(p = 0; p < nprocs; p++) {
		struct proc *pr;
		int pnr;
		int level = 0;

		pnr = tick_procs[p].p->p_nr;

		if(pnr < 0) {
			/* skip old kernel tasks as they don't run anymore */
			continue;
		}

		pr = tick_procs[p].p;

		/* If we're in blocked verbose mode, indicate start of
		 * blocked processes.
		 */
		if(blockedverbose && pr->p_rts_flags && !blockedseen) {
			NEWLINE;
			printf("Blocked processes:");
			NEWLINE;
			blockedseen = 1;
		}

		print_proc(&tick_procs[p], &mproc[pnr], tcyc);
		NEWLINE;

		if(!blockedverbose)
			continue;

		/* Traverse dependency chain if blocked. */
		while(pr->p_rts_flags) {
			endpoint_t dep = NONE;
			struct tp *tpdep;
			level += 5;

			if((dep = P_BLOCKEDON(pr)) == NONE) {
				printf("not blocked on a process");
				NEWLINE;
				break;
			}

			if(dep == ANY)
				break;

			tpdep = lookup(dep, tick_procs, nprocs);
			pr = tpdep->p;
			printf("%*s> ", level, "");
			print_proc(tpdep, &mproc[pr->p_nr], tcyc);
			NEWLINE;
		}
	}
}
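Each CPU-state percentage above is computed from tick counts pre-divided by SCALE, which keeps the narrowed values small before the floating-point division. A sketch of that pattern (SCALE is whatever the tool defines):

#include <stdio.h>
#include <stdint.h>

static void print_share(const char *name, uint64_t part, uint64_t total,
	unsigned scale)
{
	unsigned long p = (unsigned long)(part / scale);
	unsigned long t = (unsigned long)(total / scale);

	printf("%6.2f%% %s", t ? 100.0 * p / t : 0.0, name);
}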
Example #13
void JIT::compileLoadVarargs(Instruction* instruction)
{
    int thisValue = instruction[3].u.operand;
    int arguments = instruction[4].u.operand;
    int firstFreeRegister = instruction[5].u.operand;
    int firstVarArgOffset = instruction[6].u.operand;

    JumpList slowCase;
    JumpList end;
    bool canOptimize = m_codeBlock->usesArguments()
        && arguments == m_codeBlock->argumentsRegister().offset()
        && !m_codeBlock->symbolTable()->slowArguments();

    if (canOptimize) {
        emitGetVirtualRegister(arguments, regT0);
        slowCase.append(branch64(NotEqual, regT0, TrustedImm64(JSValue::encode(JSValue()))));

        emitGetFromCallFrameHeader32(JSStack::ArgumentCount, regT0);
        if (firstVarArgOffset) {
            Jump sufficientArguments = branch32(GreaterThan, regT0, TrustedImm32(firstVarArgOffset + 1));
            move(TrustedImm32(1), regT0);
            Jump endVarArgs = jump();
            sufficientArguments.link(this);
            sub32(TrustedImm32(firstVarArgOffset), regT0);
            endVarArgs.link(this);
        }
        slowCase.append(branch32(Above, regT0, TrustedImm32(Arguments::MaxArguments + 1)));
        // regT0: argumentCountIncludingThis
        move(regT0, regT1);
        add64(TrustedImm32(-firstFreeRegister + JSStack::CallFrameHeaderSize), regT1);
        // regT1 now has the required frame size in Register units
        // Round regT1 to next multiple of stackAlignmentRegisters()
        add64(TrustedImm32(stackAlignmentRegisters() - 1), regT1);
        and64(TrustedImm32(~(stackAlignmentRegisters() - 1)), regT1);

        neg64(regT1);
        lshift64(TrustedImm32(3), regT1);
        addPtr(callFrameRegister, regT1);
        // regT1: newCallFrame

        slowCase.append(branchPtr(Above, AbsoluteAddress(m_vm->addressOfStackLimit()), regT1));

        // Initialize ArgumentCount.
        store32(regT0, Address(regT1, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));

        // Initialize 'this'.
        emitGetVirtualRegister(thisValue, regT2);
        store64(regT2, Address(regT1, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))));

        // Copy arguments.
        signExtend32ToPtr(regT0, regT0);
        end.append(branchSub64(Zero, TrustedImm32(1), regT0));
        // regT0: argumentCount

        Label copyLoop = label();
        load64(BaseIndex(callFrameRegister, regT0, TimesEight, (CallFrame::thisArgumentOffset() + firstVarArgOffset) * static_cast<int>(sizeof(Register))), regT2);
        store64(regT2, BaseIndex(regT1, regT0, TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))));
        branchSub64(NonZero, TrustedImm32(1), regT0).linkTo(copyLoop, this);

        end.append(jump());
    }

    if (canOptimize)
        slowCase.link(this);

    emitGetVirtualRegister(arguments, regT1);
    callOperation(operationSizeFrameForVarargs, regT1, firstFreeRegister, firstVarArgOffset);
    move(returnValueGPR, stackPointerRegister);
    emitGetVirtualRegister(thisValue, regT1);
    emitGetVirtualRegister(arguments, regT2);
    callOperation(operationLoadVarargs, returnValueGPR, regT1, regT2, firstVarArgOffset);
    move(returnValueGPR, regT1);

    if (canOptimize)
        end.link(this);
    
    addPtr(TrustedImm32(sizeof(CallerFrameAndPC)), regT1, stackPointerRegister);
}
Example #14
PUBLIC void context_stop(struct proc * p)
{
	u64_t tsc, tsc_delta;
	u64_t * __tsc_ctr_switch = get_cpulocal_var_ptr(tsc_ctr_switch);
#ifdef CONFIG_SMP
	unsigned cpu = cpuid;

	/*
	 * This function is called only if we switch from kernel to user or idle
	 * or back. Therefore this is a perfect location to place the big kernel
	 * lock which will hopefully disappear soon.
	 *
	 * If we stop accounting for KERNEL we must unlock the BKL. If we
	 * account for IDLE we must not hold the lock.
	 */
	if (p == proc_addr(KERNEL)) {
		u64_t tmp;

		read_tsc_64(&tsc);
		tmp = sub64(tsc, *__tsc_ctr_switch);
		kernel_ticks[cpu] = add64(kernel_ticks[cpu], tmp);
		p->p_cycles = add64(p->p_cycles, tmp);
		BKL_UNLOCK();
	} else {
		u64_t bkl_tsc;
		atomic_t succ;
		
		read_tsc_64(&bkl_tsc);
		/* this only gives a good estimate */
		succ = big_kernel_lock.val;
		
		BKL_LOCK();
		
		read_tsc_64(&tsc);

		bkl_ticks[cpu] = add64(bkl_ticks[cpu], sub64(tsc, bkl_tsc));
		bkl_tries[cpu]++;
		bkl_succ[cpu] += (succ == 0);

		p->p_cycles = add64(p->p_cycles, sub64(tsc, *__tsc_ctr_switch));
	}
#else
	read_tsc_64(&tsc);
	p->p_cycles = add64(p->p_cycles, sub64(tsc, *__tsc_ctr_switch));
#endif
	
	tsc_delta = sub64(tsc, *__tsc_ctr_switch);

	if(kbill_ipc) {
		kbill_ipc->p_kipc_cycles =
			add64(kbill_ipc->p_kipc_cycles, tsc_delta);
		kbill_ipc = NULL;
	}

	if(kbill_kcall) {
		kbill_kcall->p_kcall_cycles =
			add64(kbill_kcall->p_kcall_cycles, tsc_delta);
		kbill_kcall = NULL;
	}

	/*
	 * Deduct the just-consumed cpu cycles from the cpu time left for this
	 * process during its current quantum. Skip IDLE and other pseudo
	 * kernel tasks.
	 */
	if (p->p_endpoint >= 0) {
#if DEBUG_RACE
		make_zero64(p->p_cpu_time_left);
#else
		/* if (tsc_delta < p->p_cpu_time_left) in 64bit */
		if (ex64hi(tsc_delta) < ex64hi(p->p_cpu_time_left) ||
				(ex64hi(tsc_delta) == ex64hi(p->p_cpu_time_left) &&
				 ex64lo(tsc_delta) < ex64lo(p->p_cpu_time_left)))
			p->p_cpu_time_left = sub64(p->p_cpu_time_left, tsc_delta);
		else {
			make_zero64(p->p_cpu_time_left);
		}
#endif
	}

	*__tsc_ctr_switch = tsc;
}
int crypto_stream_xor(unsigned char *out, const unsigned char *in,
		      unsigned long long inlen, const unsigned char *n,
		      const unsigned char *k)
{
#define PTR_ALIGN(ptr, mask) ((void *)((((long)(ptr)) + (mask)) & ~((long)(mask))))
	const unsigned long align = 16;
	char ctxbuf[sizeof(struct blowfish_ctx) + align];
	struct blowfish_ctx *ctx = PTR_ALIGN(ctxbuf, align - 1);
	uint64_t iv;
	uint64_t ivs[16];
	unsigned int i;

	blowfish_init(ctx, k, CRYPTO_KEYBYTES);
	bswap64(&iv, (const uint64_t *)n); /* be => le */

	while (likely(inlen >= BLOCKSIZE * 16)) {
		bswap64(&ivs[0], &iv); /* le => be */
		for (i = 1; i < 16; i++) {
			add64(&ivs[i], &iv, i);
			bswap64(&ivs[i], &ivs[i]); /* le => be */
		}
		add64(&iv, &iv, 16);

		__blowfish_enc_blk_16way(ctx, out, (uint8_t *)ivs, 0);

		if (unlikely(in)) {
			for (i = 0; i < 16; i+=2)
				xor128(&((uint64_t *)out)[i], &((uint64_t *)out)[i], &((uint64_t *)in)[i]);
			in += BLOCKSIZE * 16;
		}

		out += BLOCKSIZE * 16;
		inlen -= BLOCKSIZE * 16;
	}

	if (unlikely(inlen > 0)) {
		unsigned int nblock = inlen / BLOCKSIZE;
		unsigned int lastlen = inlen % BLOCKSIZE;
		unsigned int j;

		for (i = 0; i < nblock + !!lastlen; i++) {
			bswap64(&ivs[i], &iv); /* le => be */
			inc64(&iv);
		}
		for (; i < 16; i++) {
			ivs[i] = 0;
		}

		__blowfish_enc_blk_16way(ctx, (uint8_t *)ivs, (uint8_t *)ivs, 0);

		if (in) {
			for (i = 0; inlen >= 2*BLOCKSIZE; i+=2) {
				xor128((uint64_t *)out, (uint64_t *)in, (uint64_t *)&ivs[i]);

				inlen -= 2*BLOCKSIZE;
				in += 2*BLOCKSIZE;
				out += 2*BLOCKSIZE;
			}

			for (j = 0; j < inlen; j++)
				out[j] = in[j] ^ ((uint8_t*)&ivs[i])[j];
		} else {
			for (i = 0; inlen >= 2*BLOCKSIZE; i+=2) {
				mov128((uint64_t *)out, (uint64_t *)&ivs[i]);

				inlen -= 2*BLOCKSIZE;
				out += 2*BLOCKSIZE;
			}

			for (j = 0; j < inlen; j++)
				out[j] = ((uint8_t*)&ivs[i])[j];
		}
	}

	return 0;
}
static inline void inc64(uint64_t *dst)
{
	add64(dst, dst, 1);
}
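A hypothetical usage sketch for crypto_stream_xor() above: passing in == NULL emits raw keystream (the encrypted counter blocks are written straight to out), while a non-NULL in is XORed against that keystream. The 8-byte big-endian nonce/counter layout follows from the bswap64() calls; the key size CRYPTO_KEYBYTES comes from the snippet itself.

static void crypto_stream_xor_demo(void)
{
	unsigned char key[CRYPTO_KEYBYTES] = { 1 };	/* example key only */
	unsigned char nonce[8] = { 0 };			/* initial counter block */
	unsigned char keystream[64];
	unsigned char msg[64] = "example plaintext";
	unsigned char ct[64];

	/* raw keystream: out := E_k(ctr_i) for consecutive counters */
	crypto_stream_xor(keystream, NULL, sizeof(keystream), nonce, key);

	/* encryption: ct := msg XOR keystream */
	crypto_stream_xor(ct, msg, sizeof(msg), nonce, key);
}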