Example #1
void
runtime·notetsleep(Note *n, int64 ns)
{
	int64 deadline, now;

	if(ns < 0) {
		runtime·notesleep(n);
		return;
	}

	if(runtime·atomicload(&n->key) != 0)
		return;

	if(m->profilehz > 0)
		runtime·setprof(false);
	deadline = runtime·nanotime() + ns;
	for(;;) {
		runtime·futexsleep(&n->key, 0, ns);
		if(runtime·atomicload(&n->key) != 0)
			break;
		now = runtime·nanotime();
		if(now >= deadline)
			break;
		ns = deadline - now;
	}
	if(m->profilehz > 0)
		runtime·setprof(true);
}
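
The timed wait above bottoms out in the platform futex wrapper. On Linux of that era it looked roughly like the sketch below; the Timespec conversion is the essential part, and the return value is deliberately ignored because every caller re-checks n->key in a loop. This is a reconstruction, not a quote from the source.

void
runtime·futexsleep(uint32 *addr, uint32 val, int64 ns)
{
	Timespec ts, *tsp;

	if(ns < 0)
		tsp = nil;	// no timeout: sleep until woken
	else {
		ts.tv_sec = ns/1000000000LL;
		ts.tv_nsec = ns%1000000000LL;
		tsp = &ts;
	}
	// FUTEX_WAIT returns when *addr != val, on timeout, or spuriously;
	// callers tolerate spurious wakeups, so ignore the result.
	runtime·futex(addr, FUTEX_WAIT, val, tsp, nil, 0);
}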
Example #2
void
runtime·notesleep(Note *n)
{
	// Turn the profiling signal off while blocked;
	// it is restored once the note fires.
	if(m->profilehz > 0)
		runtime·setprof(false);
	// Sleep until notewakeup makes n->key nonzero.
	while(runtime·atomicload(&n->key) == 0)
		runtime·futexsleep(&n->key, 0, -1);
	if(m->profilehz > 0)
		runtime·setprof(true);
}
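
The wakeup half of this futex-based Note is not shown above, but the protocol implies something close to the following sketch: publish a nonzero key with an atomic exchange, then wake one sleeper.

void
runtime·notewakeup(Note *n)
{
	runtime·xchg(&n->key, 1);		// mark the note as signaled
	runtime·futexwakeup(&n->key, 1);	// release at most one waiter
}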
Example #3
int32
runtime·semasleep(int64 ns)
{
	int32 v;

	// Suspend profiling while blocked in the kernel semaphore wait;
	// it is restored before returning.
	if(m->profilehz > 0)
		runtime·setprof(false);
	v = runtime·mach_semacquire(m->waitsema, ns);
	if(m->profilehz > 0)
		runtime·setprof(true);
	return v;
}
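
The matching wakeup on this platform is a one-liner, since the Mach semaphore itself carries the hand-off state; a sketch, assuming the runtime·mach_semrelease wrapper used elsewhere in that port:

void
runtime·semawakeup(M *mp)
{
	// Post the per-m semaphore that semasleep blocks on.
	runtime·mach_semrelease(mp->waitsema);
}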
Example #4
void
runtime·notesleep(Note *n)
{
	if(m->waitsema == 0)
		m->waitsema = runtime·semacreate();
	if(!runtime·casp((void**)&n->key, nil, m)) {  // must be LOCKED (got wakeup)
		if(n->key != LOCKED)
			runtime·throw("notesleep - waitm out of sync");
		return;
	}
	// Queued.  Sleep.
	if(m->profilehz > 0)
		runtime·setprof(false);
	runtime·semasleep(-1);
	if(m->profilehz > 0)
		runtime·setprof(true);
}
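
For context, the notewakeup that resolves the casp race in Example #4 can be reconstructed from the LOCKED protocol the sleep side follows (a sketch, not a quote): atomically swap LOCKED into n->key and act on what was there before.

void
runtime·notewakeup(Note *n)
{
	M *mp;

	// Atomically mark the note LOCKED, remembering who
	// (if anyone) had registered for wakeup.
	do
		mp = runtime·atomicloadp((void**)&n->key);
	while(!runtime·casp((void**)&n->key, mp, (void*)LOCKED));

	if(mp == nil) {
		// Nothing was waiting; the sleeper will see LOCKED and return.
	} else if(mp == (M*)LOCKED) {
		runtime·throw("notewakeup - double wakeup");
	} else {
		// An m was registered; wake it.
		runtime·semawakeup(mp);
	}
}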
Example #5
// Called to initialize a new m (including the bootstrap m).
// Called on the new thread; cannot allocate memory.
void
runtime·minit(void)
{
	// Initialize signal handling.
	runtime·signalstack((byte*)m->gsignal->stackguard - StackGuard, 32*1024);

	runtime·sigprocmask(SIG_SETMASK, &sigset_none, nil);
	runtime·setprof(m->profilehz > 0);
}
Example #6
void
runtime·entersyscall(void)
{
	uint32 v;

	if(m->profilehz > 0)
		runtime·setprof(false);

	// Leave SP around for gc and traceback.
	runtime·gosave(&g->sched);
	g->gcsp = g->sched.sp;
	g->gcstack = g->stackbase;
	g->gcguard = g->stackguard;
	g->status = Gsyscall;
	if(g->gcsp < g->gcguard-StackGuard || g->gcstack < g->gcsp) {
		// runtime·printf("entersyscall inconsistent %p [%p,%p]\n",
		//	g->gcsp, g->gcguard-StackGuard, g->gcstack);
		runtime·throw("entersyscall");
	}

	// Fast path.
	// The slow path inside the schedlock/schedunlock will get
	// through without stopping if it does:
	//	mcpu--
	//	gwait not true
	//	waitstop && mcpu <= mcpumax not true
	// If we can do the same with a single atomic add,
	// then we can skip the locks.
	v = runtime·xadd(&runtime·sched.atomic, -1<<mcpuShift);
	if(!atomic_gwaiting(v) && (!atomic_waitstop(v) || atomic_mcpu(v) > atomic_mcpumax(v)))
		return;

	schedlock();
	v = runtime·atomicload(&runtime·sched.atomic);
	if(atomic_gwaiting(v)) {
		matchmg();
		v = runtime·atomicload(&runtime·sched.atomic);
	}
	if(atomic_waitstop(v) && atomic_mcpu(v) <= atomic_mcpumax(v)) {
		runtime·xadd(&runtime·sched.atomic, -1<<waitstopShift);
		runtime·notewakeup(&runtime·sched.stopped);
	}

	// Re-save sched in case one of the calls
	// (notewakeup, matchmg) triggered something using it.
	runtime·gosave(&g->sched);

	schedunlock();
}
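
The atomic_mcpu/atomic_mcpumax/atomic_waitstop/atomic_gwaiting tests decode bit fields packed into the single word runtime·sched.atomic, which is what lets the fast path replace schedlock with one xadd. The accessors were shift-and-mask macros along these lines (the 15-bit field widths are an assumption of this sketch):

// Layout of runtime·sched.atomic (one uint32):
//	[15 bits] mcpu      number of m's executing Go code
//	[15 bits] mcpumax   maximum m's allowed to execute Go code
//	[1 bit]   waitstop  some g is waiting on the stopped note
//	[1 bit]   gwaiting  the global g wait queue is nonempty
enum {
	mcpuWidth	= 15,
	mcpuMask	= (1<<mcpuWidth) - 1,
	mcpuShift	= 0,
	mcpumaxShift	= mcpuShift + mcpuWidth,
	waitstopShift	= mcpumaxShift + mcpuWidth,
	gwaitingShift	= waitstopShift + 1,
};

#define atomic_mcpu(v)		(((v)>>mcpuShift)&mcpuMask)
#define atomic_mcpumax(v)	(((v)>>mcpumaxShift)&mcpuMask)
#define atomic_waitstop(v)	(((v)>>waitstopShift)&1)
#define atomic_gwaiting(v)	(((v)>>gwaitingShift)&1)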
Example #7
/*
 * Reset the kernel profiling data structures.
 */
void
reset(struct kvmvars *kvp)
{
	char *zbuf;
	u_long biggest;
	int mib[3];

	setprof(kvp, GMON_PROF_OFF);

	biggest = kvp->gpm.kcountsize;
	if (kvp->gpm.fromssize > biggest)
		biggest = kvp->gpm.fromssize;
	if (kvp->gpm.tossize > biggest)
		biggest = kvp->gpm.tossize;
	if ((zbuf = (char *)malloc(biggest)) == NULL)
		errx(12, "cannot allocate zbuf space");
	bzero(zbuf, biggest);
	if (kflag) {
		if (kvm_write(kvp->kd, (u_long)kvp->gpm.kcount, zbuf,
		    kvp->gpm.kcountsize) != (ssize_t)kvp->gpm.kcountsize)
			errx(13, "tickbuf zero: %s", kvm_geterr(kvp->kd));
		if (kvm_write(kvp->kd, (u_long)kvp->gpm.froms, zbuf,
		    kvp->gpm.fromssize) != (ssize_t)kvp->gpm.fromssize)
			errx(14, "froms zero: %s", kvm_geterr(kvp->kd));
		if (kvm_write(kvp->kd, (u_long)kvp->gpm.tos, zbuf,
		    kvp->gpm.tossize) != (ssize_t)kvp->gpm.tossize)
			errx(15, "tos zero: %s", kvm_geterr(kvp->kd));
		return;
	}
	(void)seteuid(0);
	mib[0] = CTL_KERN;
	mib[1] = KERN_PROF;
	mib[2] = GPROF_COUNT;
	if (sysctl(mib, 3, NULL, NULL, zbuf, kvp->gpm.kcountsize) < 0)
		err(13, "tickbuf zero");
	mib[2] = GPROF_FROMS;
	if (sysctl(mib, 3, NULL, NULL, zbuf, kvp->gpm.fromssize) < 0)
		err(14, "froms zero");
	mib[2] = GPROF_TOS;
	if (sysctl(mib, 3, NULL, NULL, zbuf, kvp->gpm.tossize) < 0)
		err(15, "tos zero");
	(void)seteuid(getuid());
	free(zbuf);
}
Example #8
// The goroutine g exited its system call.
// Arrange for it to run on a cpu again.
// This is called only from the go syscall library, not
// from the low-level system calls used by the runtime.
void
runtime·exitsyscall(void)
{
	uint32 v;

	// Fast path.
	// If we can do the mcpu++ bookkeeping and
	// find that we still have mcpu <= mcpumax, then we can
	// start executing Go code immediately, without having to
	// schedlock/schedunlock.
	v = runtime·xadd(&runtime·sched.atomic, (1<<mcpuShift));
	if(m->profilehz == runtime·sched.profilehz && atomic_mcpu(v) <= atomic_mcpumax(v)) {
		// There's a cpu for us, so we can run.
		g->status = Grunning;
		// Garbage collector isn't running (since we are),
		// so okay to clear gcstack.
		g->gcstack = nil;

		if(m->profilehz > 0)
			runtime·setprof(true);
		return;
	}

	// Tell scheduler to put g back on the run queue:
	// mostly equivalent to g->status = Grunning,
	// but keeps the garbage collector from thinking
	// that g is running right now, which it's not.
	g->readyonstop = 1;

	// All the cpus are taken.
	// The scheduler will ready g and put this m to sleep.
	// When the scheduler takes g away from m,
	// it will undo the runtime·sched.mcpu++ above.
	runtime·gosched();

	// Gosched returned, so we're allowed to run now.
	// Delete the gcstack information that we left for
	// the garbage collector during the system call.
	// Must wait until now because until gosched returns
	// we don't know for sure that the garbage collector
	// is not running.
	g->gcstack = nil;
}
Example #9
int main(int argc, string argv[])
{
  float tmp1, tmp2;

  initparam(argv, defv);
  if (sscanf(getparam("mdisk"), "%f,%f", &tmp1, &tmp2) == 2) {
    mdisk1 = tmp1;
    mdisk2 = tmp2;
    if (sscanf(getparam("alpha"), "%f,%f", &tmp1, &tmp2) != 2)
      error("%s: must specify two alpha values\n", getargv0());
    alpha1 = tmp1;
    alpha2 = tmp2;
    if (sscanf(getparam("epsilon"), "%f,%f", &tmp1, &tmp2) != 2)
      error("%s: must specify two epsilon values\n", getargv0());
    epsilon1 = tmp1;
    epsilon2 = tmp2;
  } else {
    mdisk1 = getdparam("mdisk");
    mdisk2 = 0.0;
    alpha1 = getdparam("alpha");
    alpha2 = 12.0;			 /* nonzero value avoids division by zero */
    epsilon1 = getdparam("epsilon");
    epsilon2 = -1.0;
  }
  zdisk = getdparam("zdisk");
  mu = getdparam("mu");
  r_mu = getdparam("r_mu");
  eta = getdparam("eta");
  rcut = getdparam("rcut");
  readgsp();
  setprof();
  layout_body(bodyfields, Precision, NDIM);
  ndisk = getiparam("ndisk");
  disk = (bodyptr) allocate(ndisk * SizeofBody);
  init_random(getiparam("seed"));
  if (getiparam("nlist") > 0)
    listdisk(getiparam("nlist"));
  makedisk();
  writemodel();
  return (0);
}
Example #10
int main(int argc, string argv[])
{
  double alpha, rcut;
  int model;

  initparam(argv, defv);
  alpha = getdparam("alpha");
  rcut = getdparam("rcut");
  model = (alpha <= 0.0 ? -1 : getiparam("model"));
  ndisk = getiparam("ndisk");
  init_random(getiparam("seed"));
  readgsp();
  setprof(model, alpha, rcut);
  layout_body(bodyfields, Precision, NDIM);
  disk = (bodyptr) allocate(ndisk * SizeofBody);
  makedisk(getbparam("randspin"));
  if (! getbparam("randspin"))			// if spins not random
    bodyfields[2] = NULL;			// don't write AuxVec field
  writemodel();
  fflush(NULL);
  return 0;
}
Example #11
int
main(int argc, char **argv)
{
	int ch, mode, disp, accessmode;
	struct kvmvars kvmvars;
	const char *systemname;
	char *kmemf;

	if (seteuid(getuid()) != 0) {
		err(1, "seteuid failed");
	}
	kmemf = NULL;
	systemname = NULL;
	while ((ch = getopt(argc, argv, "M:N:Bbhpr")) != -1) {
		switch((char)ch) {

		case 'M':
			kmemf = optarg;
			kflag = 1;
			break;

		case 'N':
			systemname = optarg;
			break;

		case 'B':
			Bflag = 1;
			break;

		case 'b':
			bflag = 1;
			break;

		case 'h':
			hflag = 1;
			break;

		case 'p':
			pflag = 1;
			break;

		case 'r':
			rflag = 1;
			break;

		default:
			usage();
		}
	}
	argc -= optind;
	argv += optind;

#define BACKWARD_COMPATIBILITY
#ifdef	BACKWARD_COMPATIBILITY
	if (*argv) {
		systemname = *argv;
		if (*++argv) {
			kmemf = *argv;
			++kflag;
		}
	}
#endif
	if (systemname == NULL)
		systemname = getbootfile();
	accessmode = openfiles(systemname, kmemf, &kvmvars);
	mode = getprof(&kvmvars);
	if (hflag)
		disp = GMON_PROF_OFF;
	else if (Bflag)
		disp = GMON_PROF_HIRES;
	else if (bflag)
		disp = GMON_PROF_ON;
	else
		disp = mode;
	if (pflag)
		dumpstate(&kvmvars);
	if (rflag)
		reset(&kvmvars);
	if (accessmode == O_RDWR)
		setprof(&kvmvars, disp);
	(void)fprintf(stdout, "kgmon: kernel profiling is %s.\n",
		      disp == GMON_PROF_OFF ? "off" :
		      disp == GMON_PROF_HIRES ? "running (high resolution)" :
		      disp == GMON_PROF_ON ? "running" :
		      disp == GMON_PROF_BUSY ? "busy" :
		      disp == GMON_PROF_ERROR ? "off (error)" :
		      "in an unknown state");
	return (0);
}
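
The setprof call near the end switches kernel profiling on or off. Against a live kernel this goes through the kern.prof sysctl tree; the sketch below shows that path with a hypothetical helper name, and omits the -M (kflag) case, where the real kgmon instead patches the profiling state in the core file via kvm_write.

/* Sketch: set the profiling state (e.g. GMON_PROF_ON) on a live kernel. */
static void
setprof_live(int state)
{
	int mib[3];

	mib[0] = CTL_KERN;
	mib[1] = KERN_PROF;
	mib[2] = GPROF_STATE;
	if (sysctl(mib, 3, NULL, NULL, &state, sizeof(state)) < 0)
		warn("cannot set profiling state to %d", state);
}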
Example #12
/*
 * Build the gmon.out file.
 */
void
dumpstate(struct kvmvars *kvp)
{
	register FILE *fp;
	struct rawarc rawarc;
	struct tostruct *tos;
	u_long frompc;
	u_short *froms, *tickbuf;
	size_t i;
	int mib[3];
	struct gmonhdr h;
	int fromindex, endfrom, toindex;

	setprof(kvp, GMON_PROF_OFF);
	fp = fopen("gmon.out", "w");
	if (fp == NULL) {
		warn("gmon.out");
		return;
	}

	/*
	 * Build the gmon header and write it to a file.
	 */
	bzero(&h, sizeof(h));
	h.lpc = kvp->gpm.lowpc;
	h.hpc = kvp->gpm.highpc;
	h.ncnt = kvp->gpm.kcountsize + sizeof(h);
	h.version = GMONVERSION;
	h.profrate = kvp->gpm.profrate;
	h.histcounter_type = kvp->gpm.histcounter_type;
	fwrite((char *)&h, sizeof(h), 1, fp);

	/*
	 * Write out the tick buffer.
	 */
	mib[0] = CTL_KERN;
	mib[1] = KERN_PROF;
	if ((tickbuf = (u_short *)malloc(kvp->gpm.kcountsize)) == NULL)
		errx(5, "cannot allocate kcount space");
	if (kflag) {
		i = kvm_read(kvp->kd, (u_long)kvp->gpm.kcount, (void *)tickbuf,
		    kvp->gpm.kcountsize);
	} else {
		mib[2] = GPROF_COUNT;
		i = kvp->gpm.kcountsize;
		if (sysctl(mib, 3, tickbuf, &i, NULL, 0) < 0)
			i = 0;
	}
	if (i != kvp->gpm.kcountsize)
		errx(6, "read ticks: read %lu, got %ld: %s",
		    kvp->gpm.kcountsize, (long)i,
		    kflag ? kvm_geterr(kvp->kd) : strerror(errno));
	if ((fwrite(tickbuf, kvp->gpm.kcountsize, 1, fp)) != 1)
		err(7, "writing ticks to gmon.out");
	free(tickbuf);

	/*
	 * Write out the arc info.
	 */
	if ((froms = (u_short *)malloc(kvp->gpm.fromssize)) == NULL)
		errx(8, "cannot allocate froms space");
	if (kflag) {
		i = kvm_read(kvp->kd, (u_long)kvp->gpm.froms, (void *)froms,
		    kvp->gpm.fromssize);
	} else {
		mib[2] = GPROF_FROMS;
		i = kvp->gpm.fromssize;
		if (sysctl(mib, 3, froms, &i, NULL, 0) < 0)
			i = 0;
	}
	if (i != kvp->gpm.fromssize)
		errx(9, "read froms: read %lu, got %ld: %s",
		    kvp->gpm.fromssize, (long)i,
		    kflag ? kvm_geterr(kvp->kd) : strerror(errno));
	if ((tos = (struct tostruct *)malloc(kvp->gpm.tossize)) == NULL)
		errx(10, "cannot allocate tos space");
	if (kflag) {
		i = kvm_read(kvp->kd, (u_long)kvp->gpm.tos, (void *)tos,
		    kvp->gpm.tossize);
	} else {
		mib[2] = GPROF_TOS;
		i = kvp->gpm.tossize;
		if (sysctl(mib, 3, tos, &i, NULL, 0) < 0)
			i = 0;
	}
	if (i != kvp->gpm.tossize)
		errx(11, "read tos: read %lu, got %ld: %s",
		    kvp->gpm.tossize, (long)i,
		    kflag ? kvm_geterr(kvp->kd) : strerror(errno));
	if (debug)
		warnx("lowpc 0x%lx, textsize 0x%lx",
		    (unsigned long)kvp->gpm.lowpc, kvp->gpm.textsize);
	endfrom = kvp->gpm.fromssize / sizeof(*froms);
	for (fromindex = 0; fromindex < endfrom; ++fromindex) {
		if (froms[fromindex] == 0)
			continue;
		frompc = (u_long)kvp->gpm.lowpc +
		    (fromindex * kvp->gpm.hashfraction * sizeof(*froms));
		for (toindex = froms[fromindex]; toindex != 0;
		     toindex = tos[toindex].link) {
			if (debug)
				warnx("[mcleanup] frompc 0x%lx selfpc 0x%lx "
				    "count %ld", frompc, tos[toindex].selfpc,
				    tos[toindex].count);
			rawarc.raw_frompc = frompc;
			rawarc.raw_selfpc = (u_long)tos[toindex].selfpc;
			rawarc.raw_count = tos[toindex].count;
			fwrite((char *)&rawarc, sizeof(rawarc), 1, fp);
		}
	}
	fclose(fp);
}
Example #13
void
runtime·notetsleep(Note *n, int64 ns)
{
	M *mp;
	int64 deadline, now;

	if(ns < 0) {
		runtime·notesleep(n);
		return;
	}

	if(m->waitsema == 0)
		m->waitsema = runtime·semacreate();

	// Register for wakeup on n->waitm.
	if(!runtime·casp((void**)&n->key, nil, m)) {  // must be LOCKED (got wakeup already)
		if(n->key != LOCKED)
			runtime·throw("notetsleep - waitm out of sync");
		return;
	}

	if(m->profilehz > 0)
		runtime·setprof(false);
	deadline = runtime·nanotime() + ns;
	for(;;) {
		// Registered.  Sleep.
		if(runtime·semasleep(ns) >= 0) {
			// Acquired semaphore, semawakeup unregistered us.
			// Done.
			if(m->profilehz > 0)
				runtime·setprof(true);
			return;
		}

		// Interrupted or timed out.  Still registered.  Semaphore not acquired.
		now = runtime·nanotime();
		if(now >= deadline)
			break;

		// Deadline hasn't arrived.  Keep sleeping.
		ns = deadline - now;
	}

	if(m->profilehz > 0)
		runtime·setprof(true);

	// Deadline arrived.  Still registered.  Semaphore not acquired.
	// Want to give up and return, but have to unregister first,
	// so that any notewakeup racing with the return does not
	// try to grant us the semaphore when we don't expect it.
	for(;;) {
		mp = runtime·atomicloadp((void**)&n->key);
		if(mp == m) {
			// No wakeup yet; unregister if possible.
			if(runtime·casp((void**)&n->key, mp, nil))
				return;
		} else if(mp == (M*)LOCKED) {
			// Wakeup happened so semaphore is available.
			// Grab it to avoid getting out of sync.
			if(runtime·semasleep(-1) < 0)
				runtime·throw("runtime: unable to acquire - semaphore out of sync");
			return;
		} else {
			runtime·throw("runtime: unexpected waitm - semaphore out of sync");
		}
	}
}