Example 1
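The timed wait on a Note from a futex-based lock implementation. These listings appear to be drawn from the gccgo port of the early Go runtime (Examples 1-2: futex-backed notes; Examples 4 and 6: semaphore-backed notes; Examples 3 and 5: the scheduler's syscall exit/entry paths). A negative timeout falls back to the untimed runtime_notesleep; otherwise the routine futex-sleeps on n->key, retrying after spurious wakeups until the key is set or the deadline passes.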
void
runtime_notetsleep(Note *n, int64 ns)
{
	int64 deadline, now;

	// Negative timeout: wait without a deadline.
	if(ns < 0) {
		runtime_notesleep(n);
		return;
	}

	// Already woken; return immediately.
	if(runtime_atomicload((uint32*)&n->key) != 0)
		return;

	// Suppress profiling signals while blocked.
	if(runtime_m()->profilehz > 0)
		runtime_setprof(false);
	deadline = runtime_nanotime() + ns;
	for(;;) {
		runtime_futexsleep((uint32*)&n->key, 0, ns);
		// Woken by notewakeup?
		if(runtime_atomicload((uint32*)&n->key) != 0)
			break;
		// Spurious or interrupted wakeup: recompute the
		// remaining time and sleep again.
		now = runtime_nanotime();
		if(now >= deadline)
			break;
		ns = deadline - now;
	}
	if(runtime_m()->profilehz > 0)
		runtime_setprof(true);
}
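For context, a sketch of the Note type these routines operate on, together with its reset, reconstructed here from how the examples use n->key (treat the exact declaration as an assumption rather than a quote from the sources):

typedef struct Note Note;
struct Note
{
	uintptr key;	// futex word here; the waiting M* in the semaphore variant
};

// One-shot reset: a Note is cleared once, then woken at most once.
void
runtime_noteclear(Note *n)
{
	n->key = 0;
}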
Example 2
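The untimed wait in the futex variant: loop on futexsleep until another thread sets n->key, with profiling signals suppressed for the duration of the block.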
void
runtime_notesleep(Note *n)
{
	// Suppress profiling signals while blocked.
	if(runtime_m()->profilehz > 0)
		runtime_setprof(false);
	// futexsleep can wake spuriously, so loop until the
	// key has actually been set by notewakeup.
	while(runtime_atomicload((uint32*)&n->key) == 0)
		runtime_futexsleep((uint32*)&n->key, 0, -1);
	if(runtime_m()->profilehz > 0)
		runtime_setprof(true);
}
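The wakeup side is not among the listings; a minimal sketch of what the futex-based runtime_notewakeup looks like, assuming the runtime_xchg and runtime_futexwakeup primitives that accompany runtime_futexsleep in this runtime:

void
runtime_notewakeup(Note *n)
{
	// Set the key so sleepers (and late arrivals) see the
	// note as fired, then kick one waiter out of futexsleep.
	runtime_xchg((uint32*)&n->key, 1);
	runtime_futexwakeup((uint32*)&n->key, 1);
}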
Example 3
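The scheduler's syscall-exit path. The fast path claims a cpu slot with a single atomic add on the packed scheduler word and only falls back to the scheduler proper (runtime_gosched) when every cpu slot is taken.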
// The goroutine g exited its system call.
// Arrange for it to run on a cpu again.
// This is called only from the go syscall library, not
// from the low-level system calls used by the runtime.
void
runtime_exitsyscall(void)
{
	G *gp;
	uint32 v;

	// Fast path.
	// If we can do the mcpu++ bookkeeping and
	// find that we still have mcpu <= mcpumax, then we can
	// start executing Go code immediately, without having to
	// schedlock/schedunlock.
	// Also do fast return if any locks are held, so that
	// panic code can use syscalls to open a file.
	gp = g;
	v = runtime_xadd(&runtime_sched.atomic, (1<<mcpuShift));
	if((m->profilehz == runtime_sched.profilehz && atomic_mcpu(v) <= atomic_mcpumax(v)) || m->locks > 0) {
		// There's a cpu for us, so we can run.
		gp->status = Grunning;
		// Garbage collector isn't running (since we are),
		// so okay to clear gcstack.
#ifdef USING_SPLIT_STACK
		gp->gcstack = nil;
#endif
		gp->gcnext_sp = nil;
		runtime_memclr(&gp->gcregs, sizeof gp->gcregs);

		if(m->profilehz > 0)
			runtime_setprof(true);
		return;
	}

	// Tell scheduler to put g back on the run queue:
	// mostly equivalent to g->status = Grunning,
	// but keeps the garbage collector from thinking
	// that g is running right now, which it's not.
	gp->readyonstop = 1;

	// All the cpus are taken.
	// The scheduler will ready g and put this m to sleep.
	// When the scheduler takes g away from m,
	// it will undo the runtime_sched.mcpu++ above.
	runtime_gosched();

	// Gosched returned, so we're allowed to run now.
	// Delete the gcstack information that we left for
	// the garbage collector during the system call.
	// Must wait until now because until gosched returns
	// we don't know for sure that the garbage collector
	// is not running.
#ifdef USING_SPLIT_STACK
	gp->gcstack = nil;
#endif
	gp->gcnext_sp = nil;
	runtime_memclr(&gp->gcregs, sizeof gp->gcregs);
}
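The atomic_mcpu, atomic_mcpumax, atomic_waitstop and atomic_gwaiting accessors unpack bit fields of the single word runtime_sched.atomic, which is what lets entersyscall/exitsyscall do their bookkeeping with one xadd. A sketch of that layout, reconstructed from the shift names used above (the 15-bit field width is an assumption):

enum {
	mcpuWidth = 15,
	mcpuMask = (1<<mcpuWidth) - 1,
	mcpuShift = 0,					// number of m's executing Go code
	mcpumaxShift = mcpuShift + mcpuWidth,		// max allowed (GOMAXPROCS)
	waitstopShift = mcpumaxShift + mcpuWidth,	// stoptheworld is waiting
	gwaitingShift = waitstopShift + 1,		// a g is waiting to run
};

#define atomic_mcpu(v)		(((v)>>mcpuShift)&mcpuMask)
#define atomic_mcpumax(v)	(((v)>>mcpumaxShift)&mcpuMask)
#define atomic_waitstop(v)	(((v)>>waitstopShift)&1)
#define atomic_gwaiting(v)	(((v)>>gwaitingShift)&1)

With mcpuShift at bit 0, adding 1<<mcpuShift (as exitsyscall does) is exactly "mcpu++" on the packed word.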
Example 4
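The untimed wait in the semaphore variant: instead of a futex word, the sleeping M registers itself in n->key with a compare-and-swap, then blocks on its lazily created per-M semaphore; a CAS failure means the wakeup already happened.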
void
runtime_notesleep(Note *n)
{
	M *m;

	m = runtime_m();
	// Lazily create this M's semaphore.
	if(m->waitsema == 0)
		m->waitsema = runtime_semacreate();
	// Register this M as the waiter in n->key.
	if(!runtime_casp((void**)&n->key, nil, m)) {  // must be LOCKED (got wakeup)
		if(n->key != LOCKED)
			runtime_throw("notesleep - waitm out of sync");
		return;
	}
	// Queued.  Sleep.
	if(m->profilehz > 0)
		runtime_setprof(false);
	runtime_semasleep(-1);
	if(m->profilehz > 0)
		runtime_setprof(true);
}
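For symmetry, a sketch of the semaphore-based runtime_notewakeup, assuming runtime_semawakeup posts the per-M semaphore that semasleep blocks on:

void
runtime_notewakeup(Note *n)
{
	M *mp;

	// Atomically mark the note LOCKED (fired), capturing
	// whatever was registered there.
	do
		mp = runtime_atomicloadp((void**)&n->key);
	while(!runtime_casp((void**)&n->key, mp, (void*)LOCKED));

	if(mp == nil) {
		// Nothing was waiting; the flag alone is enough.
	} else if(mp == (M*)LOCKED) {
		runtime_throw("notewakeup - double wakeup");
	} else {
		// Wake the registered M from semasleep.
		runtime_semawakeup(mp);
	}
}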
Example 5
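The syscall-entry counterpart of Example 3: publish the goroutine's stack bounds and registers for the garbage collector, mark it Gsyscall, then release its cpu slot, again with a lock-free fast path when no one needs to be woken.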
void
runtime_entersyscall(void)
{
	uint32 v;

	if(m->profilehz > 0)
		runtime_setprof(false);

	// Leave SP around for gc and traceback.
#ifdef USING_SPLIT_STACK
	g->gcstack = __splitstack_find(nil, nil, &g->gcstack_size,
				       &g->gcnext_segment, &g->gcnext_sp,
				       &g->gcinitial_sp);
#else
	g->gcnext_sp = (byte *) &v;
#endif

	// Save the registers in the g structure so that any pointers
	// held in registers will be seen by the garbage collector.
	// We could use getcontext here, but setjmp is more efficient
	// because it doesn't need to save the signal mask.
	setjmp(g->gcregs);

	g->status = Gsyscall;

	// Fast path.
	// The slow path inside the schedlock/schedunlock will get
	// through without stopping if it does:
	//	mcpu--
	//	gwait not true
	//	waitstop && mcpu <= mcpumax not true
	// If we can do the same with a single atomic add,
	// then we can skip the locks.
	v = runtime_xadd(&runtime_sched.atomic, -1<<mcpuShift);
	if(!atomic_gwaiting(v) && (!atomic_waitstop(v) || atomic_mcpu(v) > atomic_mcpumax(v)))
		return;

	schedlock();
	v = runtime_atomicload(&runtime_sched.atomic);
	if(atomic_gwaiting(v)) {
		matchmg();
		v = runtime_atomicload(&runtime_sched.atomic);
	}
	if(atomic_waitstop(v) && atomic_mcpu(v) <= atomic_mcpumax(v)) {
		runtime_xadd(&runtime_sched.atomic, -1<<waitstopShift);
		runtime_notewakeup(&runtime_sched.stopped);
	}

	schedunlock();
}
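Note how the fast paths pair up: entersyscall releases a cpu slot with a single xadd of -1<<mcpuShift, and exitsyscall (Example 3) reclaims one with the matching 1<<mcpuShift, so in the common case neither direction has to take schedlock.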
Example 6
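The timed wait in the semaphore variant. The subtle part is the cleanup after the deadline: the M must unregister itself from n->key before returning, and if a wakeup raced in first, it must still consume the semaphore so the wakeup count stays in sync.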
void
runtime_notetsleep(Note *n, int64 ns)
{
	M *m;
	M *mp;
	int64 deadline, now;

	if(ns < 0) {
		runtime_notesleep(n);
		return;
	}

	m = runtime_m();
	if(m->waitsema == 0)
		m->waitsema = runtime_semacreate();

	// Register for wakeup on n->waitm.
	if(!runtime_casp((void**)&n->key, nil, m)) {  // must be LOCKED (got wakeup already)
		if(n->key != LOCKED)
			runtime_throw("notetsleep - waitm out of sync");
		return;
	}

	if(m->profilehz > 0)
		runtime_setprof(false);
	deadline = runtime_nanotime() + ns;
	for(;;) {
		// Registered.  Sleep.
		if(runtime_semasleep(ns) >= 0) {
			// Acquired semaphore, semawakeup unregistered us.
			// Done.
			if(m->profilehz > 0)
				runtime_setprof(true);
			return;
		}

		// Interrupted or timed out.  Still registered.  Semaphore not acquired.
		now = runtime_nanotime();
		if(now >= deadline)
			break;

		// Deadline hasn't arrived.  Keep sleeping.
		ns = deadline - now;
	}

	if(m->profilehz > 0)
		runtime_setprof(true);

	// Deadline arrived.  Still registered.  Semaphore not acquired.
	// Want to give up and return, but have to unregister first,
	// so that any notewakeup racing with the return does not
	// try to grant us the semaphore when we don't expect it.
	for(;;) {
		mp = runtime_atomicloadp((void**)&n->key);
		if(mp == m) {
			// No wakeup yet; unregister if possible.
			if(runtime_casp((void**)&n->key, mp, nil))
				return;
		} else if(mp == (M*)LOCKED) {
			// Wakeup happened so semaphore is available.
			// Grab it to avoid getting out of sync.
			if(runtime_semasleep(-1) < 0)
				runtime_throw("runtime: unable to acquire - semaphore out of sync");
			return;
		} else {
			runtime_throw("runtime: unexpected waitm - semaphore out of sync");
		}
	}
}
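Finally, a hypothetical usage sketch (waiter and signaller are invented names, not runtime functions) showing the one-shot noteclear/notewakeup/notetsleep lifecycle these routines implement:

static Note done;

// Run by the thread that waits for the event.
void
waiter(void)
{
	runtime_noteclear(&done);
	// ... arrange for another thread to call signaller ...
	runtime_notetsleep(&done, 1000000000LL);	// wait at most 1s
}

// Run by the thread that signals completion.
void
signaller(void)
{
	runtime_notewakeup(&done);	// at most once per noteclear
}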