Example #1
void
runtime_notetsleep(Note *n, int64 ns)
{
	int64 deadline, now;

	if(ns < 0) {
		runtime_notesleep(n);
		return;
	}

	// Already signaled?
	if(runtime_atomicload((uint32*)&n->key) != 0)
		return;

	// Turn off profiling signals while we sleep.
	if(runtime_m()->profilehz > 0)
		runtime_setprof(false);
	deadline = runtime_nanotime() + ns;
	for(;;) {
		// futexsleep may return early, so loop until the note is
		// signaled or the deadline passes.
		runtime_futexsleep((uint32*)&n->key, 0, ns);
		if(runtime_atomicload((uint32*)&n->key) != 0)
			break;
		now = runtime_nanotime();
		if(now >= deadline)
			break;
		ns = deadline - now;
	}
	if(runtime_m()->profilehz > 0)
		runtime_setprof(true);
}
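The futex-based wait above pairs with a wakeup that marks the note's key and wakes any sleeper. The sketch below only illustrates that protocol; it assumes runtime_xchg and runtime_futexwakeup helpers with the usual runtime signatures and is not the verbatim runtime source.

void
runtime_notewakeup(Note *n)
{
	// Mark the note as signaled; the old value must have been 0,
	// since a second wakeup on the same note is a bug.
	if(runtime_xchg((uint32*)&n->key, 1))
		runtime_throw("notewakeup - double wakeup");
	// Wake at most one sleeper blocked in runtime_futexsleep.
	runtime_futexwakeup((uint32*)&n->key, 1);
}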
Example #2
int64
runtime_tickspersecond(void)
{
	int64 res, t0, t1, c0, c1;

	// Fast path: the conversion rate has already been measured.
	res = (int64)runtime_atomicload64((uint64*)&ticks);
	if(res != 0)
		return res;
	runtime_lock(&ticksLock);
	res = ticks;
	if(res == 0) {
		// Calibrate: count CPU ticks across a 100ms wall-clock sleep,
		// then scale the result to ticks per second.
		t0 = runtime_nanotime();
		c0 = runtime_cputicks();
		runtime_usleep(100*1000);
		t1 = runtime_nanotime();
		c1 = runtime_cputicks();
		if(t1 == t0)
			t1++;
		res = (c1-c0)*1000*1000*1000/(t1-t0);
		if(res == 0)
			res++;
		runtime_atomicstore64((uint64*)&ticks, res);
	}
	runtime_unlock(&ticksLock);
	return res;
}
Example #3
void
runtime_notetsleep(Note *n, int64 ns)
{
	int64 deadline, now;

	if(ns < 0) {
		runtime_notesleep(n);
		return;
	}

	// Already signaled?
	if(runtime_atomicload(&n->key) != 0)
		return;

	deadline = runtime_nanotime() + ns;
	for(;;) {
		// futexsleep may return early, so loop until the note is
		// signaled or the deadline passes.
		runtime_futexsleep(&n->key, 0, ns);
		if(runtime_atomicload(&n->key) != 0)
			return;
		now = runtime_nanotime();
		if(now >= deadline)
			return;
		ns = deadline - now;
	}
}
Example #4
void
runtime_notetsleep(Note *n, int64 ns)
{
    M *m;
    M *mp;
    int64 deadline, now;

    if(ns < 0) {
        runtime_notesleep(n);
        return;
    }

    m = runtime_m();
    if(m->waitsema == 0)
        m->waitsema = runtime_semacreate();

    // Register for wakeup on n->waitm.
    if(!runtime_casp((void**)&n->key, nil, m)) {  // must be LOCKED (got wakeup already)
        if(n->key != LOCKED)
            runtime_throw("notetsleep - waitm out of sync");
        return;
    }

    if(m->profilehz > 0)
        runtime_setprof(false);
    deadline = runtime_nanotime() + ns;
    for(;;) {
        // Registered.  Sleep.
        if(runtime_semasleep(ns) >= 0) {
            // Acquired semaphore, semawakeup unregistered us.
            // Done.
            if(m->profilehz > 0)
                runtime_setprof(true);
            return;
        }

        // Interrupted or timed out.  Still registered.  Semaphore not acquired.
        now = runtime_nanotime();
        if(now >= deadline)
            break;

        // Deadline hasn't arrived.  Keep sleeping.
        ns = deadline - now;
    }

    if(m->profilehz > 0)
        runtime_setprof(true);

    // Deadline arrived.  Still registered.  Semaphore not acquired.
    // Want to give up and return, but have to unregister first,
    // so that any notewakeup racing with the return does not
    // try to grant us the semaphore when we don't expect it.
    for(;;) {
        mp = runtime_atomicloadp((void**)&n->key);
        if(mp == m) {
            // No wakeup yet; unregister if possible.
            if(runtime_casp((void**)&n->key, mp, nil))
                return;
        } else if(mp == (M*)LOCKED) {
            // Wakeup happened so semaphore is available.
            // Grab it to avoid getting out of sync.
            if(runtime_semasleep(-1) < 0)
                runtime_throw("runtime: unable to acquire - semaphore out of sync");
            return;
        } else {
            runtime_throw("runtime: unexpected waitm - semaphore out of sync");
        }
    }
}
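The registration protocol in the example above (CAS the waiting M into n->key, with LOCKED meaning the note is already signaled) implies a matching wakeup that swaps LOCKED in and wakes whatever M it displaced. Below is a minimal sketch of that side, assuming a runtime_semawakeup(M*) helper that posts the M's waitsema; it is illustrative, not the verbatim runtime source.

void
runtime_notewakeup(Note *n)
{
    M *mp;

    // Atomically replace whatever is registered in the key with LOCKED.
    do
        mp = runtime_atomicloadp((void**)&n->key);
    while(!runtime_casp((void**)&n->key, mp, (void*)LOCKED));

    if(mp == nil) {
        // Nothing was waiting; a later notetsleep sees LOCKED and returns.
    } else if(mp == (M*)LOCKED) {
        // Two wakeups on the same note are not allowed.
        runtime_throw("notewakeup - double wakeup");
    } else {
        // An M was registered; grant it the semaphore so its
        // runtime_semasleep call returns.
        runtime_semawakeup(mp);
    }
}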
Example #5
int32
runtime_semasleep (int64 ns)
{
  M *m;
  struct go_sem *sem;
  int r;

  m = runtime_m ();
  sem = (struct go_sem *) m->waitsema;
  if (ns >= 0)
    {
      /* Timed wait: convert the relative timeout into an absolute
         timespec before waiting.  */
      int64 abs;
      struct timespec ts;
      int err;

      abs = ns + runtime_nanotime ();
      ts.tv_sec = abs / 1000000000LL;
      ts.tv_nsec = abs % 1000000000LL;

      err = 0;

#ifdef HAVE_SEM_TIMEDWAIT
      r = sem_timedwait (&sem->sem, &ts);
      if (r != 0)
	err = errno;
#else
      /* No sem_timedwait available: emulate a timed wait with
         sem_trywait plus a condition variable under the mutex.  */
      if (pthread_mutex_lock (&sem->mutex) != 0)
	runtime_throw ("pthread_mutex_lock");

      while ((r = sem_trywait (&sem->sem)) != 0)
	{
	  r = pthread_cond_timedwait (&sem->cond, &sem->mutex, &ts);
	  if (r != 0)
	    {
	      err = r;
	      break;
	    }
	}

      if (pthread_mutex_unlock (&sem->mutex) != 0)
	runtime_throw ("pthread_mutex_unlock");
#endif

      if (err != 0)
	{
	  if (err == ETIMEDOUT || err == EAGAIN || err == EINTR)
	    return -1;
	  runtime_throw ("sema_timedwait");
	}
      return 0;
    }

  /* Untimed wait: block until the semaphore is posted, retrying on EINTR.  */
  while (sem_wait (&sem->sem) != 0)
    {
      if (errno == EINTR)
	continue;
      runtime_throw ("sem_wait");
    }

  return 0;
}
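The fallback branch above (no sem_timedwait) only works if the posting side also signals the condition variable, so that a waiter blocked in pthread_cond_timedwait re-runs sem_trywait. Here is a hedged sketch of that posting side, assuming the same struct go_sem layout and HAVE_SEM_TIMEDWAIT macro; it is illustrative, not the verbatim runtime source.

void
runtime_semawakeup (M *mp)
{
  struct go_sem *sem;

  sem = (struct go_sem *) mp->waitsema;
  if (sem_post (&sem->sem) != 0)
    runtime_throw ("sem_post");

#ifndef HAVE_SEM_TIMEDWAIT
  /* Without sem_timedwait, waiters block on the condition variable,
     so poke it as well.  */
  if (pthread_mutex_lock (&sem->mutex) != 0)
    runtime_throw ("pthread_mutex_lock");
  if (pthread_cond_broadcast (&sem->cond) != 0)
    runtime_throw ("pthread_cond_broadcast");
  if (pthread_mutex_unlock (&sem->mutex) != 0)
    runtime_throw ("pthread_mutex_unlock");
#endif
}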
Example #6
void
runtime_gc(int32 force __attribute__ ((unused)))
{
	int64 t0, t1;
	char *p;
	Finalizer *fp;

	// The gc is turned off (via enablegc) until
	// the bootstrap has completed.
	// Also, malloc gets called in the guts
	// of a number of libraries that might be
	// holding locks.  To avoid priority inversion
	// problems, don't bother trying to run gc
	// while holding a lock.  The next mallocgc
	// without a lock will do the gc instead.
	if(!mstats.enablegc || m->locks > 0 /* || runtime_panicking */)
		return;

	if(gcpercent == -2) {	// first time through
		p = runtime_getenv("GOGC");
		if(p == nil || p[0] == '\0')
			gcpercent = 100;
		else if(runtime_strcmp(p, "off") == 0)
			gcpercent = -1;
		else
			gcpercent = runtime_atoi(p);
	}
	if(gcpercent < 0)
		return;

	pthread_mutex_lock(&finqlock);
	pthread_mutex_lock(&gcsema);
	m->locks++;	// disable gc during the mallocs in newproc
	t0 = runtime_nanotime();
	runtime_stoptheworld();
	if(force || mstats.heap_alloc >= mstats.next_gc) {
		__go_cachestats();
		mark();
		sweep();
		__go_stealcache();
		mstats.next_gc = mstats.heap_alloc+mstats.heap_alloc*gcpercent/100;
	}

	t1 = runtime_nanotime();
	mstats.numgc++;
	mstats.pause_ns += t1 - t0;
	if(mstats.debuggc)
		runtime_printf("pause %llu\n", (unsigned long long)(t1-t0));
	pthread_mutex_unlock(&gcsema);
	runtime_starttheworld();

	// finqlock is still held.
	fp = finq;
	if(fp != nil) {
		// kick off or wake up goroutine to run queued finalizers
		if(!finstarted) {
			__go_go(runfinq, nil);
			finstarted = 1;
		}
		else if(fingwait) {
			fingwait = 0;
			pthread_cond_signal(&finqcond);
		}
	}
	m->locks--;
	pthread_mutex_unlock(&finqlock);
}