Example #1
void
runtime_notetsleep(Note *n, int64 ns)
{
	int64 deadline, now;

	if(ns < 0) {
		runtime_notesleep(n);
		return;
	}

	if(runtime_atomicload((uint32*)&n->key) != 0)
		return;

	if(runtime_m()->profilehz > 0)
		runtime_setprof(false);
	deadline = runtime_nanotime() + ns;
	for(;;) {
		runtime_futexsleep((uint32*)&n->key, 0, ns);
		if(runtime_atomicload((uint32*)&n->key) != 0)
			break;
		now = runtime_nanotime();
		if(now >= deadline)
			break;
		ns = deadline - now;
	}
	if(runtime_m()->profilehz > 0)
		runtime_setprof(true);
}
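For context, here is a minimal sketch of the wakeup side these sleep loops assume, modeled on gccgo's lock_futex.c; the exact error handling in the real source may differ, and the sketch reuses the runtime types (Note, uint32) from the snippets above.

void
runtime_notewakeup(Note *n)
{
	uint32 old;

	// Publish the wakeup before waking anyone, so a sleeper that
	// races past the futex wake still sees key != 0 and returns.
	old = runtime_xchg((uint32*)&n->key, 1);
	if(old != 0)
		runtime_throw("notewakeup - double wakeup");
	runtime_futexwakeup((uint32*)&n->key, 1);
}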
Example #2
File: mcache.c Project: kraj/gcc
// Gets a span that has a free object in it and assigns it
// to be the cached span for the given sizeclass.  Returns this span.
MSpan*
runtime_MCache_Refill(MCache *c, int32 sizeclass)
{
	MCacheList *l;
	MSpan *s;

	runtime_m()->locks++;
	// Return the current cached span to the central lists.
	s = c->alloc[sizeclass];
	if(s->freelist != nil)
		runtime_throw("refill on a nonempty span");
	if(s != &emptymspan)
		runtime_MCentral_UncacheSpan(&runtime_mheap.central[sizeclass], s);

	// Push any explicitly freed objects to the central lists.
	// Not required, but it seems like a good time to do it.
	l = &c->free[sizeclass];
	if(l->nlist > 0) {
		runtime_MCentral_FreeList(&runtime_mheap.central[sizeclass], l->list);
		l->list = nil;
		l->nlist = 0;
	}

	// Get a new cached span from the central lists.
	s = runtime_MCentral_CacheSpan(&runtime_mheap.central[sizeclass]);
	if(s == nil)
		runtime_throw("out of memory");
	if(s->freelist == nil) {
		runtime_printf("%d %d\n", s->ref, (int32)((s->npages << PageShift) / s->elemsize));
		runtime_throw("empty span");
	}
	c->alloc[sizeclass] = s;
	runtime_m()->locks--;
	return s;
}
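Given the contract in the comments (Refill returns a span with at least one free object), a hypothetical allocation fast path built on it might look like the sketch below. alloc_from_cache is not part of the project; it is an illustration assuming the MLink free-list type from the same runtime headers.

static void*
alloc_from_cache(MCache *c, int32 sizeclass)
{
	MSpan *s;
	MLink *v;

	s = c->alloc[sizeclass];
	if(s->freelist == nil)
		s = runtime_MCache_Refill(c, sizeclass);	// cached span exhausted
	v = s->freelist;		// pop one object off the span's free list
	s->freelist = v->next;
	s->ref++;
	return v;
}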
Example #3
// Allocate a new span of npage pages from the heap
// and record its size class in the HeapMap and HeapMapCache.
MSpan*
runtime_MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, bool large, bool needzero)
{
	MSpan *s;

	runtime_lock(h);
	mstats.heap_alloc += (intptr)runtime_m()->mcache->local_cachealloc;
	runtime_m()->mcache->local_cachealloc = 0;
	s = MHeap_AllocLocked(h, npage, sizeclass);
	if(s != nil) {
		mstats.heap_inuse += npage<<PageShift;
		if(large) {
			mstats.heap_objects++;
			mstats.heap_alloc += npage<<PageShift;
			// Swept spans are at the end of lists.
			if(s->npages < nelem(h->free))
				runtime_MSpanList_InsertBack(&h->busy[s->npages], s);
			else
				runtime_MSpanList_InsertBack(&h->busylarge, s);
		}
	}
	runtime_unlock(h);
	if(s != nil) {
		if(needzero && s->needzero)
			runtime_memclr((byte*)(s->start<<PageShift), s->npages<<PageShift);
		s->needzero = 0;
	}
	return s;
}
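As a usage sketch following the signature in this example (later examples show older revisions of the same function), a large allocation that bypasses the size classes could call this entry point as follows. The wrapper name and the rounding helper-free arithmetic are illustrative assumptions; PageSize and PageShift are the runtime's page constants.

static MSpan*
largealloc_sketch(MHeap *h, uintptr size, bool needzero)
{
	uintptr npages;

	npages = (size + PageSize - 1) >> PageShift;	// round up to whole pages
	// sizeclass 0 with large=true marks this as a large-object span
	return runtime_MHeap_Alloc(h, npages, 0, true, needzero);
}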
Example #4
// The GOTRACEBACK environment variable controls the
// behavior of a Go program that is crashing and exiting.
//	GOTRACEBACK=0   suppress all tracebacks
//	GOTRACEBACK=1   default behavior - show tracebacks but exclude runtime frames
//	GOTRACEBACK=2   show tracebacks including runtime frames
//	GOTRACEBACK=crash   show tracebacks including runtime frames, then crash (core dump etc)
int32
runtime_gotraceback(bool *crash)
{
	const byte *p;
	uint32 x;

	if(crash != nil)
		*crash = false;
	if(runtime_m()->traceback != 0)
		return runtime_m()->traceback;
	x = runtime_atomicload(&traceback_cache);
	if(x == ~(uint32)0) {
		p = runtime_getenv("GOTRACEBACK");
		if(p == nil)
			p = (const byte*)"";
		if(p[0] == '\0')
			x = 1<<1;
		else if(runtime_strcmp((const char *)p, "crash") == 0)
			x = (2<<1) | 1;
		else
			x = runtime_atoi(p)<<1;	
		runtime_atomicstore(&traceback_cache, x);
	}
	if(crash != nil)
		*crash = x&1;
	return x>>1;
}
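The cache packs the crash flag into bit 0 and the traceback level into the remaining bits. A hedged illustration of decoding it through the function above; the demo caller is hypothetical, while runtime_printf and runtime_crash appear elsewhere in these examples.

static void
traceback_demo(void)
{
	bool crash;
	int32 level;

	level = runtime_gotraceback(&crash);
	// GOTRACEBACK=2     -> cache 2<<1     -> level 2, crash false
	// GOTRACEBACK=crash -> cache (2<<1)|1 -> level 2, crash true
	if(level >= 2)
		runtime_printf("tracebacks will include runtime frames\n");
	if(crash)
		runtime_crash();	// core dump after printing, per the comment above
}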
Example #5
void
syscall_cgocallback ()
{
  M *mp;

  mp = runtime_m ();
  if (mp == NULL)
    {
      runtime_needm ();
      mp = runtime_m ();
      mp->dropextram = true;
    }

  runtime_exitsyscall (0);

  if (runtime_m ()->ncgo == 0)
    {
      /* The C call to Go came from a thread not currently running any
	 Go.  In the case of -buildmode=c-archive or c-shared, this
	 call may be coming in before package initialization is
	 complete.  Wait until it is.  */
      chanrecv1 (NULL, runtime_main_init_done, NULL);
    }

  mp = runtime_m ();
  if (mp->needextram)
    {
      mp->needextram = 0;
      runtime_newextram ();
    }
}
Example #6
void
runtime_notesleep(Note *n)
{
	if(runtime_m()->profilehz > 0)
		runtime_setprof(false);
	while(runtime_atomicload((uint32*)&n->key) == 0)
		runtime_futexsleep((uint32*)&n->key, 0, -1);
	if(runtime_m()->profilehz > 0)
		runtime_setprof(true);
}
Example #7
void
runtime_unlock(Lock *l)
{
    uintptr v;
    M *mp;

    if(--runtime_m()->locks < 0)
        runtime_throw("runtime_unlock: lock count");

    for(;;) {
        v = (uintptr)runtime_atomicloadp((void**)&l->key);
        if(v == LOCKED) {
            if(runtime_casp((void**)&l->key, (void*)LOCKED, nil))
                break;
        } else {
            // Other M's are waiting for the lock.
            // Dequeue an M.
            mp = (void*)(v&~LOCKED);
            if(runtime_casp((void**)&l->key, (void*)v, mp->nextwaitm)) {
                // Dequeued an M.  Wake it.
                runtime_semawakeup(mp);
                break;
            }
        }
    }
}
Example #8
void
runtime_panicstring(const char *s)
{
	Eface err;

	if(runtime_m()->mallocing) {
		runtime_printf("panic: %s\n", s);
		runtime_throw("panic during malloc");
	}
	if(runtime_m()->gcing) {
		runtime_printf("panic: %s\n", s);
		runtime_throw("panic during gc");
	}
	runtime_newErrorCString(s, &err);
	runtime_panic(err);
}
Example #9
static void
runtime_mcall(void (*pfn)(G*))
{
	M *mp;
	G *gp;
#ifndef USING_SPLIT_STACK
	int i;
#endif

	// Ensure that all registers are on the stack for the garbage
	// collector.
	__builtin_unwind_init();

	mp = m;
	gp = g;
	if(gp == mp->g0)
		runtime_throw("runtime: mcall called on m->g0 stack");

	if(gp != nil) {
#ifdef USING_SPLIT_STACK
		__splitstack_getcontext(&g->stack_context[0]);
#else
		gp->gcnext_sp = &i;
#endif
		gp->fromgogo = false;
		getcontext(&gp->context);

		// When we return from getcontext, we may be running
		// in a new thread.  That means that m and g may have
		// changed.  They are global variables so we will
		// reload them, but the addresses of m and g may be
		// cached in our local stack frame, and those
		// addresses may be wrong.  Call functions to reload
		// the values for this thread.
		mp = runtime_m();
		gp = runtime_g();

		if(gp->traceback != nil)
			gtraceback(gp);
	}
	if (gp == nil || !gp->fromgogo) {
#ifdef USING_SPLIT_STACK
		__splitstack_setcontext(&mp->g0->stack_context[0]);
#endif
		mp->g0->entry = (byte*)pfn;
		mp->g0->param = gp;

		// It's OK to set g directly here because this case
		// can not occur if we got here via a setcontext to
		// the getcontext call just above.
		g = mp->g0;

		fixcontext(&mp->g0->context);
		setcontext(&mp->g0->context);
		runtime_throw("runtime: mcall function returned");
	}
}
Example #10
void
runtime_lock(Lock *l)
{
    M *m;
    uintptr v;
    uint32 i, spin;

    m = runtime_m();
    if(m->locks++ < 0)
        runtime_throw("runtime_lock: lock count");

    // Speculative grab for lock.
    if(runtime_casp((void**)&l->key, nil, (void*)LOCKED))
        return;

    if(m->waitsema == 0)
        m->waitsema = runtime_semacreate();

    // On uniprocessors, no point spinning.
    // On multiprocessors, spin for ACTIVE_SPIN attempts.
    spin = 0;
    if(runtime_ncpu > 1)
        spin = ACTIVE_SPIN;

    for(i=0;; i++) {
        v = (uintptr)runtime_atomicloadp((void**)&l->key);
        if((v&LOCKED) == 0) {
unlocked:
            if(runtime_casp((void**)&l->key, (void*)v, (void*)(v|LOCKED)))
                return;
            i = 0;
        }
        if(i<spin)
            runtime_procyield(ACTIVE_SPIN_CNT);
        else if(i<spin+PASSIVE_SPIN)
            runtime_osyield();
        else {
            // Someone else has it.
            // l->waitm points to a linked list of M's waiting
            // for this lock, chained through m->nextwaitm.
            // Queue this M.
            for(;;) {
                m->nextwaitm = (void*)(v&~LOCKED);
                if(runtime_casp((void**)&l->key, (void*)v, (void*)((uintptr)m|LOCKED)))
                    break;
                v = (uintptr)runtime_atomicloadp((void**)&l->key);
                if((v&LOCKED) == 0)
                    goto unlocked;
            }
            if(v&LOCKED) {
                // Queued.  Wait.
                runtime_semasleep(-1);
                i = 0;
            }
        }
    }
}
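The loop above encodes the lock word as a waiter-list pointer with the LOCKED flag in the low bit. These helpers are hypothetical, added only to spell out that encoding:

static bool
lockkey_held(uintptr v)
{
	return (v & LOCKED) != 0;	// low bit: someone holds the lock
}

static M*
lockkey_waiters(uintptr v)
{
	// Remaining bits: head of the list of waiting M's, chained
	// through m->nextwaitm (nil when nobody is queued).
	return (M*)(v & ~(uintptr)LOCKED);
}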
Example #11
File: panic.c Project: 0day-ci/gcc
void
runtime_dopanic(int32 unused __attribute__ ((unused)))
{
	G *g;
	static bool didothers;
	bool crash;
	int32 t;

	g = runtime_g();
	if(g->sig != 0)
		runtime_printf("[signal %x code=%p addr=%p]\n",
			       g->sig, (void*)g->sigcode0, (void*)g->sigcode1);

	if((t = runtime_gotraceback(&crash)) > 0){
		if(g != runtime_m()->g0) {
			runtime_printf("\n");
			runtime_goroutineheader(g);
			runtime_traceback();
			runtime_printcreatedby(g);
		} else if(t >= 2 || runtime_m()->throwing > 0) {
			runtime_printf("\nruntime stack:\n");
			runtime_traceback();
		}
		if(!didothers) {
			didothers = true;
			runtime_tracebackothers(g);
		}
	}
	runtime_unlock(&paniclk);
	if(runtime_xadd(&runtime_panicking, -1) != 0) {
		// Some other m is panicking too.
		// Let it print what it needs to print.
		// Wait forever without chewing up cpu.
		// It will exit when it's done.
		static Lock deadlock;
		runtime_lock(&deadlock);
		runtime_lock(&deadlock);
	}
	
	if(crash)
		runtime_crash();

	runtime_exit(2);
}
Example #12
int
main (int argc, char **argv)
{
  runtime_initsig (0);
  runtime_args (argc, (byte **) argv);
  runtime_osinit ();
  runtime_schedinit ();
  __go_go (mainstart, NULL);	/* Queue the goroutine that runs Go's main.  */
  runtime_mstart (runtime_m ());	/* Enter the scheduler; does not return.  */
  abort ();
}
Example #13
bool
runtime_showframe(String s, bool current)
{
	static int32 traceback = -1;

	if(current && runtime_m()->throwing > 0)
		return 1;
	if(traceback < 0)
		traceback = runtime_gotraceback(nil);
	// Show every frame when traceback > 1; otherwise show only
	// frames whose names contain a dot but do not begin with
	// "runtime", i.e. frames outside the runtime package.
	return traceback > 1 || (__builtin_memchr(s.str, '.', s.len) != nil && __builtin_memcmp(s.str, "runtime.", 7) != 0);
}
Example #14
File: go-cgo.c Project: Lao16/gcc
void
syscall_cgocall ()
{
  M* m;
  G* g;

  m = runtime_m ();
  ++m->ncgocall;
  g = runtime_g ();
  ++g->ncgo;
  runtime_entersyscall ();
}
Example #15
// Possible lock states are MUTEX_UNLOCKED, MUTEX_LOCKED and MUTEX_SLEEPING.
// MUTEX_SLEEPING means that there is presumably at least one sleeping thread.
// Note that there can be spinning threads during all states - they do not
// affect mutex's state.
void
runtime_lock(Lock *l)
{
	uint32 i, v, wait, spin;

	if(runtime_m()->locks++ < 0)
		runtime_throw("runtime_lock: lock count");

	// Speculative grab for lock.
	v = runtime_xchg(&l->key, MUTEX_LOCKED);
	if(v == MUTEX_UNLOCKED)
		return;

	// wait is either MUTEX_LOCKED or MUTEX_SLEEPING
	// depending on whether there is a thread sleeping
	// on this mutex.  If we ever change l->key from
	// MUTEX_SLEEPING to some other value, we must be
	// careful to change it back to MUTEX_SLEEPING before
	// returning, to ensure that the sleeping thread gets
	// its wakeup call.
	wait = v;

	// On uniprocessors, no point spinning.
	// On multiprocessors, spin for ACTIVE_SPIN attempts.
	spin = 0;
	if(runtime_ncpu > 1)
		spin = ACTIVE_SPIN;

	for(;;) {
		// Try for lock, spinning.
		for(i = 0; i < spin; i++) {
			while(l->key == MUTEX_UNLOCKED)
				if(runtime_cas(&l->key, MUTEX_UNLOCKED, wait))
					return;
			runtime_procyield(ACTIVE_SPIN_CNT);
		}

		// Try for lock, rescheduling.
		for(i=0; i < PASSIVE_SPIN; i++) {
			while(l->key == MUTEX_UNLOCKED)
				if(runtime_cas(&l->key, MUTEX_UNLOCKED, wait))
					return;
			runtime_osyield();
		}

		// Sleep.
		v = runtime_xchg(&l->key, MUTEX_SLEEPING);
		if(v == MUTEX_UNLOCKED)
			return;
		wait = MUTEX_SLEEPING;
		runtime_futexsleep(&l->key, MUTEX_SLEEPING, -1);
	}
}
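The primitive underneath is the Linux futex syscall. Below is a simplified sketch of runtime_futexsleep in the spirit of gccgo's thread-linux.c; the exact flags and timeout handling in the real source may differ, and the sketch assumes the runtime's nil/uint32/int64 definitions alongside the system headers.

#include <linux/futex.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>

void
runtime_futexsleep(uint32 *addr, uint32 val, int64 ns)
{
	struct timespec ts;
	struct timespec *tsp;

	tsp = nil;
	if(ns >= 0) {
		ts.tv_sec = ns / 1000000000LL;	// split ns into sec/nsec
		ts.tv_nsec = ns % 1000000000LL;
		tsp = &ts;
	}
	// Sleep only if *addr still holds val; this closes the race with
	// an unlock that changes the word just before we go to sleep.
	syscall(SYS_futex, addr, FUTEX_WAIT, val, tsp, nil, 0);
}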
Example #16
// Allocate a new span of npage pages from the heap
// and record its size class in the HeapMap and HeapMapCache.
MSpan*
runtime_MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, int32 acct, int32 zeroed)
{
	MSpan *s;

	runtime_lock(h);
	mstats.heap_alloc += runtime_m()->mcache->local_cachealloc;
	runtime_m()->mcache->local_cachealloc = 0;
	s = MHeap_AllocLocked(h, npage, sizeclass);
	if(s != nil) {
		mstats.heap_inuse += npage<<PageShift;
		if(acct) {
			mstats.heap_objects++;
			mstats.heap_alloc += npage<<PageShift;
		}
	}
	runtime_unlock(h);
	if(s != nil && *(uintptr*)(s->start<<PageShift) != 0 && zeroed)
		runtime_memclr((byte*)(s->start<<PageShift), s->npages<<PageShift);
	return s;
}
Example #17
void
syscall_cgocallbackdone ()
{
  M *mp;

  runtime_entersyscall (0);
  mp = runtime_m ();
  if (mp->dropextram && mp->ncgo == 0)
    {
      mp->dropextram = false;
      runtime_dropm ();
    }
}
Example #18
File: panic.c Project: 0day-ci/gcc
// Free the given defer.
// The defer cannot be used after this call.
void
runtime_freedefer(Defer *d)
{
	P *p;

	if(d->__special)
		return;
	p = runtime_m()->p;
	d->__next = p->deferpool;
	p->deferpool = d;
	// No need to wipe out pointers in argp/pc/fn/args,
	// because we empty the pool before GC.
}
Example #19
// Called to initialize a new m (including the bootstrap m).
// Called on the new thread, can not allocate memory.
void
runtime_minit(void)
{
	M* m;
	sigset_t sigs;

	// Initialize signal handling.
	m = runtime_m();
	runtime_signalstack(m->gsignalstack, m->gsignalstacksize);
	if (sigemptyset(&sigs) != 0)
		runtime_throw("sigemptyset");
	pthread_sigmask(SIG_SETMASK, &sigs, nil);
}
Example #20
uint32
runtime_fastrand1(void)
{
	M *m;
	uint32 x;

	m = runtime_m();
	x = m->fastrand;
	// One step of a linear feedback shift register: double the
	// state and, if the high bit came on, fold in the feedback
	// constant 0x88888eef.
	x += x;
	if(x & 0x80000000L)
		x ^= 0x88888eefUL;
	m->fastrand = x;
	return x;
}
Example #21
void
runtime_unlock(Lock *l)
{
	uint32 v;

	if(--runtime_m()->locks < 0)
		runtime_throw("runtime_unlock: lock count");

	v = runtime_xchg(&l->key, MUTEX_UNLOCKED);
	if(v == MUTEX_UNLOCKED)
		runtime_throw("unlock of unlocked lock");
	if(v == MUTEX_SLEEPING)
		runtime_futexwakeup(&l->key, 1);
}
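And the matching wakeup primitive, again as a simplified sketch of the futex call (the real thread-linux.c implementation may differ in details):

void
runtime_futexwakeup(uint32 *addr, uint32 cnt)
{
	// Wake up to cnt threads blocked in FUTEX_WAIT on addr.
	syscall(SYS_futex, addr, FUTEX_WAKE, cnt, nil, nil, 0);
}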
Example #22
File: panic.c Project: 0day-ci/gcc
void
runtime_throw(const char *s)
{
	M *mp;

	mp = runtime_m();
	if(mp->throwing == 0)
		mp->throwing = 1;
	runtime_startpanic();
	runtime_printf("fatal error: %s\n", s);
	runtime_dopanic(0);
	*(int32*)0 = 0;	// not reached
	runtime_exit(1);	// even more not reached
}
Example #23
File: go-cgo.c Project: axw/llgo
void
syscall_cgocall ()
{
  M* m;
  G* g;

  if (runtime_needextram && runtime_cas (&runtime_needextram, 1, 0))
    runtime_newextram ();

  m = runtime_m ();
  ++m->ncgocall;
  g = runtime_g ();
  ++g->ncgo;
  runtime_entersyscall ();
}
Example #24
void
syscall_cgocall ()
{
  M* m;

  if (runtime_needextram && runtime_cas (&runtime_needextram, 1, 0))
    runtime_newextram ();

  runtime_lockOSThread();

  m = runtime_m ();
  ++m->ncgocall;
  ++m->ncgo;
  runtime_entersyscall (0);
}
Example #25
void
runtime_notesleep(Note *n)
{
	M *m;

	m = runtime_m();
	if(m->waitsema == 0)
		m->waitsema = runtime_semacreate();
	if(!runtime_casp(&n->waitm, nil, m)) {  // must be LOCKED (got wakeup)
		if(n->waitm != (void*)LOCKED)
			runtime_throw("notesleep - waitm out of sync");
		return;
	}
	// Queued.  Sleep.
	runtime_semasleep(-1);
}
Example #26
void
runtime_resetcpuprofiler(int32 hz)
{
	struct itimerval it;

	runtime_memclr((byte*)&it, sizeof it);
	if(hz == 0) {
		runtime_setitimer(ITIMER_PROF, &it, nil);
	} else {
		it.it_interval.tv_sec = 0;
		it.it_interval.tv_usec = 1000000 / hz;
		it.it_value = it.it_interval;
		runtime_setitimer(ITIMER_PROF, &it, nil);
	}
	runtime_m()->profilehz = hz;
}
Example #27
File: panic.c Project: 0day-ci/gcc
// Allocate a Defer, usually using per-P pool.
// Each defer must be released with freedefer.
Defer*
runtime_newdefer()
{
	Defer *d;
	P *p;

	d = nil;
	p = runtime_m()->p;
	d = p->deferpool;
	if(d)
		p->deferpool = d->__next;
	if(d == nil) {
		// deferpool is empty
		d = runtime_malloc(sizeof(Defer));
	}
	return d;
}
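Taken together with runtime_freedefer in Example #18, the per-P pool round-trip looks roughly like this; the driver function and the field assignment are illustrative assumptions, not project code.

static void
defer_pool_demo(void)
{
	Defer *d;

	d = runtime_newdefer();	// pops p->deferpool, or mallocs on a miss
	d->__special = false;	// only non-special defers are pooled on free
	// ... record and later run the deferred call ...
	runtime_freedefer(d);	// pushes d back onto p->deferpool
}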
Example #28
void *
alloc_saved (size_t n)
{
  void *ret;
  M *m;
  CgoMal *c;

  ret = __go_alloc (n);

  /* Record the allocation in the M's cgomal list so the memory stays
     visible to the garbage collector for the duration of the cgo call.  */
  m = runtime_m ();
  c = (CgoMal *) __go_alloc (sizeof (CgoMal));
  c->next = m->cgomal;
  c->alloc = ret;
  m->cgomal = c;

  return ret;
}
Example #29
// Allocate a new span of npage pages from the heap
// and record its size class in the HeapMap and HeapMapCache.
MSpan*
runtime_MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, int32 acct)
{
	MSpan *s;

	runtime_lock(h);
	runtime_purgecachedstats(runtime_m());
	s = MHeap_AllocLocked(h, npage, sizeclass);
	if(s != nil) {
		mstats.heap_inuse += npage<<PageShift;
		if(acct) {
			mstats.heap_objects++;
			mstats.heap_alloc += npage<<PageShift;
		}
	}
	runtime_unlock(h);
	return s;
}
Example #30
static void
sig_panic_leadin (int sig)
{
  int i;
  sigset_t clear;

  if (runtime_m ()->mallocing)
    {
      runtime_printf ("caught signal while mallocing: %d\n", sig);
      runtime_throw ("caught signal while mallocing");
    }

  /* The signal handler blocked signals; unblock them.  */
  i = sigfillset (&clear);
  __go_assert (i == 0);
  i = sigprocmask (SIG_UNBLOCK, &clear, NULL);
  __go_assert (i == 0);
}