Example #1
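Usage examples of runtime·atomicstore, the Go runtime's 32-bit atomic store with release semantics, taken from the C implementation of the runtime (the runtime was written in C before Go 1.4). This first example is semasleep from a BSD port that sleeps on thrsleep (the OpenBSD primitive): runtime·xchg acquires the per-M spin mutex waitsemalock, and runtime·atomicstore(..., 0) releases it on every exit path.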
int32
runtime·semasleep(int64 ns)
{
	Timespec ts;
	int64 secs;

	// spin-mutex lock
	while(runtime·xchg(&m->waitsemalock, 1))
		runtime·osyield();

	for(;;) {
		// lock held
		if(m->waitsemacount == 0) {
			// sleep until semaphore != 0 or timeout.
			// thrsleep unlocks m->waitsemalock.
			if(ns < 0)
				runtime·thrsleep(&m->waitsemacount, 0, nil, &m->waitsemalock, nil);
			else {
				ns += runtime·nanotime();
				secs = ns/1000000000LL;
				// Avoid overflow
				if(secs >= 1LL<<31)
					secs = (1LL<<31) - 1;
				ts.tv_sec = secs;
				ts.tv_nsec = ns%1000000000LL;
				runtime·thrsleep(&m->waitsemacount, CLOCK_REALTIME, &ts, &m->waitsemalock, nil);
			}
			// reacquire lock
			while(runtime·xchg(&m->waitsemalock, 1))
				runtime·osyield();
		}

		// lock held (again)
		if(m->waitsemacount != 0) {
			// semaphore is available.
			m->waitsemacount--;
			// spin-mutex unlock
			runtime·atomicstore(&m->waitsemalock, 0);
			return 0;  // semaphore acquired
		}

		// semaphore not available.
		// if there is a timeout, stop now.
		// otherwise keep trying.
		if(ns >= 0)
			break;
	}

	// lock held but giving up
	// spin-mutex unlock
	runtime·atomicstore(&m->waitsemalock, 0);
	return -1;
}
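The lock/unlock idiom shared by the semasleep/semawakeup examples maps directly onto C11 atomics. A minimal sketch under that reading; spin_lock, spin_unlock, and the headers used are illustrative choices, not Go runtime identifiers:

#include <stdatomic.h>
#include <sched.h>

// runtime·xchg: atomically swap in 1 and spin while the old value was 1.
// Acquire ordering makes the previous holder's writes visible.
static void
spin_lock(atomic_uint *l)
{
	while(atomic_exchange_explicit(l, 1, memory_order_acquire))
		sched_yield();	// runtime·osyield
}

// runtime·atomicstore: a release store of 0 publishes every write made
// inside the critical section before the lock appears free.
static void
spin_unlock(atomic_uint *l)
{
	atomic_store_explicit(l, 0, memory_order_release);
}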
Example #2
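A later revision of the same semasleep: the deadline is split with runtime·timediv (which returns the quotient and stores the 32-bit remainder through its third argument, avoiding 64-bit division), and the sleep uses CLOCK_MONOTONIC rather than CLOCK_REALTIME so wall-clock adjustments cannot distort the timeout. The runtime·atomicstore calls are unchanged: they release the spin mutex.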
int32
runtime·semasleep(int64 ns)
{
	Timespec ts;

	// spin-mutex lock
	while(runtime·xchg(&m->waitsemalock, 1))
		runtime·osyield();

	for(;;) {
		// lock held
		if(m->waitsemacount == 0) {
			// sleep until semaphore != 0 or timeout.
			// thrsleep unlocks m->waitsemalock.
			if(ns < 0)
				runtime·thrsleep(&m->waitsemacount, 0, nil, &m->waitsemalock, nil);
			else {
				ns += runtime·nanotime();
				// NOTE: tv_nsec is int64 on amd64, so this assumes a little-endian system.
				ts.tv_nsec = 0;
				ts.tv_sec = runtime·timediv(ns, 1000000000, (int32*)&ts.tv_nsec);
				runtime·thrsleep(&m->waitsemacount, CLOCK_MONOTONIC, &ts, &m->waitsemalock, nil);
			}
			// reacquire lock
			while(runtime·xchg(&m->waitsemalock, 1))
				runtime·osyield();
		}

		// lock held (again)
		if(m->waitsemacount != 0) {
			// semaphore is available.
			m->waitsemacount--;
			// spin-mutex unlock
			runtime·atomicstore(&m->waitsemalock, 0);
			return 0;  // semaphore acquired
		}

		// semaphore not available.
		// if there is a timeout, stop now.
		// otherwise keep trying.
		if(ns >= 0)
			break;
	}

	// lock held but giving up
	// spin-mutex unlock
	runtime·atomicstore(&m->waitsemalock, 0);
	return -1;
}
Example #3
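MCentral_FreeSpan from the central free-list allocator. Here runtime·atomicstore is a publication rather than an unlock: storing the new sweepgen signals that the span has been swept and may be picked up by an MCache, which is why, as the in-code comment notes, it must come after the free list is relinked.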
// Free n objects from a span s back into the central free list c.
// Called during sweep.
// Returns true if the span was returned to heap.  Sets sweepgen to
// the latest generation.
bool
runtime·MCentral_FreeSpan(MCentral *c, MSpan *s, int32 n, MLink *start, MLink *end)
{
	if(s->incache)
		runtime·throw("freespan into cached span");
	runtime·lock(c);

	// Move to nonempty if necessary.
	if(s->freelist == nil) {
		runtime·MSpanList_Remove(s);
		runtime·MSpanList_Insert(&c->nonempty, s);
	}

	// Add the objects back to s's free list.
	end->next = s->freelist;
	s->freelist = start;
	s->ref -= n;
	c->nfree += n;
	
	// delay updating sweepgen until here.  This is the signal that
	// the span may be used in an MCache, so it must come after the
	// linked list operations above (actually, just after the
	// lock of c above.)
	runtime·atomicstore(&s->sweepgen, runtime·mheap.sweepgen);

	if(s->ref != 0) {
		runtime·unlock(c);
		return false;
	}

	// s is completely freed, return it to the heap.
	MCentral_ReturnToHeap(c, s); // unlocks c
	return true;
}
Example #4
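resetcpuprofiler from the Windows port. runtime·atomicstorep publishes the lazily created waitable timer, and the final runtime·atomicstore publishes the new rate in m->profilehz so the profiling thread reads a consistent value.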
void
runtime·resetcpuprofiler(int32 hz)
{
	static Lock lock;
	void *timer, *thread;
	int32 ms;
	int64 due;

	runtime·lock(&lock);
	if(profiletimer == nil) {
		timer = runtime·stdcall(runtime·CreateWaitableTimer, 3, nil, nil, nil);
		runtime·atomicstorep(&profiletimer, timer);
		thread = runtime·stdcall(runtime·CreateThread, 6,
			nil, nil, runtime·profileloop, nil, nil, nil);
		runtime·stdcall(runtime·CloseHandle, 1, thread);
	}
	runtime·unlock(&lock);

	ms = 0;
	due = 1LL<<63;
	if(hz > 0) {
		ms = 1000 / hz;
		if(ms == 0)
			ms = 1;
		due = ms * -10000;
	}
	runtime·stdcall(runtime·SetWaitableTimer, 6,
		profiletimer, &due, (uintptr)ms, nil, nil, nil);
	runtime·atomicstore((uint32*)&m->profilehz, hz);
}
Example #5
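findfunc from the symbol-table code, an example of atomic double-checked locking: the fast path is a lock-free runtime·atomicload of funcinit, and the slow path runs buildfuncs under funclock, then sets the flag with runtime·atomicstore so the table is published before the flag.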
Func*
runtime·findfunc(uintptr addr)
{
	Func *f;
	int32 nf, n;

	// Use atomic double-checked locking,
	// because when called from pprof signal
	// handler, findfunc must run without
	// grabbing any locks.
	// (Before enabling the signal handler,
	// SetCPUProfileRate calls findfunc to trigger
	// the initialization outside the handler.)
	// Avoid deadlock on fault during malloc
	// by not calling buildfuncs if we're already in malloc.
	if(!m->mallocing && !m->gcing) {
		if(runtime·atomicload(&funcinit) == 0) {
			runtime·lock(&funclock);
			if(funcinit == 0) {
				buildfuncs();
				runtime·atomicstore(&funcinit, 1);
			}
			runtime·unlock(&funclock);
		}
	}

	if(nfunc == 0)
		return nil;
	if(addr < func[0].entry || addr >= func[nfunc].entry)
		return nil;

	// binary search to find func with entry <= addr.
	f = func;
	nf = nfunc;
	while(nf > 0) {
		n = nf/2;
		if(f[n].entry <= addr && addr < f[n+1].entry)
			return &f[n];
		else if(addr < f[n].entry)
			nf = n;
		else {
			f += n+1;
			nf -= n+1;
		}
	}

	// can't get here -- we already checked above
	// that the address was in the table bounds.
	// this can only happen if the table isn't sorted
	// by address or if the binary search above is buggy.
	runtime·prints("findfunc unreachable\n");
	return nil;
}
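The same double-checked initialization expressed in C11 atomics, as a sketch; ensure_init, buildtable, and initlock are illustrative names, not runtime identifiers:

#include <stdatomic.h>
#include <pthread.h>

static atomic_uint initdone;
static pthread_mutex_t initlock = PTHREAD_MUTEX_INITIALIZER;

static void
buildtable(void)
{
	// one-time expensive setup goes here
}

static void
ensure_init(void)
{
	// Fast path: once initialized, an acquire load suffices and no
	// lock is taken (compare runtime·atomicload(&funcinit) above).
	if(atomic_load_explicit(&initdone, memory_order_acquire))
		return;
	pthread_mutex_lock(&initlock);
	if(atomic_load_explicit(&initdone, memory_order_relaxed) == 0) {
		buildtable();
		// Release store: the table becomes visible before the flag,
		// mirroring runtime·atomicstore(&funcinit, 1).
		atomic_store_explicit(&initdone, 1, memory_order_release);
	}
	pthread_mutex_unlock(&initlock);
}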
Example #6
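semawakeup, the counterpart to Example #1: the counter is incremented and the sleeper woken with thrwakeup while the spin mutex is held, and runtime·atomicstore releases it afterwards.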
void
runtime·semawakeup(M *mp)
{
	uint32 ret;

	// spin-mutex lock
	while(runtime·xchg(&mp->waitsemalock, 1))
		runtime·osyield();
	mp->waitsemacount++;
	ret = runtime·thrwakeup(&mp->waitsemacount, 1);
	if(ret != 0 && ret != ESRCH)
		runtime·printf("thrwakeup addr=%p sem=%d ret=%d\n", &mp->waitsemacount, mp->waitsemacount, ret);
	// spin-mutex unlock
	runtime·atomicstore(&mp->waitsemalock, 0);
}
Example #7
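The same semawakeup for the port that parks threads with lwp_park/lwp_unpark (apparently NetBSD); Example #9 shows the matching semasleep and the deadlock caveat referenced in the TODO.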
void
runtime·semawakeup(M *mp)
{
	uint32 ret;

	// spin-mutex lock
	while(runtime·xchg(&mp->waitsemalock, 1))
		runtime·osyield();
	mp->waitsemacount++;
	// TODO(jsing) - potential deadlock, see semasleep() for details.
	// Confirm that LWP is parked before unparking...
	ret = runtime·lwp_unpark(mp->procid, &mp->waitsemacount);
	if(ret != 0 && ret != ESRCH)
		runtime·printf("thrwakeup addr=%p sem=%d ret=%d\n", &mp->waitsemacount, mp->waitsemacount, ret);
	// spin-mutex unlock
	runtime·atomicstore(&mp->waitsemalock, 0);
}
Example #8
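mheap_alloc from the page heap. As in Example #3, runtime·atomicstore publishes the span's sweepgen, here marking the freshly allocated span as swept in the current GC generation before the heap lock is released.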
// Allocate a new span of npage pages from the heap for GC'd memory
// and record its size class in the HeapMap and HeapMapCache.
static MSpan*
mheap_alloc(MHeap *h, uintptr npage, int32 sizeclass, bool large)
{
	MSpan *s;

	if(g != g->m->g0)
		runtime·throw("mheap_alloc not on M stack");
	runtime·lock(&h->lock);

	// To prevent excessive heap growth, before allocating n pages
	// we need to sweep and reclaim at least n pages.
	if(!h->sweepdone)
		MHeap_Reclaim(h, npage);

	// transfer stats from cache to global
	mstats.heap_alloc += g->m->mcache->local_cachealloc;
	g->m->mcache->local_cachealloc = 0;

	s = MHeap_AllocSpanLocked(h, npage);
	if(s != nil) {
		// Record span info, because gc needs to be
		// able to map interior pointer to containing span.
		runtime·atomicstore(&s->sweepgen, h->sweepgen);
		s->state = MSpanInUse;
		s->freelist = nil;
		s->ref = 0;
		s->sizeclass = sizeclass;
		s->elemsize = (sizeclass==0 ? s->npages<<PageShift : runtime·class_to_size[sizeclass]);

		// update stats, sweep lists
		if(large) {
			mstats.heap_objects++;
			mstats.heap_alloc += npage<<PageShift;
			// Swept spans are at the end of lists.
			if(s->npages < nelem(h->free))
				runtime·MSpanList_InsertBack(&h->busy[s->npages], s);
			else
				runtime·MSpanList_InsertBack(&h->busylarge, s);
		}
	}
	runtime·unlock(&h->lock);
	return s;
}
Example #9
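semasleep for the lwp_park-based port (apparently NetBSD). Unlike thrsleep, lwp_park cannot atomically release a userspace mutex, so here runtime·atomicstore must drop waitsemalock before parking; the TODO comments describe the unpark-before-park race this opens.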
int32
runtime·semasleep(int64 ns)
{
	Timespec ts;

	// spin-mutex lock
	while(runtime·xchg(&m->waitsemalock, 1))
		runtime·osyield();

	for(;;) {
		// lock held
		if(m->waitsemacount == 0) {
			// sleep until semaphore != 0 or timeout.
			// thrsleep unlocks m->waitsemalock.
			if(ns < 0) {
				// TODO(jsing) - potential deadlock!
				//
				// There is a potential deadlock here since we
				// have to release the waitsemalock mutex
				// before we call lwp_park() to suspend the
				// thread. This allows another thread to
				// release the lock and call lwp_unpark()
				// before the thread is actually suspended.
				// If this occurs the current thread will end
				// up sleeping indefinitely. Unfortunately
				// the NetBSD kernel does not appear to provide
				// a mechanism for unlocking the userspace
				// mutex once the thread is actually parked.
				runtime·atomicstore(&m->waitsemalock, 0);
				runtime·lwp_park(nil, 0, &m->waitsemacount, nil);
			} else {
				ns += runtime·nanotime();
				ts.tv_sec = ns/1000000000LL;
				ts.tv_nsec = ns%1000000000LL;
				// TODO(jsing) - potential deadlock!
				// See above for details.
				runtime·atomicstore(&m->waitsemalock, 0);
				runtime·lwp_park(&ts, 0, &m->waitsemacount, nil);
			}
			// reacquire lock
			while(runtime·xchg(&m->waitsemalock, 1))
				runtime·osyield();
		}

		// lock held (again)
		if(m->waitsemacount != 0) {
			// semaphore is available.
			m->waitsemacount--;
			// spin-mutex unlock
			runtime·atomicstore(&m->waitsemalock, 0);
			return 0;  // semaphore acquired
		}

		// semaphore not available.
		// if there is a timeout, stop now.
		// otherwise keep trying.
		if(ns >= 0)
			break;
	}

	// lock held but giving up
	// spin-mutex unlock
	runtime·atomicstore(&m->waitsemalock, 0);
	return -1;
}