void
runtime·SysMap(void *v, uintptr n, uint64 *stat)
{
	void *p;

	runtime·xadd64(stat, n);

	// On 64-bit, we don't actually have v reserved, so tread carefully.
	if(sizeof(void*) == 8) {
		p = runtime·mmap(v, n, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);
		if(p == (void*)ENOMEM)
			runtime·throw("runtime: out of memory");
		if(p != v) {
			runtime·printf("runtime: address space conflict: map(%p) = %p\n", v, p);
			runtime·throw("runtime: address space conflict");
		}
		return;
	}

	p = runtime·mmap(v, n, PROT_READ|PROT_WRITE, MAP_ANON|MAP_FIXED|MAP_PRIVATE, -1, 0);
	if(p == (void*)ENOMEM)
		runtime·throw("runtime: out of memory");
	if(p != v)
		runtime·throw("runtime: cannot map pages in arena address space");
}

// stackcacherefill/stackcacherelease implement a global cache of stack segments.
// The cache is required to prevent unlimited growth of per-thread caches.
static void
stackcacherefill(void)
{
	StackCacheNode *n;
	int32 i, pos;

	runtime·lock(&stackcachemu);
	n = stackcache;
	if(n)
		stackcache = n->next;
	runtime·unlock(&stackcachemu);
	if(n == nil) {
		n = (StackCacheNode*)runtime·SysAlloc(FixedStack*StackCacheBatch);
		if(n == nil)
			runtime·throw("out of memory (stackcacherefill)");
		runtime·xadd64(&mstats.stacks_sys, FixedStack*StackCacheBatch);
		for(i = 0; i < StackCacheBatch-1; i++)
			n->batch[i] = (byte*)n + (i+1)*FixedStack;
	}
	pos = m->stackcachepos;
	for(i = 0; i < StackCacheBatch-1; i++) {
		m->stackcache[pos] = n->batch[i];
		pos = (pos + 1) % StackCacheSize;
	}
	m->stackcache[pos] = n;
	pos = (pos + 1) % StackCacheSize;
	m->stackcachepos = pos;
	m->stackcachecnt += StackCacheBatch;
}

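// The refill path above illustrates a common pattern: a per-thread cache that is
// topped up in fixed-size batches from a global, lock-protected list, so the global
// lock is taken once per batch instead of once per allocation. Below is a minimal,
// self-contained sketch of that pattern using hypothetical names and C11/pthreads;
// it is not the runtime's actual code.

#include <pthread.h>
#include <stdlib.h>

#define BATCH 16
#define CACHE_SIZE 64

typedef struct Node { struct Node *next; } Node;

static pthread_mutex_t globalmu = PTHREAD_MUTEX_INITIALIZER;
static Node *globalfree;                        /* global free list, guarded by globalmu */

static _Thread_local void *cache[CACHE_SIZE];   /* per-thread cache, no locking needed */
static _Thread_local int cachecnt;

/* Refill the per-thread cache with one batch, taking the global lock once. */
static void cacherefill(size_t objsize) {
	int i;
	pthread_mutex_lock(&globalmu);
	for (i = 0; i < BATCH && globalfree != NULL; i++) {
		cache[cachecnt++] = globalfree;
		globalfree = globalfree->next;
	}
	pthread_mutex_unlock(&globalmu);
	/* If the global list ran dry, fall back to the system allocator (error handling omitted). */
	for (; i < BATCH; i++)
		cache[cachecnt++] = malloc(objsize);
}

/* Fast path: pop from the thread-local cache; refill in batches when empty. */
static void *cachealloc(size_t objsize) {
	if (cachecnt == 0)
		cacherefill(objsize);
	return cache[--cachecnt];
}
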
static void
TestAtomic64(void)
{
	uint64 z64, x64;

	z64 = 42;
	x64 = 0;
	PREFETCH(&z64);
	if(runtime·cas64(&z64, &x64, 1))
		runtime·throw("cas64 failed");
	if(x64 != 42)
		runtime·throw("cas64 failed");
	if(!runtime·cas64(&z64, &x64, 1))
		runtime·throw("cas64 failed");
	if(x64 != 42 || z64 != 1)
		runtime·throw("cas64 failed");
	if(runtime·atomicload64(&z64) != 1)
		runtime·throw("load64 failed");
	runtime·atomicstore64(&z64, (1ull<<40)+1);
	if(runtime·atomicload64(&z64) != (1ull<<40)+1)
		runtime·throw("store64 failed");
	if(runtime·xadd64(&z64, (1ull<<40)+1) != (2ull<<40)+2)
		runtime·throw("xadd64 failed");
	if(runtime·atomicload64(&z64) != (2ull<<40)+2)
		runtime·throw("xadd64 failed");
	if(runtime·xchg64(&z64, (3ull<<40)+3) != (2ull<<40)+2)
		runtime·throw("xchg64 failed");
	if(runtime·atomicload64(&z64) != (3ull<<40)+3)
		runtime·throw("xchg64 failed");
}

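// The test above exercises the full set of 64-bit atomics: compare-and-swap, load,
// store, fetch-and-add, and exchange. For reference, here is a sketch of the same
// semantics expressed with portable C11 <stdatomic.h> primitives; it shows what the
// test checks, not how the runtime implements these operations. One difference worth
// noting: C11 atomic_fetch_add returns the old value, while runtime·xadd64 returns
// the new one (as the test itself shows).

#include <stdatomic.h>
#include <stdint.h>
#include <assert.h>

int main(void) {
	_Atomic uint64_t z = 42;
	uint64_t expected = 0;

	/* CAS fails because z != expected, and writes the current value (42) into expected. */
	assert(!atomic_compare_exchange_strong(&z, &expected, 1));
	assert(expected == 42);

	/* Second attempt succeeds: z becomes 1. */
	assert(atomic_compare_exchange_strong(&z, &expected, 1));
	assert(atomic_load(&z) == 1);

	/* Store and load a value that needs more than 32 bits. */
	atomic_store(&z, (UINT64_C(1) << 40) + 1);
	assert(atomic_load(&z) == (UINT64_C(1) << 40) + 1);

	/* fetch_add returns the old value; the stored result is the sum. */
	atomic_fetch_add(&z, (UINT64_C(1) << 40) + 1);
	assert(atomic_load(&z) == (UINT64_C(2) << 40) + 2);

	/* Exchange returns the previous value. */
	assert(atomic_exchange(&z, (UINT64_C(3) << 40) + 3) == (UINT64_C(2) << 40) + 2);
	assert(atomic_load(&z) == (UINT64_C(3) << 40) + 3);
	return 0;
}
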
void
runtime·SysFree(void *v, uintptr n, uint64 *stat)
{
	if(Debug)
		runtime·printf("SysFree(%p, %p)\n", v, n);
	runtime·xadd64(stat, -(uint64)n);
	runtime·munmap(v, n);
}

bool disruptorPublish( disruptor* d, char* ptr )
{
	int64_t claim;
	sendBuffer* buf;
	volatile sharedSlot* slot;
	(void)d;
	(void)ptr;

	buf = &d->buffers[ d->id ];

	/* increment the claim cursor. */
	claim = xadd64( &d->ringbuffer->claimCursor.v, 1 );

	/* block until the slot is ready. */
	if ( !waitUntilAvailable( d, claim ) )
		return false;

	/* fill out the slot. */
	{
		slot = getSlot( d, claim );
		assert( slot );
		if ( !slot )
			return false;

		slot->sender = d->id;
		slot->size = (buf->tail - ptr);
		slot->offset = (ptr - buf->start);
		slot->timestamp = rdtsc();
		/* handleInfo( d, "slot %lld sender=%lld size=%lld offset=%lld timestamp=%lld",
			claim, slot->sender, slot->size, slot->offset, slot->timestamp ); */
	}

	/* wait until any other producers have published. */
	{
		int64_t expectedCursor = ( claim - 1 );
		while ( d->ringbuffer->publishCursor.v < expectedCursor ) {
			atomicYield();
		}
	}

	/* increment the publish cursor. */
	d->ringbuffer->publishCursor.v = claim;
	handleInfo( d, "publish %d", (int)claim );
	return true;
}

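/* disruptorPublish follows the usual multi-producer ring-buffer handshake: atomically
   claim a sequence number, fill the slot, then wait for every earlier producer before
   advancing the publish cursor, so consumers always see a contiguous prefix of
   published slots. Below is a minimal, self-contained sketch of that handshake with
   hypothetical names and C11 atomics; it is not this library's API, and it omits the
   wait-for-consumers step needed before a slot can be reused. */

#include <stdatomic.h>
#include <stdint.h>
#include <sched.h>

#define RING_SIZE 1024 /* must be a power of two */

typedef struct {
	int64_t sender;
	int64_t size;
	int64_t offset;
} Slot;

static Slot ring[RING_SIZE];
static _Atomic int64_t claimCursor = -1;   /* last claimed sequence */
static _Atomic int64_t publishCursor = -1; /* last published sequence */

/* Publish one slot; safe for multiple concurrent producers. */
static void publish(int64_t sender, int64_t size, int64_t offset) {
	/* Claim a unique sequence number: fetch_add returns the old value. */
	int64_t claim = atomic_fetch_add(&claimCursor, 1) + 1;
	Slot *s = &ring[claim & (RING_SIZE - 1)];

	s->sender = sender;
	s->size = size;
	s->offset = offset;

	/* Wait until every earlier claim has been published, then publish ours.
	   This keeps the published region contiguous for consumers. */
	while (atomic_load(&publishCursor) != claim - 1)
		sched_yield();
	atomic_store(&publishCursor, claim);
}
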
void
runtime·SysMap(void *v, uintptr n, uint64 *stat)
{
	void *p;

	runtime·xadd64(stat, n);
	p = runtime·stdcall(runtime·VirtualAlloc, 4, v, n, (uintptr)MEM_COMMIT, (uintptr)PAGE_READWRITE);
	if(p != v)
		runtime·throw("runtime: cannot map pages in arena address space");
}

void
runtime·SysFree(void *v, uintptr n, uint64 *stat)
{
	uintptr r;

	runtime·xadd64(stat, -(uint64)n);
	r = (uintptr)runtime·stdcall(runtime·VirtualFree, 3, v, (uintptr)0, (uintptr)MEM_RELEASE);
	if(r == 0)
		runtime·throw("runtime: failed to release pages");
}

void*
runtime·SysAlloc(uintptr n, uint64 *stat)
{
	void *v;

	v = runtime·mmap(nil, n, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);
	if(v < (void*)4096)
		return nil;
	runtime·xadd64(stat, n);
	return v;
}

void
runtime·SysMap(void *v, uintptr n, bool reserved, uint64 *stat)
{
	void *p;

	USED(reserved);
	runtime·xadd64(stat, n);
	p = runtime·mmap(v, n, PROT_READ|PROT_WRITE, MAP_ANON|MAP_FIXED|MAP_PRIVATE, -1, 0);
	if(p == (void*)ENOMEM)
		runtime·throw("runtime: out of memory");
	if(p != v)
		runtime·throw("runtime: cannot map pages in arena address space");
}

void*
runtime·sysAlloc(uintptr n, uint64 *stat)
{
	void *v;

	v = runtime·mmap(nil, n, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);
	if(v < (void*)4096) {
		if(Debug)
			runtime·printf("sysAlloc(%p): %p\n", n, v);
		return nil;
	}
	runtime·xadd64(stat, n);
	if(Debug)
		runtime·printf("sysAlloc(%p) = %p\n", n, v);
	return v;
}

void*
runtime·SysAlloc(uintptr n, uint64 *stat)
{
	void *p;

	p = runtime·mmap(nil, n, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);
	if(p < (void*)4096) {
		if(p == (void*)EACCES) {
			runtime·printf("runtime: mmap: access denied\n");
			runtime·printf("if you're running SELinux, enable execmem for this process.\n");
			runtime·exit(2);
		}
		if(p == (void*)EAGAIN) {
			runtime·printf("runtime: mmap: too much locked memory (check 'ulimit -l').\n");
			runtime·exit(2);
		}
		return nil;
	}
	runtime·xadd64(stat, n);
	return p;
}

void*
runtime·SysAlloc(uintptr n, uint64 *stat)
{
	runtime·xadd64(stat, n);
	return runtime·stdcall(runtime·VirtualAlloc, 4, nil, n, (uintptr)(MEM_COMMIT|MEM_RESERVE), (uintptr)PAGE_READWRITE);
}

void
runtime·SysFree(void *v, uintptr n, uint64 *stat)
{
	runtime·xadd64(stat, -(uint64)n);
	runtime·munmap(v, n);
}

G*
runtime·newproc1(byte *fn, byte *argp, int32 narg, int32 nret, void *callerpc)
{
	byte *sp;
	G *newg;
	int32 siz;
	int64 goid;

//printf("newproc1 %p %p narg=%d nret=%d\n", fn, argp, narg, nret);
	siz = narg + nret;
	siz = (siz+7) & ~7;

	// We could instead create a secondary stack frame
	// and make it look like goexit was on the original but
	// the call to the actual goroutine function was split.
	// Not worth it: this is almost always an error.
	if(siz > StackMin - 1024)
		runtime·throw("runtime.newproc: function arguments too large for new goroutine");

	goid = runtime·xadd64((uint64*)&runtime·sched.goidgen, 1);
	if(raceenabled)
		runtime·racegostart(goid, callerpc);

	schedlock();

	if((newg = gfget()) != nil) {
		if(newg->stackguard - StackGuard != newg->stack0)
			runtime·throw("invalid stack in newg");
	} else {
		newg = runtime·malg(StackMin);
		if(runtime·lastg == nil)
			runtime·allg = newg;
		else
			runtime·lastg->alllink = newg;
		runtime·lastg = newg;
	}
	newg->status = Gwaiting;
	newg->waitreason = "new goroutine";

	sp = (byte*)newg->stackbase;
	sp -= siz;
	runtime·memmove(sp, argp, narg);
	if(thechar == '5') {
		// caller's LR
		sp -= sizeof(void*);
		*(void**)sp = nil;
	}

	newg->sched.sp = (uintptr)sp;
	newg->sched.pc = (byte*)runtime·goexit;
	newg->sched.g = newg;
	newg->entry = fn;
	newg->gopc = (uintptr)callerpc;

	runtime·sched.gcount++;
	newg->goid = goid;

	newprocreadylocked(newg);
	schedunlock();

	return newg;
//printf(" goid=%d\n", newg->goid);
}
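
// Note that the goroutine ID is taken with runtime·xadd64 before schedlock() is
// acquired: an atomic add on a shared counter is enough to hand out unique,
// monotonically increasing IDs without holding the scheduler lock. The same idiom
// in isolation, as a sketch with hypothetical names and C11 atomics (not the
// runtime's code):

#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint64_t idgen; /* shared ID generator */

/* Each caller gets a distinct, monotonically increasing ID with no lock required;
   fetch_add returns the previous value, so add 1 to mirror xadd64's new-value result. */
static uint64_t nextid(void) {
	return atomic_fetch_add(&idgen, 1) + 1;
}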