// runtime·unlock releases l and, if other M's are queued on the lock,
// dequeues one and wakes it via its semaphore.
// NOTE(review): l->key holds nil (unlocked), LOCKED, or the head of a
// waiter list (an M* with the LOCKED bit OR'ed in) — inferred from the
// companion runtime·lock; confirm against Lock's declaration.
void runtime·unlock(Lock *l) {
	uintptr v;
	M *mp;

	// m->locks tracks this M's lock nesting depth; going negative
	// means an unlock without a matching lock.
	if(--m->locks < 0)
		runtime·throw("runtime·unlock: lock count");

	for(;;) {
		v = (uintptr)runtime·atomicloadp((void**)&l->key);
		if(v == LOCKED) {
			// No waiters: simply clear the lock word.
			if(runtime·casp((void**)&l->key, (void*)LOCKED, nil))
				break;
		} else {
			// Other M's are waiting for the lock.
			// Dequeue an M.
			mp = (void*)(v&~LOCKED);
			// Replace the head with the next waiter, then wake the
			// dequeued M; it will retry acquisition in runtime·lock.
			if(runtime·casp((void**)&l->key, (void*)v, mp->nextwaitm)) {
				// Dequeued an M. Wake it.
				runtime·semawakeup(mp);
				break;
			}
		}
	}
}
// runtime·lock acquires l: first a speculative CAS, then active and
// passive spinning (on multiprocessors), and finally queueing this M
// on the lock's waiter list and sleeping on its semaphore.
void runtime·lock(Lock *l) {
	uintptr v;
	uint32 i, spin;

	if(m->locks++ < 0)
		runtime·throw("runtime·lock: lock count");

	// Speculative grab for lock.
	if(runtime·casp((void**)&l->key, nil, (void*)LOCKED))
		return;

	// Lazily create this M's sleep semaphore on first contention.
	if(m->waitsema == 0)
		m->waitsema = runtime·semacreate();

	// On uniprocessor's, no point spinning.
	// On multiprocessors, spin for ACTIVE_SPIN attempts.
	spin = 0;
	if(runtime·ncpu > 1)
		spin = ACTIVE_SPIN;

	for(i=0;; i++) {
		v = (uintptr)runtime·atomicloadp((void**)&l->key);
		if((v&LOCKED) == 0) {
		unlocked:
			// Lock word is free (possibly with waiters still queued
			// in the pointer bits); set LOCKED while preserving them.
			if(runtime·casp((void**)&l->key, (void*)v, (void*)(v|LOCKED)))
				return;
			i = 0;
		}
		if(i<spin)
			runtime·procyield(ACTIVE_SPIN_CNT);	// busy-wait a little
		else if(i<spin+PASSIVE_SPIN)
			runtime·osyield();	// yield the CPU, then retry
		else {
			// Someone else has it.
			// l->waitm points to a linked list of M's waiting
			// for this lock, chained through m->nextwaitm.
			// Queue this M.
			for(;;) {
				m->nextwaitm = (void*)(v&~LOCKED);
				if(runtime·casp((void**)&l->key, (void*)v, (void*)((uintptr)m|LOCKED)))
					break;
				// CAS failed: re-read; if the lock was released in
				// the meantime, go try to grab it instead of queueing.
				v = (uintptr)runtime·atomicloadp((void**)&l->key);
				if((v&LOCKED) == 0)
					goto unlocked;
			}
			if(v&LOCKED) {
				// Queued. Wait.
				runtime·semasleep(-1);
				i = 0;
			}
		}
	}
}
// Lock-free dequeue (Michael & Scott algorithm) protected by hazard
// pointers.  On success stores the popped element's payload through
// `output` (if non-null) and returns true; on an empty queue stores
// nullptr and returns false.
// NOTE(review): an element whose payload is nullptr is removed but the
// function still returns false (`data != nullptr`) — confirm callers
// expect that.
bool interlocked_queue_pop(interlocked_queue_t* q, void** output) {
	interlocked_queue_node_t* h = nullptr;
	interlocked_queue_node_t* t = nullptr;
	interlocked_queue_node_t* next = nullptr;
	void* data = nullptr;
	void* volatile* hazards[2] = { nullptr };
	void* key = allocate_hazard_pointers(2, hazards);
	if(!hazards[0] || !hazards[1]) {
		// TODO(review): if allocate_hazard_pointers can partially
		// succeed, `key` may also need releasing here — check its
		// contract.
		RaiseException(ERROR_NOT_ENOUGH_MEMORY, 0, 0, nullptr);
		return false;
	}
	for(;;) {
		// Protect the head with a hazard pointer, then re-validate it
		// so no thread can free it between the read and the publish.
		h = q->head;
		*hazards[0] = h;
		MemoryBarrier();
		if(q->head != h) {
			continue;
		}
		t = q->tail;
		next = h->next;
		*hazards[1] = next;
		MemoryBarrier();
		if(q->head != h) {
			continue;
		}
		if(next == nullptr) {
			// Queue is empty (dummy node only).
			*hazards[0] = nullptr;
			*hazards[1] = nullptr;
			if(output) {
				*output = nullptr;
			}
			// BUG FIX: this early return leaked the hazard-pointer
			// record; release it like the success path does.
			deallocate_hazard_pointers(key);
			return false;
		}
		if(h == t) {
			// Tail is lagging behind; help swing it forward and retry.
			casp((void* volatile*)&q->tail, t, next);
			continue;
		}
		data = next->data;
		if(casp((void* volatile*)&q->head, h, next)) {
			break;
		}
	}
	// Retire the old dummy node through SMR so threads still holding a
	// hazard pointer to it never see it freed.
	h->next = nullptr;
	smr_retire(h);
	if(output) {
		*output = data;
	}
	deallocate_hazard_pointers(key);
	return data != nullptr;
}
// netpollblock waits for the I/O readiness notification on pd for the
// given mode ('r' or 'w').  Returns true iff the READY notification
// was delivered; false means the wait ended via an error/timeout path.
// NOTE(review): READY and WAIT are sentinel G* values defined
// elsewhere; *gpp holds nil, WAIT, READY, or the parked G.
static bool
netpollblock(PollDesc *pd, int32 mode, bool waitio)
{
	G **gpp, *old;
#line 328 "C:\Users\gopher\AppData\Local\Temp\1\makerelease745458658\go\src\pkg\runtime\netpoll.goc"
	// Select the read or write wait slot.
	gpp = &pd->rg;
	if(mode == 'w')
		gpp = &pd->wg;
#line 333 "C:\Users\gopher\AppData\Local\Temp\1\makerelease745458658\go\src\pkg\runtime\netpoll.goc"
	// Either consume a pending READY, or install WAIT to announce
	// our intent to block.  Two simultaneous waiters are a bug.
	for(;;) {
		old = *gpp;
		if(old == READY) {
			*gpp = nil;
			return true;
		}
		if(old != nil)
			runtime·throw("netpollblock: double wait");
		if(runtime·casp(gpp, nil, WAIT))
			break;
	}
#line 348 "C:\Users\gopher\AppData\Local\Temp\1\makerelease745458658\go\src\pkg\runtime\netpoll.goc"
	// Park this G unless an error is already pending (checkerr != 0)
	// and the caller did not insist on waiting (waitio).  blockcommit
	// publishes the G in *gpp only if the slot still holds WAIT.
	if(waitio || checkerr(pd, mode) == 0)
		runtime·park((bool(*)(G*, void*))blockcommit, gpp, "IO wait");
#line 351 "C:\Users\gopher\AppData\Local\Temp\1\makerelease745458658\go\src\pkg\runtime\netpoll.goc"
	// Atomically grab and clear the final state; any value above WAIT
	// would be a stray G pointer left in the slot.
	old = runtime·xchgp(gpp, nil);
	if(old > WAIT)
		runtime·throw("netpollblock: corrupted state");
	return old == READY;
}
// netpollblock waits for the I/O readiness notification on pd for the
// given mode ('r' or 'w').  Returns true iff the READY notification
// was delivered; false means the wait ended via an error/timeout path.
// NOTE(review): READY and WAIT are sentinel G* values defined
// elsewhere; *gpp holds nil, WAIT, READY, or the parked G.
static bool
netpollblock(PollDesc *pd, int32 mode, bool waitio)
{
	G **gpp, *old;
#line 328 "/home/14/ren/source/golang/go/src/pkg/runtime/netpoll.goc"
	// Select the read or write wait slot.
	gpp = &pd->rg;
	if(mode == 'w')
		gpp = &pd->wg;
#line 333 "/home/14/ren/source/golang/go/src/pkg/runtime/netpoll.goc"
	// Either consume a pending READY, or install WAIT to announce
	// our intent to block.  Two simultaneous waiters are a bug.
	for(;;) {
		old = *gpp;
		if(old == READY) {
			*gpp = nil;
			return true;
		}
		if(old != nil)
			runtime·throw("netpollblock: double wait");
		if(runtime·casp(gpp, nil, WAIT))
			break;
	}
#line 348 "/home/14/ren/source/golang/go/src/pkg/runtime/netpoll.goc"
	// Park this G unless an error is already pending (checkerr != 0)
	// and the caller did not insist on waiting (waitio).  blockcommit
	// publishes the G in *gpp only if the slot still holds WAIT.
	if(waitio || checkerr(pd, mode) == 0)
		runtime·park((bool(*)(G*, void*))blockcommit, gpp, "IO wait");
#line 351 "/home/14/ren/source/golang/go/src/pkg/runtime/netpoll.goc"
	// Atomically grab and clear the final state; any value above WAIT
	// would be a stray G pointer left in the slot.
	old = runtime·xchgp(gpp, nil);
	if(old > WAIT)
		runtime·throw("netpollblock: corrupted state");
	return old == READY;
}
/* allocate a BCHandle for bco_array[which] */
/* Pops a handle from the slot's lock-free free list; if the list is
 * empty (which "should never happen"), allocates a fresh chunk of
 * group_size handles, keeps one, and pushes the rest back. */
static tmpi_BCHandle *tmpi_bch_alloc(tmpi_BcastObj *bco, int which) {
	register tmpi_BCHandle **pfree;
	tmpi_BCHandle *ret;
	register tmpi_BCHandle *localfree;

	if (bco->group_size<2)
		return NULL; /* a group of one proc does not need handle */

	pfree=&(bco->bco_array[which]->free);

	/* Lock-free pop: read the head, try to CAS it to its successor,
	 * retry from a fresh head on contention. */
	ret=*pfree;
	while (ret) {
		localfree=ret->next;
		if (casp((void **)pfree, (void *)ret, (void *)localfree))
			break;
		ret=*pfree;
	}

	if (!ret) { /* no more cached handles */
		/* the following should never happen */
		int i;
		/* Allocate group_size handles: ret keeps the first, the
		 * remaining group_size-1 (starting at localfree) are chained
		 * and pushed back onto the free list. */
		ret=localfree=(tmpi_BCHandle *)calloc(bco->group_size, sizeof(tmpi_BCHandle));
		if (!localfree)
			return NULL;
		localfree++;
		/* link the chunk of handles */
		for (i=0; i<bco->group_size-2; i++)
			localfree[i].next=&(localfree[i+1]);
		/* now localfree[i] is the last one */
		/* Lock-free push of the whole chain: point the last node at
		 * the current head, then CAS the head to the chain's first
		 * node; the chain is private until the CAS succeeds. */
		do {
			localfree[i].next=*pfree;
		} while (!casp((void **)pfree, (void *)localfree[i].next, (void *)localfree));
	}

	/* Handles are recycled (see tmpi_bch_free, which destroys these),
	 * so (re)initialize the synchronization primitives. */
	thr_mtx_init(&ret->lock);
	thr_cnd_init(&ret->wait);
	return ret;
}
// Thread-safe allocation of an event. static void initevent(void **pevent) { void *event; event = runtime·stdcall(runtime·CreateEvent, 4, (uintptr)0, (uintptr)0, (uintptr)0, (uintptr)0); if(!runtime·casp(pevent, 0, event)) { // Someone else filled it in. Use theirs. runtime·stdcall(runtime·CloseHandle, 1, event); } }
// Thread-safe allocation of an event. static void initevent(void **pevent) { void *event; event = stdcall(CreateEvent, 0, 0, 0, 0); if(!casp(pevent, 0, event)) { // Someone else filled it in. Use theirs. stdcall(CloseHandle, event); } }
// runtime·atomicstorep atomically stores v into *addr by retrying a
// compare-and-swap against the currently observed value until it
// succeeds.
void
runtime·atomicstorep(void* volatile* addr, void* v)
{
	void *prev;

	do
		prev = *addr;
	while(!runtime·casp(addr, prev, v));
}
// Lock-free enqueue (Michael & Scott algorithm) protected by hazard
// pointers: allocate a node for `data`, link it after the current
// tail, then swing q->tail forward (best-effort; peers may help).
void interlocked_queue_push(interlocked_queue_t* q, void* data) {
	interlocked_queue_node_t* node = new_interlocked_queue_node();
	interlocked_queue_node_t* t = nullptr;
	interlocked_queue_node_t* next = nullptr;
	void* volatile* hazards[1] = { nullptr };
	void* key = allocate_hazard_pointers(1, hazards);
	if(!hazards[0]) {
		// NOTE(review): `node` (and possibly `key`) is not released on
		// this failure path — confirm whether RaiseException unwinds
		// or the leak is accepted here.
		RaiseException(ERROR_NOT_ENOUGH_MEMORY, 0, 0, nullptr);
		return;
	}
	node->data = data;
	for(;;) {
		// Protect the tail with a hazard pointer and re-validate it.
		t = q->tail;
		*hazards[0] = t;
		MemoryBarrier();
		if(q->tail != t) {
			continue;
		}
		next = t->next;
		// NOTE(review): unlike the pop path there is no MemoryBarrier
		// between reading t->next and re-checking q->tail — confirm
		// this is intentional for the target memory model.
		if(q->tail != t) {
			continue;
		}
		if(next != nullptr) {
			// Tail is lagging; help swing it forward and retry.
			casp((void* volatile*)&q->tail, t, next);
			continue;
		}
		// Link the new node at the end of the list.
		if(casp((void* volatile*)&t->next, nullptr, node)) {
			break;
		}
	}
	// Best-effort tail swing; failure means another thread already
	// advanced it.
	casp((void* volatile*)&q->tail, t, node);
	deallocate_hazard_pointers(key);
}
/* a lock free deallocator */
/* Returns a broadcast handle to the per-slot free list.  The handle's
 * synchronization objects are destroyed here and re-created by the
 * next tmpi_bch_alloc.  The push is the classic lock-free stack
 * pattern: link to the observed head, publish with CAS, retry on
 * contention (the handle stays private until the CAS succeeds). */
static void tmpi_bch_free(tmpi_BcastObj *bco, int which, tmpi_BCHandle *handle) {
	tmpi_BCHandle **pfree;
	tmpi_BCHandle *head;

	pfree = &(bco->bco_array[which]->free);

	thr_mtx_destroy(&handle->lock);
	thr_cnd_destroy(&handle->wait);

	for (;;) {
		head = *pfree;
		handle->next = head;
		if (casp((void **)pfree, (void *)head, (void *)handle))
			break;
	}
}
// runtime·notesleep blocks the calling M until the note n is signaled
// by runtime·notewakeup.
void runtime·notesleep(Note *n) {
	// Lazily create this M's sleep semaphore.
	if(m->waitsema == 0)
		m->waitsema = runtime·semacreate();
	// Publish this M as the waiter.  A failed CAS means the wakeup
	// already happened (key must be LOCKED), so return immediately.
	if(!runtime·casp((void**)&n->key, nil, m)) { // must be LOCKED (got wakeup)
		if(n->key != LOCKED)
			runtime·throw("notesleep - waitm out of sync");
		return;
	}
	// Queued. Sleep.
	// Suspend profiling around the blocking sleep — presumably so the
	// wait does not pollute CPU profiles; confirm runtime·setprof's
	// contract.
	if(m->profilehz > 0)
		runtime·setprof(false);
	runtime·semasleep(-1);
	if(m->profilehz > 0)
		runtime·setprof(true);
}
// gostringsize allocates the backing array for a string of length l
// and records l in runtime·maxstring if it is a new maximum.
// NOTE(review): FlagNoZero means the l bytes are uninitialized — the
// caller is presumably expected to fill them all.
static String
gostringsize(intgo l)
{
	String s;
	uintptr ms;
#line 47 "/home/14/ren/source/golang/go/src/pkg/runtime/string.goc"
	if(l == 0)
		return runtime·emptystring;
	s.str = runtime·mallocgc(l, 0, FlagNoScan|FlagNoZero);
	s.len = l;
	// Ratchet runtime·maxstring up to at least l: retry the CAS until
	// either the current maximum already covers l or our store wins.
	for(;;) {
		ms = runtime·maxstring;
		if((uintptr)l <= ms || runtime·casp((void**)&runtime·maxstring, (void*)ms, (void*)l))
			break;
	}
	return s;
}
// gostringsize allocates the backing array for a string of length l —
// one extra byte is allocated and set to 0 so the data is also
// NUL-terminated — and records l in runtime·maxstring if it is a new
// maximum.
// NOTE(review): FlagNoZero means bytes 0..l-1 are uninitialized — the
// caller is presumably expected to fill them all.
static String
gostringsize(intgo l)
{
	String s;
	uintptr ms;
#line 47 "/home/pi/go_build/go/src/pkg/runtime/string.goc"
	if(l == 0)
		return runtime·emptystring;
#line 50 "/home/pi/go_build/go/src/pkg/runtime/string.goc"
	s.str = runtime·mallocgc(l+1, 0, FlagNoScan|FlagNoZero);
	s.len = l;
	s.str[l] = 0;	// trailing NUL for C interop
	// Ratchet runtime·maxstring up to at least l: retry the CAS until
	// either the current maximum already covers l or our store wins.
	for(;;) {
		ms = runtime·maxstring;
		if((uintptr)l <= ms || runtime·casp((void**)&runtime·maxstring, (void*)ms, (void*)l))
			break;
	}
	return s;
}
// runtime·notewakeup signals the note n exactly once: it atomically
// swaps LOCKED into n->key and then acts on the previous value —
// nil (no sleeper yet), LOCKED (illegal double wakeup), or the
// waiting M, which is woken through its semaphore.
void
runtime·notewakeup(Note *n)
{
	M *mp;

	for(;;) {
		mp = runtime·atomicloadp((void**)&n->key);
		if(runtime·casp((void**)&n->key, mp, (void*)LOCKED))
			break;
	}

	// Successfully set waitm to LOCKED.  What was it before?
	if(mp == nil) {
		// Nothing was waiting. Done.
	} else if(mp == (M*)LOCKED) {
		// Two notewakeups! Not allowed.
		runtime·throw("notewakeup - double wakeup");
	} else {
		// Must be the waiting m. Wake it up.
		runtime·semawakeup(mp);
	}
}
// runtime·notetsleep is like runtime·notesleep but gives up after ns
// nanoseconds.  ns < 0 means wait without a timeout.
void runtime·notetsleep(Note *n, int64 ns) {
	M *mp;
	int64 deadline, now;

	if(ns < 0) {
		// No timeout requested: plain blocking sleep.
		runtime·notesleep(n);
		return;
	}

	if(m->waitsema == 0)
		m->waitsema = runtime·semacreate();

	// Register for wakeup on n->waitm.
	if(!runtime·casp((void**)&n->key, nil, m)) { // must be LOCKED (got wakeup already)
		if(n->key != LOCKED)
			runtime·throw("notetsleep - waitm out of sync");
		return;
	}

	// Suspend profiling while blocked (restored on every return path).
	if(m->profilehz > 0)
		runtime·setprof(false);
	deadline = runtime·nanotime() + ns;
	for(;;) {
		// Registered. Sleep.
		if(runtime·semasleep(ns) >= 0) {
			// Acquired semaphore, semawakeup unregistered us.
			// Done.
			if(m->profilehz > 0)
				runtime·setprof(true);
			return;
		}

		// Interrupted or timed out. Still registered. Semaphore not acquired.
		now = runtime·nanotime();
		if(now >= deadline)
			break;

		// Deadline hasn't arrived. Keep sleeping.
		ns = deadline - now;
	}
	if(m->profilehz > 0)
		runtime·setprof(true);

	// Deadline arrived. Still registered. Semaphore not acquired.
	// Want to give up and return, but have to unregister first,
	// so that any notewakeup racing with the return does not
	// try to grant us the semaphore when we don't expect it.
	for(;;) {
		mp = runtime·atomicloadp((void**)&n->key);
		if(mp == m) {
			// No wakeup yet; unregister if possible.
			if(runtime·casp((void**)&n->key, mp, nil))
				return;
		} else if(mp == (M*)LOCKED) {
			// Wakeup happened so semaphore is available.
			// Grab it to avoid getting out of sync.
			if(runtime·semasleep(-1) < 0)
				runtime·throw("runtime: unable to acquire - semaphore out of sync");
			return;
		} else {
			runtime·throw("runtime: unexpected waitm - semaphore out of sync");
		}
	}
}
// blockcommit is the commit callback passed to runtime·park by
// netpollblock: it publishes the parking G in *gpp, but only while
// the slot still holds the WAIT sentinel.  A false return (the CAS
// lost to a racing wakeup) aborts the park.
static bool
blockcommit(G *gp, G **gpp)
{
	bool installed;

	installed = runtime·casp(gpp, WAIT, gp);
	return installed;
}