// makechan allocates and initializes a channel of element type
// t->__element_type with a buffer capacity of hint elements.
// Panics if hint is negative or so large that the allocation size
// would overflow; throws if the element size is implausibly large
// (the compiler already enforces this, but be safe).
static Hchan*
makechan(ChanType *t, int64 hint)
{
	Hchan *c;
	uintptr n;
	const Type *elem;

	elem = t->__element_type;

	// compiler checks this but be safe.
	if(elem->__size >= (1<<16))
		runtime_throw("makechan: invalid channel element type");

	// Reject negative sizes, sizes that do not fit in intgo, and
	// sizes whose buffer would overflow the allocator's limit.
	if(hint < 0 || (intgo)hint != hint || (elem->__size > 0 && (uintptr)hint > (MaxMem - sizeof(*c)) / elem->__size))
		runtime_panicstring("makechan: size out of range");

	// The element buffer begins at the first elem-aligned offset
	// past the Hchan header, so the header size must be rounded up
	// before adding the buffer size.
	n = sizeof(*c);
	n = ROUND(n, elem->__align);

	// allocate memory in one call
	// BUG FIX: the original passed sizeof(*c) + hint*elem->__size,
	// ignoring the alignment padding in n; when ROUND added padding
	// the buffer would extend past the end of the allocation.
	c = (Hchan*)runtime_mallocgc(n + hint*elem->__size, (uintptr)t | TypeInfo_Chan, 0);
	c->elemsize = elem->__size;
	c->elemtype = elem;
	c->dataqsiz = hint;

	if(debug)
		runtime_printf("makechan: chan=%p; elemsize=%D; dataqsiz=%D\n",
			c, (int64)elem->__size, (int64)c->dataqsiz);

	return c;
}
_Bool __go_type_equal_empty_interface (const void *vv1, const void *vv2, size_t key_size __attribute__ ((unused))) { const struct __go_empty_interface *v1; const struct __go_empty_interface *v2; const struct __go_type_descriptor* v1_descriptor; const struct __go_type_descriptor* v2_descriptor; v1 = (const struct __go_empty_interface *) vv1; v2 = (const struct __go_empty_interface *) vv2; v1_descriptor = v1->__type_descriptor; v2_descriptor = v2->__type_descriptor; if (((uintptr_t) v1_descriptor & reflectFlags) != 0 || ((uintptr_t) v2_descriptor & reflectFlags) != 0) runtime_panicstring ("invalid interface value"); if (v1_descriptor == NULL || v2_descriptor == NULL) return v1_descriptor == v2_descriptor; if (!__go_type_descriptors_equal (v1_descriptor, v2_descriptor)) return 0; if (__go_is_pointer_type (v1_descriptor)) return v1->__object == v2->__object; else return v1_descriptor->__equalfn (v1->__object, v2->__object, v1_descriptor->__size); }
int __go_empty_interface_compare (struct __go_empty_interface left, struct __go_empty_interface right) { const struct __go_type_descriptor *left_descriptor; left_descriptor = left.__type_descriptor; if (((uintptr_t) left_descriptor & reflectFlags) != 0 || ((uintptr_t) right.__type_descriptor & reflectFlags) != 0) runtime_panicstring ("invalid interface value"); if (left_descriptor == NULL && right.__type_descriptor == NULL) return 0; if (left_descriptor == NULL || right.__type_descriptor == NULL) return 1; if (!__go_type_descriptors_equal (left_descriptor, right.__type_descriptor)) return 1; if (__go_is_pointer_type (left_descriptor)) return left.__object == right.__object ? 0 : 1; if (!left_descriptor->__equalfn (left.__object, right.__object, left_descriptor->__size)) return 1; return 0; }
/* An equality function for empty interface values.  vv1 and vv2 point
   at struct __go_empty_interface values; key_size is unused since the
   interface header has a fixed size.  Two nil interfaces are equal;
   nil never equals non-nil.  Panics when the dynamic type has no
   equality function (an uncomparable type).  */
_Bool
__go_type_equal_empty_interface (const void *vv1, const void *vv2,
				 uintptr_t key_size __attribute__ ((unused)))
{
  const struct __go_empty_interface *e1 =
    (const struct __go_empty_interface *) vv1;
  const struct __go_empty_interface *e2 =
    (const struct __go_empty_interface *) vv2;
  const struct __go_type_descriptor *d1 = e1->__type_descriptor;
  const struct __go_type_descriptor *d2 = e2->__type_descriptor;

  if (d1 == NULL || d2 == NULL)
    return d1 == d2;

  if (!__go_type_descriptors_equal (d1, d2))
    return 0;

  /* Check for comparability before branching on shape, so that an
     uncomparable type panics consistently.  */
  if (d1->__equalfn == NULL)
    runtime_panicstring ("comparing uncomparable types");

  /* Pointer-shaped values are held directly in the object word.  */
  if (__go_is_pointer_type (d1))
    return e1->__object == e2->__object;

  return __go_call_equalfn (d1->__equalfn, e1->__object, e2->__object,
			    d1->__size);
}
/* Look up KEY in MAP and return a pointer to the associated value
   slot, or NULL when the key is absent and INSERT is false.  When
   INSERT is true a missing key is added (zero-valued) and a pointer
   to its fresh value slot is returned.  A nil map is a valid lookup
   target (always returns NULL) but panics on insert.  */
void *
__go_map_index (struct __go_map *map, const void *key, _Bool insert)
{
  const struct __go_map_descriptor *descriptor;
  const struct __go_type_descriptor *key_descriptor;
  uintptr_t key_offset;
  _Bool (*equalfn) (const void*, const void*, uintptr_t);
  size_t key_hash;
  size_t key_size;
  size_t bucket_index;
  char *entry;

  if (map == NULL)
    {
      if (insert)
	runtime_panicstring ("assignment to entry in nil map");
      return NULL;
    }

  descriptor = map->__descriptor;

  key_descriptor = descriptor->__map_descriptor->__key_type;
  key_offset = descriptor->__key_offset;
  key_size = key_descriptor->__size;
  /* Zero or "unknown" (-1) key sizes would make hashing meaningless.  */
  __go_assert (key_size != 0 && key_size != -1UL);
  equalfn = key_descriptor->__equalfn;

  key_hash = key_descriptor->__hashfn (key, key_size);
  bucket_index = key_hash % map->__bucket_count;

  /* Each entry's first word is the next-entry link; the key and value
     live at key_offset and __val_offset within the entry.  */
  entry = (char *) map->__buckets[bucket_index];
  while (entry != NULL)
    {
      if (equalfn (key, entry + key_offset, key_size))
	return entry + descriptor->__val_offset;
      entry = *(char **) entry;
    }

  if (!insert)
    return NULL;

  /* Grow when the load factor reaches 1; rehashing changes
     __bucket_count, so recompute the target bucket.  */
  if (map->__element_count >= map->__bucket_count)
    {
      __go_map_rehash (map);
      bucket_index = key_hash % map->__bucket_count;
    }

  /* Allocate a zeroed entry, copy the key in, and push the entry onto
     the front of the bucket's chain.  */
  entry = (char *) __go_alloc (descriptor->__entry_size);
  __builtin_memset (entry, 0, descriptor->__entry_size);

  __builtin_memcpy (entry + key_offset, key, key_size);

  *(char **) entry = map->__buckets[bucket_index];
  map->__buckets[bucket_index] = entry;

  map->__element_count += 1;

  return entry + descriptor->__val_offset;
}
/* Stub personality routine for gccgo-compiled frames.  This revision
   of the runtime does not support unwinding through Go frames via the
   standard personality interface; any attempt to invoke it panics.
   The parameters match the standard _Unwind personality signature but
   are never examined.  */
_Unwind_Reason_Code
__gccgo_personality_v0(int version __attribute__ ((unused)),
		       _Unwind_Action actions __attribute__ ((unused)),
		       _Unwind_Exception_Class exception_class __attribute__ ((unused)),
		       struct _Unwind_Exception *ue_header __attribute__ ((unused)),
		       struct _Unwind_Context *context __attribute__ ((unused)))
{
  runtime_panicstring("__gccgo_personality_v0 not implemented!!");
  /* runtime_panicstring does not return; this return exists only to
     satisfy the non-void signature and silence compiler warnings.  */
  return _URC_FATAL_PHASE1_ERROR;
}
/* Create a new map from a 64-bit entry-count hint.  This exists for
   targets where uintptr_t is narrower than 64 bits: the count is
   narrowed and the call panics if the value does not survive the
   round trip.  */
struct __go_map *
__go_new_map_big (const struct __go_map_descriptor *descriptor,
		  uint64_t entries)
{
  uintptr_t narrowed = (uintptr_t) entries;

  if (entries != (uint64_t) narrowed)
    runtime_panicstring ("map size out of range");

  return __go_new_map (descriptor, narrowed);
}
/* Implement unsafe.New: allocate zeroed storage for one value of the
   type described by TYPE.  The argument is an empty interface holding
   a reflect type, so the payload word (__object) is itself the type
   descriptor whose size we allocate.  Panics if the descriptor word
   carries reflect flag bits.  */
void *
unsafe_New (struct __go_empty_interface type)
{
  const struct __go_type_descriptor *td;

  if ((((uintptr_t) type.__type_descriptor) & reflectFlags) != 0)
    runtime_panicstring ("invalid interface value");

  /* FIXME: We should check __type_descriptor to verify that this is
     really a type descriptor.  */
  td = (const struct __go_type_descriptor *) type.__object;
  return __go_alloc (td->__size);
}
/* Return the substring s[start:end].  An END of -1 is the "no upper
   bound" sentinel and means the end of the string.  Panics when the
   bounds are out of range or inverted.  The result shares the backing
   bytes of S (no copy is made).  */
String
__go_string_slice (String s, intgo start, intgo end)
{
  intgo length = s.len;
  String result;

  if (end == -1)
    end = length;

  if (start > length || end < start || end > length)
    runtime_panicstring ("string index out of bounds");

  result.str = s.str + start;
  result.len = end - start;
  return result;
}
/* Implement make([]T, len, cap): allocate a slice with the given
   length and capacity for the slice type described by TD.  Panics
   when len or cap does not fit in int, when cap < len, or when
   cap * element-size would overflow.  */
struct __go_open_array
__go_make_slice2 (const struct __go_type_descriptor *td, uintptr_t len,
		  uintptr_t cap)
{
  const struct __go_slice_type* std;
  int ilen;
  int icap;
  uintptr_t size;
  struct __go_open_array ret;
  unsigned int flag;

  __go_assert (td->__code == GO_SLICE);
  std = (const struct __go_slice_type *) td;

  /* The round-trip through int rejects both negative results and
     values too large for int.  */
  ilen = (int) len;
  if (ilen < 0 || (uintptr_t) ilen != len)
    runtime_panicstring ("makeslice: len out of range");

  /* cap must hold len, fit in int, and cap * element-size must not
     overflow uintptr_t.  */
  icap = (int) cap;
  if (cap < len || (uintptr_t) icap != cap
      || (std->__element_type->__size > 0
	  && cap > (uintptr_t) -1U / std->__element_type->__size))
    runtime_panicstring ("makeslice: cap out of range");

  ret.__count = ilen;
  ret.__capacity = icap;

  /* Allocate for the full capacity; tell the collector when the
     element type contains no pointers so it can skip scanning.  */
  size = cap * std->__element_type->__size;
  flag = ((std->__element_type->__code & GO_NO_POINTERS) != 0
	  ? FlagNoPointers
	  : 0);
  ret.__values = runtime_mallocgc (size, flag, 1, 1);

  return ret;
}
/* A hash function for empty interface values, suitable for use as a
   map key hasher.  VVAL points at a struct __go_empty_interface;
   key_size is unused since the header has a fixed size.  A nil
   interface hashes to 0.  Panics when the dynamic type has no hash
   function.  */
uintptr_t
__go_type_hash_empty_interface (const void *vval, uintptr_t seed,
				uintptr_t key_size __attribute__ ((unused)))
{
  const struct __go_empty_interface *iface =
    (const struct __go_empty_interface *) vval;
  const struct __go_type_descriptor *td = iface->__type_descriptor;
  const void *data;

  if (td == NULL)
    return 0;

  if (td->__hashfn == NULL)
    runtime_panicstring ("hash of unhashable type");

  /* Pointer-shaped values are stored directly in the object word, so
     hash the word itself in place; otherwise __object points at the
     data to hash.  */
  if (__go_is_pointer_type (td))
    data = &iface->__object;
  else
    data = iface->__object;

  return __go_call_hashfn (td->__hashfn, data, seed, td->__size);
}
/* Delete the entry matching KEY from MAP, if present.  Deleting a
   missing key is a no-op; deleting from a nil map panics.  */
void
__go_map_delete (struct __go_map *map, const void *key)
{
  const struct __go_map_descriptor *descriptor;
  const struct __go_type_descriptor *key_descriptor;
  uintptr_t key_offset;
  _Bool (*equalfn) (const void*, const void*, uintptr_t);
  size_t key_hash;
  size_t key_size;
  size_t bucket_index;
  void **pentry;

  if (map == NULL)
    runtime_panicstring ("deletion of entry in nil map");

  descriptor = map->__descriptor;

  key_descriptor = descriptor->__map_descriptor->__key_type;
  key_offset = descriptor->__key_offset;
  key_size = key_descriptor->__size;
  /* Zero or "unknown" (-1) key sizes would make hashing meaningless.  */
  __go_assert (key_size != 0 && key_size != -1UL);
  equalfn = key_descriptor->__equalfn;

  key_hash = key_descriptor->__hashfn (key, key_size);
  bucket_index = key_hash % map->__bucket_count;

  /* Walk the bucket chain with a pointer to the link slot (pentry) so
     the matching entry can be unlinked in place.  Each entry's first
     word is its next-entry link.  */
  pentry = map->__buckets + bucket_index;
  while (*pentry != NULL)
    {
      char *entry = (char *) *pentry;
      if (equalfn (key, entry + key_offset, key_size))
	{
	  /* Splice the entry out of the chain, then free it.  */
	  *pentry = *(void **) entry;
	  __go_free (entry);
	  map->__element_count -= 1;
	  break;
	}
      pentry = (void **) entry;
    }
}
struct __go_map * __go_new_map (const struct __go_map_descriptor *descriptor, uintptr_t entries) { int32 ientries; struct __go_map *ret; /* The master library limits map entries to int32, so we do too. */ ientries = (int32) entries; if (ientries < 0 || (uintptr_t) ientries != entries) runtime_panicstring ("map size out of range"); if (entries == 0) entries = 5; else entries = __go_map_next_prime (entries); ret = (struct __go_map *) __go_alloc (sizeof (struct __go_map)); ret->__descriptor = descriptor; ret->__element_count = 0; ret->__bucket_count = entries; ret->__buckets = (void **) __go_alloc (entries * sizeof (void *)); __builtin_memset (ret->__buckets, 0, entries * sizeof (void *)); return ret; }
/* Set m[key_i] = val_i (when PRES is true) or delete m[key_i] (when
   PRES is false), for the reflect package.  M is the map as a word;
   KEY_I and VAL_I each hold either the value itself (for
   pointer-shaped types, stored directly in the word) or a pointer to
   the value.  Panics on assignment to a nil map.  */
void
mapassign (struct __go_map_type *mt, uintptr_t m, uintptr_t key_i,
	   uintptr_t val_i, _Bool pres)
{
  struct __go_map *map = (struct __go_map *) m;
  const struct __go_type_descriptor *key_descriptor;
  void *key;

  __go_assert (mt->__common.__code == GO_MAP);

  if (map == NULL)
    runtime_panicstring ("assignment to entry in nil map");

  /* For a pointer-shaped key the word IS the key, so pass the address
     of the word; otherwise the word already points at the key data.  */
  key_descriptor = mt->__key_type;
  if (__go_is_pointer_type (key_descriptor))
    key = &key_i;
  else
    key = (void *) key_i;

  if (!pres)
    __go_map_delete (map, key);
  else
    {
      void *p;
      const struct __go_type_descriptor *val_descriptor;
      void *pv;

      /* Insert (or find) the entry, then copy the value into its
	 slot; the same word-vs-pointer convention applies to val_i.  */
      p = __go_map_index (map, key, 1);
      val_descriptor = mt->__val_type;
      if (__go_is_pointer_type (val_descriptor))
	pv = &val_i;
      else
	pv = (void *) val_i;
      __builtin_memcpy (p, pv, val_descriptor->__size);
    }
}
// runtime_panicdivide raises the run-time panic for an integer
// division or modulus by zero.  It does not return.
void
runtime_panicdivide(void)
{
	runtime_panicstring("integer divide by zero");
}
/*
 * generic single channel send.
 *
 * If block is true, the full exchange will occur: the goroutine
 * sleeps until the value can be delivered.  If block is false, the
 * operation returns false instead of sleeping when it cannot
 * complete immediately.
 *
 * sleep can wake up with g->param == nil
 * when a channel involved in the sleep has
 * been closed.  it is easiest to loop and re-run
 * the operation; we'll see that it's now closed.
 */
static bool
chansend(ChanType *t, Hchan *c, byte *ep, bool block, void *pc)
{
	SudoG *sg;
	SudoG mysg;
	G* gp;
	int64 t0;
	G* g;

	g = runtime_g();

	if(raceenabled)
		runtime_racereadobjectpc(ep, t->__element_type, runtime_getcallerpc(&t), chansend);

	// Send on a nil channel blocks forever; a non-blocking send on
	// a nil channel simply fails.
	if(c == nil) {
		USED(t);
		if(!block)
			return false;
		runtime_park(nil, nil, "chan send (nil chan)");
		return false;  // not reached
	}

	// Yield to the garbage collector if it is waiting for the world
	// to stop.
	if(runtime_gcwaiting())
		runtime_gosched();

	if(debug) {
		runtime_printf("chansend: chan=%p\n", c);
	}

	// Block-profiling bookkeeping: releasetime == -1 asks the
	// wakeup path to record the release timestamp.
	t0 = 0;
	mysg.releasetime = 0;
	if(runtime_blockprofilerate > 0) {
		t0 = runtime_cputicks();
		mysg.releasetime = -1;
	}

	runtime_lock(c);
	if(raceenabled)
		runtime_racereadpc(c, pc, chansend);
	if(c->closed)
		goto closed;

	// Buffered channels take the asynchronous path.
	if(c->dataqsiz > 0)
		goto asynch;

	// Synchronous path: hand the value directly to a waiting
	// receiver, if there is one.
	sg = dequeue(&c->recvq);
	if(sg != nil) {
		if(raceenabled)
			racesync(c, sg);
		runtime_unlock(c);

		gp = sg->g;
		gp->param = sg;
		if(sg->elem != nil)
			runtime_memmove(sg->elem, ep, c->elemsize);
		if(sg->releasetime)
			sg->releasetime = runtime_cputicks();
		runtime_ready(gp);
		return true;
	}

	if(!block) {
		runtime_unlock(c);
		return false;
	}

	// No receiver: queue ourselves on sendq and sleep until a
	// receiver takes the value (or the channel is closed).
	mysg.elem = ep;
	mysg.g = g;
	mysg.selectdone = nil;
	g->param = nil;
	enqueue(&c->sendq, &mysg);
	runtime_parkunlock(c, "chan send");

	// g->param == nil means we were woken by a close, not a receiver.
	if(g->param == nil) {
		runtime_lock(c);
		if(!c->closed)
			runtime_throw("chansend: spurious wakeup");
		goto closed;
	}

	if(mysg.releasetime > 0)
		runtime_blockevent(mysg.releasetime - t0, 2);

	return true;

asynch:
	if(c->closed)
		goto closed;

	// Buffer full: either fail (non-blocking) or sleep until a
	// receiver frees a slot, then retry from the top of asynch.
	if(c->qcount >= c->dataqsiz) {
		if(!block) {
			runtime_unlock(c);
			return false;
		}
		mysg.g = g;
		mysg.elem = nil;
		mysg.selectdone = nil;
		enqueue(&c->sendq, &mysg);
		runtime_parkunlock(c, "chan send");

		runtime_lock(c);
		goto asynch;
	}

	if(raceenabled)
		runtime_racerelease(chanbuf(c, c->sendx));

	// Copy the value into the circular buffer and advance sendx.
	runtime_memmove(chanbuf(c, c->sendx), ep, c->elemsize);
	if(++c->sendx == c->dataqsiz)
		c->sendx = 0;
	c->qcount++;

	// Wake one receiver, if any is waiting for data.
	sg = dequeue(&c->recvq);
	if(sg != nil) {
		gp = sg->g;
		runtime_unlock(c);
		if(sg->releasetime)
			sg->releasetime = runtime_cputicks();
		runtime_ready(gp);
	} else
		runtime_unlock(c);
	if(mysg.releasetime > 0)
		runtime_blockevent(mysg.releasetime - t0, 2);
	return true;

closed:
	runtime_unlock(c);
	runtime_panicstring("send on closed channel");
	return false;  // not reached
}
/*
 * generic single channel send/recv
 * if the bool pointer is nil,
 * then the full exchange will
 * occur. if pres is not nil,
 * then the protocol will not
 * sleep but return if it could
 * not complete.
 *
 * sleep can wake up with g->param == nil
 * when a channel involved in the sleep has
 * been closed.  it is easiest to loop and re-run
 * the operation; we'll see that it's now closed.
 */
void
runtime_chansend(ChanType *t, Hchan *c, byte *ep, bool *pres, void *pc)
{
	SudoG *sg;
	SudoG mysg;
	G* gp;
	int64 t0;
	G* g;

	g = runtime_g();

	// Send on a nil channel blocks forever; with pres != nil the
	// send is non-blocking and reports failure instead.
	if(c == nil) {
		USED(t);
		if(pres != nil) {
			*pres = false;
			return;
		}
		runtime_park(nil, nil, "chan send (nil chan)");
		return;  // not reached
	}

	// NOTE(review): runtime_gcwaiting is tested as a value here but
	// called as a function elsewhere in this file; presumably it is
	// a flag variable in this revision of the runtime — confirm.
	if(runtime_gcwaiting)
		runtime_gosched();

	if(debug) {
		runtime_printf("chansend: chan=%p\n", c);
	}

	// Block-profiling bookkeeping: releasetime == -1 asks the
	// wakeup path to record the release timestamp.
	t0 = 0;
	mysg.releasetime = 0;
	if(runtime_blockprofilerate > 0) {
		t0 = runtime_cputicks();
		mysg.releasetime = -1;
	}

	runtime_lock(c);
	// TODO(dvyukov): add similar instrumentation to select.
	if(raceenabled)
		runtime_racereadpc(c, pc);
	if(c->closed)
		goto closed;

	// Buffered channels take the asynchronous path.
	if(c->dataqsiz > 0)
		goto asynch;

	// Synchronous path: hand the value directly to a waiting
	// receiver, if there is one.
	sg = dequeue(&c->recvq);
	if(sg != nil) {
		if(raceenabled)
			racesync(c, sg);
		runtime_unlock(c);

		gp = sg->g;
		gp->param = sg;
		if(sg->elem != nil)
			runtime_memmove(sg->elem, ep, c->elemsize);
		if(sg->releasetime)
			sg->releasetime = runtime_cputicks();
		runtime_ready(gp);

		if(pres != nil)
			*pres = true;
		return;
	}

	// Non-blocking send with no receiver waiting: fail.
	if(pres != nil) {
		runtime_unlock(c);
		*pres = false;
		return;
	}

	// No receiver: queue ourselves on sendq and sleep until a
	// receiver takes the value (or the channel is closed).
	mysg.elem = ep;
	mysg.g = g;
	mysg.selgen = NOSELGEN;
	g->param = nil;
	enqueue(&c->sendq, &mysg);
	runtime_park(runtime_unlock, c, "chan send");

	// g->param == nil means we were woken by a close, not a receiver.
	if(g->param == nil) {
		runtime_lock(c);
		if(!c->closed)
			runtime_throw("chansend: spurious wakeup");
		goto closed;
	}

	if(mysg.releasetime > 0)
		runtime_blockevent(mysg.releasetime - t0, 2);

	return;

asynch:
	if(c->closed)
		goto closed;

	// Buffer full: either fail (non-blocking) or sleep until a
	// receiver frees a slot, then retry from the top of asynch.
	if(c->qcount >= c->dataqsiz) {
		if(pres != nil) {
			runtime_unlock(c);
			*pres = false;
			return;
		}
		mysg.g = g;
		mysg.elem = nil;
		mysg.selgen = NOSELGEN;
		enqueue(&c->sendq, &mysg);
		runtime_park(runtime_unlock, c, "chan send");

		runtime_lock(c);
		goto asynch;
	}

	if(raceenabled)
		runtime_racerelease(chanbuf(c, c->sendx));

	// Copy the value into the circular buffer and advance sendx.
	runtime_memmove(chanbuf(c, c->sendx), ep, c->elemsize);
	if(++c->sendx == c->dataqsiz)
		c->sendx = 0;
	c->qcount++;

	// Wake one receiver, if any is waiting for data.
	sg = dequeue(&c->recvq);
	if(sg != nil) {
		gp = sg->g;
		runtime_unlock(c);
		if(sg->releasetime)
			sg->releasetime = runtime_cputicks();
		runtime_ready(gp);
	} else
		runtime_unlock(c);
	if(pres != nil)
		*pres = true;
	if(mysg.releasetime > 0)
		runtime_blockevent(mysg.releasetime - t0, 2);
	return;

closed:
	runtime_unlock(c);
	runtime_panicstring("send on closed channel");
}
/* Prepare a non-blocking send on CHANNEL.  Returns 1 with
   channel->lock HELD when the send can proceed (the caller must
   complete the send and release the lock); returns 0 with the lock
   released when the send cannot proceed without blocking.  Panics on
   a closed channel.  */
_Bool
__go_send_nonblocking_acquire (struct __go_channel *channel)
{
  int i;
  _Bool has_space;

  i = pthread_mutex_lock (&channel->lock);
  __go_assert (i == 0);

  /* Wait until no other sender has the channel reserved via select.  */
  while (channel->selected_for_send)
    runtime_cond_wait (&channel->cond, &channel->lock);

  if (channel->is_closed)
    {
      i = pthread_mutex_unlock (&channel->lock);
      __go_assert (i == 0);
      runtime_panicstring ("send on closed channel");
    }

  if (channel->num_entries > 0)
    /* Buffered channel: there is room unless the circular buffer is
       full (next_store + 1 catching up with next_fetch).  */
    has_space = ((channel->next_store + 1) % channel->num_entries
		 != channel->next_fetch);
  else
    {
      /* This is a synchronous channel.  If somebody is current
	 sending, then we can't send.  Otherwise, see if somebody is
	 waiting to receive, or see if we can synch with a select.  */
      if (channel->waiting_to_send)
	{
	  /* Some other goroutine is currently sending on this
	     channel, which means that we can't.  */
	  has_space = 0;
	}
      else if (channel->waiting_to_receive)
	{
	  /* Some other goroutine is waiting to receive a value, so we
	     can send directly to them.  */
	  has_space = 1;
	}
      else if (__go_synch_with_select (channel, 1))
	{
	  /* We found a select waiting to receive data, so we can send
	     to that.  */
	  __go_broadcast_to_select (channel);
	  has_space = 1;
	}
      else
	{
	  /* Otherwise, we can't send, because nobody is waiting to
	     receive.  */
	  has_space = 0;
	}

      if (has_space)
	{
	  /* Reserve the synchronous hand-off slot for ourselves.  */
	  channel->waiting_to_send = 1;
	  __go_assert (channel->next_store == 0);
	}
    }

  if (!has_space)
    {
      i = pthread_mutex_unlock (&channel->lock);
      __go_assert (i == 0);

      return 0;
    }

  /* Success: the lock remains held for the caller.  */
  return 1;
}