static ffi_type * go_array_to_ffi (const struct __go_array_type *descriptor) { ffi_type *ret; uintptr_t len; ffi_type *element; uintptr_t i; ret = (ffi_type *) __go_alloc (sizeof (ffi_type)); ret->type = FFI_TYPE_STRUCT; len = descriptor->__len; if (len == 0) { /* The libffi library won't accept an empty struct. */ ret->elements = (ffi_type **) __go_alloc (2 * sizeof (ffi_type *)); ret->elements[0] = &ffi_type_void; ret->elements[1] = NULL; return ret; } ret->elements = (ffi_type **) __go_alloc ((len + 1) * sizeof (ffi_type *)); element = go_type_to_ffi (descriptor->__element_type); for (i = 0; i < len; ++i) ret->elements[i] = element; ret->elements[len] = NULL; return ret; }
/* Create a map of type T: compute the entry layout (chain pointer,
   then key, then value, each suitably aligned), allocate an empty map
   with that layout, and return a pointer to a heap cell holding the
   map pointer, as a uintptr_t.  */

uintptr_t
makemap (const struct __go_map_type *t)
{
  struct __go_map_descriptor *desc;
  const struct __go_type_descriptor *key_type;
  const struct __go_type_descriptor *val_type;
  unsigned int offset;
  struct __go_map *m;
  void *cell;

  /* FIXME: Reference count.  */
  desc = (struct __go_map_descriptor *) __go_alloc (sizeof (*desc));
  desc->__map_descriptor = t;

  /* Each entry begins with a pointer linking it into its bucket
     chain.  */
  offset = sizeof (void *);

  key_type = t->__key_type;
  offset = ((offset + key_type->__field_align - 1)
	    & ~ (key_type->__field_align - 1));
  desc->__key_offset = offset;
  offset += key_type->__size;

  val_type = t->__val_type;
  offset = ((offset + val_type->__field_align - 1)
	    & ~ (val_type->__field_align - 1));
  desc->__val_offset = offset;
  offset += val_type->__size;

  /* Pad the entry size so that every field stays aligned if entries
     are laid out back to back.  */
  offset = (offset + sizeof (void *) - 1) & ~ (sizeof (void *) - 1);
  offset = ((offset + key_type->__field_align - 1)
	    & ~ (key_type->__field_align - 1));
  offset = ((offset + val_type->__field_align - 1)
	    & ~ (val_type->__field_align - 1));
  desc->__entry_size = offset;

  m = __go_new_map (desc, 0);

  cell = __go_alloc (sizeof (void *));
  __builtin_memcpy (cell, &m, sizeof (void *));
  return (uintptr_t) cell;
}
/* Set the C environment variable K to the value V.

   Go String data is not necessarily NUL terminated, so a terminated
   copy is made whenever the byte just past the string data is not
   already a NUL.  (NOTE(review): that one-past-the-end read assumes
   the byte after the string payload is readable — confirm against the
   Go string allocator.)  */

void
setenv_c (String k, String v)
{
  const byte *ks;
  unsigned char *kn;
  const byte *vs;
  unsigned char *vn;

  ks = k.str;
  if (ks == NULL)
    ks = (const byte *) "";
  kn = NULL;

  vs = v.str;
  if (vs == NULL)
    vs = (const byte *) "";
  vn = NULL;

#ifdef HAVE_SETENV

  /* setenv copies its arguments, so any temporaries may be freed
     below.  ks/vs can no longer be NULL here, so only the terminator
     check is needed.  */
  if (ks[k.len] != 0)
    {
      kn = __go_alloc (k.len + 1);
      __builtin_memcpy (kn, ks, k.len);
      ks = kn;
    }

  if (vs[v.len] != 0)
    {
      vn = __go_alloc (v.len + 1);
      __builtin_memcpy (vn, vs, v.len);
      vs = vn;
    }

  setenv ((const char *) ks, (const char *) vs, 1);

#else /* !defined(HAVE_SETENV) */

  /* Build a "key=value" string for putenv.  */
  kn = __go_alloc (k.len + v.len + 2);
  __builtin_memcpy (kn, ks, k.len);
  kn[k.len] = '=';
  __builtin_memcpy (kn + k.len + 1, vs, v.len);
  kn[k.len + v.len + 1] = '\0';
  putenv ((char *) kn);

  /* POSIX putenv makes the string itself part of the environment, so
     it must NOT be freed; clear kn so the cleanup below skips it and
     the buffer is deliberately leaked.  */
  kn = NULL;

#endif /* !defined(HAVE_SETENV) */

  if (kn != NULL)
    __go_free (kn);
  if (vn != NULL)
    __go_free (vn);
}
/* Remove the variable named K from the C environment.

   Go String data is not necessarily NUL terminated, so a terminated
   copy is made when needed.  On platforms without unsetenv this
   function is a no-op.  */

void
unsetenv_c (String k)
{
  const byte *ks;
  unsigned char *kn;

  ks = k.str;
  if (ks == NULL)
    ks = (const byte *) "";
  kn = NULL;

#ifdef HAVE_UNSETENV

  intgo len;

  /* ks[k.len] reads the byte just past the string data to check for
     an existing NUL terminator; if absent, make a terminated copy.
     (NOTE(review): assumes that byte is readable — confirm against
     the Go string allocator.)  */
  if (ks != NULL && ks[k.len] != 0)
    {
      // Objects that are explicitly freed must be at least 16 bytes in size,
      // so that they are not allocated using tiny alloc.
      len = k.len + 1;
      if (len < TinySize)
	len = TinySize;
      kn = __go_alloc (len);
      __builtin_memcpy (kn, ks, k.len);
      ks = kn;
    }

  unsetenv ((const char *) ks);

#endif /* defined(HAVE_UNSETENV) */

  if (kn != NULL)
    __go_free (kn);
}
/* Return a pointer to the value slot for KEY in MAP.  If the key is
   absent: return NULL when INSERT is false; otherwise insert a zeroed
   entry for KEY and return a pointer to its (zeroed) value slot.
   Inserting into a nil map panics.  */

void *
__go_map_index (struct __go_map *map, const void *key, _Bool insert)
{
  const struct __go_map_descriptor *descriptor;
  const struct __go_type_descriptor *key_descriptor;
  uintptr_t key_offset;
  _Bool (*equalfn) (const void*, const void*, uintptr_t);
  size_t key_hash;
  size_t key_size;
  size_t bucket_index;
  char *entry;

  if (map == NULL)
    {
      if (insert)
	runtime_panicstring ("assignment to entry in nil map");
      return NULL;
    }

  descriptor = map->__descriptor;
  key_descriptor = descriptor->__map_descriptor->__key_type;
  key_offset = descriptor->__key_offset;
  key_size = key_descriptor->__size;

  /* A key size of 0 or -1 is rejected here.  NOTE(review): presumably
     these mark key types that cannot be hashed — confirm against the
     type descriptor definitions.  */
  __go_assert (key_size != 0 && key_size != -1UL);

  equalfn = key_descriptor->__equalfn;

  key_hash = key_descriptor->__hashfn (key, key_size);
  bucket_index = key_hash % map->__bucket_count;

  /* Walk the bucket chain; the first word of each entry is the link
     to the next entry in the same bucket.  */
  entry = (char *) map->__buckets[bucket_index];
  while (entry != NULL)
    {
      if (equalfn (key, entry + key_offset, key_size))
	return entry + descriptor->__val_offset;
      entry = *(char **) entry;
    }

  if (!insert)
    return NULL;

  /* Grow when the load factor reaches 1.  Rehashing redistributes the
     entries, so the bucket index must be recomputed afterwards.  */
  if (map->__element_count >= map->__bucket_count)
    {
      __go_map_rehash (map);
      bucket_index = key_hash % map->__bucket_count;
    }

  /* Allocate a zeroed entry, copy in the key, and link it at the head
     of its bucket chain.  */
  entry = (char *) __go_alloc (descriptor->__entry_size);
  __builtin_memset (entry, 0, descriptor->__entry_size);
  __builtin_memcpy (entry + key_offset, key, key_size);
  *(char **) entry = map->__buckets[bucket_index];
  map->__buckets[bucket_index] = entry;
  map->__element_count += 1;

  return entry + descriptor->__val_offset;
}
/* Allocate N bytes and record the allocation on the current M's
   cgomal list so the memory stays reachable.  */

void *
alloc_saved (size_t n)
{
  void *mem;
  M *mp;
  CgoMal *entry;

  mem = __go_alloc (n);
  mp = runtime_m ();
  entry = (CgoMal *) __go_alloc (sizeof (CgoMal));

  /* Push the new allocation onto the front of the list.  */
  entry->next = mp->cgomal;
  entry->alloc = mem;
  mp->cgomal = entry;

  return mem;
}
/* Allocate N bytes and record the allocation on the current G's
   cgomal list so the memory stays reachable.  */

void *
alloc_saved (size_t n)
{
  void *mem;
  G *gp;
  CgoMal *entry;

  mem = __go_alloc (n);
  gp = runtime_g ();
  entry = (CgoMal *) __go_alloc (sizeof (CgoMal));

  /* Push the new allocation onto the front of the list.  */
  entry->next = gp->cgomal;
  entry->alloc = mem;
  gp->cgomal = entry;

  return mem;
}
static ffi_type * go_array_to_ffi (const struct __go_array_type *descriptor) { ffi_type *ret; uintptr_t len; ffi_type *element; uintptr_t i; ret = (ffi_type *) __go_alloc (sizeof (ffi_type)); ret->type = FFI_TYPE_STRUCT; len = descriptor->__len; ret->elements = (ffi_type **) __go_alloc ((len + 1) * sizeof (ffi_type *)); element = go_type_to_ffi (descriptor->__element_type); for (i = 0; i < len; ++i) ret->elements[i] = element; ret->elements[len] = NULL; return ret; }
void _cgo_panic (const char *p) { int len; unsigned char *data; struct __go_string *ps; struct __go_empty_interface e; len = __builtin_strlen (p); data = __go_alloc (len); __builtin_memcpy (data, p, len); ps = __go_alloc (sizeof *ps); ps->__data = data; ps->__length = len; e.__type_descriptor = &string_type_descriptor; e.__object = ps; __go_panic (e); }
/* Allocate a hash iterator for the map M (whose type MT must be a map
   type) and initialize it via __go_mapiterinit.  */

unsigned char *
mapiterinit (struct __go_map_type *mt, uintptr_t m)
{
  struct __go_hash_iter *iter;

  __go_assert (mt->__common.__code == GO_MAP);
  iter = __go_alloc (sizeof (struct __go_hash_iter));
  __go_mapiterinit ((struct __go_map *) m, iter);
  return (unsigned char *) iter;
}
/* Grow MAP: allocate roughly twice as many buckets (rounded up to a
   prime) and relink every existing entry onto the new chains.  */

static void
__go_map_rehash (struct __go_map *map)
{
  const struct __go_map_descriptor *desc;
  const struct __go_type_descriptor *ktype;
  uintptr_t koff;
  size_t ksize;
  size_t (*hash) (const void *, size_t);
  uintptr_t old_count;
  void **old_buckets;
  uintptr_t new_count;
  void **new_buckets;
  uintptr_t bi;

  desc = map->__descriptor;
  ktype = desc->__map_descriptor->__key_type;
  koff = desc->__key_offset;
  ksize = ktype->__size;
  hash = ktype->__hashfn;

  old_count = map->__bucket_count;
  old_buckets = map->__buckets;

  new_count = __go_map_next_prime (old_count * 2);
  new_buckets = (void **) __go_alloc (new_count * sizeof (void *));
  __builtin_memset (new_buckets, 0, new_count * sizeof (void *));

  for (bi = 0; bi < old_count; ++bi)
    {
      char *entry = old_buckets[bi];

      while (entry != NULL)
	{
	  char *next = *(char **) entry;
	  size_t idx;

	  /* We could speed up rehashing at the cost of memory space
	     by caching the hash code.  */
	  idx = hash (entry + koff, ksize) % new_count;

	  /* Push the entry onto the front of its new chain; the first
	     word of each entry is the chain link.  */
	  *(char **) entry = new_buckets[idx];
	  new_buckets[idx] = entry;

	  entry = next;
	}
    }

  __go_free (old_buckets);

  map->__bucket_count = new_count;
  map->__buckets = new_buckets;
}
/* Allocate a new, empty map laid out per DESCRIPTOR, sized for
   roughly ENTRIES entries.  */

struct __go_map *
__go_new_map (const struct __go_map_descriptor *descriptor,
	      uintptr_t entries)
{
  struct __go_map *m;

  /* Reject sizes that do not survive a round trip through int.  */
  if ((uintptr_t) (int) entries != entries)
    __go_panic_msg ("map size out of range");

  /* Use a small default bucket count, otherwise round up to a
     prime.  */
  entries = entries == 0 ? 5 : __go_map_next_prime (entries);

  m = (struct __go_map *) __go_alloc (sizeof (struct __go_map));
  m->__descriptor = descriptor;
  m->__element_count = 0;
  m->__bucket_count = entries;
  m->__buckets = (void **) __go_alloc (entries * sizeof (void *));
  __builtin_memset (m->__buckets, 0, entries * sizeof (void *));

  return m;
}
/* Allocate storage for one value of the type whose descriptor is
   carried in TYPE, returning a raw pointer to it.  */

void *
unsafe_New (struct __go_empty_interface type)
{
  const struct __go_type_descriptor *td;

  /* A tagged descriptor pointer means the interface word is not a
     plain type descriptor.  */
  if (((uintptr_t) type.__type_descriptor & reflectFlags) != 0)
    runtime_panicstring ("invalid interface value");

  /* FIXME: We should check __type_descriptor to verify that this is
     really a type descriptor.  */
  td = (const struct __go_type_descriptor *) type.__object;
  return __go_alloc (td->__size);
}
struct __go_map * __go_new_map (const struct __go_map_descriptor *descriptor, uintptr_t entries) { int32 ientries; struct __go_map *ret; /* The master library limits map entries to int32, so we do too. */ ientries = (int32) entries; if (ientries < 0 || (uintptr_t) ientries != entries) runtime_panicstring ("map size out of range"); if (entries == 0) entries = 5; else entries = __go_map_next_prime (entries); ret = (struct __go_map *) __go_alloc (sizeof (struct __go_map)); ret->__descriptor = descriptor; ret->__element_count = 0; ret->__bucket_count = entries; ret->__buckets = (void **) __go_alloc (entries * sizeof (void *)); __builtin_memset (ret->__buckets, 0, entries * sizeof (void *)); return ret; }
/* Look up KEY_I in the map M of type MT.  Returns the value as a
   uintptr_t word plus a presence flag; pointer-typed values travel in
   the word itself, other values are copied into freshly allocated
   storage whose address is returned.  */

struct mapaccess_ret
mapaccess (struct __go_map_type *mt, uintptr_t m, uintptr_t key_i)
{
  struct __go_map *map = (struct __go_map *) m;
  void *key;
  const struct __go_type_descriptor *key_descriptor;
  void *p;
  const struct __go_type_descriptor *val_descriptor;
  struct mapaccess_ret ret;
  void *val;
  void *pv;

  __go_assert (mt->__common.__code == GO_MAP);

  /* A pointer-typed key is carried in the word KEY_I itself; other
     key types pass KEY_I as a pointer to the key data.  */
  key_descriptor = mt->__key_type;
  if (__go_is_pointer_type (key_descriptor))
    key = &key_i;
  else
    key = (void *) key_i;

  /* A lookup in a nil map simply misses.  */
  if (map == NULL)
    p = NULL;
  else
    p = __go_map_index (map, key, 0);

  /* Mirror the key convention for the returned value: pointer-typed
     values go in the word, others are boxed.  The box is allocated
     even on a miss, in which case it is returned untouched.  */
  val_descriptor = mt->__val_type;
  if (__go_is_pointer_type (val_descriptor))
    {
      val = NULL;
      pv = &val;
    }
  else
    {
      val = __go_alloc (val_descriptor->__size);
      pv = val;
    }

  if (p == NULL)
    ret.pres = 0;
  else
    {
      __builtin_memcpy (pv, p, val_descriptor->__size);
      ret.pres = 1;
    }

  ret.val = (uintptr_t) val;
  return ret;
}
void __go_defer (_Bool *frame, void (*pfn) (void *), void *arg) { G *g; struct __go_defer_stack *n; g = runtime_g (); n = (struct __go_defer_stack *) __go_alloc (sizeof (struct __go_defer_stack)); n->__next = g->defer; n->__frame = frame; n->__panic = g->panic; n->__pfn = pfn; n->__arg = arg; n->__retaddr = NULL; g->defer = n; }
/* Unwind the stack for a panic by raising a C++-ABI-style exception
   tagged with the Go exception class.  The compiler-generated handlers
   in functions with defer statements catch it to stop the unwind at
   the right frame.  Does not return.  */

void
__go_unwind_stack ()
{
  struct _Unwind_Exception *hdr;

  hdr = ((struct _Unwind_Exception *)
	 __go_alloc (sizeof (struct _Unwind_Exception)));
  __builtin_memcpy (&hdr->exception_class, &__go_exception_class,
		    sizeof hdr->exception_class);
  hdr->exception_cleanup = NULL;

  /* Remember the in-flight exception so it can be rethrown later
     (e.g. by __go_check_defer).  */
  runtime_g ()->exception = hdr;

#ifdef __USING_SJLJ_EXCEPTIONS__
  _Unwind_SjLj_RaiseException (hdr);
#else
  _Unwind_RaiseException (hdr);
#endif

  /* Raising an exception should not return.  */
  abort ();
}
struct mapiterkey_ret mapiterkey (unsigned char *ita) { struct __go_hash_iter *it = (struct __go_hash_iter *) ita; struct mapiterkey_ret ret; if (it->entry == NULL) { ret.key = 0; ret.ok = 0; } else { const struct __go_type_descriptor *key_descriptor; void *key; void *pk; key_descriptor = it->map->__descriptor->__map_descriptor->__key_type; if (__go_is_pointer_type (key_descriptor)) { key = NULL; pk = &key; } else { key = __go_alloc (key_descriptor->__size); pk = key; } __go_mapiter1 (it, pk); ret.key = (uintptr_t) key; ret.ok = 1; } return ret; }
  /* (Tail of a go_array_to_ffi copy whose start lies outside this
     chunk: fill in one slot per element plus the NULL terminator
     libffi requires.)  */
  for (i = 0; i < len; ++i)
    ret->elements[i] = element;
  ret->elements[len] = NULL;
  return ret;
}

/* Return an ffi_type for a Go slice type.  This describes the
   __go_open_array type defined in array.h.  */

static ffi_type *
go_slice_to_ffi (
    const struct __go_slice_type *descriptor __attribute__ ((unused)))
{
  ffi_type *ret;

  ret = (ffi_type *) __go_alloc (sizeof (ffi_type));
  ret->type = FFI_TYPE_STRUCT;
  /* A slice is modelled as a pointer plus two int fields (presumably
     the length and capacity — see __go_open_array in array.h), with
     the NULL terminator libffi requires.  */
  ret->elements = (ffi_type **) __go_alloc (4 * sizeof (ffi_type *));
  ret->elements[0] = &ffi_type_pointer;
  ret->elements[1] = &ffi_type_sint;
  ret->elements[2] = &ffi_type_sint;
  ret->elements[3] = NULL;
  return ret;
}

/* Return an ffi_type for a Go struct type.  */

static ffi_type *
go_struct_to_ffi (const struct __go_struct_type *descriptor)
{
  ffi_type *ret;
  /* (Remainder of go_struct_to_ffi lies outside this chunk.)  */
/* Allocate N bytes via the Go runtime allocator.  */

void *
_cgo_allocate (size_t n)
{
  void *p;

  p = __go_alloc (n);
  return p;
}
/* Called during exception unwinding in a frame that uses defer.
   Depending on what kind of exception/panic is in flight, either runs
   the frame's deferred functions, stops the unwind because a recover
   fired, or rethrows the exception to keep unwinding.  On return,
   *FRAME is 1 when the caller should simply return and 0 when it is
   being panicked through.  */

void
__go_check_defer (_Bool *frame)
{
  G *g;
  struct _Unwind_Exception *hdr;

  g = runtime_g ();

  if (g == NULL)
    {
      /* Some other language has thrown an exception.  We know there
	 are no defer handlers, so there is nothing to do.
	 NOTE(review): control still falls through to the
	 g->exception dereference below in this case — looks like a
	 NULL dereference; confirm whether this path is reachable.  */
    }
  else if (g->isforeign)
    {
      Panic *n;
      _Bool recovered;

      /* Some other language has thrown an exception.  We need to run
	 the local defer handlers.  If they call recover, we stop
	 unwinding the stack here.  Push a placeholder foreign panic
	 so recover has something to mark.  */
      n = (Panic *) __go_alloc (sizeof (Panic));
      n->arg.__type_descriptor = NULL;
      n->arg.__object = NULL;
      n->recovered = 0;
      n->isforeign = 1;
      n->next = g->_panic;
      g->_panic = n;

      /* Run this frame's defers; stop at the first entry belonging to
	 another frame or already consumed (pfn == 0).  */
      while (1)
	{
	  Defer *d;
	  void (*pfn) (void *);

	  d = g->_defer;
	  if (d == NULL || d->frame != frame || d->pfn == 0)
	    break;

	  pfn = (void (*) (void *)) d->pfn;
	  g->_defer = d->next;
	  (*pfn) (d->arg);

	  /* With no memory context (cgo callback), leave the Defer
	     for the garbage collector.  */
	  if (runtime_m () != NULL)
	    runtime_freedefer (d);

	  if (n->recovered)
	    {
	      /* The recover function caught the panic thrown by some
		 other language.  */
	      break;
	    }
	}

      recovered = n->recovered;
      g->_panic = n->next;
      __go_free (n);

      if (recovered)
	{
	  /* Just return and continue executing Go code.  */
	  *frame = 1;
	  return;
	}

      /* We are panicing through this function.  */
      *frame = 0;
    }
  else if (g->_defer != NULL
	   && g->_defer->pfn == 0
	   && g->_defer->frame == frame)
    {
      Defer *d;

      /* This is the defer function which called recover.  Simply
	 return to stop the stack unwind, and let the Go code continue
	 to execute.  */
      d = g->_defer;
      g->_defer = d->next;
      if (runtime_m () != NULL)
	runtime_freedefer (d);

      /* We are returning from this function.  */
      *frame = 1;
      return;
    }

  /* This is some other defer function.  It was already run by the
     call to panic, or just above.  Rethrow the exception.  */
  hdr = (struct _Unwind_Exception *) g->exception;

#ifdef __USING_SJLJ_EXCEPTIONS__
  _Unwind_SjLj_Resume_or_Rethrow (hdr);
#else
#if defined(_LIBUNWIND_STD_ABI)
  /* LLVM libunwind's stable ABI lacks _Unwind_Resume_or_Rethrow.  */
  _Unwind_RaiseException (hdr);
#else
  _Unwind_Resume_or_Rethrow (hdr);
#endif
#endif

  /* Rethrowing the exception should not return.  */
  abort();
}
/* Start a panic with value ARG: push it on the goroutine's panic
   stack and run deferred functions until one recovers (then unwind
   the stack via an exception) or none is left (then crash the
   program).  Does not return normally.  */

void
__go_panic (struct __go_empty_interface arg)
{
  G *g;
  Panic *n;

  g = runtime_g ();

  n = (Panic *) __go_alloc (sizeof (Panic));
  n->arg = arg;
  n->next = g->_panic;
  g->_panic = n;

  /* Run all the defer functions.  */
  while (1)
    {
      Defer *d;
      void (*pfn) (void *);

      d = g->_defer;
      if (d == NULL)
	break;

      /* Clear the function pointer before the call.  NOTE(review):
	 presumably so a nested panic cannot run this defer again, and
	 because __go_check_defer treats pfn == 0 as the marker for the
	 recovering frame — confirm.  */
      pfn = (void (*) (void *)) d->pfn;
      d->pfn = 0;

      if (pfn != NULL)
	{
	  (*pfn) (d->arg);

	  if (n->recovered)
	    {
	      /* Some defer function called recover.  That means that
		 we should stop running this panic.  */
	      g->_panic = n->next;
	      __go_free (n);

	      /* Now unwind the stack by throwing an exception.  The
		 compiler has arranged to create exception handlers in
		 each function which uses a defer statement.  These
		 exception handlers will check whether the entry on the
		 top of the defer stack is from the current function.
		 If it is, we have unwound the stack far enough.  */
	      __go_unwind_stack ();

	      /* __go_unwind_stack should not return.  */
	      abort ();
	    }

	  /* Because we executed that defer function by a panic, and
	     it did not call recover, we know that we are not
	     returning from the calling function--we are panicing
	     through it.  */
	  *d->frame = 0;
	}

      g->_defer = d->next;

      /* This may be called by a cgo callback routine to defer the
	 call to syscall.CgocallBackDone, in which case we will not
	 have a memory context.  Don't try to free anything in that
	 case--the GC will release it later.  */
      if (runtime_m () != NULL)
	runtime_freedefer (d);
    }

  /* The panic was not recovered.  */
  runtime_startpanic ();
  __printpanics (g->_panic);
  runtime_dopanic (0);
}
/* Start a panic with value ARG (older variant using the global
   __go_panic_defer bookkeeping rather than per-goroutine fields):
   push the panic, run deferred functions until one recovers (then
   unwind the stack via an exception) or none is left (then abort).
   Does not return normally.  */

void
__go_panic (struct __go_empty_interface arg)
{
  struct __go_panic_stack *n;

  /* Lazily create the shared panic/defer state.  */
  if (__go_panic_defer == NULL)
    __go_panic_defer = ((struct __go_panic_defer_struct *)
			__go_alloc (sizeof (struct __go_panic_defer_struct)));

  n = (struct __go_panic_stack *)
    __go_alloc (sizeof (struct __go_panic_stack));
  n->__arg = arg;
  n->__next = __go_panic_defer->__panic;
  __go_panic_defer->__panic = n;

  /* Run all the defer functions.  */
  while (1)
    {
      struct __go_defer_stack *d;
      void (*pfn) (void *);

      d = __go_panic_defer->__defer;
      if (d == NULL)
	break;

      /* Clear __pfn before the call.  NOTE(review): presumably so a
	 nested panic cannot run this defer a second time — confirm.  */
      pfn = d->__pfn;
      d->__pfn = NULL;

      if (pfn != NULL)
	{
	  (*pfn) (d->__arg);

	  if (n->__was_recovered)
	    {
	      /* Some defer function called recover.  That means that
		 we should stop running this panic.  */
	      __go_panic_defer->__panic = n->__next;
	      __go_free (n);

	      /* Now unwind the stack by throwing an exception.  The
		 compiler has arranged to create exception handlers in
		 each function which uses a defer statement.  These
		 exception handlers will check whether the entry on the
		 top of the defer stack is from the current function.
		 If it is, we have unwound the stack far enough.  */
	      __go_unwind_stack ();

	      /* __go_unwind_stack should not return.  */
	      abort ();
	    }
	}

      /* Pop and release the consumed defer entry.  */
      __go_panic_defer->__defer = d->__next;
      __go_free (d);
    }

  /* The panic was not recovered.  */
  __printpanics (__go_panic_defer->__panic);

  /* FIXME: We should dump a call stack here.  */
  abort ();
}