/* :nodoc: */
static VALUE
enumerator_init_copy(VALUE obj, SEL sel, VALUE orig)
{
    struct enumerator *src_e = enumerator_ptr(orig);

    // A running fiber carries execution state that cannot be duplicated.
    if (src_e->fib) {
	rb_raise(rb_eTypeError, "can't copy execution context");
    }

    struct enumerator *dst_e;
    Data_Get_Struct(obj, struct enumerator, dst_e);
    if (dst_e == NULL) {
	rb_raise(rb_eArgError, "unallocated enumerator");
    }

    // Shallow-copy the receiver, selector and arguments; GC-managed slots
    // go through write barriers.
    GC_WB(&dst_e->obj, src_e->obj);
    dst_e->sel = src_e->sel;
    if (src_e->args != 0) {
	GC_WB(&dst_e->args, src_e->args);
    }
    dst_e->fib = 0;
    return obj;
}
// Initialize a Dispatch::Source: create the underlying GCD dispatch source,
// install the given Ruby block as its event handler, and resume it
// (dispatch sources are created in a suspended state).
//   type:   Integer mapped to a dispatch_source_type_t via rb_source_enum2type
//   handle: type-specific handle (fd, signal number, ...), coerced to uintptr_t
//   mask:   type-specific flag mask
//   queue:  Dispatch::Queue the handler is dispatched on
static VALUE
rb_source_init(VALUE self, SEL sel, VALUE type, VALUE handle, VALUE mask,
	VALUE queue)
{
    Check_Queue(queue);
    rb_source_t *src = RSource(self);
    src->source_enum = (source_enum_t) NUM2LONG(type);
    dispatch_source_type_t c_type = rb_source_enum2type(src->source_enum);
    assert(c_type != NULL);
    uintptr_t c_handle = NUM2UINT(rb_Integer(handle));
    unsigned long c_mask = NUM2LONG(mask);
    dispatch_queue_t c_queue = RQueue(queue)->queue;
    src->source = dispatch_source_create(c_type, c_handle, c_mask, c_queue);
    assert(src->source != NULL);
    // Capture the caller's block; raises ArgumentError if none was given.
    rb_vm_block_t *block = get_prepared_block();
    GC_WB(&src->event_handler, block);
    GC_RETAIN(self); // apparently needed to ensure consistent counting
    // The Ruby object itself is the context handed back to the C trampoline.
    dispatch_set_context(src->source, (void *)self);
    dispatch_source_set_event_handler_f(src->source, rb_source_event_handler);
    GC_WB(&src->handle, handle);
    // File-backed sources wrapping an IO get a cancel handler so the
    // descriptor is closed when the source is cancelled.
    if (rb_source_is_file(src) && rb_obj_is_kind_of(handle, rb_cIO)) {
	dispatch_source_set_cancel_handler_f(src->source,
		rb_source_close_handler);
    }
    rb_dispatch_resume(self, 0);
    return self;
}
// Duplicate a VM block, including the flexible dvars array that trails the
// struct, and deep-copy its linked list of locals. GC-visible pointer slots
// are refreshed through write barriers after the raw memcpy.
static rb_vm_block_t *
dup_block(rb_vm_block_t *src_b)
{
    // The dvars array is allocated inline after the struct, so the copy size
    // must include it.
    const size_t block_size = sizeof(rb_vm_block_t)
	+ (sizeof(VALUE *) * src_b->dvars_size);
    rb_vm_block_t *new_b = (rb_vm_block_t *)xmalloc(block_size);
    memcpy(new_b, src_b, block_size);
    new_b->proc = src_b->proc; // weak
    GC_WB(&new_b->parent_block, src_b->parent_block);
    GC_WB(&new_b->self, src_b->self);
    // The copy starts out inactive regardless of the original's state.
    new_b->flags = src_b->flags & ~VM_BLOCK_ACTIVE;
    // Rebuild the locals list node by node; names/values are shared.
    rb_vm_local_t *src_l = src_b->locals;
    rb_vm_local_t **new_l = &new_b->locals;
    while (src_l != NULL) {
	GC_WB(new_l, xmalloc(sizeof(rb_vm_local_t)));
	(*new_l)->name = src_l->name;
	(*new_l)->value = src_l->value;
	new_l = &(*new_l)->next;
	src_l = src_l->next;
    }
    *new_l = NULL;
    return new_b;
}
// Build a Method object descriptor for `mid` on `klass`, bound to receiver
// `obj`. Resolves the selector/IMP, finds the topmost class in the hierarchy
// that still responds to the selector (the method's "owner"), and attaches a
// pre-filled dispatch cache so later calls skip lookup.
rb_vm_method_t *
rb_vm_get_method(VALUE klass, VALUE obj, ID mid, int scope)
{
    SEL sel = 0;
    IMP imp = NULL;
    rb_vm_method_node_t *node = NULL;
    // TODO honor scope
    if (!rb_vm_lookup_method2((Class)klass, mid, &sel, &imp, &node)) {
	rb_print_undef(klass, mid, 0);
    }
    // Walk up the superclass chain as long as ancestors still implement the
    // selector; `oklass` ends up as the highest class defining the method.
    Class k, oklass = (Class)klass;
    while ((k = class_getSuperclass(oklass)) != NULL) {
	if (!rb_vm_lookup_method(k, sel, NULL, NULL)) {
	    break;
	}
	oklass = k;
    }
    Method method = class_getInstanceMethod((Class)klass, sel);
    assert(method != NULL);
    int arity;
    rb_vm_method_node_t *new_node;
    if (node == NULL) {
	// Pure Objective-C method: derive arity from the encoded signature
	// (minus the implicit self and _cmd arguments).
	arity = rb_method_getNumberOfArguments(method) - 2;
	new_node = NULL;
    }
    else {
	// Ruby-defined method: snapshot the node so the Method object stays
	// valid even if the method is later redefined.
	arity = rb_vm_arity_n(node->arity);
	new_node = (rb_vm_method_node_t *)xmalloc(sizeof(rb_vm_method_node_t));
	memcpy(new_node, node, sizeof(rb_vm_method_node_t));
    }
    rb_vm_method_t *m = (rb_vm_method_t *)xmalloc(sizeof(rb_vm_method_t));
    m->oclass = (VALUE)oklass;
    m->rclass = klass;
    GC_WB(&m->recv, obj);
    m->sel = sel;
    m->arity = arity;
    GC_WB(&m->node, new_node);
    // Let's allocate a static cache here, since a rb_vm_method_t must always
    // point to the method it was created from.
    struct mcache *c = (struct mcache *)xmalloc(sizeof(struct mcache));
    if (new_node == NULL) {
	fill_ocache(c, obj, oklass, imp, sel, method, arity);
    }
    else {
	fill_rcache(c, oklass, sel, new_node);
    }
    GC_WB(&m->cache, c);
    return m;
}
/* :nodoc: */
static VALUE
name_err_mesg_new(VALUE obj, SEL sel, VALUE mesg, VALUE recv, VALUE method)
{
    // Pack (message, receiver, method name) into a GC-visible triple and
    // wrap it as the lazily-rendered NameError message object.
    VALUE *slots = ALLOC_N(VALUE, 3);
    GC_WB(&slots[0], mesg);
    GC_WB(&slots[1], recv);
    GC_WB(&slots[2], method);
    return Data_Wrap_Struct(rb_cNameErrorMesg, NULL, NULL, slots);
}
// Objective-C -keyEnumerator implementation for RubyHash. Snapshots the
// current keys into an Array so that subsequent hash mutation does not
// invalidate the enumerator, then returns a fresh key-enumerator object
// positioned at the start.
static void *
imp_rhash_keyEnumerator(void *rcv, SEL sel)
{
    NEWOBJ(keyenum, rb_hash_keyenum_t);
    keyenum->klass = rb_cRubyHashKeyEnumerator;
    GC_WB(&keyenum->hash, rcv);
    VALUE ary = rb_ary_new();
    // st_foreach_safe tolerates the table being touched during iteration.
    st_foreach_safe(RHASH(rcv)->tbl, keys_i, (st_data_t)ary);
    GC_WB(&keyenum->keys, ary);
    keyenum->pos = 0;
    return keyenum;
}
static VALUE
binding_dup(VALUE self, SEL sel)
{
    VALUE copy = binding_alloc(rb_cBinding);
    rb_vm_binding_t *from, *to;
    GetBindingPtr(self, from);
    GetBindingPtr(copy, to);
    // Shallow-copy every GC-managed field of the binding through write
    // barriers; locals and outer stack remain shared with the original.
    GC_WB(&to->self, from->self);
    GC_WB(&to->next, from->next);
    GC_WB(&to->locals, from->locals);
    GC_WB(&to->outer_stack, from->outer_stack);
    GC_WB(&to->block, from->block);
    return copy;
}
static VALUE
thgroup_s_alloc(VALUE self, SEL sel)
{
    rb_thread_group_t *group =
	(rb_thread_group_t *)xmalloc(sizeof(rb_thread_group_t));
    group->enclosed = false;

    // Fresh, untrusted member list.
    GC_WB(&group->threads, rb_ary_new());
    OBJ_UNTRUST(group->threads);

    // Each group owns a mutex protecting its thread list.
    VALUE mutex = mutex_s_alloc(rb_cMutex, 0);
    mutex_initialize(mutex, 0);
    GC_WB(&group->mutex, mutex);

    return Data_Wrap_Struct(self, NULL, NULL, group);
}
// ObjectSpace.define_finalizer: register `block` (or the given callable) to
// run when `obj` is collected. Finalizer state is stored in an associative
// reference keyed off the object; multiple finalizers accumulate in an array.
// Returns [safe_level, block] for RubySpec conformance.
static VALUE
define_final(VALUE os, SEL sel, int argc, VALUE *argv)
{
    VALUE obj, block;
    rb_scan_args(argc, argv, "11", &obj, &block);
    if (argc == 1) {
	block = rb_block_proc();
    }
    else if (!rb_respond_to(block, rb_intern("call"))) {
	rb_raise(rb_eArgError, "wrong type argument %s (should be callable)",
		rb_obj_classname(block));
    }
    // Immediates (Fixnum, Symbol, nil, ...) are never collected.
    if (SPECIAL_CONST_P(obj)) {
	rb_raise(rb_eArgError, "immediate types are not finalizable");
    }
    rb_vm_finalizer_t *finalizer = rb_objc_get_associative_ref((void *)obj,
	    &finalizer_key);
    if (finalizer == NULL) {
	// BUG FIX: was sizeof(rb_vm_finalizer_t *), which only allocates
	// pointer-sized storage and corrupts the heap when the struct
	// fields below are written. Allocate the full struct.
	finalizer = (rb_vm_finalizer_t *)
	    rb_objc_newobj(sizeof(rb_vm_finalizer_t));
	finalizer->klass = rb_cFinalizer;
	finalizer->objid = rb_obj_id(obj, 0);
	GC_WB(&finalizer->finalizers, rb_ary_new());
	rb_objc_set_associative_ref((void *)obj, &finalizer_key, finalizer);
	rb_vm_register_finalizer(finalizer);
    }
    rb_ary_push(finalizer->finalizers, block);
    // For RubySpec conformance.
    return rb_ary_new3(2, INT2FIX(rb_safe_level()), block);
}
// Mutex#lock: block until the mutex is acquired by the current thread.
// Raises ThreadError on recursive locking. While blocked, the thread is
// marked THREAD_SLEEP (restored afterwards) and flagged as waiting so the
// scheduler/deadlock detector can see it. Acquired mutexes are tracked in
// the thread's `mutexes` array.
static VALUE
rb_mutex_lock(VALUE self, SEL sel)
{
    rb_vm_thread_t *current = GetThreadPtr(rb_vm_current_thread());
    rb_vm_mutex_t *m = GetMutexPtr(self);
    rb_vm_thread_status_t prev_status;
    if (m->thread == current) {
	rb_raise(rb_eThreadError, "deadlock; recursive locking");
    }
    prev_status = current->status;
    if (current->status == THREAD_ALIVE) {
	current->status = THREAD_SLEEP;
    }
    current->wait_for_mutex_lock = true;
    pthread_assert(pthread_mutex_lock(&m->mutex));
    current->wait_for_mutex_lock = false;
    current->status = prev_status;
    m->thread = current;
    if (current->mutexes == Qnil) {
	// FIX: source was garbled — "GC_WB(&curren" had been mangled into
	// the mojibake "GC_WB(¤t" (HTML entity &curren;). Restore the
	// intended address-of expression.
	GC_WB(&current->mutexes, rb_ary_new());
	OBJ_UNTRUST(current->mutexes);
    }
    rb_ary_push(current->mutexes, self);
    return self;
}
static VALUE
proc_dup(VALUE self)
{
    VALUE copy = rb_proc_alloc(rb_cProc);
    rb_proc_t *from, *to;
    GetProcPtr(self, from);
    GetProcPtr(copy, to);
    // Copy the block by value, then repoint its proc back-reference at the
    // new Proc object so the pair stays consistent.
    to->block = from->block;
    GC_WB(&to->block.proc, copy);
    GC_WB(&to->envval, from->envval);
    to->safe_level = from->safe_level;
    to->is_lambda = from->is_lambda;
    return copy;
}
// Point the libyaml parser at a new input: a String is parsed in place, a
// File/IO is read through rb_yaml_io_read_handler, nil detaches the input.
// The input object is also stored on the wrapper so it outlives the call.
static VALUE
rb_yaml_parser_set_input(VALUE self, SEL sel, VALUE input)
{
    rb_yaml_parser_t *rbparser = RYAMLParser(self);
    yaml_parser_t *parser = &rbparser->parser;
    if (!NIL_P(input)) {
	assert(parser != NULL);
	if (TYPE(input) == T_STRING) {
	    // FIX: use the string's recorded byte length instead of
	    // strlen(), which would silently truncate input containing
	    // embedded NUL bytes.
	    yaml_parser_set_input_string(parser,
		    (const unsigned char *)RSTRING_PTR(input),
		    RSTRING_LEN(input));
	}
	else if (TYPE(input) == T_FILE) {
	    yaml_parser_set_input(parser, rb_yaml_io_read_handler,
		    (void *)input);
	}
	else {
	    rb_raise(rb_eArgError, "invalid input for YAML parser: %s",
		    rb_obj_classname(input));
	}
    }
    // Keep a reference so the GC cannot reclaim the input while parsing.
    GC_WB(&rbparser->input, input);
    return input;
}
static VALUE
rb_yaml_emitter_set_output(VALUE self, SEL sel, VALUE output)
{
    rb_yaml_emitter_t *remitter = RYAMLEmitter(self);
    // Anchor the output object on the wrapper so the GC keeps it alive.
    GC_WB(&remitter->output, output);
    yaml_emitter_t *emitter = &remitter->emitter;
    if (NIL_P(output)) {
	// nil simply detaches the emitter from any output.
	return output;
    }
    switch (TYPE(output)) {
	case T_FILE:
	    yaml_emitter_set_output(emitter, rb_yaml_io_output_handler,
		    (void *)output);
	    break;
	case T_STRING:
	    yaml_emitter_set_output(emitter, rb_yaml_str_output_handler,
		    (void *)output);
	    break;
	default:
	    rb_raise(rb_eArgError, "unsupported YAML output type %s",
		    rb_obj_classname(output));
    }
    return output;
}
/*
 * call-seq:
 *    prc.binding    => binding
 *
 * Returns the binding associated with <i>prc</i>. Note that
 * <code>Kernel#eval</code> accepts either a <code>Proc</code> or a
 * <code>Binding</code> object as its second parameter.
 *
 *    def fred(param)
 *      proc {}
 *    end
 *
 *    b = fred(99)
 *    eval("param", b.binding)   #=> 99
 */
static VALUE
proc_binding(VALUE self, SEL sel)
{
    rb_vm_block_t *block;
    GetProcPtr(self, block);
    // Build a fresh binding that shares the proc's self and locals; it has
    // no block and no outer stack of its own.
    rb_vm_binding_t *binding = (rb_vm_binding_t *)xmalloc(
	    sizeof(rb_vm_binding_t));
    binding->block = NULL;
    GC_WB(&binding->self, block->self);
    GC_WB(&binding->locals, block->locals);
    binding->outer_stack = NULL;
    return Data_Wrap_Struct(rb_cBinding, NULL, NULL, binding);
}
// Allocator for Struct subclasses. Small structs (up to
// RSTRUCT_EMBED_LEN_MAX members) store their values inline in the object
// ("embedded" layout, length encoded in the flags); larger ones allocate a
// separate heap array. The member count comes from the class's __size__ ivar
// set at Struct definition time.
static VALUE
struct_alloc(VALUE klass)
{
    VALUE size;
    long n;
    NEWOBJ(st, struct RStruct);
    OBJSETUP(st, klass, T_STRUCT);
    size = rb_struct_iv_get(klass, "__size__");
    n = FIX2LONG(size);
    if (0 < n && n <= RSTRUCT_EMBED_LEN_MAX) {
	// Embedded layout: record the length in the flag bits and zero the
	// inline slots.
	RBASIC(st)->flags &= ~RSTRUCT_EMBED_LEN_MASK;
	RBASIC(st)->flags |= n << RSTRUCT_EMBED_LEN_SHIFT;
	rb_mem_clear(st->as.ary, n);
    }
    else {
	// Heap layout: allocate (or skip for zero members) and record the
	// explicit length.
	if (n > 0) {
	    GC_WB(&st->as.heap.ptr, xmalloc_ptrs(sizeof(VALUE) * n));
	    rb_mem_clear(st->as.heap.ptr, n);
	}
	else {
	    st->as.heap.ptr = NULL;
	}
	st->as.heap.len = n;
    }
    return (VALUE)st;
}
// Generic Struct attribute writer dispatched by selector name: the selector
// "foo=:" is stripped of its trailing "=:" to recover the member name "foo",
// which is then looked up in the struct's member list.
static VALUE
rb_struct_set(VALUE obj, SEL sel, VALUE val)
{
    VALUE members, slot;
    long i;

    // foo=: -> foo
    char buf[100];
    const size_t s = strlcpy(buf, sel_getName(sel), sizeof buf);
    // FIX: strlcpy returns the full source length; if the selector was
    // truncated (s >= sizeof buf) or degenerate (s < 2), buf[s - 2] would
    // index out of bounds or clobber the wrong byte. Reject instead.
    if (s < 2 || s >= sizeof buf) {
	rb_raise(rb_eArgError, "invalid setter selector `%s'",
		sel_getName(sel));
    }
    buf[s - 2] = '\0';
    ID field = rb_intern(buf);

    members = rb_struct_members(obj);
    rb_struct_modify(obj);
    for (i = 0; i < RARRAY_LEN(members); i++) {
	slot = RARRAY_AT(members, i);
	if (SYM2ID(slot) == field) {
	    GC_WB(&RSTRUCT_PTR(obj)[i], val);
	    return val;
	}
    }
    rb_name_error(rb_frame_this_func(), "`%s' is not a struct member",
	    rb_id2name(field));
    return Qnil; /* not reached */
}
// Initialize an rb_fdset_t to an empty descriptor set backed by a freshly
// allocated fd_set.
void
rb_fd_init(volatile rb_fdset_t *fds)
{
    GC_WB(&fds->fdset, ALLOC(fd_set));
    FD_ZERO(fds->fdset);
    fds->maxfd = 0;
}
// Allocate a new AST node of the given type with up to three child values,
// each stored through a write barrier.
NODE*
rb_node_newnode(enum node_type type, VALUE a0, VALUE a1, VALUE a2)
{
    NODE *n = xmalloc(sizeof(struct RNode));
    // NOTE(review): |= on freshly allocated flags assumes xmalloc returns
    // zeroed memory — TODO confirm that guarantee holds for this allocator.
    n->flags |= T_NODE;
    nd_set_type(n, type);
    GC_WB(&n->u1.value, a0);
    GC_WB(&n->u2.value, a1);
    GC_WB(&n->u3.value, a2);
    // FIXME this retain is added because the parser is NOT GC-safe at this point
    GC_RETAIN(n);
    return n;
}
// Hash#default=: install `ifnone` as the hash's plain default value.
// Setting a plain default always clears any default proc.
VALUE
rhash_set_default(VALUE hash, SEL sel, VALUE ifnone)
{
    rhash_modify(hash);
    RHASH(hash)->has_proc_default = false;
    GC_WB(&RHASH(hash)->ifnone, ifnone);
    return ifnone;
}
// Allocator for ThreadGroup: a fresh, open (non-enclosed) group with an
// empty thread list.
static VALUE
thgroup_s_alloc(VALUE self, SEL sel)
{
    rb_thread_group_t *group =
	(rb_thread_group_t *)xmalloc(sizeof(rb_thread_group_t));
    group->enclosed = false;
    GC_WB(&group->threads, rb_ary_new());
    return Data_Wrap_Struct(rb_cThGroup, NULL, NULL, group);
}
// Wrap a C function pointer as a VM block (IFUNC) with an attached piece of
// user data.
rb_vm_block_t *
rb_vm_create_block(IMP imp, VALUE self, VALUE userdata)
{
    rb_vm_block_t *blk = rb_vm_prepare_block((void *)imp, VM_BLOCK_IFUNC,
	    self,
	    rb_vm_arity(0), // not used
	    NULL, NULL, 0, 0);
    GC_WB(&blk->userdata, userdata);
    return blk;
}
// YAML::Resolver#initialize: create the tag dictionary. Keys use default
// (pointer) callbacks; values are CF-retained. CFMakeCollectable hands the
// dictionary over to the garbage collector after the write barrier has
// recorded the reference.
static VALUE
rb_yaml_resolver_initialize(VALUE self, SEL sel)
{
    rb_yaml_resolver_t *resolver = RYAMLResolver(self);
    CFMutableDictionaryRef d = CFDictionaryCreateMutable(NULL, 0, NULL,
	    &kCFTypeDictionaryValueCallBacks);
    GC_WB(&resolver->tags, d);
    CFMakeCollectable(d);
    return self;
}
// Copy a plain fd_set into an rb_fdset_t, growing (never shrinking below
// sizeof(fd_set)) the backing storage to hold descriptors up to `max`.
void
rb_fd_copy(rb_fdset_t *dst, const fd_set *src, int max)
{
    // Bytes needed to cover `max` descriptors, rounded up to whole fd_mask
    // words; clamped to at least a full fd_set.
    int size = howmany(max, NFDBITS) * sizeof(fd_mask);

    if (size < sizeof(fd_set)) size = sizeof(fd_set);
    dst->maxfd = max;
    GC_WB(&dst->fdset, xrealloc(dst->fdset, size));
    memcpy(dst->fdset, src, size);
}
// Fill in an Objective-C block literal that trampolines into a Ruby proc.
// The literal is GC-managed (auto block) and uses the shared descriptor.
PRIMITIVE void
vm_init_c_block(struct ruby_block_literal *b, void *imp, VALUE proc)
{
    b->isa = &_NSConcreteAutoBlock;
    b->flags = __MR_BLOCK_IS_GC | __MR_BLOCK_HAS_DESCRIPTOR;
    b->reserved = 0;
    b->descriptor = &ruby_block_descriptor_value;
    b->imp = imp;
    // The proc reference is GC-visible, so store it through a write barrier.
    GC_WB(&b->ruby_proc, proc);
}
// Allocate a symbol object for the given string and interned ID.
static rb_sym_t *
sym_alloc(VALUE str, ID id)
{
    // Symbol class must already be booted.
    assert(rb_cSymbol != 0);
    rb_sym_t *sym = (rb_sym_t *)xmalloc(sizeof(rb_sym_t));
    sym->klass = rb_cSymbol;
    sym->id = id;
    GC_WB(&sym->str, str);
    return sym;
}
// Hash#initialize: with a block, install it as the default proc (no
// positional argument allowed); otherwise accept an optional plain default
// value.
static VALUE
rhash_initialize(VALUE hash, SEL sel, int argc, const VALUE *argv)
{
    rhash_modify(hash);
    if (rb_block_given_p()) {
	// Hash.new { |h, k| ... } — block form takes no positional default.
	if (argc > 0) {
	    rb_raise(rb_eArgError, "wrong number of arguments");
	}
	RHASH(hash)->has_proc_default = true;
	GC_WB(&RHASH(hash)->ifnone, rb_block_proc());
	return hash;
    }
    VALUE default_value;
    rb_scan_args(argc, argv, "01", &default_value);
    if (default_value != Qnil) {
	GC_WB(&RHASH(hash)->ifnone, default_value);
    }
    return hash;
}
// Hash#rehash: rebuild the underlying st_table so entries whose keys'
// hash values have changed (mutated keys) land in the right buckets.
static VALUE
rhash_rehash(VALUE hash, SEL sel)
{
    rhash_modify(hash);
    st_table *tbl = st_init_table_with_size(RHASH(hash)->tbl->type,
	    RHASH(hash)->tbl->num_entries);
    rb_hash_foreach(hash, rhash_rehash_i, (VALUE)tbl);
    // The old table is intentionally not freed here; reclamation is left to
    // the collector.
    //st_free_table(RHASH(hash)->tbl);
    GC_WB(&RHASH(hash)->tbl, tbl);
    return hash;
}
// Populate a Range object: validate that non-fixnum endpoints are mutually
// comparable (via range_check under rescue), then store the exclusivity
// flag and both endpoints.
static void
range_init(VALUE range, VALUE beg, VALUE end, int exclude_end)
{
    if (!FIXNUM_P(beg) || !FIXNUM_P(end)) {
	VALUE args[2] = { beg, end };
	VALUE cmp = rb_rescue(range_check, (VALUE)args, range_failed, 0);
	if (NIL_P(cmp)) {
	    range_failed();
	}
    }
    SET_EXCL(range, exclude_end);
    GC_WB(&RANGE_BEG(range), beg);
    GC_WB(&RANGE_END(range), end);
}
// Capture the caller's block and make it safe to retain beyond the current
// frame. Raises ArgumentError when no block was given. Depending on the
// build flag, either duplicates the block and re-homes each dynamic variable
// into its own heap slot, or marks the block as a detachable proc.
static rb_vm_block_t *
get_prepared_block()
{
    rb_vm_block_t *block = rb_vm_current_block();
    if (block == NULL) {
	rb_raise(rb_eArgError, "block not given");
    }
#if GCD_BLOCKS_COPY_DVARS
    block = rb_vm_dup_block(block);
    // Copy each dvar value into a freshly allocated slot so the duplicate
    // no longer references the originating stack frame.
    for (int i = 0; i < block->dvars_size; i++) {
	VALUE *slot = block->dvars[i];
	VALUE *new_slot = xmalloc(sizeof(VALUE));
	GC_WB(new_slot, *slot);
	GC_WB(&block->dvars[i], new_slot);
    }
#else
    rb_vm_block_make_detachable_proc(block);
#endif
    // Keep the block alive for the dispatch source/handler that stores it.
    GC_RETAIN(block);
    return block;
}
static VALUE make_struct(VALUE name, VALUE members, VALUE klass) { VALUE nstr; ID id; long i, len; OBJ_FREEZE(members); if (NIL_P(name)) { nstr = rb_class_new(klass); #if !WITH_OBJC rb_make_metaclass(nstr, RBASIC(klass)->klass); #endif rb_class_inherited(klass, nstr); } else { /* old style: should we warn? */ name = rb_str_to_str(name); id = rb_to_id(name); if (!rb_is_const_id(id)) { rb_name_error(id, "identifier %s needs to be constant", StringValuePtr(name)); } if (rb_const_defined_at(klass, id)) { rb_warn("redefining constant Struct::%s", StringValuePtr(name)); rb_mod_remove_const(klass, ID2SYM(id)); } nstr = rb_define_class_under(klass, rb_id2name(id), klass); } rb_ivar_set(nstr, id_members, members); rb_objc_define_method(*(VALUE *)nstr, "alloc", struct_alloc, 0); rb_objc_define_method(*(VALUE *)nstr, "new", rb_class_new_instance_imp, -1); rb_objc_define_method(*(VALUE *)nstr, "[]", rb_class_new_instance_imp, -1); rb_objc_define_method(*(VALUE *)nstr, "members", rb_struct_s_members_m, 0); len = RARRAY_LEN(members); for (i=0; i< len; i++) { ID id = SYM2ID(RARRAY_AT(members, i)); if (rb_is_local_id(id) || rb_is_const_id(id)) { long j = i; /* Needed for block data reference. */ /* Struct attribute reader */ rb_objc_define_method(nstr, rb_id2name(id), pl_imp_implementationWithBlock(^(VALUE obj) { return RSTRUCT_PTR(obj)[j]; }), 0); /* Struct attribute writer */ rb_objc_define_method(nstr, rb_id2name(rb_id_attrset(id)), pl_imp_implementationWithBlock(^(VALUE obj, VALUE val) { VALUE *ptr = RSTRUCT_PTR(obj); rb_struct_modify(obj); GC_WB(&ptr[i], val); return val; }), 1);