VALUE reval_do_call(VALUE args)
{
    // Don't use rb_obj_instance_eval here, as it will implicitly
    // use any Ruby code block that was passed. See:
    // http://banisterfiend.wordpress.com/2008/09/25/metaprogramming-in-the-ruby-c-api-part-one-blocks/
    VALUE ctx = rb_ary_shift(args);
    VALUE code = rb_ary_shift(args);
    return rb_funcall(ctx, rb_intern("instance_eval"), 1, code);
}
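reval_do_call packs its receiver and code string into a single VALUE array so it can be driven through Ruby's protected-call helpers. A minimal caller sketch, assuming a recent Ruby C API; eval_in_context and handle_eval_error are hypothetical names, not part of the original source:

static VALUE handle_eval_error(VALUE data, VALUE exc)
{
    /* translate or swallow the Ruby exception; here we just return nil */
    return Qnil;
}

VALUE eval_in_context(VALUE ctx, const char *code)
{
    /* pack [receiver, code] exactly as reval_do_call expects to shift them */
    VALUE args = rb_ary_new3(2, ctx, rb_str_new_cstr(code));
    return rb_rescue(reval_do_call, args, handle_eval_error, Qnil);
}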
VALUE Color::rb_arcLoad(VALUE self, VALUE value)
{
    VALUE arr = rb_f_str_unpack(value, "eeee");
    VALUE c_arr[4];
    c_arr[0] = rb_ary_shift(arr);
    c_arr[1] = rb_ary_shift(arr);
    c_arr[2] = rb_ary_shift(arr);
    c_arr[3] = rb_ary_shift(arr);
    return Color::create(4, c_arr);
}
static void require_libraries(VALUE *req_list)
{
    VALUE list = *req_list;
    VALUE self = rb_vm_top_self();
    ID require;
    rb_thread_t *th = GET_THREAD();
    rb_block_t *prev_base_block = th->base_block;
    rb_encoding *extenc = rb_default_external_encoding();
    int prev_parse_in_eval = th->parse_in_eval;

    th->base_block = 0;
    th->parse_in_eval = 0;

    Init_ext();  /* should be called here for some reason :-( */
    CONST_ID(require, "require");
    while (list && RARRAY_LEN(list) > 0) {
        VALUE feature = rb_ary_shift(list);
        rb_enc_associate(feature, extenc);
        RBASIC(feature)->klass = rb_cString;
        OBJ_FREEZE(feature);
        rb_funcall2(self, require, 1, &feature);
    }
    *req_list = 0;

    th->parse_in_eval = prev_parse_in_eval;
    th->base_block = prev_base_block;
}
/*
 *  call-seq:
 *     Kernel.each_backtrace_frame( & block )
 *
 *  Return array of hashes with object and method frame information for backtrace.
 *  Specifying number_of_frames will cause only the last number_of_frames to be returned.
 *  Kernel.backtrace returns all frames including the current context (__method__/__callee__).
 */
VALUE rb_RPRuby_Sender_Kernel_each_backtrace_frame(int argc, VALUE* args, VALUE rb_self)
{
    rb_thread_t* c_thread = GET_THREAD();

    // Get the current frame - we're doing a backtrace, so our current working frame
    // to start is the first previous thread
    rb_control_frame_t* c_current_context_frame =
        RUBY_VM_PREVIOUS_CONTROL_FRAME(RUBY_VM_PREVIOUS_CONTROL_FRAME(c_thread->cfp));

    // c_top_of_control_frame describes the top edge of the stack trace;
    // set c_top_of_control_frame to the first frame in <main>
    rb_control_frame_t* c_top_of_control_frame =
        RUBY_VM_NEXT_CONTROL_FRAME(
            RUBY_VM_NEXT_CONTROL_FRAME((void *)(c_thread->stack + c_thread->stack_size)));

    VALUE rb_stored_backtrace_array = Qnil;

    // if we were passed a stored backtrace array, use it
    if (argc == 1 && TYPE(args[0]) == T_ARRAY) {
        rb_stored_backtrace_array = args[0];
    }

    // for each control frame:
    while (c_current_context_frame < c_top_of_control_frame) {
        VALUE rb_frame_hash;

        // if we are using a stored backtrace we don't need to ask for a new hash
        if (rb_stored_backtrace_array == Qnil) {
            rb_frame_hash =
                rb_RPRuby_Sender_Kernel_internal_backtraceHashForControlFrame(&c_current_context_frame);
        }
        else {
            rb_frame_hash = rb_ary_shift(rb_stored_backtrace_array);
        }

        if (rb_frame_hash == Qnil) {
            break;
        }

        // if we try to iterate using an Enumerator we will lose our context
        if (!rb_block_given_p()) {
            // we solve this by assuming that the desired context is the moment when
            // each_backtrace_frame is called; this allows us to store the backtrace
            // and iterate it as we want. The only downside is that we have to get
            // the entire backtrace first in order to store it.
            rb_stored_backtrace_array = rb_RPRuby_Sender_Kernel_backtrace(0, NULL, rb_self);
            RETURN_ENUMERATOR(rb_self, 1, &rb_stored_backtrace_array);
        }

        // otherwise, yield the block
        rb_yield(rb_frame_hash);

        // only move the frame if we are not using a stored backtrace
        if (rb_stored_backtrace_array == Qnil) {
            c_current_context_frame = RUBY_VM_PREVIOUS_CONTROL_FRAME(c_current_context_frame);
        }
    }

    return Qnil;
}
static VALUE queue_do_pop(VALUE self, int should_block)
{
    struct waiting_delete args;
    args.waiting = GET_QUEUE_WAITERS(self);
    args.th      = rb_thread_current();

    while (queue_length(self) == 0) {
        if (!should_block) {
            rb_raise(rb_eThreadError, "queue empty");
        }
        else if (queue_closed_p(self)) {
            return queue_closed_result(self);
        }
        else {
            assert(queue_length(self) == 0);
            assert(queue_closed_p(self) == 0);

            rb_ary_push(args.waiting, args.th);
            rb_ensure(queue_sleep, (VALUE)0, queue_delete_from_waiting, (VALUE)&args);
        }
    }

    return rb_ary_shift(GET_QUEUE_QUE(self));
}
static void free_expr_arglist(ABSTRACT_ARGLIST* arglist)
{
    VALUE args = (VALUE)arglist;
    VALUE registory = rb_ary_shift(args);
    rb_hash_delete(registory, rb_obj_id(args));
    return;
}
static VALUE glut_Init(int argc, VALUE *argv, VALUE obj)
{
    int largc;
    char** largv;
    VALUE new_argv;
    VALUE orig_arg;
    int i;

    if (rb_scan_args(argc, argv, "01", &orig_arg) == 0)
        orig_arg = rb_eval_string("[$0] + ARGV");
    else
        Check_Type(orig_arg, T_ARRAY);

    /* converts commandline parameters from ruby to C, passes them to glutInit
       and returns the parameters stripped of glut-specific commands
       ("-display", "-geometry" etc.) */
    largc = RARRAY_LEN(orig_arg);
    largv = ALLOCA_N(char*, largc);
    for (i = 0; i < largc; i++)
        largv[i] = STR2CSTR(RARRAY_PTR(orig_arg)[i]);

    glutInit(&largc, largv);

    new_argv = rb_ary_new2(largc);
    for (i = 0; i < largc; i++)
        rb_ary_push(new_argv, rb_str_new2(largv[i]));

    rb_ary_shift(new_argv);

    return new_argv;
}
/*
  Foo = Numo::Struct.new {
    int8     :byte
    float64  :float, [2,2]
    dcomplex :compl
  }
 */
static VALUE nst_s_new(int argc, VALUE *argv, VALUE klass)
{
    VALUE name = Qnil, rest, size;
    VALUE st, members;
    ID id;

    rb_scan_args(argc, argv, "0*", &rest);
    if (RARRAY_LEN(rest) > 0) {
        name = RARRAY_AREF(rest, 0);
        if (!NIL_P(name)) {
            VALUE tmp = rb_check_string_type(name);
            if (!NIL_P(tmp)) {
                rb_ary_shift(rest);
            } else {
                name = Qnil;
            }
        }
    }

    if (NIL_P(name)) {
        st = rb_define_class_id(name, klass);
        rb_funcall(klass, rb_intern("inherited"), 1, st);
    }
    else {
        char *cname = StringValuePtr(name);
        id = rb_intern(cname);
        if (!rb_is_const_id(id)) {
            rb_name_error(id, "identifier %s needs to be constant", cname);
        }
        if (rb_const_defined_at(klass, id)) {
            rb_warn("redefining constant Struct::%s", cname);
            rb_mod_remove_const(klass, ID2SYM(id));
        }
        st = rb_define_class_under(klass, rb_id2name(id), klass);
    }

    rb_iv_set(st, "__members__", rb_ary_new());
    rb_iv_set(st, "__offset__", INT2FIX(0));

    if (rb_block_given_p()) {
        rb_mod_module_eval(0, 0, st);
    }

    size = rb_iv_get(st, "__offset__");
    members = rb_iv_get(st, "__members__");
    //printf("size=%d\n",NUM2INT(size));
    rb_define_const(st, CONTIGUOUS_STRIDE, size);
    rb_define_const(st, ELEMENT_BYTE_SIZE, size);
    rb_define_const(st, ELEMENT_BIT_SIZE, rb_funcall(size, '*', 1, INT2FIX(8)));

    OBJ_FREEZE(members);
    rb_define_const(st, "DEFINITIONS", members);

    rb_define_singleton_method(st, "new", rb_class_new_instance, -1);
    //rb_define_singleton_method(st, "[]", rb_class_new_instance, -1);
    rb_define_method(st, "allocate", nst_allocate, 0);

    return st;
}
/* rf_readdir
 *
 * Used when: 'ls'
 *
 * FuseFS will call: 'directory?' on FuseRoot with the given path
 *   as an argument. If the return value is true, then it will in turn
 *   call 'contents' and expects to receive an array of file contents.
 *
 * '.' and '..' are automatically added, so the programmer does not
 *   need to worry about those.
 */
static int rf_readdir(const char *path, void *buf, fuse_fill_dir_t filler,
                      off_t offset, struct fuse_file_info *fi)
{
    VALUE contents;
    VALUE cur_entry;
    VALUE retval;

    debug("rf_readdir(%s)\n", path);

    /* This is what fuse does to turn off 'unused' warnings. */
    (void) offset;
    (void) fi;

    /* FuseRoot must exist */
    if (FuseRoot == Qnil) {
        if (!strcmp(path, "/")) {
            filler(buf, ".", NULL, 0);
            filler(buf, "..", NULL, 0);
            return 0;
        }
        return -ENOENT;
    }

    if (strcmp(path, "/") != 0) {
        debug(" Checking is_directory? ...");
        retval = rf_call(path, is_directory, Qnil);

        if (!RTEST(retval)) {
            debug(" no.\n");
            return -ENOENT;
        }
        debug(" yes.\n");
    }

    /* These two are always in a directory */
    filler(buf, ".", NULL, 0);
    filler(buf, "..", NULL, 0);

    retval = rf_call(path, id_dir_contents, Qnil);

    if (!RTEST(retval)) {
        return 0;
    }
    if (TYPE(retval) != T_ARRAY) {
        return 0;
    }

    /* Duplicate the array, just in case. */
    /* TODO: Do this better! */
    retval = rb_funcall(retval, id_dup, 0);

    while ((cur_entry = rb_ary_shift(retval)) != Qnil) {
        if (TYPE(cur_entry) != T_STRING)
            continue;
        filler(buf, StringValuePtr(cur_entry), NULL, 0);
    }

    return 0;
}
static void wakeup_first_thread(VALUE list)
{
    VALUE thread;

    while (!NIL_P(thread = rb_ary_shift(list))) {
        if (RTEST(rb_thread_wakeup_alive(thread))) break;
    }
}
/*
 * call-seq:
 *   Statgrab.new(drop_privileges=true)
 *
 * Set privileges and prepare connections, and then drop privileges.
 * See <tt>statgrab(3)</tt> manpage.
 */
static VALUE statgrab_initialize(VALUE self, VALUE args)
{
    if (sg_init())
        statgrab_handle_error();
    if (rb_ary_shift(args) != Qfalse && sg_drop_privileges())
        statgrab_handle_error();
    return self;
}
static VALUE cr_gradient_pattern_get_color_stop_color(VALUE self, VALUE index)
{
    VALUE result, offset, rgba;

    result = cr_gradient_pattern_get_color_stop_rgba(self, index);
    offset = rb_ary_shift(result);
    rgba = result;
    return rb_ary_new3(2, offset, cr_color_parse(rgba));
}
static void call_expr_userfnc(ABSTRACT_CALLER* callback_state,
                              ABSTRACT_ARGLIST* arglist,
                              ABSTRACT_USERFUNC* hashvalptr,
                              ABSTRACT_EXPRVAL* exprval)
{
    char* empty = "";
    VALUE self = (VALUE)callback_state;
    VALUE func = (VALUE)hashvalptr;
    VALUE args = (VALUE)arglist;
    PSTRING retvalpstr = { empty, empty };

    if (hashvalptr == NULL) {
        rb_raise(rb_eRuntimeError, "FATAL INTERNAL ERROR:Call_EXPR:function called but not exists");
        tmplpro_set_expr_as_pstring(exprval, retvalpstr);
        return;
    } else if (NIL_P(func) || !rb_obj_is_kind_of(func, rb_cProc)) {
        rb_raise(rb_eRuntimeError, "FATAL INTERNAL ERROR:Call_EXPR:not a Proc object");
        tmplpro_set_expr_as_pstring(exprval, retvalpstr);
        return;
    }

    /* The head of args is not a true argument (it is a registry of args),
       so remove it before the call and restore it afterwards. */
    VALUE tmp = rb_ary_shift(args);
    VALUE retval = rb_proc_call(func, args);
    rb_ary_unshift(args, tmp);

    VALUE registory = Qnil;
    switch (TYPE(retval)) {
    case T_FIXNUM:
        tmplpro_set_expr_as_int64(exprval, FIX2LONG(retval));
        break;
    case T_TRUE:
        tmplpro_set_expr_as_int64(exprval, 1);
        break;
    case T_FALSE:
        tmplpro_set_expr_as_int64(exprval, 0);
        break;
    case T_FLOAT:
        tmplpro_set_expr_as_double(exprval, NUM2DBL(retval));
        break;
    case T_NIL:
        tmplpro_set_expr_as_null(exprval);
        break;
    default:
        registory = rb_ivar_get(self, rb_intern("@internal_expr_results"));
        rb_ary_push(registory, retval);
        retvalpstr.begin = StringValuePtr(retval);
        retvalpstr.endnext = retvalpstr.begin + RSTRING_LEN(retval);
        tmplpro_set_expr_as_pstring(exprval, retvalpstr);
    }
    return;
}
/**
 * initialize
 **/
static VALUE t_initialize(int argc, VALUE *argv, VALUE self)
{
    VALUE num;
    root_node root;

    Data_Get_Struct(self, struct _root_node, root);

    if (argc == 1) {
        while ((num = rb_ary_shift(argv[0])) != Qnil) {
            add_num(root, NUM2UINT(num));
        }
    }

    return self;
}
static VALUE each_cons_i(VALUE val, VALUE *memo)
{
    VALUE ary = memo[0];
    VALUE v = Qnil;
    long size = (long)memo[1];

    if (RARRAY_LEN(ary) == size) {
        rb_ary_shift(ary);
    }
    rb_ary_push(ary, val);
    if (RARRAY_LEN(ary) == size) {
        v = rb_yield(rb_ary_dup(ary));
    }
    return v;
}
static VALUE ServiceModule_register(VALUE self, VALUE commands)
{
    struct Service *ruby_service = get_service(self);
    struct ServiceMessage *generic_msgtab;
    VALUE command;
    long i;

    Check_Type(commands, T_ARRAY);

    for (i = RARRAY(commands)->len - 1; i >= 0; --i) {
        VALUE name, param_min, param_max, flags, access, hlp_shrt, hlp_long;
        char *tmp;

        command = rb_ary_shift(commands);
        Check_Type(command, T_ARRAY);

        name      = rb_ary_shift(command);
        param_min = rb_ary_shift(command);
        param_max = rb_ary_shift(command);
        flags     = rb_ary_shift(command);
        access    = rb_ary_shift(command);
        hlp_shrt  = rb_ary_shift(command);
        hlp_long  = rb_ary_shift(command);

        generic_msgtab = MyMalloc(sizeof(struct ServiceMessage));
        DupString(tmp, StringValueCStr(name));
        generic_msgtab->cmd = tmp;
        generic_msgtab->parameters = NUM2INT(param_min);
        generic_msgtab->maxpara    = NUM2INT(param_max);
        generic_msgtab->flags      = NUM2INT(flags);
        generic_msgtab->access     = NUM2INT(access);
        generic_msgtab->help_short = NUM2INT(hlp_shrt);
        generic_msgtab->help_long  = NUM2INT(hlp_long);
        generic_msgtab->handler = m_generic;

        mod_add_servcmd(&ruby_service->msg_tree, generic_msgtab);
    }

    return Qnil;
}
/* Sits on the queue, deals with new connections */
VALUE mtserver_fetcher_thread(VALUE self)
{
    struct timeval delay, s_delay;
    int loops = 80;
    VALUE socket;
    VALUE sockets = rb_iv_get(self, "@sockets");

    delay.tv_usec = 250000;
    s_delay.tv_usec = 500;

    mtserver_avail_add(self);
    ++RMTServer(self)->workers;

    while (loops > 0) {
        socket = rb_ary_shift(sockets);
        if (socket != Qnil) {
            mtserver_avail_dec(self);
            if (rb_iv_get(self, "@ssl") == Qnil) {
                rb_funcall(self, rb_intern("_serve"), 1, socket);
            } else {
                rb_funcall(self, rb_intern("_servessl"), 1, socket);
            }
            mtserver_avail_add(self);
        } else {
            if (RMTServer(self)->avail > 1) {
                rb_thread_wait_for(delay);
                --loops;
            } else {
                rb_thread_wait_for(s_delay);
            }
        }
    }

    mtserver_avail_dec(self);
    --RMTServer(self)->workers;
    // printf("-Thread\n");
    return Qtrue;
}
static VALUE dh_init(int argc, VALUE* argv, VALUE self)
{
    VALUE db_user, db_pass, db_args, tds_socket;
    VALUE db_name = Qnil, db_host = Qnil; /* initialized so they are nil when no extra args are given */
    TDSSOCKET *tds;

    /* Let's fill our variables... */
    rb_scan_args(argc, argv, "2*", &db_user, &db_pass, &db_args);
    db_args = rb_ary_shift(db_args);
    if (db_args != Qnil) {
        db_args = rb_str_split(db_args, ":");
        db_name = rb_ary_entry(db_args, 0);
        db_host = rb_ary_entry(db_args, 1);
    }
    if (db_host == Qnil) {
        db_host = rb_str_new2("localhost");
    }

    /* Get a TDSSOCKET */
    tds_socket = tdss_new(db_host, db_user, db_pass);
    rb_iv_set(self, "@tds_socket", tds_socket);
    Data_Get_Struct(tds_socket, TDSSOCKET, tds);

    /* If the user submitted a database name, change to it */
    if (db_name != Qnil) {
        if (tds_submit_query(tds, STR2CSTR(rb_str_concat(rb_str_new2("USE "), db_name))) != TDS_SUCCEED) {
            rb_raise(rb_eRuntimeError, "SQL-USE failed (1)");
        } else {
            process_results(tds);
        }
    }

    return self;
} // dh_init
VALUE rugged__block_yield_splat(VALUE args)
{
    VALUE block = rb_ary_shift(args);
    int n = RARRAY_LENINT(args);

    if (n == 0) {
        return rb_funcall(block, rb_intern("call"), 0);
    } else {
        int i;
        VALUE *argv;
        argv = ALLOCA_N(VALUE, n);

        for (i = 0; i < n; i++) {
            argv[i] = rb_ary_entry(args, i);
        }

        return rb_funcall2(block, rb_intern("call"), n, argv);
    }
}
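Because rugged__block_yield_splat takes a single array whose first element is the block and whose remaining elements become the call arguments, it is well suited to being run under rb_protect. A hypothetical caller sketch (call_block_safely is an illustrative name, not part of Rugged), assuming a recent Ruby C API:

static VALUE call_block_safely(VALUE block, VALUE arg1, VALUE arg2)
{
    int state = 0;
    /* first slot holds the block; the remaining slots are splatted as call arguments */
    VALUE args = rb_ary_new3(3, block, arg1, arg2);
    VALUE result = rb_protect(rugged__block_yield_splat, args, &state);
    if (state) {
        /* a Ruby exception was raised inside the block; re-raise it here */
        rb_jump_tag(state);
    }
    return result;
}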
static VALUE rb_szqueue_max_set(VALUE self, VALUE vmax)
{
    long max = NUM2LONG(vmax), diff = 0;
    VALUE t;

    if (max <= 0) {
        rb_raise(rb_eArgError, "queue size must be positive");
    }
    if ((unsigned long)max > GET_SZQUEUE_ULONGMAX(self)) {
        diff = max - GET_SZQUEUE_ULONGMAX(self);
    }
    RSTRUCT_SET(self, SZQUEUE_MAX, vmax);
    while (diff-- > 0 && !NIL_P(t = rb_ary_shift(GET_SZQUEUE_WAITERS(self)))) {
        rb_thread_wakeup_alive(t);
    }
    return vmax;
}
static VALUE ov_http_client_wait(VALUE self, VALUE request)
{
    VALUE next;
    VALUE result;
    ov_http_client_object* ptr;
    ov_http_client_wait_context context;

    /* Get the pointer to the native object and check that it isn't closed: */
    ov_http_client_ptr(self, ptr);
    ov_http_client_check_closed(ptr);

    /* Work till the transfer has been completed. */
    context.handle = ptr->handle;
    context.code = CURLE_OK;
    context.cancel = false;
    for (;;) {
        /* Move requests from the queue to libcurl: */
        while (RARRAY_LEN(ptr->queue) > 0 && RHASH_SIZE(ptr->pending) < ptr->limit) {
            next = rb_ary_shift(ptr->queue);
            ov_http_client_submit(self, next);
        }

        /* Check if the response is already available, if so then return it: */
        result = rb_hash_delete(ptr->completed, request);
        if (!NIL_P(result)) {
            return result;
        }

        /* If the response isn't available yet, then do some real work: */
        rb_thread_call_without_gvl(
            ov_http_client_wait_task,
            &context,
            ov_http_client_wait_cancel,
            &context
        );
        if (context.cancel) {
            return Qnil;
        }
        if (context.code != CURLE_OK) {
            rb_raise(ov_error_class, "Unexpected error while waiting: %s",
                     curl_easy_strerror(context.code));
        }
    }

    return Qnil;
}
VALUE rb_fairy_xmarshaled_queue_pop(VALUE self)
{
    fairy_xmarshaled_queue_t *mq;
    VALUE buf;
    struct rb_fairy_xmarshaled_queue_pop_arg arg;

    GetFairyXMarshaledQueuePtr(self, mq);

    while (NIL_P(mq->pop_queue) || RARRAY_LEN(mq->pop_queue) == 0) {
        buf = rb_xthread_fifo_pop(mq->buffers);
        if (NIL_P(buf)) {
            arg.self = self;
            arg.buf = Qnil;
            mq->mon_synchronize(mq->buffers_mon, rb_fairy_xmarshaled_queue_pop_wait, &arg);
            buf = arg.buf;
        }

        if (EOS_P(buf)) {
            mq->pop_queue = rb_ary_new3(1, buf);
        }
        else if (CLASS_OF(buf) == rb_cFairyFastTempfile) {
            buf = rb_fairy_xmarshaled_queue_restore(self, buf);
            if (CLASS_OF(buf) == rb_cFairyStringBuffer) {
                VALUE tmp = buf;
                buf = rb_fairy_string_buffer_to_a(tmp);
                rb_fairy_string_buffer_clear(tmp);
            }
            mq->pop_queue = buf;
        }
        else {
            buf = rb_marshal_load(buf);
            if (CLASS_OF(buf) == rb_cFairyStringBuffer) {
                mq->pop_queue = rb_fairy_string_buffer_to_a(buf);
                rb_fairy_string_buffer_clear(buf);
                mq->buffers_cache_no--;
            }
            else {
                mq->pop_queue = buf;
                mq->buffers_cache_no--;
            }
        }
    }
    return rb_ary_shift(mq->pop_queue);
}
/**
 * new
 **/
static VALUE t_new(int argc, VALUE *argv, VALUE klass)
{
    node root;
    VALUE obj, array, string;

    root = initialize_node(NULL_CHAR);
    obj = Data_Make_Struct(klass, struct _node, NULL, destroy_node, root);

    if (argc == 1) {
        array = argv[0];
        while ((string = rb_ary_shift(argv[0])) != Qnil) {
            t_add(obj, string);
        }
    }

    return obj;
}
static VALUE rb_queue_marshal_load(VALUE self, VALUE data)
{
    Queue *queue;
    VALUE array;
    Data_Get_Struct(self, Queue, queue);

    array = rb_marshal_load(data);
    if (TYPE(array) != T_ARRAY) {
        rb_raise(rb_eRuntimeError, "expected Array of queue data");
    }
    if (RARRAY_LEN(array) < 1) {
        rb_raise(rb_eRuntimeError, "missing capacity value");
    }
    queue->capacity = NUM2ULONG(rb_ary_shift(array));
    push_multiple_list(&queue->values, RARRAY_PTR(array), (unsigned)RARRAY_LEN(array));

    return self;
}
xmlXPathObjectPtr rxml_xpath_from_value(VALUE value)
{
    xmlXPathObjectPtr result = NULL;

    switch (TYPE(value)) {
    case T_TRUE:
    case T_FALSE:
        result = xmlXPathNewBoolean(RTEST(value));
        break;
    case T_FIXNUM:
    case T_FLOAT:
        result = xmlXPathNewFloat(NUM2DBL(value));
        break;
    case T_STRING:
        result = xmlXPathWrapString(xmlStrdup((const xmlChar *)StringValuePtr(value)));
        break;
    case T_NIL:
        result = xmlXPathNewNodeSet(NULL);
        break;
    case T_ARRAY: {
        long i, j;
        result = xmlXPathNewNodeSet(NULL);

        for (i = RARRAY_LEN(value); i > 0; i--) {
            xmlXPathObjectPtr obj = rxml_xpath_from_value(rb_ary_shift(value));

            if ((obj->nodesetval != NULL) && (obj->nodesetval->nodeNr != 0)) {
                for (j = 0; j < obj->nodesetval->nodeNr; j++) {
                    xmlXPathNodeSetAdd(result->nodesetval, obj->nodesetval->nodeTab[j]);
                }
            }
        }
        break;
    }
    default:
        rb_raise(rb_eTypeError,
                 "can't convert object of type %s to XPath object",
                 rb_obj_classname(value));
    }

    return result;
}
static void require_libraries(struct cmdline_options *opt)
{
    VALUE list = opt->req_list;
    ID require;
    rb_thread_t *th = GET_THREAD();
    rb_block_t *prev_base_block = th->base_block;
    int prev_parse_in_eval = th->parse_in_eval;

    th->base_block = 0;
    th->parse_in_eval = 0;

    Init_ext();  /* should be called here for some reason :-( */
    CONST_ID(require, "require");
    while (list && RARRAY_LEN(list) > 0) {
        VALUE feature = rb_ary_shift(list);
        rb_funcall2(rb_vm_top_self(), require, 1, &feature);
    }
    opt->req_list = 0;

    th->parse_in_eval = prev_parse_in_eval;
    th->base_block = prev_base_block;
}
static VALUE storage_observe_callback(VALUE args, VALUE cookie)
{
    struct cb_context_st *ctx = (struct cb_context_st *)cookie;
    struct cb_bucket_st *bucket = ctx->bucket;
    VALUE res = rb_ary_shift(args);

    if (ctx->proc != Qnil) {
        rb_ivar_set(res, cb_id_iv_operation, ctx->operation);
        cb_proc_call(bucket, ctx->proc, 1, res);
    }
    if (!RTEST(ctx->observe_options)) {
        ctx->nqueries--;
        if (ctx->nqueries == 0) {
            ctx->proc = Qnil;
            if (bucket->async) {
                cb_context_free(ctx);
            }
        }
    }
    return Qnil;
}
static VALUE array_spec_rb_ary_shift(VALUE self, VALUE array)
{
    return rb_ary_shift(array);
}
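The spec helper above simply forwards to rb_ary_shift, which removes and returns the first element of the array, or Qnil when the array is empty. A small standalone sketch of that behavior (shift_demo is a hypothetical function, not part of the specs):

static VALUE shift_demo(VALUE self)
{
    VALUE ary = rb_ary_new3(2, INT2FIX(1), INT2FIX(2));
    VALUE first  = rb_ary_shift(ary);   /* => 1, ary is now [2] */
    VALUE second = rb_ary_shift(ary);   /* => 2, ary is now []  */
    VALUE third  = rb_ary_shift(ary);   /* => Qnil, the array stays empty */
    return rb_ary_new3(3, first, second, third);
}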
static void process_sflag(struct cmdline_options *opt)
{
    if (opt->sflag) {
        long n;
        VALUE *args;
        VALUE argv = rb_argv;

        n = RARRAY_LEN(argv);
        args = RARRAY_PTR(argv);
        while (n > 0) {
            VALUE v = *args++;
            char *s = StringValuePtr(v);
            char *p;
            int hyphen = Qfalse;

            if (s[0] != '-')
                break;
            n--;
            if (s[1] == '-' && s[2] == '\0')
                break;

            v = Qtrue;
            /* check if valid name before replacing - with _ */
            for (p = s + 1; *p; p++) {
                if (*p == '=') {
                    *p++ = '\0';
                    v = rb_str_new2(p);
                    break;
                }
                if (*p == '-') {
                    hyphen = Qtrue;
                }
                else if (*p != '_' && !ISALNUM(*p)) {
                    VALUE name_error[2];
                    name_error[0] = rb_str_new2("invalid name for global variable - ");
                    if (!(p = strchr(p, '='))) {
                        rb_str_cat2(name_error[0], s);
                    }
                    else {
                        rb_str_cat(name_error[0], s, p - s);
                    }
                    name_error[1] = args[-1];
                    rb_exc_raise(rb_class_new_instance(2, name_error, rb_eNameError));
                }
            }
            s[0] = '$';
            if (hyphen) {
                for (p = s + 1; *p; ++p) {
                    if (*p == '-')
                        *p = '_';
                }
            }
            rb_gv_set(s, v);
        }
        n = RARRAY_LEN(argv) - n;
        while (n--) {
            rb_ary_shift(argv);
        }
    }
    opt->sflag = 0;
}
static VALUE rg_insert(int argc, VALUE *argv, VALUE self)
{
    VALUE parent, position, values, ret;
    GtkTreeIter iter;
    GtkTreeStore* model = _SELF(self);

    rb_scan_args(argc, argv, "21", &parent, &position, &values);

    if (NIL_P(values)) {
        gtk_tree_store_insert(model, &iter,
                              NIL_P(parent) ? NULL : RVAL2GTKTREEITER(parent),
                              NUM2INT(position));
        iter.user_data3 = model;
        ret = GTKTREEITER2RVAL(&iter);
        G_CHILD_ADD(self, ret);
    } else {
#if GTK_CHECK_VERSION(2,10,0)
        gint *c_columns;
        GValue *c_values;
        long size, i;

        size = NUM2INT(rb_funcall(values, rb_intern("size"), 0));
        c_columns = ALLOCA_N(gint, size);
        c_values = ALLOCA_N(GValue, size);

        if (TYPE(values) == T_ARRAY) {
            for (i = 0; i < size; i++) {
                GType gtype;
                GValue gval = G_VALUE_INIT;
                c_columns[i] = i;
                gtype = gtk_tree_model_get_column_type(GTK_TREE_MODEL(RVAL2GOBJ(self)),
                                                       c_columns[i]);
                g_value_init(&gval, gtype);
                rbgobj_rvalue_to_gvalue(rb_ary_shift(values), &gval);
                c_values[i] = gval;
            }
        } else if (TYPE(values) == T_HASH) {
            VALUE r_columns;
            r_columns = rb_funcall(values, rb_intern("keys"), 0);
            for (i = 0; i < size; i++) {
                GType gtype;
                GValue gval = G_VALUE_INIT;
                c_columns[i] = NUM2INT(rb_ary_entry(r_columns, i));
                gtype = gtk_tree_model_get_column_type(GTK_TREE_MODEL(RVAL2GOBJ(self)),
                                                       c_columns[i]);
                g_value_init(&gval, gtype);
                rbgobj_rvalue_to_gvalue(rb_hash_aref(values, INT2NUM(c_columns[i])), &gval);
                c_values[i] = gval;
            }
        } else {
            rb_raise(rb_eArgError, "values must be of type Hash or Array");
        }

        gtk_tree_store_insert_with_valuesv(model, &iter,
                                           NIL_P(parent) ? NULL : RVAL2GTKTREEITER(parent),
                                           NUM2INT(position),
                                           c_columns, c_values, size);
        iter.user_data3 = model;
        ret = GTKTREEITER2RVAL(&iter);
        G_CHILD_ADD(self, ret);

        for (i = 0; i < size; i++) {
            G_CHILD_ADD(ret, rbgobj_gvalue_to_rvalue(&(c_values[i])));
            g_value_unset(&(c_values[i]));
        }
#else
        rb_warn("Gtk::TreeStore#insert(parent, position, values) requires GTK+-2.10.0 or later");
        gtk_tree_store_insert(model, &iter,
                              NIL_P(parent) ? NULL : RVAL2GTKTREEITER(parent),
                              NUM2INT(position));
        iter.user_data3 = model;
        ret = GTKTREEITER2RVAL(&iter);
        G_CHILD_ADD(self, ret);
#endif
    }
    return ret;
}