/*
 * Yield `n` trailing VALUE arguments to the current block.
 * With no arguments this short-circuits to a bare yield.
 */
VALUE
rb_yield_values(int n, ...)
{
    if (n == 0) {
        return rb_yield_0(0, 0);
    }

    VALUE *vals = ALLOCA_N(VALUE, n);
    va_list ap;
    int idx;

    va_start(ap, n);
    for (idx = 0; idx < n; idx++) {
        vals[idx] = va_arg(ap, VALUE);
    }
    va_end(ap);

    return rb_yield_0(n, vals);
}
/*
 * Win32: wait for any of `count` handles in `events` to signal, up to
 * `timeout` ms. When `th` has an interrupt event, it is appended as one
 * extra wait target so a thread interrupt can break the wait; if that
 * extra handle is the one that fired, errno is set to EINTR.
 * Returns the raw WaitForMultipleObjects() result.
 */
static int
w32_wait_events(HANDLE *events, int count, DWORD timeout, rb_thread_t *th)
{
    HANDLE *targets = events;
    HANDLE intr;
    DWORD ret;

    thread_debug(" w32_wait_events events:%p, count:%d, timeout:%ld, th:%p\n",
                 events, count, timeout, th);
    if (th && (intr = th->native_thread_data.interrupt_event)) {
        /* arm a fresh interrupt event; re-signal immediately if the thread
         * is already flagged as interrupted so the wait returns at once */
        w32_reset_event(intr);
        if (RUBY_VM_INTERRUPTED(th)) {
            w32_set_event(intr);
        }

        /* build a copy of the handle array with the interrupt event appended */
        targets = ALLOCA_N(HANDLE, count + 1);
        memcpy(targets, events, sizeof(HANDLE) * count);
        targets[count++] = intr;
        thread_debug(" * handle: %p (count: %d, intr)\n", intr, count);
    }

    thread_debug(" WaitForMultipleObjects start (count: %d)\n", count);
    ret = WaitForMultipleObjects(count, targets, FALSE, timeout);
    thread_debug(" WaitForMultipleObjects end (ret: %lu)\n", ret);

    /* the last slot is the interrupt event (only present when th was given) */
    if (ret == WAIT_OBJECT_0 + count - 1 && th) {
        errno = EINTR;
    }
    if (ret == -1 && THREAD_DEBUG) {
        /* WAIT_FAILED: probe each handle to see which one is invalid */
        int i;
        DWORD dmy;
        for (i = 0; i < count; i++) {
            thread_debug(" * error handle %d - %s\n", i,
                         GetHandleInformation(targets[i], &dmy) ? "OK" : "NG");
        }
    }
    return ret;
}
/*
 * Collect `parc` trailing VALUE arguments into a stack array and forward
 * to do_rubyv(). With no arguments, forwards a NULL argument vector.
 */
VALUE
do_ruby(VALUE recv, ID id, int parc, ...)
{
    VALUE *parv = 0;

    if (parc > 0) {
        va_list ap;
        int idx;

        parv = ALLOCA_N(VALUE, parc);
        va_start(ap, parc);
        for (idx = 0; idx < parc; idx++) {
            parv[idx] = va_arg(ap, VALUE);
        }
        va_end(ap);
    }

    return do_rubyv(recv, id, parc, parv);
}
/*
 * Call method `mid` on `recv` with `n` trailing VALUE arguments,
 * using FCALL (private-call) semantics.
 */
VALUE
rb_funcall(VALUE recv, ID mid, int n, ...)
{
    VALUE *vec = 0;
    va_list ap;

    va_init_list(ap, n);
    if (n > 0) {
        long k;

        vec = ALLOCA_N(VALUE, n);
        for (k = 0; k < n; k++) {
            vec[k] = va_arg(ap, VALUE);
        }
        va_end(ap);
    }
    return rb_call(CLASS_OF(recv), recv, mid, n, vec, CALL_FCALL);
}
/*
 * GLib::UnixFDList#initialize(*fds): wraps the given file descriptors in a
 * new GUnixFDList. With no arguments an empty list is created.
 */
static VALUE
unixfdlist_initialize(int argc, VALUE *argv, VALUE self)
{
    VALUE rbfds;
    gint i, n_fds;
    gint *fds;

    rb_scan_args(argc, argv, "0*", &rbfds);
    n_fds = RARRAY_LEN(rbfds);
    if (n_fds == 0) {
        G_INITIALIZE(self, g_unix_fd_list_new());
        return Qnil;
    }
    fds = ALLOCA_N(gint, n_fds);
    for (i = 0; i < n_fds; i++)
        /* BUGFIX: was `fds[0] = ...`, which kept overwriting the first slot
         * and left fds[1..n_fds-1] uninitialized. */
        fds[i] = RVAL2FD(RARRAY_PTR(rbfds)[i]);
    G_INITIALIZE(self, g_unix_fd_list_new_from_array(fds, n_fds));
    return Qnil;
}
/*
 * Gtk::TreeModelFilter#set_modify_func(*types) { |model, iter, column| ... }
 *
 * The block should return the value for the requested column, e.g.
 *
 *   filter.set_modify_func(String) do |model, iter, column|
 *     "foo"
 *   end
 */
static VALUE
rg_set_modify_func(int argc, VALUE *argv, VALUE self)
{
    VALUE func = rb_block_proc();
    GType *column_types;
    gint idx;

    if (argc == 0)
        rb_raise(rb_eArgError, "need more than 1 class type.");

    column_types = ALLOCA_N(GType, argc);
    /* keep the Proc alive as long as self */
    G_RELATIVE(self, func);
    for (idx = 0; idx < argc; idx++)
        column_types[idx] = CLASS2GTYPE(argv[idx]);

    gtk_tree_model_filter_set_modify_func(_SELF(self), argc, column_types,
                                          (GtkTreeModelFilterModifyFunc)modify_func,
                                          (gpointer)func, NULL);
    return self;
}
/*
 * Populate `self` (a WIN32OLE_PARAM) with metadata for the parameter at
 * 1-based `param_index` of method `method_index` in `pTypeInfo`.
 * Raises RuntimeError on COM failure, IndexError when param_index is out
 * of range. Returns self.
 */
static VALUE
oleparam_ole_param_from_index(VALUE self, ITypeInfo *pTypeInfo,
                              UINT method_index, int param_index)
{
    FUNCDESC *pFuncDesc;
    HRESULT hr;
    BSTR *bstrs;
    UINT len;
    struct oleparamdata *pparam;

    hr = pTypeInfo->lpVtbl->GetFuncDesc(pTypeInfo, method_index, &pFuncDesc);
    if (FAILED(hr))
        ole_raise(hr, rb_eRuntimeError, "fail to ITypeInfo::GetFuncDesc");

    len = 0;
    /* GetNames returns the method name in slot 0, parameter names after it */
    bstrs = ALLOCA_N(BSTR, pFuncDesc->cParams + 1);
    hr = pTypeInfo->lpVtbl->GetNames(pTypeInfo, pFuncDesc->memid,
                                     bstrs, pFuncDesc->cParams + 1,
                                     &len);
    if (FAILED(hr)) {
        pTypeInfo->lpVtbl->ReleaseFuncDesc(pTypeInfo, pFuncDesc);
        ole_raise(hr, rb_eRuntimeError, "fail to ITypeInfo::GetNames");
    }
    /* the method name itself is not needed */
    SysFreeString(bstrs[0]);
    /* NOTE(review): BSTRs other than bstrs[0] and bstrs[param_index] do not
     * appear to be freed here — confirm whether WC2VSTR/the error paths are
     * expected to leak the remaining names. */
    if (param_index < 1 || len <= (UINT)param_index) {
        pTypeInfo->lpVtbl->ReleaseFuncDesc(pTypeInfo, pFuncDesc);
        rb_raise(rb_eIndexError, "index of param must be in 1..%d", len);
    }

    TypedData_Get_Struct(self, struct oleparamdata, &oleparam_datatype, pparam);
    pparam->pTypeInfo = pTypeInfo;
    OLE_ADDREF(pTypeInfo); /* pparam keeps its own COM reference */
    pparam->method_index = method_index;
    pparam->index = param_index - 1; /* stored 0-based */
    rb_ivar_set(self, rb_intern("name"), WC2VSTR(bstrs[param_index]));

    pTypeInfo->lpVtbl->ReleaseFuncDesc(pTypeInfo, pFuncDesc);
    return self;
}
/*
 * call-seq:
 *   column.sources = array of Groonga::Column
 *
 * Sets the columns to be indexed, given as an array.
 */
static VALUE
rb_grn_index_column_set_sources (VALUE self, VALUE rb_sources)
{
    VALUE exception;
    grn_ctx *context = NULL;
    grn_obj *column;
    int i, n;
    VALUE *rb_source_values;
    grn_id *sources;
    grn_rc rc;

    rb_grn_index_column_deconstruct(SELF(self), &column, &context,
                                    NULL, NULL, NULL, NULL,
                                    NULL, NULL, NULL, NULL);

    n = RARRAY_LEN(rb_sources);
    rb_source_values = RARRAY_PTR(rb_sources);
    sources = ALLOCA_N(grn_id, n);
    /* resolve each Ruby-side source into its groonga object id */
    for (i = 0; i < n; i++) {
        sources[i] = resolve_source_id(context, column, rb_source_values[i]);
    }

    {
        grn_obj bulk_sources;
        /* pack the ids as raw bytes into a bulk object, as expected by
         * grn_obj_set_info(GRN_INFO_SOURCE) */
        GRN_OBJ_INIT(&bulk_sources, GRN_BULK, 0, GRN_ID_NIL);
        GRN_TEXT_SET(context, &bulk_sources, sources, n * sizeof(grn_id));
        rc = grn_obj_set_info(context, column, GRN_INFO_SOURCE, &bulk_sources);
        /* capture any context-level exception before unlinking */
        exception = rb_grn_context_to_exception(context, self);
        grn_obj_unlink(context, &bulk_sources);
    }

    if (!NIL_P(exception))
        rb_exc_raise(exception);
    rb_grn_rc_check(rc, self);

    return Qnil;
}
/*
 * Call method `mid` on `recv` with `n` trailing VALUE arguments,
 * using FCALL (private-call) semantics.
 */
VALUE
rb_funcall(VALUE recv, ID mid, int n, ...)
{
    VALUE *vec = 0;

    if (n > 0) {
        va_list ap;
        long k;

        va_start(ap, n);
        vec = ALLOCA_N(VALUE, n);
        for (k = 0; k < n; k++) {
            vec[k] = va_arg(ap, VALUE);
        }
        va_end(ap);
    }
    return rb_call(recv, mid, n, vec, CALL_FCALL, false);
}
/*
 * Call `id` on `recv` with `n` trailing VALUE arguments under rb_protect;
 * on Ruby-side failure, capture the backtrace and throw a C++ Exception.
 */
VALUE method(VALUE recv, ID id, int n, ...)
{
    VALUE *vec = 0;
    if (n > 0) {
        vec = ALLOCA_N(VALUE, n);
        va_list ap;
        va_start(ap, n);
        for (int k = 0; k < n; ++k) {
            vec[k] = va_arg(ap, VALUE);
        }
        va_end(ap);
    }

    Arguments arg;
    arg.recv = recv;
    arg.id = id;
    arg.n = n;
    arg.argv = vec;

    int error = 0;
    VALUE result = rb_protect(method_wrap, reinterpret_cast<VALUE>(&arg), &error);
    if (error) {
        Exception e;
        e.backtrace();
        throw e;
    }
    return result;
}
/*
 * Like rb_funcall, but runs the call under rb_protect so exceptions are
 * reported through *state instead of propagating.
 */
VALUE
rb_protect_funcall (VALUE recv, ID mid, int *state, int argc, ...)
{
    VALUE *vec = 0;
    struct protect_call_arg arg;

    if (argc > 0) {
        va_list ap;
        int k;

        vec = ALLOCA_N(VALUE, argc);
        va_start(ap, argc);
        for (k = 0; k < argc; k++) {
            vec[k] = va_arg(ap, VALUE);
        }
        va_end(ap);
    }

    arg.recv = recv;
    arg.mid = mid;
    arg.argc = argc;
    arg.argv = vec;
    return rb_protect(protect_funcall0, (VALUE) &arg, state);
}
/*
 * Call `recv` (a callable) with up to `argc` trailing VALUE arguments,
 * rescuing any exception through func_call_failed. If the callable's arity
 * exceeds argc, the missing arguments are padded with Qnil; a negative
 * arity (variadic) means "pass exactly what was given". Returns the result
 * of the call.
 */
VALUE
cb_proc_call(struct cb_bucket_st *bucket, VALUE recv, int argc, ...)
{
    VALUE *argv;
    va_list ar;
    int arity;
    int ii;
    struct proc_params_st params;

    arity = FIX2INT(rb_funcall(recv, cb_id_arity, 0));
    if (arity < 0) {
        arity = argc;
    }
    if (arity > 0) {
        va_init_list(ar, argc);
        /* BUGFIX: allocate `arity` slots, not `argc` — when arity > argc the
         * padding loop below writes argv[argc..arity-1], which overflowed the
         * old argc-sized stack buffer. */
        argv = ALLOCA_N(VALUE, arity);
        for (ii = 0; ii < arity; ++ii) {
            if (ii < argc) {
                argv[ii] = va_arg(ar, VALUE);
            }
            else {
                argv[ii] = Qnil; /* pad missing arguments */
            }
        }
        va_end(ar);
    }
    else {
        argv = NULL;
    }
    params.bucket = bucket;
    params.recv = recv;
    params.mid = cb_id_call;
    params.argc = arity;
    params.argv = argv;
    return rb_rescue2(do_func_call, (VALUE)&params, func_call_failed,
                      (VALUE)&params, rb_eException, (VALUE)0);
}
/*
 * Gdk::Region#initialize([points_or_rectangle[, fill_rule]]).
 * nil => empty region; Array of [x, y] pairs => polygon region with the
 * given fill rule; Gdk::Rectangle => rectangular region.
 */
static VALUE
gdkregion_initialize(int argc, VALUE *argv, VALUE self)
{
    VALUE src, fill_rule;
    GdkRegion *region;

    rb_scan_args(argc, argv, "02", &src, &fill_rule);

    if (NIL_P(src)) {
        region = gdk_region_new();
    } else if (TYPE(src) == T_ARRAY) {
        long n_points = RARRAY_LEN(src);
        GdkPoint *gpoints = ALLOCA_N(GdkPoint, n_points);
        int i;

        for (i = 0; i < n_points; i++) {
            VALUE pair = RARRAY_PTR(src)[i];
            Check_Type(pair, T_ARRAY);
            if (RARRAY_LEN(pair) < 2) {
                rb_raise(rb_eArgError, "point %d should be array of size 2", i);
            }
            gpoints[i].x = NUM2INT(RARRAY_PTR(pair)[0]);
            gpoints[i].y = NUM2INT(RARRAY_PTR(pair)[1]);
        }
        region = gdk_region_polygon(gpoints, n_points,
                                    RVAL2GENUM(fill_rule, GDK_TYPE_FILL_RULE));
    } else if (RVAL2GTYPE(src) == GDK_TYPE_RECTANGLE) {
        region = gdk_region_rectangle((GdkRectangle*)RVAL2BOXED(src, GDK_TYPE_RECTANGLE));
    } else {
        rb_raise(rb_eArgError,
                 "invalid argument %s (expect array of Gdk::Point or Gdk::Rectangle, nil)",
                 rb_class2name(CLASS_OF(src)));
    }
    G_INITIALIZE(self, region);
    return Qnil;
}
/*
 * Kernel#open wrapper that guesses a file's encoding: first opens the file
 * binary (ASCII-8BIT), reads it all, guesses the encoding from the bytes,
 * then reopens with the caller's original arguments plus the guessed
 * :encoding option, passing the caller's block through.
 */
static VALUE
guess4r_f__guess_open(int argc, VALUE *argv)
{
    VALUE *args, opt, f, str, enc;
    int i;

    /* one extra slot in case an options hash must be appended */
    args = ALLOCA_N(VALUE, argc + 1);
    args[0] = argv[0];
    if (TYPE(argv[argc - 1]) != T_HASH) {
        /* no options given: append a fresh hash and grow argc */
        args[1] = args[argc++] = rb_hash_new();
    }
    else {
        args[1] = argv[argc - 1];
    }
    /* force binary reading for the guessing pass; strip any explicit
     * external/internal encodings that would conflict */
    rb_hash_aset(args[1], ID2SYM(rb_intern("encoding")),
                 rb_const_get(rb_cEncoding, rb_intern("ASCII_8BIT")));
    rb_funcall(args[1], rb_intern("delete"), 1, ID2SYM(rb_intern("external_encoding")));
    rb_funcall(args[1], rb_intern("delete"), 1, ID2SYM(rb_intern("internal_encoding")));
    f = rb_funcall2(rb_mKernel, rb_intern("open"), 2, args);
    str = rb_funcall(f, rb_intern("read"), 0);
    enc = guess4r_str__guess_encoding(str);
    rb_funcall(f, rb_intern("close"), 0);
    /* reopen with the guessed encoding */
    rb_hash_aset(args[1], ID2SYM(rb_intern("encoding")), enc);
    /* NOTE(review): for calls with 3+ original args this MEMCPY appears to
     * overwrite args[1] (the modified options hash) with argv[1] and leave
     * args[argc-1] stale — verify behavior for e.g. open(path, mode, opts). */
    MEMCPY(args, argv, VALUE, argc - 1);
    return rb_funcall_passing_block(rb_mKernel, rb_intern("open"), argc, args);
}
/*
 * Define one field of an NArray struct class. `argv` supplies the field
 * name (String/Symbol) and optionally its shape (Array); `type` is the
 * element class or an NArray instance (which then supplies both class and
 * shape). Builds a view with contiguous strides at the struct's current
 * running offset, advances "__offset__" by the field's byte size, and
 * appends [name, view, offset, size] to "__members__".
 */
static VALUE
nstruct_add_type(VALUE type, int argc, VALUE *argv, VALUE nst)
{
    VALUE ofs, size;
    ID id;
    int i;
    VALUE name=Qnil;
    size_t *shape=NULL;
    int ndim=0;
    ssize_t stride;
    narray_view_t *nt;
    int j;

    /* scan extra args: at most one name and one shape */
    for (i=0; i<argc; i++) {
        switch(TYPE(argv[i])) {
        case T_STRING:
        case T_SYMBOL:
            if (NIL_P(name)) {
                name = argv[i];
                break;
            }
            rb_raise(rb_eArgError,"multiple name in struct definition");
            /* no fallthrough in practice: rb_raise does not return */
        case T_ARRAY:
            if (shape) {
                rb_raise(rb_eArgError,"multiple shape in struct definition");
            }
            ndim = RARRAY_LEN(argv[i]);
            if (ndim > NA_MAX_DIMENSION) {
                rb_raise(rb_eArgError,"too large number of dimensions");
            }
            if (ndim == 0) {
                rb_raise(rb_eArgError,"array is empty");
            }
            shape = ALLOCA_N(size_t, ndim);
            na_array_to_internal_shape(Qnil, argv[i], shape);
            break;
        }
    }

    id = rb_to_id(name);
    name = ID2SYM(id);
    /* an NArray instance as `type` supplies both element class and shape */
    if (rb_obj_is_kind_of(type,cNArray)) {
        narray_t *na;
        GetNArray(type,na);
        type = CLASS_OF(type);
        ndim = na->ndim;
        shape = na->shape;
    }

    type = rb_narray_view_new(type,ndim,shape);
    GetNArrayView(type,nt);
    nt->stridx = ALLOC_N(stridx_t,ndim);
    /* contiguous strides built from the innermost dimension outwards */
    stride = na_dtype_elmsz(CLASS_OF(type));
    for (j=ndim; j--; ) {
        SDX_SET_STRIDE(nt->stridx[j], stride);
        stride *= shape[j];
    }

    /* place the field at the struct's current running offset */
    ofs = rb_iv_get(nst, "__offset__");
    nt->offset = NUM2SIZET(ofs);
    size = rb_funcall(type, rb_intern("byte_size"), 0);
    rb_iv_set(nst, "__offset__", rb_funcall(ofs,'+',1,size));
    rb_ary_push(rb_iv_get(nst,"__members__"),
                rb_ary_new3(4,name,type,ofs,size)); // <- field definition
    return Qnil;
}
/*
 * Set up complex parameters (optional/rest/post/keyword/block) for an iseq
 * call frame. Distributes the `calling->argc` values already pushed into
 * `locals` across the iseq's parameter slots, honoring the call-site's
 * splat/kwarg flags and the arity rules selected by `arg_setup_type`
 * (strict for methods/lambdas, lenient auto-splat/pad for blocks).
 * Returns the opt_table pc offset at which execution should start
 * (0 when there are no optional parameters).
 */
static int
setup_parameters_complex(rb_thread_t * const th, const rb_iseq_t * const iseq,
                         struct rb_calling_info *const calling,
                         const struct rb_call_info *ci,
                         VALUE * const locals,
                         const enum arg_setup_type arg_setup_type)
{
    const int min_argc = iseq->body->param.lead_num + iseq->body->param.post_num;
    const int max_argc = (iseq->body->param.flags.has_rest == FALSE) ?
        min_argc + iseq->body->param.opt_num : UNLIMITED_ARGUMENTS;
    int opt_pc = 0;
    int given_argc;
    struct args_info args_body, *args;
    VALUE keyword_hash = Qnil;
    VALUE * const orig_sp = th->cfp->sp;
    unsigned int i;

    /*
     * Extend SP for GC.
     *
     * [pushed values] [uninitialized values]
     * <- ci->argc -->
     * <- iseq->body->param.size------------>
     * ^ locals        ^ sp
     *
     * =>
     * [pushed values] [initialized values ]
     * <- ci->argc -->
     * <- iseq->body->param.size------------>
     * ^ locals                             ^ sp
     */
    for (i=calling->argc; i<iseq->body->param.size; i++) {
        locals[i] = Qnil;
    }
    th->cfp->sp = &locals[i];

    /* setup args */
    args = &args_body;
    given_argc = args->argc = calling->argc;
    args->argv = locals;

    if (ci->flag & VM_CALL_KWARG) {
        args->kw_arg = ((struct rb_call_info_with_kwarg *)ci)->kw_arg;

        if (iseq->body->param.flags.has_kw) {
            int kw_len = args->kw_arg->keyword_len;
            /* copy kw_argv: the trailing kw_len values are keyword values */
            args->kw_argv = ALLOCA_N(VALUE, kw_len);
            args->argc -= kw_len;
            given_argc -= kw_len;
            MEMCPY(args->kw_argv, locals + args->argc, VALUE, kw_len);
        }
        else {
            /* callee takes no keywords: collapse kwargs into a Hash arg */
            args->kw_argv = NULL;
            given_argc = args_kw_argv_to_hash(args);
        }
    }
    else {
        args->kw_arg = NULL;
        args->kw_argv = NULL;
    }

    if (ci->flag & VM_CALL_ARGS_SPLAT) {
        /* last pushed value is the splatted array */
        args->rest = locals[--args->argc];
        args->rest_index = 0;
        given_argc += RARRAY_LENINT(args->rest) - 1;
    }
    else {
        args->rest = Qfalse;
    }

    switch (arg_setup_type) {
      case arg_setup_method:
        break; /* do nothing special */
      case arg_setup_block:
        /* a single Array argument may auto-splat onto the block params */
        if (given_argc == 1 &&
            (min_argc > 0 ||
             iseq->body->param.opt_num > 1 ||
             iseq->body->param.flags.has_kw ||
             iseq->body->param.flags.has_kwrest) &&
            !iseq->body->param.flags.ambiguous_param0 &&
            args_check_block_arg0(args, th)) {
            given_argc = RARRAY_LENINT(args->rest);
        }
        break;
      case arg_setup_lambda:
        if (given_argc == 1 &&
            given_argc != iseq->body->param.lead_num &&
            !iseq->body->param.flags.has_rest &&
            args_check_block_arg0(args, th)) {
            given_argc = RARRAY_LENINT(args->rest);
        }
    }

    /* argc check */
    if (given_argc < min_argc) {
        if (given_argc == min_argc - 1 && args->kw_argv) {
            /* keywords can serve as the missing final positional hash */
            args_stored_kw_argv_to_hash(args);
            given_argc = args_argc(args);
        }
        else {
            if (arg_setup_type == arg_setup_block) {
                /* blocks are lenient: pad missing arguments with nil */
                CHECK_VM_STACK_OVERFLOW(th->cfp, min_argc);
                given_argc = min_argc;
                args_extend(args, min_argc);
            }
            else {
                argument_arity_error(th, iseq, given_argc, min_argc, max_argc);
            }
        }
    }

    if (given_argc > min_argc &&
        (iseq->body->param.flags.has_kw || iseq->body->param.flags.has_kwrest) &&
        args->kw_argv == NULL) {
        /* a trailing Hash may act as the keyword arguments */
        if (args_pop_keyword_hash(args, &keyword_hash, th)) {
            given_argc--;
        }
    }

    if (given_argc > max_argc && max_argc != UNLIMITED_ARGUMENTS) {
        if (arg_setup_type == arg_setup_block) {
            /* truncate */
            args_reduce(args, given_argc - max_argc);
            given_argc = max_argc;
        }
        else {
            argument_arity_error(th, iseq, given_argc, min_argc, max_argc);
        }
    }

    if (iseq->body->param.flags.has_lead) {
        args_setup_lead_parameters(args, iseq->body->param.lead_num, locals + 0);
    }

    if (iseq->body->param.flags.has_post) {
        args_setup_post_parameters(args, iseq->body->param.post_num,
                                   locals + iseq->body->param.post_start);
    }

    if (iseq->body->param.flags.has_opt) {
        int opt = args_setup_opt_parameters(args, iseq->body->param.opt_num,
                                            locals + iseq->body->param.lead_num);
        /* resume execution after the optionals that were filled */
        opt_pc = (int)iseq->body->param.opt_table[opt];
    }

    if (iseq->body->param.flags.has_rest) {
        args_setup_rest_parameter(args, locals + iseq->body->param.rest_start);
    }

    if (iseq->body->param.flags.has_kw) {
        VALUE * const klocals = locals + iseq->body->param.keyword->bits_start -
                                iseq->body->param.keyword->num;

        if (args->kw_argv != NULL) {
            const struct rb_call_info_kw_arg *kw_arg = args->kw_arg;
            args_setup_kw_parameters(args->kw_argv, kw_arg->keyword_len,
                                     kw_arg->keywords, iseq, klocals);
        }
        else if (!NIL_P(keyword_hash)) {
            int kw_len = rb_long2int(RHASH_SIZE(keyword_hash));
            struct fill_values_arg arg;
            /* copy kw_argv: keys in the first half, values in the second */
            arg.keys = args->kw_argv = ALLOCA_N(VALUE, kw_len * 2);
            arg.vals = arg.keys + kw_len;
            arg.argc = 0;
            rb_hash_foreach(keyword_hash, fill_keys_values, (VALUE)&arg);
            VM_ASSERT(arg.argc == kw_len);
            args_setup_kw_parameters(arg.vals, kw_len, arg.keys, iseq, klocals);
        }
        else {
            VM_ASSERT(args_argc(args) == 0);
            args_setup_kw_parameters(NULL, 0, NULL, iseq, klocals);
        }
    }
    else if (iseq->body->param.flags.has_kwrest) {
        args_setup_kw_rest_parameter(keyword_hash,
                                     locals + iseq->body->param.keyword->rest_start);
    }

    if (iseq->body->param.flags.has_block) {
        args_setup_block_parameter(th, calling, locals + iseq->body->param.block_start);
    }

#if 0
    {
        int i;
        for (i=0; i<iseq->body->param.size; i++) {
            fprintf(stderr, "local[%d] = %p\n", i, (void *)locals[i]);
        }
    }
#endif

    th->cfp->sp = orig_sp;
    return opt_pc;
}
/*
 * libffi closure trampoline: converts the raw native argument buffers in
 * `parameters` into Ruby VALUEs, invokes the wrapped Proc, and writes the
 * converted Ruby return value back into `retval` according to the
 * callback's declared return type.
 */
static void
callback_invoke(ffi_cif* cif, void* retval, void** parameters, void* user_data)
{
    Closure* closure = (Closure *) user_data;
    Function* fn = (Function *) closure->info;
    FunctionType *cbInfo = fn->info;
    VALUE* rbParams;
    VALUE rbReturnValue;
    int i;

    rbParams = ALLOCA_N(VALUE, cbInfo->parameterCount);
    /* box each native argument as a Ruby object */
    for (i = 0; i < cbInfo->parameterCount; ++i) {
        VALUE param;
        switch (cbInfo->parameterTypes[i]->nativeType) {
            case NATIVE_INT8:
                param = INT2NUM(*(int8_t *) parameters[i]);
                break;
            case NATIVE_UINT8:
                param = UINT2NUM(*(uint8_t *) parameters[i]);
                break;
            case NATIVE_INT16:
                param = INT2NUM(*(int16_t *) parameters[i]);
                break;
            case NATIVE_UINT16:
                param = UINT2NUM(*(uint16_t *) parameters[i]);
                break;
            case NATIVE_INT32:
                param = INT2NUM(*(int32_t *) parameters[i]);
                break;
            case NATIVE_UINT32:
                param = UINT2NUM(*(uint32_t *) parameters[i]);
                break;
            case NATIVE_INT64:
                param = LL2NUM(*(int64_t *) parameters[i]);
                break;
            case NATIVE_UINT64:
                param = ULL2NUM(*(uint64_t *) parameters[i]);
                break;
            case NATIVE_LONG:
                param = LONG2NUM(*(long *) parameters[i]);
                break;
            case NATIVE_ULONG:
                param = ULONG2NUM(*(unsigned long *) parameters[i]);
                break;
            case NATIVE_FLOAT32:
                param = rb_float_new(*(float *) parameters[i]);
                break;
            case NATIVE_FLOAT64:
                param = rb_float_new(*(double *) parameters[i]);
                break;
            case NATIVE_STRING:
                /* NULL C string maps to nil */
                param = (*(void **) parameters[i] != NULL)
                        ? rb_tainted_str_new2(*(char **) parameters[i]) : Qnil;
                break;
            case NATIVE_POINTER:
                param = rbffi_Pointer_NewInstance(*(void **) parameters[i]);
                break;
            case NATIVE_BOOL:
                param = (*(uint8_t *) parameters[i]) ? Qtrue : Qfalse;
                break;
            case NATIVE_FUNCTION:
            case NATIVE_CALLBACK:
                param = rbffi_NativeValue_ToRuby(cbInfo->parameterTypes[i],
                        rb_ary_entry(cbInfo->rbParameterTypes, i),
                        parameters[i], Qnil);
                break;
            default:
                param = Qnil;
                break;
        }
        rbParams[i] = param;
    }

    rbReturnValue = rb_funcall2(fn->rbProc, id_call, cbInfo->parameterCount, rbParams);

    if (rbReturnValue == Qnil || TYPE(rbReturnValue) == T_NIL) {
        /* nil => zero-filled return slot */
        memset(retval, 0, cbInfo->ffiReturnType->size);
    } else switch (cbInfo->returnType->nativeType) {
        case NATIVE_INT8:
        case NATIVE_INT16:
        case NATIVE_INT32:
            /* libffi widens small integer returns to ffi_(s)arg */
            *((ffi_sarg *) retval) = NUM2INT(rbReturnValue);
            break;
        case NATIVE_UINT8:
        case NATIVE_UINT16:
        case NATIVE_UINT32:
            *((ffi_arg *) retval) = NUM2UINT(rbReturnValue);
            break;
        case NATIVE_INT64:
            *((int64_t *) retval) = NUM2LL(rbReturnValue);
            break;
        case NATIVE_UINT64:
            *((uint64_t *) retval) = NUM2ULL(rbReturnValue);
            break;
        case NATIVE_LONG:
            *((ffi_sarg *) retval) = NUM2LONG(rbReturnValue);
            break;
        case NATIVE_ULONG:
            *((ffi_arg *) retval) = NUM2ULONG(rbReturnValue);
            break;
        case NATIVE_FLOAT32:
            *((float *) retval) = (float) NUM2DBL(rbReturnValue);
            break;
        case NATIVE_FLOAT64:
            *((double *) retval) = NUM2DBL(rbReturnValue);
            break;
        case NATIVE_POINTER:
            if (TYPE(rbReturnValue) == T_DATA
                && rb_obj_is_kind_of(rbReturnValue, rbffi_PointerClass)) {
                *((void **) retval) = ((AbstractMemory *) DATA_PTR(rbReturnValue))->address;
            } else {
                /* Default to returning NULL if not a valid pointer object;
                 * handles the nil case as well. */
                *((void **) retval) = NULL;
            }
            break;
        case NATIVE_BOOL:
            *((ffi_arg *) retval) = rbReturnValue == Qtrue;
            break;
        case NATIVE_FUNCTION:
        case NATIVE_CALLBACK:
            if (TYPE(rbReturnValue) == T_DATA
                && rb_obj_is_kind_of(rbReturnValue, rbffi_PointerClass)) {
                *((void **) retval) = ((AbstractMemory *) DATA_PTR(rbReturnValue))->address;
            } else if (rb_obj_is_kind_of(rbReturnValue, rb_cProc)
                       || rb_respond_to(rbReturnValue, id_call)) {
                /* wrap a callable in a native closure so C can call it */
                VALUE function;
                function = rbffi_Function_ForProc(cbInfo->rbReturnType, rbReturnValue);
                *((void **) retval) = ((AbstractMemory *) DATA_PTR(function))->address;
            } else {
                *((void **) retval) = NULL;
            }
            break;
        default:
            *((ffi_arg *) retval) = 0;
            break;
    }
}
/*
 * Gtk::TreeStore#insert(parent, position[, values]).
 * Without `values`, inserts an empty row at `position` under `parent`.
 * With `values` (Array: positional by column index; Hash: column => value),
 * inserts and fills the row in a single call (requires GTK+ >= 2.10; older
 * GTK+ falls back to a plain insert with a warning).
 * Returns the Gtk::TreeIter for the new row.
 */
static VALUE
rg_insert(int argc, VALUE *argv, VALUE self)
{
    VALUE parent, position, values, ret;
    GtkTreeIter iter;
    GtkTreeStore* model = _SELF(self);

    rb_scan_args(argc, argv, "21", &parent, &position, &values);

    if (NIL_P(values)){
        gtk_tree_store_insert(model, &iter,
                              NIL_P(parent) ? NULL : RVAL2GTKTREEITER(parent),
                              NUM2INT(position));
        iter.user_data3 = model; /* stash owning model for later iter use */
        ret = GTKTREEITER2RVAL(&iter);
        G_CHILD_ADD(self, ret);
    } else {
#if GTK_CHECK_VERSION(2,10,0)
        gint *c_columns;
        GValue *c_values;
        long size, i;

        size = NUM2INT(rb_funcall(values, rb_intern("size"), 0));
        c_columns = ALLOCA_N(gint, size);
        c_values = ALLOCA_N(GValue, size);

        if(TYPE(values)==T_ARRAY) {
            /* Array: value i goes into column i */
            for(i=0; i<size; i++) {
                GType gtype;
                GValue gval = G_VALUE_INIT;
                c_columns[i] = i;
                gtype = gtk_tree_model_get_column_type(GTK_TREE_MODEL(RVAL2GOBJ(self)),
                                                       c_columns[i]);
                g_value_init(&gval, gtype);
                /* NOTE(review): rb_ary_shift mutates the caller's array —
                 * confirm this draining behavior is intended */
                rbgobj_rvalue_to_gvalue(rb_ary_shift(values), &gval);
                c_values[i] = gval;
            }
        } else if(TYPE(values)==T_HASH) {
            /* Hash: keys are column indexes */
            VALUE r_columns;
            r_columns = rb_funcall(values, rb_intern("keys"), 0);
            for(i=0; i<size; i++) {
                GType gtype;
                GValue gval = G_VALUE_INIT;
                c_columns[i] = NUM2INT (rb_ary_entry(r_columns, i));
                gtype = gtk_tree_model_get_column_type(GTK_TREE_MODEL(RVAL2GOBJ(self)),
                                                       c_columns[i]);
                g_value_init(&gval, gtype);
                rbgobj_rvalue_to_gvalue(rb_hash_aref(values, INT2NUM(c_columns[i])), &gval);
                c_values[i] = gval;
            }
        } else {
            rb_raise(rb_eArgError, "values must be of type Hash or Array");
        }

        gtk_tree_store_insert_with_valuesv(model, &iter,
                                           NIL_P(parent) ? NULL : RVAL2GTKTREEITER(parent),
                                           NUM2INT(position),
                                           c_columns, c_values, size);
        iter.user_data3 = model;
        ret = GTKTREEITER2RVAL(&iter);
        G_CHILD_ADD(self, ret);

        /* keep Ruby wrappers of the stored values alive, then release GValues */
        for(i=0; i<size; i++) {
            G_CHILD_ADD(ret, rbgobj_gvalue_to_rvalue(&(c_values[i])));
            g_value_unset(&(c_values[i]));
        }
#else
        rb_warn("Gtk::TreeStore#insert(parent, position, values) requires GTK+-2.10.0 or later");
        gtk_tree_store_insert(model, &iter,
                              NIL_P(parent) ? NULL : RVAL2GTKTREEITER(parent),
                              NUM2INT(position));
        iter.user_data3 = model;
        ret = GTKTREEITER2RVAL(&iter);
        G_CHILD_ADD(self, ret);
#endif
    }
    return ret;
}
/*
 * Invoke a variadic native function. `parameterTypes`/`parameterValues`
 * are parallel Ruby arrays describing the complete argument list (fixed +
 * variadic). Small integer and float32 types are widened to INT32/UINT32/
 * DOUBLE, matching C's default argument promotions for varargs. Returns
 * the native result converted to a Ruby object.
 */
static VALUE
variadic_invoke(VALUE self, VALUE parameterTypes, VALUE parameterValues)
{
    VariadicInvoker* invoker;
    FFIStorage* params;
    void* retval;
    ffi_cif cif;
    void** ffiValues;
    ffi_type** ffiParamTypes;
    ffi_type* ffiReturnType;
    Type** paramTypes;
    VALUE* argv;
    int paramCount = 0, fixedCount = 0, i;
    ffi_status ffiStatus;
    rbffi_frame_t frame = { 0 };

    Check_Type(parameterTypes, T_ARRAY);
    Check_Type(parameterValues, T_ARRAY);

    Data_Get_Struct(self, VariadicInvoker, invoker);
    paramCount = (int) RARRAY_LEN(parameterTypes);
    paramTypes = ALLOCA_N(Type *, paramCount);
    ffiParamTypes = ALLOCA_N(ffi_type *, paramCount);
    params = ALLOCA_N(FFIStorage, paramCount);
    ffiValues = ALLOCA_N(void*, paramCount);
    argv = ALLOCA_N(VALUE, paramCount);
    /* return buffer must be at least one ffi argument slot wide */
    retval = alloca(MAX(invoker->returnType->ffiType->size, FFI_SIZEOF_ARG));

    for (i = 0; i < paramCount; ++i) {
        VALUE rbType = rb_ary_entry(parameterTypes, i);

        if (!rb_obj_is_kind_of(rbType, rbffi_TypeClass)) {
            rb_raise(rb_eTypeError, "wrong type. Expected (FFI::Type)");
        }
        Data_Get_Struct(rbType, Type, paramTypes[i]);

        /* apply C default argument promotions for variadic arguments */
        switch (paramTypes[i]->nativeType) {
            case NATIVE_INT8:
            case NATIVE_INT16:
            case NATIVE_INT32:
                rbType = rb_const_get(rbffi_TypeClass, rb_intern("INT32"));
                Data_Get_Struct(rbType, Type, paramTypes[i]);
                break;
            case NATIVE_UINT8:
            case NATIVE_UINT16:
            case NATIVE_UINT32:
                rbType = rb_const_get(rbffi_TypeClass, rb_intern("UINT32"));
                Data_Get_Struct(rbType, Type, paramTypes[i]);
                break;
            case NATIVE_FLOAT32:
                rbType = rb_const_get(rbffi_TypeClass, rb_intern("DOUBLE"));
                Data_Get_Struct(rbType, Type, paramTypes[i]);
                break;
            default:
                break;
        }

        ffiParamTypes[i] = paramTypes[i]->ffiType;
        if (ffiParamTypes[i] == NULL) {
            rb_raise(rb_eArgError, "Invalid parameter type #%x", paramTypes[i]->nativeType);
        }
        argv[i] = rb_ary_entry(parameterValues, i);
    }

    ffiReturnType = invoker->returnType->ffiType;
    if (ffiReturnType == NULL) {
        rb_raise(rb_eArgError, "Invalid return type");
    }

    /* Get the number of fixed args from @fixed array */
    fixedCount = RARRAY_LEN(rb_iv_get(self, "@fixed"));

#ifdef HAVE_FFI_PREP_CIF_VAR
    ffiStatus = ffi_prep_cif_var(&cif, invoker->abi, fixedCount, paramCount,
                                 ffiReturnType, ffiParamTypes);
#else
    ffiStatus = ffi_prep_cif(&cif, invoker->abi, paramCount,
                             ffiReturnType, ffiParamTypes);
#endif
    switch (ffiStatus) {
        case FFI_BAD_ABI:
            rb_raise(rb_eArgError, "Invalid ABI specified");
        case FFI_BAD_TYPEDEF:
            rb_raise(rb_eArgError, "Invalid argument type specified");
        case FFI_OK:
            break;
        default:
            rb_raise(rb_eArgError, "Unknown FFI error");
    }

    rbffi_SetupCallParams(paramCount, argv, -1, paramTypes, params,
                          ffiValues, NULL, 0, invoker->rbEnums);

    rbffi_frame_push(&frame);
    ffi_call(&cif, FFI_FN(invoker->function), retval, ffiValues);
    rbffi_frame_pop(&frame);

    rbffi_save_errno();

    /* re-raise an exception captured while inside the native call frame */
    if (RTEST(frame.exc) && frame.exc != Qnil) {
        rb_exc_raise(frame.exc);
    }

    return rbffi_NativeValue_ToRuby(invoker->returnType, invoker->rbReturnType, retval);
}
/*
 * Core VM method dispatch: invoke method entry `me` (id `id`) on `recv`
 * with `num` arguments already on cfp->sp. Dispatches by method-definition
 * type (iseq / cfunc / attr / missing / bmethod / zsuper / optimized),
 * applies private/protected visibility and $SAFE checks, and falls back to
 * method_missing when no entry is found. Returns the call result, or
 * Qundef when an iseq frame was pushed for the interpreter loop to run.
 */
static inline VALUE
vm_call_method(rb_thread_t *th, rb_control_frame_t *cfp, int num,
               const rb_block_t *blockptr, VALUE flag, ID id,
               const rb_method_entry_t *me, VALUE recv)
{
    VALUE val;

  start_method_dispatch:
    if (me != 0) {
        if ((me->flag == 0)) {
          normal_method_dispatch:
            switch (me->def->type) {
              case VM_METHOD_TYPE_ISEQ:{
                /* push a Ruby frame; interpreter continues in it */
                vm_setup_method(th, cfp, recv, num, blockptr, flag, me);
                return Qundef;
              }
              case VM_METHOD_TYPE_NOTIMPLEMENTED:
              case VM_METHOD_TYPE_CFUNC:{
                val = vm_call_cfunc(th, cfp, num, recv, blockptr, me);
                break;
              }
              case VM_METHOD_TYPE_ATTRSET:{
                /* attr_writer: exactly one argument */
                if (num != 1) {
                    rb_raise(rb_eArgError, "wrong number of arguments (%d for 1)", num);
                }
                val = rb_ivar_set(recv, me->def->body.attr.id, *(cfp->sp - 1));
                cfp->sp -= 2; /* pop receiver and argument */
                break;
              }
              case VM_METHOD_TYPE_IVAR:{
                /* attr_reader: no arguments */
                if (num != 0) {
                    rb_raise(rb_eArgError, "wrong number of arguments (%d for 0)", num);
                }
                val = rb_attr_get(recv, me->def->body.attr.id);
                cfp->sp -= 1; /* pop receiver */
                break;
              }
              case VM_METHOD_TYPE_MISSING:{
                /* explicit method_missing entry: prepend the method symbol */
                VALUE *argv = ALLOCA_N(VALUE, num+1);
                argv[0] = ID2SYM(me->def->original_id);
                MEMCPY(argv+1, cfp->sp - num, VALUE, num);
                cfp->sp += - num - 1;
                th->passed_block = blockptr;
                val = rb_funcall2(recv, rb_intern("method_missing"), num+1, argv);
                break;
              }
              case VM_METHOD_TYPE_BMETHOD:{
                /* method defined from a Proc (define_method) */
                VALUE *argv = ALLOCA_N(VALUE, num);
                MEMCPY(argv, cfp->sp - num, VALUE, num);
                cfp->sp += - num - 1;
                val = vm_call_bmethod(th, recv, num, argv, blockptr, me);
                break;
              }
              case VM_METHOD_TYPE_ZSUPER:{
                /* re-lookup in the superclass and redispatch */
                VALUE klass = RCLASS_SUPER(me->klass);
                me = rb_method_entry(klass, id);

                if (me != 0) {
                    goto normal_method_dispatch;
                }
                else {
                    goto start_method_dispatch;
                }
              }
              case VM_METHOD_TYPE_OPTIMIZED:{
                switch (me->def->body.optimize_type) {
                  case OPTIMIZED_METHOD_TYPE_SEND: {
                    /* #send: first argument names the real method */
                    rb_control_frame_t *reg_cfp = cfp;
                    rb_num_t i = num - 1;
                    VALUE sym;

                    if (num == 0) {
                        rb_raise(rb_eArgError, "no method name given");
                    }

                    sym = TOPN(i);
                    id = SYMBOL_P(sym) ? SYM2ID(sym) : rb_to_id(sym);
                    /* shift arguments */
                    if (i > 0) {
                        MEMMOVE(&TOPN(i), &TOPN(i-1), VALUE, i);
                    }
                    me = rb_method_entry(CLASS_OF(recv), id);
                    num -= 1;
                    DEC_SP(1);
                    flag |= VM_CALL_FCALL_BIT | VM_CALL_OPT_SEND_BIT;

                    goto start_method_dispatch;
                  }
                  case OPTIMIZED_METHOD_TYPE_CALL: {
                    /* Proc#call fast path */
                    rb_proc_t *proc;
                    int argc = num;
                    VALUE *argv = ALLOCA_N(VALUE, num);
                    GetProcPtr(recv, proc);
                    MEMCPY(argv, cfp->sp - num, VALUE, num);
                    cfp->sp -= num + 1;

                    val = rb_vm_invoke_proc(th, proc, proc->block.self, argc, argv, blockptr);
                    break;
                  }
                  default:
                    rb_bug("eval_invoke_method: unsupported optimized method type (%d)",
                           me->def->body.optimize_type);
                }
                break;
              }
              default:{
                rb_bug("eval_invoke_method: unsupported method type (%d)", me->def->type);
                break;
              }
            }
        }
        else {
            /* visibility / safe-level checks for non-public methods */
            int noex_safe;

            if (!(flag & VM_CALL_FCALL_BIT) &&
                (me->flag & NOEX_MASK) & NOEX_PRIVATE) {
                int stat = NOEX_PRIVATE;

                if (flag & VM_CALL_VCALL_BIT) {
                    stat |= NOEX_VCALL;
                }
                val = vm_method_missing(th, id, recv, num, blockptr, stat);
            }
            else if (!(flag & VM_CALL_OPT_SEND_BIT) &&
                     (me->flag & NOEX_MASK) & NOEX_PROTECTED) {
                VALUE defined_class = me->klass;

                if (RB_TYPE_P(defined_class, T_ICLASS)) {
                    defined_class = RBASIC(defined_class)->klass;
                }

                /* protected: caller's self must be kind_of the defining class */
                if (!rb_obj_is_kind_of(cfp->self, defined_class)) {
                    val = vm_method_missing(th, id, recv, num, blockptr, NOEX_PROTECTED);
                }
                else {
                    goto normal_method_dispatch;
                }
            }
            else if ((noex_safe = NOEX_SAFE(me->flag)) > th->safe_level &&
                     (noex_safe > 2)) {
                rb_raise(rb_eSecurityError, "calling insecure method: %s", rb_id2name(id));
            }
            else {
                goto normal_method_dispatch;
            }
        }
    }
    else {
        /* method missing */
        int stat = 0;
        if (flag & VM_CALL_VCALL_BIT) {
            stat |= NOEX_VCALL;
        }
        if (flag & VM_CALL_SUPER_BIT) {
            stat |= NOEX_SUPER;
        }
        if (id == idMethodMissing) {
            /* method_missing itself is missing: raise NoMethodError directly */
            VALUE *argv = ALLOCA_N(VALUE, num);
            vm_method_missing_args(th, argv, num - 1, 0, stat);
            rb_raise_method_missing(th, num, argv, recv, stat);
        }
        else {
            val = vm_method_missing(th, id, recv, num, blockptr, stat);
        }
    }

    RUBY_VM_CHECK_INTS();
    return val;
}
/*
 * Invoke a variadic native function (variant with optional blocking-call
 * support). `parameterTypes`/`parameterValues` are parallel Ruby arrays
 * describing the complete argument list (fixed + variadic). Small integer
 * and float32 types are widened to INT32/UINT32/DOUBLE, matching C's
 * default argument promotions for varargs. Returns the native result
 * converted to a Ruby object.
 */
static VALUE
variadic_invoke(VALUE self, VALUE parameterTypes, VALUE parameterValues)
{
    VariadicInvoker* invoker;
    FFIStorage* params;
    void* retval;
    ffi_cif cif;
    void** ffiValues;
    ffi_type** ffiParamTypes;
    ffi_type* ffiReturnType;
    Type** paramTypes;
    VALUE* argv;
    int paramCount = 0, fixedCount = 0, i;
    ffi_status ffiStatus;
    rbffi_frame_t frame = { 0 };

    Check_Type(parameterTypes, T_ARRAY);
    Check_Type(parameterValues, T_ARRAY);

    Data_Get_Struct(self, VariadicInvoker, invoker);
    paramCount = (int) RARRAY_LEN(parameterTypes);
    paramTypes = ALLOCA_N(Type *, paramCount);
    ffiParamTypes = ALLOCA_N(ffi_type *, paramCount);
    params = ALLOCA_N(FFIStorage, paramCount);
    ffiValues = ALLOCA_N(void*, paramCount);
    argv = ALLOCA_N(VALUE, paramCount);
    /* return buffer must be at least one ffi argument slot wide */
    retval = alloca(MAX(invoker->returnType->ffiType->size, FFI_SIZEOF_ARG));

    for (i = 0; i < paramCount; ++i) {
        VALUE rbType = rb_ary_entry(parameterTypes, i);

        if (!rb_obj_is_kind_of(rbType, rbffi_TypeClass)) {
            rb_raise(rb_eTypeError, "wrong type. Expected (FFI::Type)");
        }
        Data_Get_Struct(rbType, Type, paramTypes[i]);

        /* apply C default argument promotions for variadic arguments */
        switch (paramTypes[i]->nativeType) {
            case NATIVE_INT8:
            case NATIVE_INT16:
            case NATIVE_INT32:
                rbType = rb_const_get(rbffi_TypeClass, rb_intern("INT32"));
                Data_Get_Struct(rbType, Type, paramTypes[i]);
                break;
            case NATIVE_UINT8:
            case NATIVE_UINT16:
            case NATIVE_UINT32:
                rbType = rb_const_get(rbffi_TypeClass, rb_intern("UINT32"));
                Data_Get_Struct(rbType, Type, paramTypes[i]);
                break;
            case NATIVE_FLOAT32:
                rbType = rb_const_get(rbffi_TypeClass, rb_intern("DOUBLE"));
                Data_Get_Struct(rbType, Type, paramTypes[i]);
                break;
            default:
                break;
        }

        ffiParamTypes[i] = paramTypes[i]->ffiType;
        if (ffiParamTypes[i] == NULL) {
            rb_raise(rb_eArgError, "Invalid parameter type #%x", paramTypes[i]->nativeType);
        }
        argv[i] = rb_ary_entry(parameterValues, i);
    }

    ffiReturnType = invoker->returnType->ffiType;
    if (ffiReturnType == NULL) {
        rb_raise(rb_eArgError, "Invalid return type");
    }

    /* Get the number of fixed args from @fixed array */
    fixedCount = RARRAY_LEN(rb_iv_get(self, "@fixed"));

#ifdef HAVE_FFI_PREP_CIF_VAR
    ffiStatus = ffi_prep_cif_var(&cif, invoker->abi, fixedCount, paramCount,
                                 ffiReturnType, ffiParamTypes);
#else
    ffiStatus = ffi_prep_cif(&cif, invoker->abi, paramCount,
                             ffiReturnType, ffiParamTypes);
#endif
    switch (ffiStatus) {
        case FFI_BAD_ABI:
            rb_raise(rb_eArgError, "Invalid ABI specified");
        case FFI_BAD_TYPEDEF:
            rb_raise(rb_eArgError, "Invalid argument type specified");
        case FFI_OK:
            break;
        default:
            rb_raise(rb_eArgError, "Unknown FFI error");
    }

    rbffi_SetupCallParams(paramCount, argv, -1, paramTypes, params,
                          ffiValues, NULL, 0, invoker->rbEnums);

    rbffi_frame_push(&frame);

#ifdef HAVE_RB_THREAD_CALL_WITHOUT_GVL
    /* In Call.c, blocking: true is supported on older ruby variants
     * without rb_thread_call_without_gvl by allocating on the heap instead
     * of the stack. Since this functionality is being added later,
     * we're skipping support for old rubies here. */
    if(unlikely(invoker->blocking)) {
        /* release the GVL for the duration of the native call */
        rbffi_blocking_call_t* bc;
        bc = ALLOCA_N(rbffi_blocking_call_t, 1);
        bc->retval = retval;
        bc->function = invoker->function;
        bc->ffiValues = ffiValues;
        bc->params = params;
        bc->frame = &frame;
        bc->cif = cif;
        rb_rescue2(rbffi_do_blocking_call, (VALUE) bc,
                   rbffi_save_frame_exception, (VALUE) &frame,
                   rb_eException, (VALUE) 0);
    } else {
        ffi_call(&cif, FFI_FN(invoker->function), retval, ffiValues);
    }
#else
    ffi_call(&cif, FFI_FN(invoker->function), retval, ffiValues);
#endif

    rbffi_frame_pop(&frame);

    rbffi_save_errno();

    /* re-raise an exception captured while inside the native call frame */
    if (RTEST(frame.exc) && frame.exc != Qnil) {
        rb_exc_raise(frame.exc);
    }

    return rbffi_NativeValue_ToRuby(invoker->returnType, invoker->rbReturnType, retval);
}
/*
 * Core method-dispatch routine of the (1.9-era) Ruby VM.
 *
 * th       - current thread
 * cfp      - current control frame (its sp is adjusted for receiver/args)
 * num      - number of arguments already pushed on the VM stack
 * blockptr - block passed to the call, or 0
 * flag     - VM_CALL_* bit flags (FCALL/VCALL/SEND/SUPER)
 * id       - method name being called
 * mn       - resolved method node, or 0 when lookup failed
 * recv     - receiver
 *
 * Returns the call result, or Qundef when an ISeq frame was pushed and the
 * interpreter loop must continue executing it.
 */
static inline VALUE
vm_call_method(rb_thread_t * const th, rb_control_frame_t * const cfp,
               const int num, rb_block_t * const blockptr, const VALUE flag,
               const ID id, const NODE * mn, const VALUE recv)
{
    VALUE val;

  start_method_dispatch:
    if (mn != 0) {
        if ((mn->nd_noex == 0)) {
            /* dispatch method */
            NODE *node;

          normal_method_dispatch:
            node = mn->nd_body;
            /* Dispatch on the kind of method definition. */
            switch (nd_type(node)) {
              case RUBY_VM_METHOD_NODE:{
                  /* Bytecoded method: push a new frame; caller resumes the
                   * interpreter loop, hence Qundef. */
                  vm_setup_method(th, cfp, num, blockptr, flag, (VALUE)node->nd_body, recv);
                  return Qundef;
              }
              case NODE_CFUNC:{
                  /* C-implemented method. */
                  val = vm_call_cfunc(th, cfp, num, id, (ID)mn->nd_file, recv, mn->nd_clss, flag, node, blockptr);
                  break;
              }
              case NODE_ATTRSET:{
                  /* attr_writer: pop receiver + value (hence sp -= 2). */
                  val = rb_ivar_set(recv, node->nd_vid, *(cfp->sp - 1));
                  cfp->sp -= 2;
                  break;
              }
              case NODE_IVAR:{
                  /* attr_reader: takes no arguments. */
                  if (num != 0) {
                      rb_raise(rb_eArgError, "wrong number of arguments (%d for 0)", num);
                  }
                  val = rb_attr_get(recv, node->nd_vid);
                  cfp->sp -= 1;
                  break;
              }
              case NODE_BMETHOD:{
                  /* define_method-style method backed by a Proc: copy args
                   * off the VM stack before unwinding it. */
                  VALUE *argv = ALLOCA_N(VALUE, num);
                  MEMCPY(argv, cfp->sp - num, VALUE, num);
                  cfp->sp += - num - 1;
                  val = vm_call_bmethod(th, (ID)mn->nd_file, node->nd_cval, recv, mn->nd_clss, num, argv, blockptr);
                  break;
              }
              case NODE_ZSUPER:{
                  /* Re-resolve the method starting at the superclass. */
                  VALUE klass;
                  klass = RCLASS_SUPER(mn->nd_clss);
                  mn = rb_method_node(klass, id);
                  if (mn != 0) {
                      goto normal_method_dispatch;
                  }
                  else {
                      goto start_method_dispatch;
                  }
              }
              default:{
                  printf("node: %s\n", ruby_node_name(nd_type(node)));
                  rb_bug("eval_invoke_method: unreachable");
                  /* unreachable */
                  break;
              }
            }
        }
        else {
            /* Method has visibility / safe-level flags; enforce them. */
            int noex_safe;

            if (!(flag & VM_CALL_FCALL_BIT) &&
                (mn->nd_noex & NOEX_MASK) & NOEX_PRIVATE) {
                /* Private method called with an explicit receiver ->
                 * method_missing with a "private" status. */
                int stat = NOEX_PRIVATE;
                if (flag & VM_CALL_VCALL_BIT) {
                    stat |= NOEX_VCALL;
                }
                val = vm_method_missing(th, id, recv, num, blockptr, stat);
            }
            else if (((mn->nd_noex & NOEX_MASK) & NOEX_PROTECTED) &&
                     !(flag & VM_CALL_SEND_BIT)) {
                /* Protected: caller's self must be kind_of? the defining
                 * class (unwrap the iclass for module methods). */
                VALUE defined_class = mn->nd_clss;

                if (TYPE(defined_class) == T_ICLASS) {
                    defined_class = RBASIC(defined_class)->klass;
                }

                if (!rb_obj_is_kind_of(cfp->self, rb_class_real(defined_class))) {
                    val = vm_method_missing(th, id, recv, num, blockptr, NOEX_PROTECTED);
                }
                else {
                    goto normal_method_dispatch;
                }
            }
            else if ((noex_safe = NOEX_SAFE(mn->nd_noex)) > th->safe_level &&
                     (noex_safe > 2)) {
                /* $SAFE-level check for methods defined at a higher level. */
                rb_raise(rb_eSecurityError, "calling insecure method: %s", rb_id2name(id));
            }
            else {
                goto normal_method_dispatch;
            }
        }
    }
    else {
        /* method missing */
        if (id == idMethodMissing) {
            /* method_missing itself must always be resolvable. */
            rb_bug("method missing");
        }
        else {
            int stat = 0;
            if (flag & VM_CALL_VCALL_BIT) {
                stat |= NOEX_VCALL;
            }
            if (flag & VM_CALL_SUPER_BIT) {
                stat |= NOEX_SUPER;
            }
            val = vm_method_missing(th, id, recv, num, blockptr, stat);
        }
    }

    RUBY_VM_CHECK_INTS();
    return val;
}
int na_get_result_dimension(VALUE self, int argc, VALUE *argv, ssize_t stride, size_t *pos_idx) { int i, j; int count_new=0; int count_rest=0; ssize_t x, s, m, pos, *idx; narray_t *na; narray_view_t *nv; stridx_t sdx; VALUE a; GetNArray(self,na); if (na->size == 0) { rb_raise(nary_eShapeError, "cannot get element of empty array"); } idx = ALLOCA_N(ssize_t, argc); for (i=j=0; i<argc; i++) { a = argv[i]; switch(TYPE(a)) { case T_FIXNUM: idx[j++] = FIX2LONG(a); break; case T_BIGNUM: case T_FLOAT: idx[j++] = NUM2SSIZET(a); break; case T_FALSE: case T_SYMBOL: if (a==sym_rest || a==sym_tilde || a==Qfalse) { argv[i] = Qfalse; count_rest++; break; } else if (a==sym_new || a==sym_minus) { argv[i] = sym_new; count_new++; } } } if (j != argc) { return check_index_count(argc, na->ndim, count_new, count_rest); } switch(na->type) { case NARRAY_VIEW_T: GetNArrayView(self,nv); pos = nv->offset; if (j == na->ndim) { for (i=j-1; i>=0; i--) { x = na_range_check(idx[i], na->shape[i], i); sdx = nv->stridx[i]; if (SDX_IS_INDEX(sdx)) { pos += SDX_GET_INDEX(sdx)[x]; } else { pos += SDX_GET_STRIDE(sdx)*x; } } *pos_idx = pos; return 0; } if (j == 1) { x = na_range_check(idx[0], na->size, 0); for (i=na->ndim-1; i>=0; i--) { s = na->shape[i]; m = x % s; x = x / s; sdx = nv->stridx[i]; if (SDX_IS_INDEX(sdx)) { pos += SDX_GET_INDEX(sdx)[m]; } else { pos += SDX_GET_STRIDE(sdx)*m; } } *pos_idx = pos; return 0; } break; default: if (!stride) { stride = nary_element_stride(self); } if (j == 1) { x = na_range_check(idx[0], na->size, 0); *pos_idx = stride * x; return 0; } if (j == na->ndim) { pos = 0; for (i=j-1; i>=0; i--) { x = na_range_check(idx[i], na->shape[i], i); pos += stride * x; stride *= na->shape[i]; } *pos_idx = pos; return 0; } } rb_raise(rb_eIndexError,"# of index(=%i) should be " "equal to ndim(=%i) or 1", argc,na->ndim); return -1; }
VALUE na_make_view_struct(VALUE self, VALUE dtype, VALUE offset) { size_t i, n; int j, k, ndim; size_t *shape; size_t *idx1, *idx2; ssize_t stride; stridx_t *stridx; narray_t *na, *nt; narray_view_t *na1, *na2; VALUE klass; volatile VALUE view; GetNArray(self,na); // build from Numo::Struct if (rb_obj_is_kind_of(dtype,cNArray)) { GetNArray(dtype,nt); ndim = na->ndim + nt->ndim; shape = ALLOCA_N(size_t,ndim); // struct dimensions for (j=0; j<na->ndim; j++) { shape[j] = na->shape[j]; } // member dimension for (j=na->ndim,k=0; j<ndim; j++,k++) { shape[j] = nt->shape[k]; } klass = CLASS_OF(dtype); stridx = ALLOC_N(stridx_t, ndim); stride = na_dtype_elmsz(klass); for (j=ndim,k=nt->ndim; k; ) { SDX_SET_STRIDE(stridx[--j],stride); stride *= nt->shape[--k]; } } else { ndim = na->ndim; shape = ALLOCA_N(size_t,ndim); for (j=0; j<ndim; j++) { shape[j] = na->shape[j]; } klass = CLASS_OF(self); if (TYPE(dtype)==T_CLASS) { if (RTEST(rb_class_inherited_p(dtype,cNArray))) { klass = dtype; } } stridx = ALLOC_N(stridx_t, ndim); } view = na_s_allocate_view(klass); na_copy_flags(self, view); GetNArrayView(view, na2); na_setup_shape((narray_t*)na2, ndim, shape); na2->stridx = stridx; switch(na->type) { case NARRAY_DATA_T: case NARRAY_FILEMAP_T: stride = na_get_elmsz(self); for (j=na->ndim; j--;) { SDX_SET_STRIDE(na2->stridx[j], stride); stride *= na->shape[j]; } na2->offset = 0; na2->data = self; break; case NARRAY_VIEW_T: GetNArrayView(self, na1); for (j=na1->base.ndim; j--; ) { if (SDX_IS_INDEX(na1->stridx[j])) { n = na1->base.shape[j]; idx1 = SDX_GET_INDEX(na1->stridx[j]); idx2 = ALLOC_N(size_t, na1->base.shape[j]); for (i=0; i<n; i++) { idx2[i] = idx1[i]; } SDX_SET_INDEX(na2->stridx[j],idx2); } else { na2->stridx[j] = na1->stridx[j]; } } na2->offset = na1->offset; na2->data = na1->data; break; } if (RTEST(offset)) { na2->offset += NUM2SIZET(offset); } return view; }
static inline int vm_callee_setup_arg_complex(rb_thread_t *th, const rb_iseq_t * iseq, int orig_argc, VALUE * orig_argv, const rb_block_t **block) { const int m = iseq->argc; int argc = orig_argc; VALUE *argv = orig_argv; rb_num_t opt_pc = 0; th->mark_stack_len = argc + iseq->arg_size; /* mandatory */ if (argc < (m + iseq->arg_post_len)) { /* check with post arg */ argument_error(iseq, argc, m + iseq->arg_post_len); } argv += m; argc -= m; /* post arguments */ if (iseq->arg_post_len) { if (!(orig_argc < iseq->arg_post_start)) { VALUE *new_argv = ALLOCA_N(VALUE, argc); MEMCPY(new_argv, argv, VALUE, argc); argv = new_argv; } MEMCPY(&orig_argv[iseq->arg_post_start], &argv[argc -= iseq->arg_post_len], VALUE, iseq->arg_post_len); } /* opt arguments */ if (iseq->arg_opts) { const int opts = iseq->arg_opts - 1 /* no opt */; if (iseq->arg_rest == -1 && argc > opts) { argument_error(iseq, orig_argc, m + opts + iseq->arg_post_len); } if (argc > opts) { argc -= opts; argv += opts; opt_pc = iseq->arg_opt_table[opts]; /* no opt */ } else { int i; for (i = argc; i<opts; i++) { orig_argv[i + m] = Qnil; } opt_pc = iseq->arg_opt_table[argc]; argc = 0; } } /* rest arguments */ if (iseq->arg_rest != -1) { orig_argv[iseq->arg_rest] = rb_ary_new4(argc, argv); argc = 0; } /* block arguments */ if (block && iseq->arg_block != -1) { VALUE blockval = Qnil; const rb_block_t *blockptr = *block; if (argc != 0) { argument_error(iseq, orig_argc, m + iseq->arg_post_len); } if (blockptr) { /* make Proc object */ if (blockptr->proc == 0) { rb_proc_t *proc; blockval = rb_vm_make_proc(th, blockptr, rb_cProc); GetProcPtr(blockval, proc); *block = &proc->block; } else { blockval = blockptr->proc; } } orig_argv[iseq->arg_block] = blockval; /* Proc or nil */ } th->mark_stack_len = 0; return (int)opt_pc; }
/* FIXME: DON'T WORK!!!
 *
 * Sort a groonga view table by the given keys and return the sorted
 * records as a Ruby Array of view records.
 *
 * argv: keys (Array of column name/column/Hash/[key, order] pairs) and an
 * optional options Hash with :offset and :limit.
 *
 * NOTE(review): the grn_view_add() calls below create two throw-away
 * GRN_TABLE_NO_KEY tables keyed off a hard-coded "People" table — this
 * looks like leftover debugging scaffolding and is presumably part of why
 * the function is marked broken; verify against the groonga view API
 * before relying on this code.
 * NOTE(review): the guard is `#ifdef WIN32`; most toolchains define
 * `_WIN32` — confirm WIN32 is defined by this project's build on Windows.
 */
static VALUE
rb_grn_view_sort (int argc, VALUE *argv, VALUE self)
{
    VALUE rb_result = Qnil;
#ifdef WIN32
    rb_raise(rb_eNotImpError, "grn_view_add() isn't available on Windows.");
#else
    grn_ctx *context = NULL;
    grn_obj *view;
    grn_obj *result;
    grn_table_sort_key *keys;
    int i, n_keys;
    int n_records, offset = 0, limit = -1;
    VALUE rb_keys, options;
    VALUE rb_offset, rb_limit;
    VALUE *rb_sort_keys;
    grn_table_cursor *cursor;
    VALUE exception;
    grn_obj id;

    rb_grn_table_deconstruct(SELF(self), &view, &context,
                             NULL, NULL,
                             NULL, NULL, NULL,
                             NULL);

    rb_scan_args(argc, argv, "11", &rb_keys, &options);

    if (!RVAL2CBOOL(rb_obj_is_kind_of(rb_keys, rb_cArray)))
        rb_raise(rb_eArgError, "keys should be an array of key: <%s>",
                 rb_grn_inspect(rb_keys));

    n_keys = RARRAY_LEN(rb_keys);
    rb_sort_keys = RARRAY_PTR(rb_keys);
    keys = ALLOCA_N(grn_table_sort_key, n_keys);
    /* Normalize each key spec (Hash / [key, order] pair / bare key) into a
     * grn_table_sort_key with a resolved column object and order flag. */
    for (i = 0; i < n_keys; i++) {
        VALUE rb_sort_options, rb_key, rb_resolved_key, rb_order;

        if (RVAL2CBOOL(rb_obj_is_kind_of(rb_sort_keys[i], rb_cHash))) {
            rb_sort_options = rb_sort_keys[i];
        } else if (RVAL2CBOOL(rb_obj_is_kind_of(rb_sort_keys[i], rb_cArray))) {
            rb_sort_options = rb_hash_new();
            rb_hash_aset(rb_sort_options,
                         RB_GRN_INTERN("key"), rb_ary_entry(rb_sort_keys[i], 0));
            rb_hash_aset(rb_sort_options,
                         RB_GRN_INTERN("order"), rb_ary_entry(rb_sort_keys[i], 1));
        } else {
            rb_sort_options = rb_hash_new();
            rb_hash_aset(rb_sort_options,
                         RB_GRN_INTERN("key"), rb_sort_keys[i]);
        }
        rb_grn_scan_options(rb_sort_options,
                            "key", &rb_key,
                            "order", &rb_order,
                            NULL);
        if (RVAL2CBOOL(rb_obj_is_kind_of(rb_key, rb_cString))) {
            rb_resolved_key = rb_grn_table_get_column(self, rb_key);
        } else {
            rb_resolved_key = rb_key;
        }
        keys[i].key = RVAL2GRNOBJECT(rb_resolved_key, &context);
        if (!keys[i].key) {
            rb_raise(rb_eGrnNoSuchColumn,
                     "no such column: <%s>: <%s>",
                     rb_grn_inspect(rb_key), rb_grn_inspect(self));
        }
        if (NIL_P(rb_order)) {
            keys[i].flags = 0;
        } else if (rb_grn_equal_option(rb_order, "desc") ||
                   rb_grn_equal_option(rb_order, "descending")) {
            keys[i].flags = GRN_TABLE_SORT_DESC;
        } else if (rb_grn_equal_option(rb_order, "asc") ||
                   rb_grn_equal_option(rb_order, "ascending")) {
            keys[i].flags = GRN_TABLE_SORT_ASC;
        } else {
            rb_raise(rb_eArgError,
                     "order should be one of "
                     "[nil, :desc, :descending, :asc, :ascending]: %s",
                     rb_grn_inspect(rb_order));
        }
    }

    rb_grn_scan_options(options,
                        "offset", &rb_offset,
                        "limit", &rb_limit,
                        NULL);
    if (!NIL_P(rb_offset))
        offset = NUM2INT(rb_offset);
    if (!NIL_P(rb_limit))
        limit = NUM2INT(rb_limit);

    result = grn_table_create(context, NULL, 0, NULL, GRN_TABLE_VIEW,
                              NULL, NULL);
    /* NOTE(review): hard-coded "People" tables — see header comment. */
    grn_view_add(context, result,
                 grn_table_create(context, NULL, 0, NULL, GRN_TABLE_NO_KEY,
                                  NULL, grn_ctx_get(context, "People", strlen("People"))));
    grn_view_add(context, result,
                 grn_table_create(context, NULL, 0, NULL, GRN_TABLE_NO_KEY,
                                  NULL, grn_ctx_get(context, "People", strlen("People"))));
    n_records = grn_table_sort(context, view, offset, limit,
                               result, keys, n_keys);
    exception = rb_grn_context_to_exception(context, self);
    if (!NIL_P(exception)) {
        grn_obj_unlink(context, result);
        rb_exc_raise(exception);
    }

    /* Walk the sorted result with a cursor and wrap each record id. */
    rb_result = rb_ary_new();
    cursor = grn_table_cursor_open(context, result, NULL, 0, NULL, 0,
                                   0, -1, GRN_CURSOR_ASCENDING);
    GRN_TEXT_INIT(&id, 0);
    while (grn_table_cursor_next_o(context, cursor, &id) == GRN_SUCCESS) {
        rb_ary_push(rb_result, rb_grn_view_record_new(self, &id));
    }
    GRN_OBJ_FIN(context, &id);
    grn_table_cursor_close(context, cursor);
    grn_obj_unlink(context, result);
#endif
    return rb_result;
}
// TODO: // 1. this function uses a lot of 'alloca' calls, which AFAIK is not recommeneded! Can this be avoided? // 2. all wcscat calls can be done faster with memcpy, but is it worth sacrificing the readability? static VALUE extract_absolute_path_from_notification(const LPWSTR base_dir, const PFILE_NOTIFY_INFORMATION info) { LPWSTR buffer, absolute_filepath; WCHAR file[_MAX_FNAME], ext[_MAX_EXT], filename[WDM_MAX_FILENAME]; DWORD filename_len, absolute_filepath_len; LPSTR multibyte_filepath; int multibyte_filepath_buffer_size; VALUE path; filename_len = info->FileNameLength/sizeof(WCHAR); // The file in the 'info' struct is NOT null-terminated, so add 1 extra char to the allocation buffer = ALLOCA_N(WCHAR, filename_len + 1); memcpy(buffer, info->FileName, info->FileNameLength); // Null-terminate the string buffer[filename_len] = L'\0'; WDM_WDEBUG("change in: '%s'", buffer); absolute_filepath_len = wcslen(base_dir) + filename_len; absolute_filepath = ALLOCA_N(WCHAR, absolute_filepath_len + 1); // 1 for NULL absolute_filepath[0] = L'\0'; wcscat(absolute_filepath, base_dir); wcscat(absolute_filepath, buffer); WDM_WDEBUG("absolute path is: '%s'", absolute_filepath); _wsplitpath(buffer, NULL, NULL, file, ext); // TODO: Extracting the file name from 'buffer' is only needed when watching sub-dirs filename[0] = L'\0'; if ( file[0] != L'\0' ) wcscat(filename, file); if ( ext[0] != L'\0' ) wcscat(filename, ext); WDM_WDEBUG("filename: '%s'", filename); filename_len = wcslen(filename); // The maximum length of an 8.3 filename is twelve, including the dot. if (filename_len <= 12 && wcschr(filename, L'~')) { LPWSTR unicode_absolute_filepath; WCHAR absolute_long_filepath[WDM_MAX_WCHAR_LONG_PATH]; BOOL is_unc_path; is_unc_path = wdm_utils_is_unc_path(absolute_filepath); unicode_absolute_filepath = ALLOCA_N(WCHAR, absolute_filepath_len + (is_unc_path ? 
8 : 4) + 1); // 8 for "\\?\UNC\" or 4 for "\\?\", and 1 for \0 unicode_absolute_filepath[0] = L'\0'; wcscat(unicode_absolute_filepath, L"\\\\?\\"); if ( is_unc_path ) { wcscat(unicode_absolute_filepath, L"UNC\\"); wcscat(unicode_absolute_filepath, absolute_filepath + 2); // +2 to skip the begin of a UNC path } else { wcscat(unicode_absolute_filepath, absolute_filepath); } // Convert to the long filename form. Unfortunately, this // does not work for deletions, so it's an imperfect fix. if (GetLongPathNameW(unicode_absolute_filepath, absolute_long_filepath, WDM_MAX_WCHAR_LONG_PATH) != 0) { absolute_filepath = absolute_long_filepath + 4; // Skip first 4 pointers of "\\?\" absolute_filepath_len = wcslen(absolute_filepath); WDM_WDEBUG("Short path converted to long: '%s'", absolute_filepath); } else { WDM_DEBUG("Can't convert short path to long: '%s'", rb_w32_strerror(GetLastError())); } } // The convention in Ruby is to use forward-slashes to seprarate dirs on all platforms. wdm_utils_convert_back_to_forward_slashes(absolute_filepath, absolute_filepath_len + 1); // Convert the path from WCHAR to multibyte CHAR to use it in a ruby string multibyte_filepath_buffer_size = WideCharToMultiByte(CP_UTF8, 0, absolute_filepath, absolute_filepath_len + 1, NULL, 0, NULL, NULL); multibyte_filepath = ALLOCA_N(CHAR, multibyte_filepath_buffer_size); if ( 0 == WideCharToMultiByte(CP_UTF8, 0, absolute_filepath, absolute_filepath_len + 1, multibyte_filepath, multibyte_filepath_buffer_size, NULL, NULL) ) { rb_raise(eWDM_Error, "Failed to add the change file path to the event!"); } WDM_DEBUG("will report change in: '%s'", multibyte_filepath); path = rb_enc_str_new(multibyte_filepath, multibyte_filepath_buffer_size - 1, // -1 because this func takes the chars count, not bytes count wdm_rb_enc_utf8); OBJ_TAINT(path); return path; }
/*
 * Register a new directory watch (optionally recursive) on the monitor.
 *
 * recursively - TRUE to watch subdirectories too
 * argv        - directory path (String) followed by zero or more flag args
 * self        - the Monitor object; must not be running yet
 *
 * The supplied block becomes the change callback.  Raises
 * eWDM_MonitorRunningError if the monitor is already running,
 * eWDM_InvalidDirectoryError if the path isn't a directory, and
 * eWDM_Error on path/handle failures.  Returns nil.
 */
static VALUE
combined_watch(BOOL recursively, int argc, VALUE *argv, VALUE self)
{
    WDM_PMonitor monitor;
    WDM_PEntry entry;
    int directory_letters_count;
    VALUE directory, flags, os_encoded_directory;
    BOOL running;

    // TODO: Maybe raise a more user-friendly error?
    rb_need_block();

    Data_Get_Struct(self, WDM_Monitor, monitor);

    /* Snapshot the running flag under the monitor lock. */
    EnterCriticalSection(&monitor->lock);
        running = monitor->running;
    LeaveCriticalSection(&monitor->lock);

    if ( running ) {
        rb_raise(eWDM_MonitorRunningError, "You can't watch new directories while the monitor is running!");
    }

    rb_scan_args(argc, argv, "1*", &directory, &flags);
    Check_Type(directory, T_STRING);

    entry = wdm_entry_new();
    entry->user_data->watch_childeren = recursively;
    entry->user_data->callback = rb_block_proc();
    entry->user_data->flags =  RARRAY_LEN(flags) == 0 ? WDM_MONITOR_FLAGS_DEFAULT : extract_flags_from_rb_array(flags);

    // WTF Ruby source: The original code (file.c) uses the following macro to make sure that the encoding
    // of the string is ASCII-compatible, but UTF-16LE (Windows default encoding) is not!!!
    //
    // FilePathValue(directory);

    os_encoded_directory = rb_str_encode_ospath(directory);

    // RSTRING_LEN can't be used because it would return the count of bytes the string uses in its encoding (like UTF-8).
    // UTF-8 might use more than one byte for the char, which is not needed for WCHAR strings.
    // Also, the result of MultiByteToWideChar _includes_ the NULL char at the end, which is not true for RSTRING.
    //
    // Example: 'C:\Users\Maher\Desktop\تجربة' with __ENCODING__ == UTF-8
    //   MultiByteToWideChar => 29 (28-char + null)
    //   RSTRING_LEN => 33 (23-char + 10-bytes for 5 Arabic letters which take 2 bytes each)
    //
    directory_letters_count = MultiByteToWideChar(CP_UTF8, 0, RSTRING_PTR(os_encoded_directory), -1, NULL, 0);

    // NOTE(review): this is stack memory (alloca) stored into a heap entry.
    // It is only safe because the pointer is replaced by the result of
    // wdm_utils_full_pathname() below before this function returns --
    // verify that full_pathname allocates a fresh buffer and never returns
    // its argument, otherwise entry->user_data->dir dangles.
    entry->user_data->dir = ALLOCA_N(WCHAR, directory_letters_count);

    MultiByteToWideChar(CP_UTF8, 0, RSTRING_PTR(os_encoded_directory), -1, entry->user_data->dir, directory_letters_count);

    WDM_WDEBUG("New path to watch: '%s'", entry->user_data->dir);

    entry->user_data->dir = wdm_utils_full_pathname(entry->user_data->dir);

    if ( entry->user_data->dir == 0 ) {
        wdm_entry_free(entry);
        rb_raise(eWDM_Error, "Can't get the absolute path for the passed directory: '%s'!", RSTRING_PTR(directory));
    }

    if ( ! wdm_utils_unicode_is_directory(entry->user_data->dir) ) {
        wdm_entry_free(entry);
        rb_raise(eWDM_InvalidDirectoryError, "No such directory: '%s'!", RSTRING_PTR(directory));
    }

    entry->dir_handle = CreateFileW(
        entry->user_data->dir,     // pointer to the file name
        FILE_LIST_DIRECTORY,       // access (read/write) mode
        FILE_SHARE_READ            // share mode
            | FILE_SHARE_WRITE
            | FILE_SHARE_DELETE,
        NULL,                      // security descriptor
        OPEN_EXISTING,             // how to create
        FILE_FLAG_BACKUP_SEMANTICS
            | FILE_FLAG_OVERLAPPED, // file attributes
        NULL
    );

    if ( entry->dir_handle == INVALID_HANDLE_VALUE ) {
        wdm_entry_free(entry);
        rb_raise(eWDM_Error, "Can't watch directory: '%s'!", RSTRING_PTR(directory));
    }

    // Store a reference to the entry instead of an event as the event
    // won't be used when using callbacks.
    entry->event_container.hEvent = wdm_monitor_callback_param_new(monitor, entry);

    wdm_monitor_update_head(monitor, entry);

    WDM_WDEBUG("Watching directory: '%s'", entry->user_data->dir);

    return Qnil;
}
void <%=c_iter%>(na_loop_t *const lp) { size_t i; char *p1, *p2; ssize_t s1, s2; size_t *idx1, *idx2; dtype x; VALUE *a; size_t *c; int nd, md; c = (size_t*)(lp->opt_ptr); nd = lp->ndim - 1; md = lp->ndim + 1; a = ALLOCA_N(VALUE,md); INIT_COUNTER(lp, i); INIT_PTR_IDX(lp, 0, p1, s1, idx1); INIT_PTR_IDX(lp, 1, p2, s2, idx2); c[nd] = 0; if (idx1) { if (idx2) { for (; i--;) { GET_DATA_INDEX(p1,idx1,dtype,x); x = yield_map_with_index(x,c,a,nd,md); SET_DATA_INDEX(p2,idx2,dtype,x); c[nd]++; } } else {