/* Called to obtain the x509 cert of an authenticated peer. */
static VALUE grpc_rb_call_get_peer_cert(VALUE self) {
  grpc_rb_call *call = NULL;
  VALUE res = Qnil;
  grpc_auth_context *ctx = NULL;
  if (RTYPEDDATA_DATA(self) == NULL) {
    rb_raise(grpc_rb_eCallError, "Cannot get peer cert on closed call");
    return Qnil;
  }
  TypedData_Get_Struct(self, grpc_rb_call, &grpc_call_data_type, call);

  ctx = grpc_call_auth_context(call->wrapped);
  if (ctx == NULL) {
    return Qnil;
  }
  if (!grpc_auth_context_peer_is_authenticated(ctx)) {
    /* release the context on every exit path; returning early without
       releasing it would leak the reference */
    grpc_auth_context_release(ctx);
    return Qnil;
  }

  {
    grpc_auth_property_iterator it = grpc_auth_context_find_properties_by_name(
        ctx, GRPC_X509_PEM_CERT_PROPERTY_NAME);
    const grpc_auth_property *prop = grpc_auth_property_iterator_next(&it);
    if (prop != NULL) {
      res = rb_str_new2(prop->value);
    }
  }

  grpc_auth_context_release(ctx);
  return res;
}
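/* For context: a wrapper method like this is exposed to Ruby during extension
   init. A minimal sketch, assuming the call class is held in a variable named
   grpc_rb_cCall (the variable and function name here are assumptions, not
   shown in this excerpt): */
static void grpc_rb_sketch_register_peer_cert(VALUE grpc_rb_cCall) {
  /* zero-arity Ruby method GRPC::Core::Call#peer_cert */
  rb_define_method(grpc_rb_cCall, "peer_cert", grpc_rb_call_get_peer_cert, 0);
}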
/*
 * call-seq:
 *    Kernel.each_backtrace_frame( & block )
 *
 * Yields a hash of object and method frame information for each frame in the
 * backtrace, starting with the caller's context. If no block is given, an
 * Enumerator over a stored copy of the backtrace is returned.
 */
VALUE rb_RPRuby_Sender_Kernel_each_backtrace_frame( int argc, VALUE* args, VALUE rb_self ) {

    rb_thread_t* c_thread = (rb_thread_t *)RTYPEDDATA_DATA(rb_thread_current());

    // Get the current frame - we're doing a backtrace, so our working frame to start is the first previous frame
    rb_control_frame_t* c_current_context_frame = RUBY_VM_PREVIOUS_CONTROL_FRAME( RUBY_VM_PREVIOUS_CONTROL_FRAME( c_thread->cfp ) );

    // c_top_of_control_frame describes the top edge of the stack trace;
    // set c_top_of_control_frame to the first frame in <main>
    rb_control_frame_t* c_top_of_control_frame = RUBY_VM_NEXT_CONTROL_FRAME( RUBY_VM_NEXT_CONTROL_FRAME( (void *)( c_thread->stack + c_thread->stack_size ) ) );

    VALUE rb_stored_backtrace_array = Qnil;

    // if we were passed a stored backtrace array, use it
    if ( argc == 1 && TYPE( args[ 0 ] ) == T_ARRAY ) {
        rb_stored_backtrace_array = args[ 0 ];
    }

    // for each control frame:
    while ( c_current_context_frame < c_top_of_control_frame ) {

        VALUE rb_frame_hash;
        // if we are using a stored backtrace we don't need to ask for a new hash
        if ( rb_stored_backtrace_array == Qnil ) {
            rb_frame_hash = rb_RPRuby_Sender_Kernel_internal_backtraceHashForControlFrame( & c_current_context_frame );
        }
        else {
            rb_frame_hash = rb_ary_shift( rb_stored_backtrace_array );
        }

        if ( rb_frame_hash == Qnil ) {
            break;
        }

        // if we try to iterate using an Enumerator we will lose our context
        if ( ! rb_block_given_p() ) {

            // we solve this by assuming that the desired context is the moment when each_backtrace_frame is called;
            // this allows us to store the backtrace and iterate it as we want.
            // the only downside is that we have to get the entire backtrace first in order to store it
            rb_stored_backtrace_array = rb_RPRuby_Sender_Kernel_backtrace( 0, NULL, rb_self );

            RETURN_ENUMERATOR( rb_self, 1, & rb_stored_backtrace_array );
        }

        // otherwise, yield the frame hash to the block
        rb_yield( rb_frame_hash );

        // only move the frame if we are not using a stored backtrace
        if ( rb_stored_backtrace_array == Qnil ) {
            c_current_context_frame = RUBY_VM_PREVIOUS_CONTROL_FRAME( c_current_context_frame );
        }
    }

    return Qnil;
}
rb_control_frame_t* RPRuby_internal_framePriorTo( rb_control_frame_t* c_control_frame ) {

    rb_thread_t* c_thread = (rb_thread_t *)RTYPEDDATA_DATA(rb_thread_current());

    rb_control_frame_t* c_prior_control_frame = NULL;

    // get the current frame pointer if we weren't passed one
    if ( c_control_frame == NULL ) {
        c_control_frame = c_thread->cfp;
    }

    if ( ( c_prior_control_frame = rb_vm_get_ruby_level_next_cfp( c_thread, c_control_frame ) ) != 0 ) {
        // not sure why we have to call this a second time after it was already called
        // at the end of rb_vm_get_ruby_level_next_cfp, but it seems to be necessary
        c_prior_control_frame = RUBY_VM_PREVIOUS_CONTROL_FRAME( c_prior_control_frame );
    }
    else {
        // no prior Ruby-level frame; return NULL here rather than falling
        // through and dereferencing a NULL frame pointer below
        return NULL;
    }

    // if we have a nil object we've passed main, so we're done
    if ( c_prior_control_frame->self == Qnil ) {
        return NULL;
    }

    return c_prior_control_frame;
}
static inline cfunc get_method_with_func(cfunc func, VALUE obj, char *name) {
    VALUE vmethod;
    struct METHOD *method;

    // look up the named method on obj via the supplied lookup function
    // (e.g. Object#method or Module#instance_method)
    vmethod = func(obj, set_buf_string(name));

    // reach into the Method object's internals and pull out the underlying
    // C function pointer
    method = (struct METHOD*)RTYPEDDATA_DATA(vmethod);
    return method->me.def->body.cfunc.func;
}
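/* The helpers in this file poke into CRuby's private Method internals. They
   assume a layout roughly like the sketch below (1.9.x-era; the real
   struct METHOD lives in proc.c and changes between Ruby versions, so this
   is illustrative only, not the verified definition): */
struct METHOD_sketch {
    VALUE recv;            /* bound receiver */
    VALUE rclass;          /* class the method was retrieved from */
    ID id;                 /* method name */
    rb_method_entry_t me;  /* method entry; me.def->body.cfunc holds the C
                              function pointer and arity when me.def->type
                              is VM_METHOD_TYPE_CFUNC */
};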
/* Releases the c-level resources associated with a call.
   Once a call has been closed, no further requests can be processed. */
static VALUE grpc_rb_call_close(VALUE self) {
  grpc_rb_call *call = NULL;
  TypedData_Get_Struct(self, grpc_rb_call, &grpc_call_data_type, call);
  if (call != NULL) {
    destroy_call(call);
    RTYPEDDATA_DATA(self) = NULL;
  }
  return Qnil;
}
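/* The NULL stored by grpc_rb_call_close is exactly what the
   RTYPEDDATA_DATA(self) == NULL guards in the other call methods detect.
   For reference, a sketch of the rb_data_type_t those TypedData_Get_Struct
   calls check against (the hook names are assumptions, not taken from the
   source): */
static const rb_data_type_t grpc_call_data_type_sketch = {
    "grpc_call",
    {NULL /* dmark */, grpc_rb_call_destroy /* dfree */, NULL /* dsize */,
     {NULL, NULL}},
    NULL, /* parent type */
    NULL, /* type-specific data */
    RUBY_TYPED_FREE_IMMEDIATELY};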
/*
 * Private
 */
static VALUE ossl_digest_alloc(VALUE klass) {
    VALUE obj = TypedData_Wrap_Struct(klass, &ossl_digest_type, 0);
    EVP_MD_CTX *ctx = EVP_MD_CTX_create();

    if (ctx == NULL)
        ossl_raise(rb_eRuntimeError, "EVP_MD_CTX_create() failed");
    RTYPEDDATA_DATA(obj) = ctx;

    return obj;
}
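/* The allocator wraps a NULL pointer first and only attaches the ctx on
   success, so GC never observes a garbage pointer if EVP_MD_CTX_create()
   fails and ossl_raise unwinds. A plausible sketch of the matching type
   descriptor, whose dfree hook releases the context (hook and descriptor
   names are assumptions): */
static void ossl_digest_free_sketch(void *ctx) {
    EVP_MD_CTX_destroy(ctx);
}

static const rb_data_type_t ossl_digest_type_sketch = {
    "OpenSSL/Digest",
    {0, ossl_digest_free_sketch, 0, {0, 0}},
    0, 0,
    RUBY_TYPED_FREE_IMMEDIATELY,
};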
/*
 * call-seq:
 *    Kernel.backtrace( number_of_frames = nil ) -> [ { :object => object, :method => method }, ... ]
 *
 * Returns an array of hashes with object and method frame information for the backtrace.
 * Specifying number_of_frames will cause only the last number_of_frames to be returned.
 * Kernel.backtrace returns all frames including the current context (__method__/__callee__).
 */
VALUE rb_RPRuby_Sender_Kernel_backtrace( int argc, VALUE* args, VALUE rb_self ) {

    // Get max stack level from args if it is there
    int c_max_stack_level = 0;
    if ( argc ) {
        c_max_stack_level = FIX2INT( args[ 0 ] );

        // if max_stack_level is 0, return an empty array
        if ( c_max_stack_level == 0 ) {
            return rb_ary_new();
        }
        // if max_stack_level < 0, raise an error
        else if ( c_max_stack_level < 0 ) {
            rb_raise( rb_eArgError, RPRUBY_SENDER_ERROR_STACK_LEVEL_LESS_THAN_ZERO );
        }
    }

    rb_thread_t* c_thread = (rb_thread_t *)RTYPEDDATA_DATA(rb_thread_current());

    // Get the current frame - we're doing a backtrace, so our working frame to start is the first previous frame
    rb_control_frame_t* c_current_context_frame = RUBY_VM_PREVIOUS_CONTROL_FRAME( c_thread->cfp );

    // c_top_of_control_frame describes the top edge of the stack trace;
    // set c_top_of_control_frame to the first frame in <main>
    rb_control_frame_t* c_top_of_control_frame = RUBY_VM_NEXT_CONTROL_FRAME( RUBY_VM_NEXT_CONTROL_FRAME( (void *)( c_thread->stack + c_thread->stack_size ) ) );

    VALUE rb_return_array = rb_ary_new();

    int c_stack_level = 0;

    // for each control frame:
    while ( c_current_context_frame < c_top_of_control_frame
         && ( argc == 0 || c_stack_level < c_max_stack_level ) ) {

        VALUE rb_frame_hash = rb_RPRuby_Sender_Kernel_internal_backtraceHashForControlFrame( & c_current_context_frame );

        if ( rb_frame_hash == Qnil ) {
            break;
        }

        // push the frame hash to the return array
        rb_ary_push( rb_return_array, rb_frame_hash );

        c_current_context_frame = RUBY_VM_PREVIOUS_CONTROL_FRAME( c_current_context_frame );
        c_stack_level++;
    }

    return rb_return_array;
}
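/* Hedged sketch: how these Kernel extensions might be registered at
   extension load time. The init function name is an assumption based on the
   rb_RPRuby_Sender_Kernel_* naming; rb_define_module_function is the
   standard C API for defining module functions with a
   (int argc, VALUE *argv, VALUE self) signature (arity -1). */
void Init_rpruby_sender_kernel_sketch(void) {
    rb_define_module_function( rb_mKernel, "backtrace",
                               rb_RPRuby_Sender_Kernel_backtrace, -1 );
    rb_define_module_function( rb_mKernel, "each_backtrace_frame",
                               rb_RPRuby_Sender_Kernel_each_backtrace_frame, -1 );
    rb_define_module_function( rb_mKernel, "backtrace_includes?",
                               rb_RPRuby_Sender_Kernel_backtrace_includes, -1 );
}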
void rb_define_method(VALUE klass, const char *name, VALUE (*func)(ANYARGS), int argc) {
    VALUE vmethod, v[2] = {value_buf_string, dummy_proc};
    struct METHOD *method;

    // define a placeholder method under the requested name, backed by dummy_proc
    set_buf_string(name);
    rb_mod_define_method(2, v, klass);

    // fetch the freshly defined method as an UnboundMethod ...
    vmethod = rb_mod_instance_method(klass, value_buf_string);

    // ... and rewrite its definition in place to dispatch to the C function
    method = (struct METHOD*)RTYPEDDATA_DATA(vmethod);
    method->me.def->type = VM_METHOD_TYPE_CFUNC;
    method->me.def->body.cfunc.func = func;
    method->me.def->body.cfunc.argc = argc;

    // make sure the new method is public regardless of the current default visibility
    rb_mod_public(1, &value_buf_string, klass);
}
/* call-seq:
   ops = {
     GRPC::Core::CallOps::SEND_INITIAL_METADATA => <op_value>,
     GRPC::Core::CallOps::SEND_MESSAGE => <op_value>,
     ...
   }
   call.run_batch(ops)

   Starts the batch of operations described by the ops hash, then blocks on
   the call's completion queue until the batch completes.

   The order of ops specified in the batch has no significance.
   Only one operation of each type can be active at once in any given
   batch. */
static VALUE grpc_rb_call_run_batch(VALUE self, VALUE ops_hash) {
  run_batch_stack *st = NULL;
  grpc_rb_call *call = NULL;
  grpc_event ev;
  grpc_call_error err;
  VALUE result = Qnil;
  VALUE rb_write_flag = rb_ivar_get(self, id_write_flag);
  unsigned write_flag = 0;
  /* the address of the local st pointer serves as the opaque tag pairing
     this batch with its completion event */
  void *tag = (void *)&st;

  if (RTYPEDDATA_DATA(self) == NULL) {
    rb_raise(grpc_rb_eCallError, "Cannot run batch on closed call");
    return Qnil;
  }
  TypedData_Get_Struct(self, grpc_rb_call, &grpc_call_data_type, call);

  /* Validate the ops hash */
  if (TYPE(ops_hash) != T_HASH) {
    rb_raise(rb_eTypeError, "call#run_batch: ops hash should be a hash");
    return Qnil;
  }
  if (rb_write_flag != Qnil) {
    write_flag = NUM2UINT(rb_write_flag);
  }

  st = gpr_malloc(sizeof(run_batch_stack));
  grpc_run_batch_stack_init(st, write_flag);
  grpc_run_batch_stack_fill_ops(st, ops_hash);

  /* call grpc_call_start_batch, then wait for it to complete using
   * pluck_event */
  err = grpc_call_start_batch(call->wrapped, st->ops, st->op_num, tag, NULL);
  if (err != GRPC_CALL_OK) {
    grpc_run_batch_stack_cleanup(st);
    gpr_free(st);
    rb_raise(grpc_rb_eCallError,
             "grpc_call_start_batch failed with %s (code=%d)",
             grpc_call_error_detail_of(err), err);
    return Qnil;
  }
  ev = rb_completion_queue_pluck(call->queue, tag,
                                 gpr_inf_future(GPR_CLOCK_REALTIME), NULL);
  if (!ev.success) {
    /* clean up the batch stack before raising so the error path does not
       leak it */
    grpc_run_batch_stack_cleanup(st);
    gpr_free(st);
    rb_raise(grpc_rb_eCallError, "call#run_batch failed somehow");
  }

  /* Build and return the BatchResult struct result;
     if there is an error, it's reflected in the status */
  result = grpc_run_batch_stack_build_result(st);
  grpc_run_batch_stack_cleanup(st);
  gpr_free(st);
  return result;
}
static VALUE ossl_hmac_alloc(VALUE klass) {
    VALUE obj;
    HMAC_CTX *ctx;

    obj = NewHMAC(klass);
    ctx = HMAC_CTX_new();
    if (!ctx)
        ossl_raise(eHMACError, NULL);
    RTYPEDDATA_DATA(obj) = ctx;

    return obj;
}
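/* NewHMAC presumably mirrors the digest allocator above: wrap with a NULL
   data pointer first, so GC never sees an uninitialized ctx if
   HMAC_CTX_new() fails and ossl_raise unwinds. A sketch of the assumed
   macro (not the verified definition): */
#define NewHMAC_sketch(klass) \
    TypedData_Wrap_Struct((klass), &ossl_hmac_type, 0)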
/* Called to obtain the peer that this call is connected to. */
static VALUE grpc_rb_call_get_peer(VALUE self) {
  VALUE res = Qnil;
  grpc_rb_call *call = NULL;
  char *peer = NULL;
  if (RTYPEDDATA_DATA(self) == NULL) {
    rb_raise(grpc_rb_eCallError, "Cannot get peer value on closed call");
    return Qnil;
  }
  TypedData_Get_Struct(self, grpc_rb_call, &grpc_call_data_type, call);
  peer = grpc_call_get_peer(call->wrapped);
  res = rb_str_new2(peer);
  gpr_free(peer);
  return res;
}
static VALUE rb_st_thread_stacktrace(int argc, VALUE* argv, VALUE thval) {
    rb_thread_t *th = (rb_thread_t *)RTYPEDDATA_DATA(thval);

    switch (th->status) {
      case THREAD_RUNNABLE:
      case THREAD_STOPPED:
      case THREAD_STOPPED_FOREVER:
        break;
      case THREAD_TO_KILL:
      case THREAD_KILLED:
        // dead or dying threads have no stack to walk
        return Qnil;
    }
    return stacktrace(argc, argv, th);
}
/* Called by clients to cancel an RPC on the server.
   Can be called multiple times, from any thread. */
static VALUE grpc_rb_call_cancel(VALUE self) {
  grpc_rb_call *call = NULL;
  grpc_call_error err;
  if (RTYPEDDATA_DATA(self) == NULL) {
    /* the call has been closed; cancellation is a no-op */
    return Qnil;
  }
  TypedData_Get_Struct(self, grpc_rb_call, &grpc_call_data_type, call);
  err = grpc_call_cancel(call->wrapped, NULL);
  if (err != GRPC_CALL_OK) {
    rb_raise(grpc_rb_eCallError, "cancel failed: %s (code=%d)",
             grpc_call_error_detail_of(err), err);
  }
  return Qnil;
}
/* call-seq:
   call.set_credentials call_credentials

   Sets credentials on a call. */
static VALUE grpc_rb_call_set_credentials(VALUE self, VALUE credentials) {
  grpc_rb_call *call = NULL;
  grpc_call_credentials *creds;
  grpc_call_error err;
  if (RTYPEDDATA_DATA(self) == NULL) {
    rb_raise(grpc_rb_eCallError, "Cannot set credentials of closed call");
    return Qnil;
  }
  TypedData_Get_Struct(self, grpc_rb_call, &grpc_call_data_type, call);
  creds = grpc_rb_get_wrapped_call_credentials(credentials);
  err = grpc_call_set_credentials(call->wrapped, creds);
  if (err != GRPC_CALL_OK) {
    rb_raise(grpc_rb_eCallError,
             "grpc_call_set_credentials failed with %s (code=%d)",
             grpc_call_error_detail_of(err), err);
  }
  /* We need the credentials to be alive for as long as the call is alive,
     but we don't care about the destruction order. */
  rb_ivar_set(self, id_credentials, credentials);
  return Qnil;
}
// Refreshes the cached current-thread pointer from the Ruby runtime and
// returns it via the regular GET_THREAD() accessor.
rb_thread_t *GET_THREAD2(void) {
    ruby_current_thread = ((rb_thread_t *)RTYPEDDATA_DATA(rb_thread_current()));
    return GET_THREAD();
}
/*
 * call-seq:
 *    Kernel.backtrace_includes?( method_or_object, ... ) -> true or false
 *    Kernel.backtrace_includes?( number_of_frames, method_or_object, ... ) -> true or false
 *
 * Returns whether the specified methods, objects, or classes are in the current backtrace context.
 * Kernel.backtrace_includes? begins with the prior frame, so asking if the backtrace includes the current method
 * will only report true if the current method is part of the earlier call chain.
 */
VALUE rb_RPRuby_Sender_Kernel_backtrace_includes( int argc, VALUE* args, VALUE rb_self ) {

    // this function is also used for:
    // * backtrace_includes_one_of?
    // * backtrace_includes_frame?
    // * backtrace_includes_one_of_frames?

    // create tracking array
    VALUE rb_tracking_array = rb_ary_new();

    // populate tracking array with methods/objects;
    // optionally, a leading flag arg adjusts what we match and what we return
    int c_which_arg = 0;

    // Qnil flag: look for one of the args instead of all of the args
    BOOL c_requires_all_items = TRUE;
    if ( args[ 0 ] == Qnil || ( argc > 1 && args[ 1 ] == Qnil ) ) {
        c_which_arg++;
        c_requires_all_items = FALSE;
    }
    // Qfalse flag: return the matching frame instead of true/false
    BOOL c_return_frame = FALSE;
    if ( args[ 0 ] == Qfalse || ( argc > 1 && args[ 1 ] == Qfalse ) ) {
        c_which_arg++;
        c_return_frame = TRUE;
    }
    // Qtrue flag: return all matching frames
    BOOL c_return_all_frames = FALSE;
    if ( args[ 0 ] == Qtrue || ( argc > 1 && args[ 1 ] == Qtrue ) ) {
        c_which_arg++;
        c_return_all_frames = TRUE;
    }
    int c_args_offset = c_which_arg;
    for ( ; c_which_arg < argc ; c_which_arg++ ) {
        rb_ary_push( rb_tracking_array, args[ c_which_arg ] );
    }

    rb_thread_t* c_thread = (rb_thread_t *)RTYPEDDATA_DATA(rb_thread_current());

    // Get the current frame - we're doing a backtrace, so our working frame to start is the first previous frame
    rb_control_frame_t* c_current_context_frame = RUBY_VM_PREVIOUS_CONTROL_FRAME( RUBY_VM_PREVIOUS_CONTROL_FRAME( c_thread->cfp ) );

    // c_top_of_control_frame describes the top edge of the stack trace;
    // set c_top_of_control_frame to the first frame in <main>
    rb_control_frame_t* c_top_of_control_frame = RUBY_VM_NEXT_CONTROL_FRAME( RUBY_VM_NEXT_CONTROL_FRAME( (void *)( c_thread->stack + c_thread->stack_size ) ) );

    // the keys a frame hash can be tested against
    VALUE rb_test_index_array = rb_ary_new();
    // :object - instance or class
    rb_ary_push( rb_test_index_array, ID2SYM( rb_intern( "object" ) ) );
    // :method
    rb_ary_push( rb_test_index_array, ID2SYM( rb_intern( "method" ) ) );
    // :file
    rb_ary_push( rb_test_index_array, ID2SYM( rb_intern( "file" ) ) );
    // :line
    rb_ary_push( rb_test_index_array, ID2SYM( rb_intern( "line" ) ) );

    // only used if c_return_all_frames == TRUE
    VALUE rb_frame_hashes_array = Qnil;
    if ( c_return_all_frames == TRUE ) {
        rb_frame_hashes_array = rb_ary_new();
    }

    VALUE rb_frame_hash = Qnil;

    // for each control frame:
    while ( c_current_context_frame < c_top_of_control_frame ) {

        // iterate each tracked member
        int c_which_member;
        for ( c_which_member = 0 ; c_which_member < RARRAY_LEN( rb_tracking_array ) ; c_which_member++ ) {

            VALUE rb_this_arg = args[ c_which_member + c_args_offset ];
            BOOL matched = FALSE;

            rb_frame_hash = rb_RPRuby_Sender_Kernel_internal_backtraceHashForControlFrame( & c_current_context_frame );

            // if we have a hash we are testing multiple items in a frame
            if ( TYPE( rb_this_arg ) == T_HASH ) {

                VALUE rb_frame_test_array = rb_obj_clone( rb_test_index_array );

                // for each element that we could test for
                int c_which_index;
                int c_skipped_index_count = 0;
                for ( c_which_index = 0 ; c_which_index < RARRAY_LEN( rb_frame_test_array ) ; c_which_index++ ) {

                    VALUE rb_this_index = RARRAY_PTR( rb_frame_test_array )[ c_which_index ];

                    // see if our requested test hash includes the potential test element
                    if ( rb_hash_lookup( rb_this_arg, rb_this_index ) != Qnil ) {

                        VALUE rb_required_element = rb_hash_aref( rb_this_arg, rb_this_index );
                        VALUE rb_frame_element = rb_hash_aref( rb_frame_hash, rb_this_index );

                        // if it does, we need to see if the current frame's element matches this element
                        VALUE rb_required_element_klass;
                        if ( rb_required_element == rb_frame_element
                            // if we have a string, which is a filename
                            || ( TYPE( rb_required_element ) == T_STRING
                                && rb_funcall( rb_frame_element, rb_intern( "==" ), 1, rb_required_element ) == Qtrue )
                            // if we have a class, which is a special case for :object
                            || ( rb_this_index == ID2SYM( rb_intern( "class" ) )
                                && ( rb_required_element_klass = ( ( TYPE( rb_required_element ) == T_CLASS )
                                    ? rb_required_element
                                    : rb_funcall( rb_required_element, rb_intern( "class" ), 0 ) ) )
                                && rb_required_element_klass == rb_required_element ) ) {

                            rb_ary_delete_at( rb_frame_test_array, c_which_index );
                            c_which_index--;
                        }
                    }
                    else {
                        c_skipped_index_count++;
                    }

                    if ( RARRAY_LEN( rb_frame_test_array ) == c_skipped_index_count ) {
                        if ( c_return_frame == TRUE ) {
                            return rb_frame_hash;
                        }
                        else if ( c_return_all_frames == TRUE ) {
                            rb_ary_push( rb_frame_hashes_array, rb_frame_hash );
                        }
                        else {
                            return Qtrue;
                        }
                    }
                }
            }
            else {
                // :object => <class:instance>
                if ( TYPE( rb_this_arg ) == T_OBJECT ) {
                    if ( rb_hash_aref( rb_frame_hash, ID2SYM( rb_intern( "object" ) ) ) == rb_this_arg ) {
                        matched = TRUE;
                    }
                }
                // :object => <class>
                else if ( TYPE( rb_this_arg ) == T_CLASS ) {
                    VALUE rb_frame_object = rb_hash_aref( rb_frame_hash, ID2SYM( rb_intern( "object" ) ) );
                    VALUE rb_frame_object_klass = TYPE( rb_frame_object ) == T_CLASS
                        ? rb_frame_object
                        : rb_funcall( rb_frame_object, rb_intern( "class" ), 0 );
                    if ( rb_frame_object_klass == rb_this_arg ) {
                        matched = TRUE;
                    }
                }
                // :method => :method
                else if ( TYPE( rb_this_arg ) == T_SYMBOL ) {
                    if ( rb_hash_aref( rb_frame_hash, ID2SYM( rb_intern( "method" ) ) ) == rb_this_arg ) {
                        matched = TRUE;
                    }
                }
                // :file => "filename"
                else if ( TYPE( rb_this_arg ) == T_STRING ) {
                    VALUE rb_filename = rb_hash_aref( rb_frame_hash, ID2SYM( rb_intern( "file" ) ) );
                    VALUE rb_comparison = rb_funcall( rb_filename, rb_intern( "==" ), 1, rb_this_arg );
                    if ( rb_comparison == Qtrue ) {
                        matched = TRUE;
                    }
                }
                // :line => number
                else if ( TYPE( rb_this_arg ) == T_FIXNUM ) {
                    if ( rb_hash_aref( rb_frame_hash, ID2SYM( rb_intern( "line" ) ) ) == rb_this_arg ) {
                        matched = TRUE;
                    }
                }

                // if the array member exists in the frame, remove it from the array
                if ( matched ) {
                    if ( c_requires_all_items == FALSE ) {
                        if ( c_return_frame == TRUE ) {
                            return rb_frame_hash;
                        }
                        else {
                            return Qtrue;
                        }
                    }
                    else {
                        // delete this index
                        rb_ary_delete_at( rb_tracking_array, c_which_member );
                        // decrement the loop iterator so that the increase is offset;
                        // this is necessary since we just removed an index and are iterating vs. the length of the array
                        c_which_member--;
                    }
                }
            }
        }

        // if the array is empty, return true;
        // we check here as well as at the end so we can stop iterating the backtrace once we find all our items
        if ( RARRAY_LEN( rb_tracking_array ) == 0 ) {
            if ( c_return_frame == TRUE ) {
                return rb_frame_hash;
            }
            else if ( c_return_all_frames == TRUE ) {
                rb_ary_push( rb_frame_hashes_array, rb_frame_hash );
                return rb_frame_hashes_array;
            }
            else {
                return Qtrue;
            }
        }

        c_current_context_frame = RUBY_VM_PREVIOUS_CONTROL_FRAME( c_current_context_frame );
    }

    if ( c_return_all_frames == TRUE && RARRAY_LEN( rb_frame_hashes_array ) > 0 ) {
        return rb_frame_hashes_array;
    }
    // if we finish iterating frames and still have items in the array, return false
    else if ( RARRAY_LEN( rb_tracking_array ) > 0 ) {
        if ( c_return_frame == TRUE ) {
            return Qnil;
        }
        else {
            return Qfalse;
        }
    }
    // otherwise, return true
    else if ( c_return_frame == TRUE ) {
        return rb_frame_hash;
    }
    else {
        return Qtrue;
    }

    // we don't get here
    return Qnil;
}
void *rb_check_typeddata(VALUE value, const rb_data_type_t *data_type) {
    // TODO CS 24-Sep-2016 we're supposed to do some error checking here
    return RTYPEDDATA_DATA(value);
}
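/* For reference, the check the TODO above alludes to would look roughly like
   the following sketch, modeled on CRuby's own rb_check_typeddata. It is
   simplified: real CRuby also walks data_type->parent so that derived
   TypedData types are accepted. */
void *rb_check_typeddata_sketch(VALUE value, const rb_data_type_t *data_type) {
    // reject non-TypedData objects and TypedData of the wrong type
    if (!RTYPEDDATA_P(value) || RTYPEDDATA_TYPE(value) != data_type) {
        rb_raise(rb_eTypeError, "wrong argument type (expected %s)",
                 data_type->wrap_struct_name);
    }
    return RTYPEDDATA_DATA(value);
}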
void Init_ClassCore(VALUE vmethod) {
    // extract the C function behind the passed-in Method object
    // (expected to be a CFUNC-backed method such as Object#method)
    struct METHOD *method = (struct METHOD*)RTYPEDDATA_DATA(vmethod);
    rb_obj_method = method->me.def->body.cfunc.func;
    // cache Module#instance_method for later lookups
    rb_mod_instance_method = get_method(rb_cObject, "instance_method");
}