Example #1
static void
init_copy(mrb_state *mrb, mrb_value dest, mrb_value obj)
{
    //if (OBJ_FROZEN(dest)) {
    //    rb_raise(rb_eTypeError, "[bug] frozen object (%s) allocated", rb_obj_classname(dest));
    //}
    //RBASIC(dest)->flags &= ~(T_MASK|FL_EXIVAR);
    //RBASIC(dest)->flags |= RBASIC(obj)->flags & (T_MASK|FL_EXIVAR|FL_TAINT);
    //if (FL_TEST(obj, FL_EXIVAR)) {
    //    mrb_copy_generic_ivar(dest, obj);
    //}
    //mrb_gc_copy_finalizer(dest, obj);
    switch (mrb_type(obj)) {
      case MRB_TT_OBJECT:
      case MRB_TT_CLASS:
      case MRB_TT_MODULE:
        /* Clear any ivar table the fresh object carries, then share the
           source object's table pointer: a shallow copy of the ivars. */
        if (ROBJECT(dest)->iv) {
            ROBJECT(dest)->iv = 0;
        }
        if (ROBJECT(obj)->iv) {
            ROBJECT(dest)->iv = ROBJECT(obj)->iv;
        }
        break;

      default:
        break;
    }
    mrb_funcall(mrb, dest, "initialize_copy", 1, obj);
}
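For context, a dup-style entry point could drive init_copy as in the hedged sketch below. mrb_obj_alloc, mrb_obj_class, and mrb_obj_value are real mruby APIs, but the wrapper itself (my_obj_dup) is illustrative and not part of the source above.

static mrb_value
my_obj_dup(mrb_state *mrb, mrb_value obj)
{
    /* Allocate a fresh object of the same type and class, then let
       init_copy transfer state and dispatch initialize_copy. */
    struct RBasic *p = mrb_obj_alloc(mrb, mrb_type(obj), mrb_obj_class(mrb, obj));
    mrb_value dup = mrb_obj_value(p);
    init_copy(mrb, dup, obj);
    return dup;
}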
Example #2
/*
  call-seq:
     call.invoke(completion_queue, metadata_read_tag, finished_tag, flags=nil)

   Invoke the RPC. Starts sending metadata and request headers on the wire.
   flags is a bit-field combination of the write flags defined above.
   REQUIRES: Can be called at most once per call.
             Can only be called on the client.
   Produces a GRPC_INVOKE_ACCEPTED event on completion. */
static VALUE grpc_rb_call_invoke(int argc, VALUE *argv, VALUE self) {
  VALUE cqueue = Qnil;
  VALUE metadata_read_tag = Qnil;
  VALUE finished_tag = Qnil;
  VALUE flags = Qnil;
  grpc_call *call = NULL;
  grpc_completion_queue *cq = NULL;
  grpc_call_error err;

  /* "31" == 3 mandatory args, 1 (flags) is optional */
  rb_scan_args(argc, argv, "31", &cqueue, &metadata_read_tag, &finished_tag,
               &flags);
  if (NIL_P(flags)) {
    flags = UINT2NUM(0); /* Default to no flags */
  }
  cq = grpc_rb_get_wrapped_completion_queue(cqueue);
  Data_Get_Struct(self, grpc_call, call);
  err = grpc_call_invoke_old(call, cq, ROBJECT(metadata_read_tag),
                             ROBJECT(finished_tag), NUM2UINT(flags));
  if (err != GRPC_CALL_OK) {
    rb_raise(rb_eCallError, "invoke failed: %s (code=%d)",
             grpc_call_error_detail_of(err), err);
  }

  /* Add the completion queue as an instance attribute, prevents it from being
   * GCed until this call object is GCed */
  rb_ivar_set(self, id_cq, cqueue);

  return Qnil;
}
Example #3
/* call-seq:
 *   swap(other) -> self
 * 
 * Swap the contents of the receiver with +other+:
 *
 *   a = []
 *   b = {}
 *   a.swap(b) # => {}
 *   a # => {}
 *   b # => []
 *
 * A Class may only be swapped with another Class, and a Module only
 * with another Module; a Class or Module cannot be swapped with any
 * other kind of object. Immediate values cannot be swapped at all.
 * If an invalid swap attempt is detected, a +TypeError+ is raised. */
static VALUE evilr_swap(VALUE self, VALUE other) {
  char tmp[OBJECT_SIZE];
  evilr__check_immediates(self, other);
  if ((BUILTIN_TYPE(self) == T_MODULE || BUILTIN_TYPE(self) == T_CLASS ||
       BUILTIN_TYPE(other) == T_MODULE || BUILTIN_TYPE(other) == T_CLASS) &&
       BUILTIN_TYPE(self) != BUILTIN_TYPE(other)) {
    rb_raise(rb_eTypeError, "incompatible types used");
  }
  memcpy(tmp, ROBJECT(self), OBJECT_SIZE);
  memcpy(ROBJECT(self), ROBJECT(other), OBJECT_SIZE);
  memcpy(ROBJECT(other), tmp, OBJECT_SIZE);
  return self;
}
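A hedged sketch of how such a function is typically exposed to Ruby: rb_define_method is the standard C extension API, while the Init_ function name below is assumed, not taken from the project's source.

void
Init_swap_example(void)
{
    /* Expose the C implementation as Object#swap with one argument. */
    rb_define_method(rb_cObject, "swap", evilr_swap, 1);
}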
Example #4
static VALUE callwith_cleanup(VALUE self)
{
  /* We don't want to keep the ivar table pointer around indefinitely,
   * because if we do, the GC will free the ivar table, which is
   * undesirable, since the original object still references it.  So we
   * set the ivar table back to something that won't get freed, instead.
   */

  NEWOBJ(dummy, struct RObject);
  OBJSETUP(dummy, rb_cCallWith, T_OBJECT);

  struct RBasic basic = *(RBASIC(self));
  *(ROBJECT(self)) = *(ROBJECT(dummy));
  *(RBASIC(self)) = basic;
  return Qnil;
}
Example #5
/* Watch for a change in connectivity state.

   Once the channel connectivity state is different from the last observed
   state, tag will be enqueued on cq with success=1

   If deadline expires BEFORE the state is changed, tag will be enqueued on
   the completion queue with success=0 */
static VALUE grpc_rb_channel_watch_connectivity_state(VALUE self,
                                                      VALUE last_state,
                                                      VALUE cqueue,
                                                      VALUE deadline,
                                                      VALUE tag) {
  grpc_rb_channel *wrapper = NULL;
  grpc_channel *ch = NULL;
  grpc_completion_queue *cq = NULL;

  cq = grpc_rb_get_wrapped_completion_queue(cqueue);
  TypedData_Get_Struct(self, grpc_rb_channel, &grpc_channel_data_type, wrapper);
  ch = wrapper->wrapped;
  if (ch == NULL) {
    rb_raise(rb_eRuntimeError, "closed!");
    return Qnil;
  }
  grpc_channel_watch_connectivity_state(
      ch,
      (grpc_connectivity_state)NUM2LONG(last_state),
      grpc_rb_time_timeval(deadline, /* absolute time */ 0),
      cq,
      ROBJECT(tag));

  return Qnil;
}
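At the C level the same watch API is typically driven in a loop until the desired state is reached. The sketch below uses the real core functions grpc_channel_check_connectivity_state and grpc_completion_queue_next, but the wait_until_ready helper itself is hypothetical.

static void
wait_until_ready(grpc_channel *ch, grpc_completion_queue *cq, void *tag)
{
    grpc_connectivity_state st =
        grpc_channel_check_connectivity_state(ch, 1 /* try_to_connect */);
    while (st != GRPC_CHANNEL_READY) {
        grpc_channel_watch_connectivity_state(
            ch, st, gpr_inf_future(GPR_CLOCK_REALTIME), cq, tag);
        /* Block until the state changes; with an infinite deadline the
           event can only be the success=1 state-change notification. */
        grpc_completion_queue_next(cq, gpr_inf_future(GPR_CLOCK_REALTIME), NULL);
        st = grpc_channel_check_connectivity_state(ch, 0);
    }
}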
Example #6
/* call-seq:
   cq = CompletionQueue.new
   tag = Object.new
   timeout = 10
   server.request_call(cq, tag, timeout)

   Requests notification of a new call on a server. */
static VALUE grpc_rb_server_request_call(VALUE self, VALUE cqueue,
                                         VALUE tag_new, VALUE timeout) {
  grpc_rb_server *s = NULL;
  grpc_call *call = NULL;
  grpc_event *ev = NULL;
  grpc_call_error err;
  request_call_stack st;
  VALUE result;
  Data_Get_Struct(self, grpc_rb_server, s);
  if (s->wrapped == NULL) {
    rb_raise(rb_eRuntimeError, "closed!");
    return Qnil;
  } else {
    grpc_request_call_stack_init(&st);
    /* call grpc_server_request_call, then wait for it to complete using
     * pluck_event */
    err = grpc_server_request_call(
        s->wrapped, &call, &st.details, &st.md_ary,
        grpc_rb_get_wrapped_completion_queue(cqueue),
        ROBJECT(tag_new));
    if (err != GRPC_CALL_OK) {
      grpc_request_call_stack_cleanup(&st);
      rb_raise(grpc_rb_eCallError,
              "grpc_server_request_call failed: %s (code=%d)",
               grpc_call_error_detail_of(err), err);
      return Qnil;
    }
    ev = grpc_rb_completion_queue_pluck_event(cqueue, tag_new, timeout);
    if (ev == NULL) {
      grpc_request_call_stack_cleanup(&st);
      return Qnil;
    }
    if (ev->data.op_complete != GRPC_OP_OK) {
      grpc_request_call_stack_cleanup(&st);
      grpc_event_finish(ev);
      rb_raise(grpc_rb_eCallError, "request_call completion failed: (code=%d)",
               ev->data.op_complete);
      return Qnil;
    }

    /* build the NewServerRpc struct result */
    result = rb_struct_new(
        grpc_rb_sNewServerRpc,
        rb_str_new2(st.details.method),
        rb_str_new2(st.details.host),
        rb_funcall(rb_cTime, id_at, 2, INT2NUM(st.details.deadline.tv_sec),
                   INT2NUM(st.details.deadline.tv_nsec)),
        grpc_rb_md_ary_to_h(&st.md_ary),
        grpc_rb_wrap_call(call),
        NULL);
    grpc_event_finish(ev);
    grpc_request_call_stack_cleanup(&st);
    return result;
  }
  return Qnil;
}
Example #7
/* call-seq:
   cq = CompletionQueue.new
   tag = Object.new
   timeout = 10
   server.request_call(cq, tag, timeout)

   Requests notification of a new call on a server. */
static VALUE grpc_rb_server_request_call(VALUE self, VALUE cqueue,
        VALUE tag_new, VALUE timeout) {
    grpc_rb_server *s = NULL;
    grpc_call *call = NULL;
    grpc_event ev;
    grpc_call_error err;
    request_call_stack st;
    VALUE result;
    gpr_timespec deadline;
    TypedData_Get_Struct(self, grpc_rb_server, &grpc_rb_server_data_type, s);
    if (s->wrapped == NULL) {
        rb_raise(rb_eRuntimeError, "destroyed!");
        return Qnil;
    } else {
        grpc_request_call_stack_init(&st);
        /* call grpc_server_request_call, then wait for it to complete using
         * pluck_event */
        err = grpc_server_request_call(
                  s->wrapped, &call, &st.details, &st.md_ary,
                  grpc_rb_get_wrapped_completion_queue(cqueue),
                  grpc_rb_get_wrapped_completion_queue(cqueue),
                  ROBJECT(tag_new));
        if (err != GRPC_CALL_OK) {
            grpc_request_call_stack_cleanup(&st);
            rb_raise(grpc_rb_eCallError,
                     "grpc_server_request_call failed: %s (code=%d)",
                     grpc_call_error_detail_of(err), err);
            return Qnil;
        }

        ev = grpc_rb_completion_queue_pluck_event(cqueue, tag_new, timeout);
        if (ev.type == GRPC_QUEUE_TIMEOUT) {
            grpc_request_call_stack_cleanup(&st);
            return Qnil;
        }
        if (!ev.success) {
            grpc_request_call_stack_cleanup(&st);
            rb_raise(grpc_rb_eCallError, "request_call completion failed");
            return Qnil;
        }

        /* build the NewServerRpc struct result */
        deadline = gpr_convert_clock_type(st.details.deadline, GPR_CLOCK_REALTIME);
        result = rb_struct_new(
                     grpc_rb_sNewServerRpc, rb_str_new2(st.details.method),
                     rb_str_new2(st.details.host),
                     rb_funcall(rb_cTime, id_at, 2, INT2NUM(deadline.tv_sec),
                                INT2NUM(deadline.tv_nsec)),
                     grpc_rb_md_ary_to_h(&st.md_ary), grpc_rb_wrap_call(call), NULL);
        grpc_request_call_stack_cleanup(&st);
        return result;
    }
    return Qnil;
}
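Both request_call variants lean on a small stack record that this page does not show. A hedged reconstruction is below; grpc_call_details and grpc_metadata_array are real core types, but the exact layout in the binding may differ.

typedef struct request_call_stack {
    grpc_call_details details;  /* receives method, host and deadline */
    grpc_metadata_array md_ary; /* receives the client's initial metadata */
} request_call_stack;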
Example #8
/* No more messages to send.
   REQUIRES: No other writes are pending on the call. */
static VALUE grpc_rb_call_writes_done(VALUE self, VALUE tag) {
  grpc_call *call = NULL;
  grpc_call_error err;
  Data_Get_Struct(self, grpc_call, call);
  err = grpc_call_writes_done_old(call, ROBJECT(tag));
  if (err != GRPC_CALL_OK) {
    rb_raise(rb_eCallError, "writes done: %s (code=%d)",
             grpc_call_error_detail_of(err), err);
  }

  return Qnil;
}
Example #9
PRIMITIVE VALUE
vm_ivar_get(VALUE obj, ID name, void *cache_p)
{
    struct icache *cache = (struct icache *)cache_p;
    VALUE klass = 0;

    if (!SPECIAL_CONST_P(obj)) {
	klass = *(VALUE *)obj;
	if (klass == cache->klass) {
	    if ((unsigned int)cache->slot < ROBJECT(obj)->num_slots) {
		rb_object_ivar_slot_t *slot;
use_slot:
		slot = &ROBJECT(obj)->slots[cache->slot];
		if (slot->name == name) {
		    VALUE val = slot->value;
		    if (val == Qundef) {
			val = Qnil;
		    }
		    return val;
		}
	    }
	    goto find_slot;
	}
    }

    if (cache->slot == SLOT_CACHE_VIRGIN) {
	int slot;
find_slot:
	slot = rb_vm_get_ivar_slot(obj, name, true);
	if (slot >= 0) {
	    cache->klass = *(VALUE *)obj;
	    cache->slot = slot;
	    goto use_slot;
	}
	cache->klass = 0;
	cache->slot = SLOT_CACHE_CANNOT;
    }

    return rb_ivar_get(obj, name);
}
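vm_ivar_get above and vm_ivar_set below share a per-call-site cache record. A hedged reconstruction of what that record must contain is sketched here; the actual definition and sentinel values in the original source may differ.

#define SLOT_CACHE_VIRGIN (-1) /* assumed: cache never primed */
#define SLOT_CACHE_CANNOT (-2) /* assumed: object cannot use slots */

struct icache {
    VALUE klass; /* class for which the cached slot index is valid */
    int slot;    /* ivar slot index, or one of the sentinels above */
};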
Example #10
PRIMITIVE void
vm_ivar_set(VALUE obj, ID name, VALUE val, void *cache_p)
{
    struct icache *cache = (struct icache *)cache_p; 
    VALUE klass = 0;

    if (!SPECIAL_CONST_P(obj)) {
	klass = *(VALUE *)obj;
	if (klass == cache->klass) {
	    if ((unsigned int)cache->slot < ROBJECT(obj)->num_slots) {
		rb_object_ivar_slot_t *slot;
use_slot:
		slot = &ROBJECT(obj)->slots[cache->slot];
		if (slot->name == name) {
		    if ((ROBJECT(obj)->basic.flags & FL_FREEZE) == FL_FREEZE) {
			rb_error_frozen("object");
		    }
		    GC_WB(&slot->value, val);
		    return;
		}
	    }
	    goto find_slot;
	}
    }

    if (cache->slot == SLOT_CACHE_VIRGIN) {
	int slot;
find_slot:
	slot = rb_vm_get_ivar_slot(obj, name, true);
	if (slot >= 0) {
	    cache->klass = *(VALUE *)obj;
	    cache->slot = slot;
	    goto use_slot;
	}
	cache->slot = SLOT_CACHE_CANNOT;
    }

    rb_ivar_set(obj, name, val);
}
Example #11
/* call-seq:
 *   swap_instance_variables(other) -> self
 * 
 * Swaps only the instance variables of the receiver and +other+.
 * You can only swap the instance variables between two objects that
 * use the internal type number T_OBJECT, or between Classes and Modules.
 * You cannot swap instance variables of immediate values, since they
 * do not have instance variables. Invalid swap attempts will raise
 * +TypeError+. */
static VALUE evilr_swap_instance_variables(VALUE self, VALUE other) {
#ifndef RUBY19
  struct st_table *tmp;
#endif
  evilr__check_immediates(self, other);

  switch(BUILTIN_TYPE(self)) {
    case T_OBJECT:
      if (BUILTIN_TYPE(other) != T_OBJECT) {
        goto bad_types;
      }
      break;
    case T_MODULE:
    case T_CLASS:
      if (BUILTIN_TYPE(other) != T_MODULE && BUILTIN_TYPE(other) != T_CLASS) {
        goto bad_types;
      }
      break;
    default:
bad_types:
      rb_raise(rb_eTypeError, "incompatible types used");
  }

#ifdef RUBY19
  if (BUILTIN_TYPE(self) == T_MODULE || BUILTIN_TYPE(self) == T_CLASS) {
    struct st_table *tmp;
    tmp = RCLASS_IV_TBL(self);
    RCLASS(self)->ptr->iv_tbl = RCLASS_IV_TBL(other);
    RCLASS(other)->ptr->iv_tbl = tmp;
  } else {
    char tmp[OBJECT_SIZE];
    memcpy(tmp, &(ROBJECT(self)->as), sizeof(ROBJECT(self)->as));
    memcpy(&(ROBJECT(self)->as), &(ROBJECT(other)->as), sizeof(ROBJECT(self)->as));
    memcpy(&(ROBJECT(other)->as), tmp, sizeof(ROBJECT(other)->as));
  }
#else
  /* RClass and RObject have iv_tbl at same position in the structure
   * so no funny business is needed */
  tmp = ROBJECT_IVPTR(self);
  ROBJECT(self)->iv_tbl = ROBJECT_IVPTR(other);
  ROBJECT(other)->iv_tbl = tmp;
#endif
  return self;
}
Example #12
static VALUE callwith_s_create(VALUE klass, VALUE obj, VALUE self_obj)
{
  /* Create a new CallWith object, bypassing the usual object creation,
   * because the CallWith class is not a normal class. */
  NEWOBJ(with, struct RObject);
  OBJSETUP(with, klass, T_OBJECT);
  VALUE self = (VALUE)with;

  /* Place our delegate objects into the singleton class so we can
   * access them later */
  VALUE s = rb_singleton_class(self);
  rb_iv_set(s, "__with_obj__", obj);
  rb_iv_set(s, "__with_self_obj__", self_obj);

  /* Copy the pointer to the instance variable table from self_obj.  As
   * long as we hold a reference to self_obj, this pointer should be
   * valid. */
  struct RBasic basic = *(RBASIC(self));
  *(ROBJECT(self)) = *(ROBJECT(self_obj));
  *(RBASIC(self)) = basic;

  return self;
}
Example #13
/* Queue a status for writing.

   call-seq:
      tag = Object.new
      call.write_status(200, "OK", tag)

   REQUIRES: No other writes are pending on the call. It is only safe to
   start the next write after the corresponding write_accepted event
   is received.
   GRPC_INVOKE_ACCEPTED must have been received by the application
   prior to calling this.
   Only callable on the server.
   Produces a GRPC_FINISHED event when the status is sent and the stream is
   fully closed */
static VALUE grpc_rb_call_start_write_status(VALUE self, VALUE code,
                                             VALUE status, VALUE tag) {
  grpc_call *call = NULL;
  grpc_call_error err;
  Data_Get_Struct(self, grpc_call, call);
  err = grpc_call_start_write_status_old(call, NUM2UINT(code),
                                         StringValueCStr(status), ROBJECT(tag));
  if (err != GRPC_CALL_OK) {
    rb_raise(rb_eCallError, "start write status: %s (code=%d)",
             grpc_call_error_detail_of(err), err);
  }

  return Qnil;
}
Example #14
static VALUE class_variable_hash(VALUE module)
{
  VALUE class_variables = rb_hash_new();
#if RUBY_VERSION_CODE < 190
  struct st_table * iv_tbl = ROBJECT(module)->iv_tbl;
  if (iv_tbl)
  {
    st_foreach(iv_tbl, add_var_to_hash, class_variables);
  }
#else
  rb_ivar_foreach(module, add_var_to_hash, class_variables);
#endif
  return class_variables;
}
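Both branches iterate with the same callback shape. A hedged sketch of what add_var_to_hash plausibly looks like is below; the real callback lives elsewhere in the project.

static int
add_var_to_hash(ID key, VALUE value, VALUE hash)
{
  /* Store each variable under its symbol name and keep iterating. */
  rb_hash_aset(hash, ID2SYM(key), value);
  return ST_CONTINUE;
}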
Example #15
/* Blocks until the next event for given tag is available, and returns the
 * event. */
static VALUE grpc_rb_completion_queue_pluck(VALUE self, VALUE tag,
                                            VALUE timeout) {
  next_call_stack next_call;
  MEMZERO(&next_call, next_call_stack, 1);
  Data_Get_Struct(self, grpc_completion_queue, next_call.cq);
  next_call.timeout = grpc_rb_time_timeval(timeout, /* absolute time*/ 0);
  next_call.tag = ROBJECT(tag);
  next_call.event = NULL;
  rb_thread_call_without_gvl(grpc_rb_completion_queue_pluck_no_gil,
                             (void *)&next_call, NULL, NULL);
  if (next_call.event == NULL) {
    return Qnil;
  }
  return grpc_rb_new_event(next_call.event);
}
Example #16
/* call-seq:
     call.server_accept(completion_queue, finished_tag)

   Accept an incoming RPC, binding a completion queue to it.
   To be called before sending or receiving messages.

   REQUIRES: Can be called at most once per call.
             Can only be called on the server.
   Produces a GRPC_FINISHED event with finished_tag when the call has been
       completed (there may be other events for the call pending at this
       time) */
static VALUE grpc_rb_call_server_accept(VALUE self, VALUE cqueue,
                                        VALUE finished_tag) {
  grpc_call *call = NULL;
  grpc_completion_queue *cq = grpc_rb_get_wrapped_completion_queue(cqueue);
  grpc_call_error err;
  Data_Get_Struct(self, grpc_call, call);
  err = grpc_call_server_accept_old(call, cq, ROBJECT(finished_tag));
  if (err != GRPC_CALL_OK) {
    rb_raise(rb_eCallError, "server_accept failed: %s (code=%d)",
             grpc_call_error_detail_of(err), err);
  }

  /* Add the completion queue as an instance attribute, prevents it from being
   * GCed until this call object is GCed */
  rb_ivar_set(self, id_cq, cqueue);
  return Qnil;
}
Example #17
/* call-seq:
   cq = CompletionQueue.new
   ops = {
     GRPC::Core::CallOps::SEND_INITIAL_METADATA => <op_value>,
     GRPC::Core::CallOps::SEND_MESSAGE => <op_value>,
     ...
   }
   tag = Object.new
   timeout = 10
   call.start_batch(cq, tag, timeout, ops)

   Starts a batch of operations defined in the hash ops; when complete, posts
   a completion of type 'tag' to the completion queue bound to the call.

   Also waits for the batch to complete, until timeout is reached.
   The order of ops specified in the batch has no significance.
   Only one operation of each type can be active at once in any given
   batch */
static VALUE grpc_rb_call_run_batch(VALUE self, VALUE cqueue, VALUE tag,
                                    VALUE timeout, VALUE ops_hash) {
  run_batch_stack st;
  grpc_call *call = NULL;
  grpc_event *ev = NULL;
  grpc_call_error err;
  VALUE result = Qnil;
  Data_Get_Struct(self, grpc_call, call);

  /* Validate the ops args, adding them to a ruby array */
  if (TYPE(ops_hash) != T_HASH) {
    rb_raise(rb_eTypeError, "call#run_batch: ops hash should be a hash");
    return Qnil;
  }
  grpc_run_batch_stack_init(&st);
  grpc_run_batch_stack_fill_ops(&st, ops_hash);

  /* call grpc_call_start_batch, then wait for it to complete using
   * pluck_event */
  err = grpc_call_start_batch(call, st.ops, st.op_num, ROBJECT(tag));
  if (err != GRPC_CALL_OK) {
    grpc_run_batch_stack_cleanup(&st);
    rb_raise(grpc_rb_eCallError,
             "grpc_call_start_batch failed with %s (code=%d)",
             grpc_call_error_detail_of(err), err);
    return Qnil;
  }
  ev = grpc_rb_completion_queue_pluck_event(cqueue, tag, timeout);
  if (ev == NULL) {
    grpc_run_batch_stack_cleanup(&st);
    rb_raise(grpc_rb_eOutOfTime, "grpc_call_start_batch timed out");
    return Qnil;
  }
  if (ev->data.op_complete != GRPC_OP_OK) {
    grpc_run_batch_stack_cleanup(&st);
    rb_raise(grpc_rb_eCallError, "start_batch completion failed, (code=%d)",
             ev->data.op_complete);
    return;
  }

  /* Build and return the BatchResult struct result */
  result = grpc_run_batch_stack_build_result(&st);
  grpc_run_batch_stack_cleanup(&st);
  return result;
}
Example #18
/* call-seq:
   cq = CompletionQueue.new
   ops = {
     GRPC::Core::CallOps::SEND_INITIAL_METADATA => <op_value>,
     GRPC::Core::CallOps::SEND_MESSAGE => <op_value>,
     ...
   }
   tag = Object.new
   timeout = 10
   call.start_batch(cq, tag, timeout, ops)

   Starts a batch of operations defined in the hash ops; when complete, posts
   a completion of type 'tag' to the completion queue bound to the call.

   Also waits for the batch to complete, until timeout is reached.
   The order of ops specified in the batch has no significance.
   Only one operation of each type can be active at once in any given
   batch */
static VALUE grpc_rb_call_run_batch(VALUE self, VALUE cqueue, VALUE tag,
                                    VALUE timeout, VALUE ops_hash) {
  run_batch_stack st;
  grpc_call *call = NULL;
  grpc_event ev;
  grpc_call_error err;
  VALUE result = Qnil;
  VALUE rb_write_flag = rb_ivar_get(self, id_write_flag);
  unsigned write_flag = 0;
  TypedData_Get_Struct(self, grpc_call, &grpc_call_data_type, call);

  /* Validate the ops args, adding them to a ruby array */
  if (TYPE(ops_hash) != T_HASH) {
    rb_raise(rb_eTypeError, "call#run_batch: ops hash should be a hash");
    return Qnil;
  }
  if (rb_write_flag != Qnil) {
    write_flag = NUM2UINT(rb_write_flag);
  }
  grpc_run_batch_stack_init(&st, write_flag);
  grpc_run_batch_stack_fill_ops(&st, ops_hash);

  /* call grpc_call_start_batch, then wait for it to complete using
   * pluck_event */
  err = grpc_call_start_batch(call, st.ops, st.op_num, ROBJECT(tag), NULL);
  if (err != GRPC_CALL_OK) {
    grpc_run_batch_stack_cleanup(&st);
    rb_raise(grpc_rb_eCallError,
             "grpc_call_start_batch failed with %s (code=%d)",
             grpc_call_error_detail_of(err), err);
    return Qnil;
  }
  ev = grpc_rb_completion_queue_pluck_event(cqueue, tag, timeout);
  if (ev.type == GRPC_QUEUE_TIMEOUT) {
    grpc_run_batch_stack_cleanup(&st);
    rb_raise(grpc_rb_eOutOfTime, "grpc_call_start_batch timed out");
    return Qnil;
  }

  /* Build and return the BatchResult struct result,
     if there is an error, it's reflected in the status */
  result = grpc_run_batch_stack_build_result(&st);
  grpc_run_batch_stack_cleanup(&st);
  return result;
}
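grpc_run_batch_stack_fill_ops ultimately populates an array of core grpc_op records. The helper below is a hedged illustration of one such op using the real grpc_op layout; it is not the binding's actual code.

static void
fill_send_initial_metadata(grpc_op *op, unsigned write_flag)
{
  /* Queue an empty initial-metadata send with the caller's write flag. */
  memset(op, 0, sizeof(*op));
  op->op = GRPC_OP_SEND_INITIAL_METADATA;
  op->data.send_initial_metadata.count = 0;
  op->flags = write_flag;
  op->reserved = NULL;
}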
Example #19
/* Blocks until the next event for given tag is available, and returns the
 * event. */
grpc_event grpc_rb_completion_queue_pluck_event(VALUE self, VALUE tag,
        VALUE timeout) {
    next_call_stack next_call;
    MEMZERO(&next_call, next_call_stack, 1);
    TypedData_Get_Struct(self, grpc_completion_queue,
                         &grpc_rb_completion_queue_data_type, next_call.cq);
    if (TYPE(timeout) == T_NIL) {
        next_call.timeout = gpr_inf_future;
    } else {
        next_call.timeout = grpc_rb_time_timeval(timeout, /* absolute time*/ 0);
    }
    if (TYPE(tag) == T_NIL) {
        next_call.tag = NULL;
    } else {
        next_call.tag = ROBJECT(tag);
    }
    next_call.event.type = GRPC_QUEUE_TIMEOUT;
    rb_thread_call_without_gvl(grpc_rb_completion_queue_pluck_no_gil,
                               (void *)&next_call, NULL, NULL);
    return next_call.event;
}
Example #20
/*
  call-seq:
     call.start_write(byte_buffer, tag, flags=nil)

   Queue a byte buffer for writing.
   flags is a bit-field combination of the write flags defined above.
   A write with byte_buffer null is allowed, and will not send any bytes on the
   wire. If this is performed without GRPC_WRITE_BUFFER_HINT flag it provides
   a mechanism to flush any previously buffered writes to outgoing flow control.
   REQUIRES: No other writes are pending on the call. It is only safe to
             start the next write after the corresponding write_accepted event
             is received.
             GRPC_INVOKE_ACCEPTED must have been received by the application
             prior to calling this on the client. On the server,
             grpc_call_accept must have been called successfully.
   Produces a GRPC_WRITE_ACCEPTED event. */
static VALUE grpc_rb_call_start_write(int argc, VALUE *argv, VALUE self) {
  VALUE byte_buffer = Qnil;
  VALUE tag = Qnil;
  VALUE flags = Qnil;
  grpc_call *call = NULL;
  grpc_byte_buffer *bfr = NULL;
  grpc_call_error err;

  /* "21" == 2 mandatory args, 1 (flags) is optional */
  rb_scan_args(argc, argv, "21", &byte_buffer, &tag, &flags);
  if (NIL_P(flags)) {
    flags = UINT2NUM(0); /* Default to no flags */
  }
  bfr = grpc_rb_get_wrapped_byte_buffer(byte_buffer);
  Data_Get_Struct(self, grpc_call, call);
  err = grpc_call_start_write_old(call, bfr, ROBJECT(tag), NUM2UINT(flags));
  if (err != GRPC_CALL_OK) {
    rb_raise(rb_eCallError, "start write failed: %s (code=%d)",
             grpc_call_error_detail_of(err), err);
  }

  return Qnil;
}
Example #21
/* Blocks until the next event for given tag is available, and returns the
 * event. */
grpc_event grpc_rb_completion_queue_pluck_event(VALUE self, VALUE tag,
                                                VALUE timeout) {
  next_call_stack next_call;
  MEMZERO(&next_call, next_call_stack, 1);
  TypedData_Get_Struct(self, grpc_completion_queue,
                       &grpc_rb_completion_queue_data_type, next_call.cq);
  if (TYPE(timeout) == T_NIL) {
    next_call.timeout = gpr_inf_future(GPR_CLOCK_REALTIME);
  } else {
    next_call.timeout = grpc_rb_time_timeval(timeout, /* absolute time*/ 0);
  }
  if (TYPE(tag) == T_NIL) {
    next_call.tag = NULL;
  } else {
    next_call.tag = ROBJECT(tag);
  }
  next_call.event.type = GRPC_QUEUE_TIMEOUT;
  /* Loop until we finish a pluck without an interruption. The internal
     pluck function runs either until it is interrupted or it gets an
     event, or time runs out.

     The basic reason we need this relatively complicated construction is that
     we need to re-acquire the GVL when an interrupt comes in, so that the ruby
     interpreter can do what it needs to do with the interrupt. But we also need
     to get back to plucking when the interrupt has been handled. */
  do {
    next_call.interrupted = 0;
    rb_thread_call_without_gvl(grpc_rb_completion_queue_pluck_no_gil,
                               (void *)&next_call, unblock_func,
                               (void *)&next_call);
    /* If an interrupt prevented pluck from returning useful information, then
       any plucks that did complete must have timed out */
  } while (next_call.interrupted &&
           next_call.event.type == GRPC_QUEUE_TIMEOUT);
  return next_call.event;
}
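The worker handed to rb_thread_call_without_gvl is not shown on this page. A hedged sketch of what it plausibly does is below, built on the real grpc_completion_queue_pluck core call; the actual implementation also cooperates with unblock_func via the interrupted flag.

static void *
grpc_rb_completion_queue_pluck_no_gil(void *param) {
  next_call_stack *next_call = (next_call_stack *)param;
  /* Runs without the GVL: block in core pluck until the tagged event
     arrives or the deadline passes. */
  next_call->event = grpc_completion_queue_pluck(
      next_call->cq, next_call->tag, next_call->timeout, NULL);
  return NULL;
}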
Example #22
static size_t
memsize_of(VALUE obj)
{
    size_t size = 0;

    if (SPECIAL_CONST_P(obj)) {
	return 0;
    }

    if (FL_TEST(obj, FL_EXIVAR)) {
	size += rb_generic_ivar_memsize(obj);
    }

    switch (BUILTIN_TYPE(obj)) {
      case T_OBJECT:
	if (!(RBASIC(obj)->flags & ROBJECT_EMBED) &&
	    ROBJECT(obj)->as.heap.ivptr) {
	    size += ROBJECT(obj)->as.heap.numiv * sizeof(VALUE);
	}
	break;
      case T_MODULE:
      case T_CLASS:
	size += st_memsize(RCLASS_M_TBL(obj));
	if (RCLASS_IV_TBL(obj)) {
	    size += st_memsize(RCLASS_IV_TBL(obj));
	}
	if (RCLASS_IV_INDEX_TBL(obj)) {
	    size += st_memsize(RCLASS_IV_INDEX_TBL(obj));
	}
	if (RCLASS(obj)->ptr->iv_tbl) {
	    size += st_memsize(RCLASS(obj)->ptr->iv_tbl);
	}
	if (RCLASS(obj)->ptr->const_tbl) {
	    size += st_memsize(RCLASS(obj)->ptr->const_tbl);
	}
	size += sizeof(rb_classext_t);
	break;
      case T_STRING:
	size += rb_str_memsize(obj);
	break;
      case T_ARRAY:
	size += rb_ary_memsize(obj);
	break;
      case T_HASH:
	if (RHASH(obj)->ntbl) {
	    size += st_memsize(RHASH(obj)->ntbl);
	}
	break;
      case T_REGEXP:
	if (RREGEXP(obj)->ptr) {
	    size += onig_memsize(RREGEXP(obj)->ptr);
	}
	break;
      case T_DATA:
	size += rb_objspace_data_type_memsize(obj);
	break;
      case T_MATCH:
	if (RMATCH(obj)->rmatch) {
            struct rmatch *rm = RMATCH(obj)->rmatch;
	    size += sizeof(struct re_registers); /* TODO: onig_region_memsize(&rm->regs); */
	    size += sizeof(struct rmatch_offset) * rm->char_offset_num_allocated;
	    size += sizeof(struct rmatch);
	}
	break;
      case T_FILE:
	if (RFILE(obj)->fptr) {
	    size += rb_io_memsize(RFILE(obj)->fptr);
	}
	break;
      case T_RATIONAL:
      case T_COMPLEX:
	break;
      case T_ICLASS:
	/* iClass shares table with the module */
	break;

      case T_FLOAT:
	break;

      case T_BIGNUM:
	if (!(RBASIC(obj)->flags & RBIGNUM_EMBED_FLAG) && RBIGNUM_DIGITS(obj)) {
	    size += RBIGNUM_LEN(obj) * sizeof(BDIGIT);
	}
	break;
      case T_NODE:
	switch (nd_type(obj)) {
	  case NODE_SCOPE:
	    if (RNODE(obj)->u1.tbl) {
		/* TODO: xfree(RANY(obj)->as.node.u1.tbl); */
	    }
	    break;
	  case NODE_ALLOCA:
	    /* TODO: xfree(RANY(obj)->as.node.u1.node); */
	    ;
	}
	break;			/* no need to free iv_tbl */

      case T_STRUCT:
	if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) == 0 &&
	    RSTRUCT(obj)->as.heap.ptr) {
	    size += sizeof(VALUE) * RSTRUCT_LEN(obj);
	}
	break;

      case T_ZOMBIE:
	break;

      default:
	rb_bug("objspace/memsize_of(): unknown data type 0x%x(%p)",
	       BUILTIN_TYPE(obj), (void*)obj);
    }

    return size;
}
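A function like this is usually surfaced to Ruby through a thin wrapper; the sketch below is illustrative, with the wrapper name assumed (the stock objspace extension exposes its version as ObjectSpace.memsize_of).

static VALUE
memsize_of_m(VALUE self, VALUE obj)
{
    /* Convert the accumulated size_t into a Ruby Integer. */
    return SIZET2NUM(memsize_of(obj));
}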