Example No. 1
/* Event loop: block outside the GVL until the native library delivers a
 * callback, then hand each one to a fresh Ruby thread. */
static VALUE helium_event_thread(void *unused)
{
  struct helium_waiter_t waiter = {
    .callback = NULL,
    .abort = 0
  };

  while (waiter.abort == 0) {
    /* wait_for_callback blocks with the GVL released; stop_waiting is the
     * unblock function Ruby uses to interrupt the wait. */
    rb_thread_call_without_gvl(wait_for_callback, &waiter, &stop_waiting, &waiter);

    if (waiter.callback != NULL) {
      rb_thread_create(helium_callback_handler_thread, (void *)waiter.callback);
    }
  }

  return Qnil;
}

void Init_rbhelium()
{
  mHelium = rb_define_module("Helium");
  cConnection = rb_define_class_under(mHelium, "Connection", rb_cObject);
  rb_define_alloc_func(cConnection, helium_rb_allocate);
  rb_define_method(cConnection, "initialize", helium_rb_initialize, -1);
  rb_define_method(cConnection, "write", helium_rb_send, 3);
  rb_define_method(cConnection, "subscribe", helium_rb_subscribe, 2);
  rb_define_method(cConnection, "unsubscribe", helium_rb_unsubscribe, 1);
  rb_define_method(cConnection, "close", helium_rb_close, 0);
  rb_thread_create(helium_event_thread, NULL);
}
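The wait_for_callback / stop_waiting pair is not shown above. As a rough illustration of the contract rb_thread_call_without_gvl expects, here is a minimal sketch; the mutex and condition variable fields (mu, cv) on helium_waiter_t are assumptions made for the example, not the gem's actual layout.

#include <pthread.h>

/* Sketch only: assumes helium_waiter_t carries a pthread mutex (mu) and
 * condition variable (cv), which the real extension may not. */
static void *wait_for_callback(void *arg)
{
  struct helium_waiter_t *w = (struct helium_waiter_t *)arg;

  pthread_mutex_lock(&w->mu);
  /* The GVL is already released here, so blocking is safe. */
  while (w->callback == NULL && w->abort == 0)
    pthread_cond_wait(&w->cv, &w->mu);
  pthread_mutex_unlock(&w->mu);
  return NULL;
}

/* Unblock function: Ruby calls this (e.g. on interrupt or VM shutdown)
 * to wake the blocked waiter. */
static void stop_waiting(void *arg)
{
  struct helium_waiter_t *w = (struct helium_waiter_t *)arg;

  pthread_mutex_lock(&w->mu);
  w->abort = 1;
  pthread_cond_broadcast(&w->cv);
  pthread_mutex_unlock(&w->mu);
}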
Example No. 2
VALUE mtserver_start(VALUE self,VALUE ssl)
{
   rb_iv_set(self,"@shutdown",Qfalse);
   if(ssl!=Qnil) rb_iv_set(self,"@ssl",ssl);
   rb_funcall(self,rb_intern("_start"),0);
   rb_iv_set(self,"@socket_reader__thread",rb_thread_create(mtserver_socket_reader_thread,(void *) self));
   rb_iv_set(self,"@monitor_thread",rb_thread_create(mtserver_monitor_thread,(void *) self));
//   rb_funcall(rb_iv_get(self,"@run_thread"),rb_intern("priority="),1,INT2FIX(10));
   return self;
}
Example No. 3
VALUE Ruby_Service_Ctrl(VALUE self)
{
   while (WaitForSingleObject(hStopEvent,0) == WAIT_TIMEOUT)
    {
      __try
      {
         EnterCriticalSection(&csControlCode);

         // Check to see if anything interesting has been signaled
         if (waiting_control_code != IDLE_CONTROL_CODE)
         {
            if (waiting_control_code != SERVICE_CONTROL_STOP) {
                // If there is a code, create a Ruby thread to deal with it.
                // This might be over-engineering the solution, but I don't
                // want to block Service_Ctrl longer than necessary, and the
                // critical section would block it.
                VALUE EventHookHash = rb_ivar_get(self, rb_intern("@event_hooks"));

                if(EventHookHash != Qnil){
                   VALUE val = rb_hash_aref(EventHookHash, INT2NUM(waiting_control_code));

                   if(val != Qnil)
                      rb_thread_create(Service_Event_Dispatch, (void*) val);
                }
            }
            else {
               break;
            }
            waiting_control_code = IDLE_CONTROL_CODE;
         }
      }
      __finally
      {
         LeaveCriticalSection(&csControlCode);
      }

      // This is an ugly polling loop, be as polite as possible
      rb_thread_polling();
      __end_finally
   }

   // force service_stop call
   {
      VALUE EventHookHash = rb_ivar_get(self, rb_intern("@event_hooks"));

      if(EventHookHash != Qnil){
         VALUE val = rb_hash_aref(EventHookHash, INT2NUM(SERVICE_CONTROL_STOP));

         if(val!=Qnil)
            rb_thread_create(Service_Event_Dispatch, (void*) val);
      }
   }

   return Qnil;
}
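Service_Event_Dispatch is not shown here, but Example No. 14 below stores [receiver, method_id] pairs in @event_hooks, so a plausible sketch (inferred from that, not necessarily the original code) is:

/* Sketch: run the hooked handler stored as [receiver, method_id].
 * Inferred from how Example No. 14 populates @event_hooks; the real
 * implementation may differ. */
static VALUE Service_Event_Dispatch(void *arg)
{
   VALUE val = (VALUE)arg;
   VALUE receiver = rb_ary_entry(val, 0);
   ID method = (ID)NUM2INT(rb_ary_entry(val, 1));

   return rb_funcall(receiver, method, 0);
}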
Example No. 4
static VALUE thread_spec_rb_thread_create(VALUE self, VALUE proc, VALUE arg) {
  VALUE args = rb_ary_new();
  rb_ary_push(args, proc);
  rb_ary_push(args, arg);

  return rb_thread_create(thread_spec_call_proc, (void*)args);
}
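The thread_spec_call_proc helper is not shown; given that args packs the proc and its argument, a minimal sketch (assumed shape, not necessarily the actual spec helper) is:

/* Sketch: unpack [proc, arg] and call the proc with the argument.
 * Assumed shape; the actual spec helper may differ. */
static VALUE thread_spec_call_proc(void *arg_ptr)
{
  VALUE args = (VALUE)arg_ptr;
  VALUE proc = rb_ary_entry(args, 0);
  VALUE arg = rb_ary_entry(args, 1);

  return rb_funcall(proc, rb_intern("call"), 1, arg);
}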
Example No. 5
void response_process(connection_t *c)
{
  c->backend->thread_count++;
  /* call the Rack app in a Ruby green thread */
  c->thread.active = 1;
  c->thread.obj = rb_thread_create(response_run, (void*) c);
}
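Keeping the VALUE returned by rb_thread_create in c->thread.obj lets the server later mark or kill the thread. What response_run does is not shown; a hedged sketch, in which the app and env fields are hypothetical names invented for illustration, might be:

/* Sketch only: c->backend->app and c->env are hypothetical fields; the
 * real server's response_run differs. */
static VALUE response_run(void *arg)
{
  connection_t *c = (connection_t *)arg;

  /* Call the Rack app. Production code would wrap this in rb_protect so
   * the bookkeeping below still runs if the app raises. */
  rb_funcall(c->backend->app, rb_intern("call"), 1, c->env);

  c->thread.active = 0;
  c->backend->thread_count--;
  return Qnil;
}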
Example No. 6
void grpc_rb_event_queue_thread_start() {
  event_queue.head = event_queue.tail = NULL;
  event_queue.abort = false;
  gpr_mu_init(&event_queue.mu);
  gpr_cv_init(&event_queue.cv);

  rb_thread_create(grpc_rb_event_thread, NULL);
}
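grpc_rb_event_thread is not shown. Judging from the fields initialized above (a head/tail list, an abort flag, a gpr mutex and condition variable), the consumer is a drain loop along these lines. The event node layout is an assumption, and a production version would release the GVL around the wait (as Example No. 1 does) rather than block while holding it.

/* Sketch only: the node layout and callback invocation are assumed, not
 * taken from grpc's actual event thread source. */
typedef struct grpc_rb_event {
  void (*callback)(void *);
  void *argument;
  struct grpc_rb_event *next;
} grpc_rb_event;

static VALUE grpc_rb_event_thread(void *unused)
{
  (void)unused;
  for (;;) {
    grpc_rb_event *ev;

    gpr_mu_lock(&event_queue.mu);
    while (event_queue.head == NULL && !event_queue.abort) {
      gpr_cv_wait(&event_queue.cv, &event_queue.mu,
                  gpr_inf_future(GPR_CLOCK_REALTIME));
    }
    if (event_queue.abort) {
      gpr_mu_unlock(&event_queue.mu);
      break;
    }
    /* Pop the head node and run its callback outside the lock. */
    ev = event_queue.head;
    event_queue.head = ev->next;
    if (event_queue.tail == ev) event_queue.tail = NULL;
    gpr_mu_unlock(&event_queue.mu);

    ev->callback(ev->argument);
    gpr_free(ev);
  }
  return Qnil;
}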
Example No. 7
static VALUE
function_init(VALUE self, VALUE rbFunctionInfo, VALUE rbProc)
{
    Function* fn = NULL;
    
    Data_Get_Struct(self, Function, fn);

    fn->rbFunctionInfo = rbFunctionInfo;

    Data_Get_Struct(fn->rbFunctionInfo, FunctionType, fn->info);

    if (rb_obj_is_kind_of(rbProc, rbffi_PointerClass)) {
        /* Already a native function pointer: just borrow its memory. */
        Pointer* orig;
        Data_Get_Struct(rbProc, Pointer, orig);
        fn->base.memory = orig->memory;
        fn->base.rbParent = rbProc;

    } else if (rb_obj_is_kind_of(rbProc, rb_cProc) || rb_respond_to(rbProc, id_call)) {
        /* A Ruby callable: wrap it in an ffi closure. */
        if (fn->info->closurePool == NULL) {
            fn->info->closurePool = rbffi_ClosurePool_New(sizeof(ffi_closure), callback_prep, fn->info);
            if (fn->info->closurePool == NULL) {
                rb_raise(rb_eNoMemError, "failed to create closure pool");
            }
        }

#if defined(DEFER_ASYNC_CALLBACK)
        /* Lazily spawn the thread that services callbacks arriving from
         * non-Ruby threads; without rb_thread_blocking_region, a
         * non-blocking pipe is used to wake it. */
        if (async_cb_thread == Qnil) {
#if !defined(HAVE_RB_THREAD_BLOCKING_REGION)
            pipe(async_cb_pipe);
            fcntl(async_cb_pipe[0], F_SETFL, fcntl(async_cb_pipe[0], F_GETFL) | O_NONBLOCK);
            fcntl(async_cb_pipe[1], F_SETFL, fcntl(async_cb_pipe[1], F_GETFL) | O_NONBLOCK);
#endif
            async_cb_thread = rb_thread_create(async_cb_event, NULL);
        }

#endif

        fn->closure = rbffi_Closure_Alloc(fn->info->closurePool);
        fn->closure->info = fn;
        fn->base.memory.address = fn->closure->code;
        fn->base.memory.size = sizeof(*fn->closure);
        fn->autorelease = true;

    } else {
        rb_raise(rb_eTypeError, "wrong argument type %s, expected pointer or proc",
                rb_obj_classname(rbProc));
    }
    
    fn->rbProc = rbProc;

    return self;
}
Example No. 8
void ruby_watcher_wrapper(watcher_fn cb, zhandle_t *zh, int type, int state, char *path, void *ctx)
{
    struct watcher_data *w;

    /* Copy everything the handler thread needs; path and ctx are duplicated
     * because the originals may not outlive this call (strdup on ctx assumes
     * it points to a NUL-terminated string). Note the struct keyword: no
     * watcher_data typedef is in scope here. */
    w = (struct watcher_data *)malloc(sizeof(struct watcher_data));
    w->cb = (void *)cb;
    w->zh = zh;
    w->type = type;
    w->state = state;
    w->path = path ? strdup(path) : 0;
    w->ctx = ctx ? strdup(ctx) : 0;

    rb_thread_create((void *)ruby_watcher_wrapper_2, w);
}
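The second half, ruby_watcher_wrapper_2, is not shown; presumably it runs on the new Ruby thread, re-invokes the wrapped watcher there (where touching the Ruby VM is safe), and frees the allocation. A hedged sketch of that shape:

/* Sketch only: assumed shape, not the binding's actual code. Runs on the
 * Ruby thread created above. */
static VALUE ruby_watcher_wrapper_2(void *arg)
{
    struct watcher_data *w = (struct watcher_data *)arg;
    watcher_fn cb = (watcher_fn)w->cb;

    cb(w->zh, w->type, w->state, w->path, w->ctx);

    /* Release the copies made in ruby_watcher_wrapper. */
    free(w->path);
    free(w->ctx);
    free(w);
    return Qnil;
}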
Example No. 9
static void do_watch(struct watcher_data *w)
  {
  struct stat statbuf;
  time_t time0 = time(NULL);

  printf("(watching)"); fflush(stdout);
  while(1)
    {
    if (stat(w->path, &statbuf) == 0)
      {
      w->s = statbuf.st_mtime-time0;
      unlink(w->path);
      rb_thread_create((void *)cb_ruby, w);
      }
    sleep(1);
    }
  }
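cb_ruby is not shown; given that do_watch records the mtime delta in w->s before spawning it, a plausible sketch, assuming watcher_data also carries a Ruby proc in a cb field (an assumption; only path and s are used above), is:

/* Sketch only: assumes w->cb holds a Ruby proc; the real struct may
 * differ. */
static VALUE cb_ruby(void *arg)
  {
  struct watcher_data *w = (struct watcher_data *)arg;

  /* Report the seconds between watch start and the file's mtime. */
  return rb_funcall((VALUE)w->cb, rb_intern("call"), 1, LONG2NUM((long)w->s));
  }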
Example No. 10
/* Temporary fix for
 * https://github.com/GoogleCloudPlatform/google-cloud-ruby/issues/899.
 * Transports in idle channels can get destroyed. Normally c-core reconnects,
 * but in grpc-ruby, core never gets a thread until an RPC is made, because
 * ruby only calls c-core's "completion_queue_pluck" API.
 * This uses a global background thread that calls "completion_queue_next"
 * on registered "watch_channel_connectivity_state" calls, so that c-core
 * can reconnect if needed when there aren't any RPCs.
 * TODO(apolcyn) remove this when core handles new RPCs on dead connections.
 */
void grpc_rb_channel_polling_thread_start() {
  VALUE background_thread = Qnil;

  GPR_ASSERT(!abort_channel_polling);
  GPR_ASSERT(!channel_polling_thread_started);
  GPR_ASSERT(channel_polling_cq == NULL);

  gpr_mu_init(&global_connection_polling_mu);
  gpr_cv_init(&global_connection_polling_cv);

  channel_polling_cq = grpc_completion_queue_create_for_next(NULL);
  background_thread = rb_thread_create(run_poll_channels_loop, NULL);

  if (!RTEST(background_thread)) {
    gpr_log(GPR_DEBUG, "GRPC_RUBY: failed to spawn channel polling thread");
    rb_thread_call_without_gvl(set_abort_channel_polling_without_gil, NULL,
                               NULL, NULL);
  }
}
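run_poll_channels_loop is not shown in this excerpt. Its job, per the comment, is to drive "completion_queue_next" so c-core can reconnect; a hedged sketch (close in spirit, not necessarily in detail, to the real grpc code) is:

/* Sketch only: the real loop also coordinates shutdown through the
 * mutex/condvar initialized above. */
static void *run_poll_channels_loop_no_gil(void *unused) {
  grpc_event ev;
  (void)unused;

  for (;;) {
    /* Drive the completion queue so c-core can service reconnects even
     * when no RPC is in flight. */
    ev = grpc_completion_queue_next(channel_polling_cq,
                                    gpr_inf_future(GPR_CLOCK_REALTIME), NULL);
    if (ev.type == GRPC_QUEUE_SHUTDOWN) break;
  }
  return NULL;
}

static VALUE run_poll_channels_loop(void *unused) {
  (void)unused;
  /* Release the GVL around the blocking completion_queue_next calls. */
  rb_thread_call_without_gvl(run_poll_channels_loop_no_gil, NULL, NULL, NULL);
  return Qnil;
}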
Example No. 11
/* This thread constantly monitors the connection queue, launching new fetcher threads as
 * needed */
VALUE mtserver_monitor_thread(VALUE self)
{
   struct timeval delay;
   VALUE sockets = rb_iv_get(self, "@sockets");

   /* Poll once per second. (The original set tv_usec to 1000000, which is
    * out of range for a struct timeval; a full second belongs in tv_sec.) */
   delay.tv_sec = 1;
   delay.tv_usec = 0;

   while (1)
     {
	/* If the idle workers cover fewer than half the queued connections,
	 * spawn another fetcher, up to @maxWorkers. */
	if (mtserver_get_avail(self) * 2 < RARRAY(sockets)->len)
	  {
	     if (RMTServer(self)->workers < FIX2INT(rb_iv_get(self, "@maxWorkers")))
	       {
		  rb_thread_create(mtserver_fetcher_thread, (void *)self);
	       }
	  }

	rb_thread_wait_for(delay);
     }
   return Qtrue;
}
Example No. 12
void RubyVPI_user_init()
{
    // mailbox init
    RubyVPI_util_debug("User: mailbox init");
    RubyVPI_user_require("ruby-vpi/boot/relay");


    // ruby thread init
    RubyVPI_util_debug("User: ruby thread init");
    rb_thread_create(RubyVPI_user_body, "ruby-vpi/boot/loader");


    // wait for thread to pause
    RubyVPI_util_debug("User: calling RubyVPI.attach");

    RubyVPI_user__module_RubyVPI = rb_const_get(rb_cObject, rb_intern("RubyVPI"));
    rb_funcall(RubyVPI_user__module_RubyVPI, rb_intern("attach"), 0);

    RubyVPI_util_debug("User: calling RubyVPI.attach DONE");


    RubyVPI_util_debug("User: ruby thread is active & ran once");
    RubyVPI_user__symbol_resume = rb_intern("resume");
}
Example No. 13
static void rbthreads_loop() {
	struct uwsgi_plugin *rup = uwsgi_plugin_get("rack");
	// disable init_thread warning
	if (rup) {
		rup->init_thread = rbthread_noop;
	}

	// override read/write nb hooks
	urbts.orig_wait_write_hook = uwsgi.wait_write_hook;
	urbts.orig_wait_read_hook = uwsgi.wait_read_hook;
	urbts.orig_wait_milliseconds_hook = uwsgi.wait_milliseconds_hook;
	uwsgi.wait_write_hook = rbthreads_wait_fd_write;
	uwsgi.wait_read_hook = rbthreads_wait_fd_read;
	uwsgi.wait_milliseconds_hook = rbthreads_wait_milliseconds;

	int i;
	for(i=1;i<uwsgi.threads;i++) {
		long y = i;
		rb_thread_create(uwsgi_rb_thread_core, (void *) y);
	}
	long y = 0;
	uwsgi_rb_thread_core((void *) y);
	// never here
}
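The replacement hooks themselves are not shown. Since their purpose is to let other Ruby green threads run while one waits on a descriptor, a plausible sketch of the read hook (the signature is guessed from how uwsgi.wait_read_hook is used: a file descriptor plus a timeout in seconds) is:

/* Sketch only: assumed signature and return convention (-1 error, 0 timeout,
 * 1 readable); the actual uWSGI plugin code may differ. */
static int rbthreads_wait_fd_read(int fd, int timeout) {
	struct timeval tv;
	int ret;

	tv.tv_sec = timeout;
	tv.tv_usec = 0;

	/* rb_wait_for_single_fd blocks only this Ruby thread, not the VM. */
	ret = rb_wait_for_single_fd(fd, RB_WAITFD_IN, &tv);
	if (ret < 0) return -1;
	return (ret & RB_WAITFD_IN) ? 1 : 0;
}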
Example No. 14
/*
 * This is the method that actually puts your code into a loop and allows it
 * to run as a service.  The code that is actually run while in the mainloop
 * is what you defined in your own Daemon#service_main method.
 */
static VALUE daemon_mainloop(VALUE self)
{
   DWORD ThreadId;
   HANDLE events[2];
   DWORD index;
   VALUE result, EventHookHash;
   int status = 0;

   dwServiceState = 0;

   // Redirect STDIN, STDOUT and STDERR to the NUL device if they're still
   // associated with a tty. This helps newbs avoid Errno::EBADF errors.
   if(rb_funcall(rb_stdin, rb_intern("isatty"), 0) == Qtrue)
      rb_funcall(rb_stdin, rb_intern("reopen"), 1, rb_str_new2("NUL"));

   if(rb_funcall(rb_stdout, rb_intern("isatty"), 0) == Qtrue)
      rb_funcall(rb_stdout, rb_intern("reopen"), 1, rb_str_new2("NUL"));

   if(rb_funcall(rb_stderr, rb_intern("isatty"), 0) == Qtrue)
      rb_funcall(rb_stderr, rb_intern("reopen"), 1, rb_str_new2("NUL"));

   // Use a markable instance variable to prevent the garbage collector
   // from freeing the hash before Ruby_Service_Ctrl exits, or at any
   // other time while the service is running.
   EventHookHash = rb_hash_new();
   rb_ivar_set(self, rb_intern("@event_hooks"), EventHookHash);

   // Event hooks
   if(rb_respond_to(self, rb_intern("service_stop"))){
      rb_hash_aset(EventHookHash, INT2NUM(SERVICE_CONTROL_STOP),
         rb_ary_new3(2, self, INT2NUM(rb_intern("service_stop"))));
   }

   if(rb_respond_to(self, rb_intern("service_pause"))){
      rb_hash_aset(EventHookHash, INT2NUM(SERVICE_CONTROL_PAUSE),
         rb_ary_new3(2, self, INT2NUM(rb_intern("service_pause"))));
   }

   if(rb_respond_to(self, rb_intern("service_resume"))){
      rb_hash_aset(EventHookHash, INT2NUM(SERVICE_CONTROL_CONTINUE),
         rb_ary_new3(2, self, INT2NUM(rb_intern("service_resume"))));
   }

   if(rb_respond_to(self, rb_intern("service_interrogate"))){
      rb_hash_aset(EventHookHash, INT2NUM(SERVICE_CONTROL_INTERROGATE),
         rb_ary_new3(2, self, INT2NUM(rb_intern("service_interrogate"))));
   }

   if(rb_respond_to(self, rb_intern("service_shutdown"))){
      rb_hash_aset(EventHookHash, INT2NUM(SERVICE_CONTROL_SHUTDOWN),
         rb_ary_new3(2, self, INT2NUM(rb_intern("service_shutdown"))));
   }

#ifdef SERVICE_CONTROL_PARAMCHANGE
   if(rb_respond_to(self, rb_intern("service_paramchange"))){
      rb_hash_aset(EventHookHash, INT2NUM(SERVICE_CONTROL_PARAMCHANGE),
         rb_ary_new3(2, self, INT2NUM(rb_intern("service_paramchange"))));
   }
#endif

#ifdef SERVICE_CONTROL_NETBINDADD
   if(rb_respond_to(self, rb_intern("service_netbindadd"))){
      rb_hash_aset(EventHookHash, INT2NUM(SERVICE_CONTROL_NETBINDADD),
         rb_ary_new3(2, self, INT2NUM(rb_intern("service_netbindadd"))));
   }
#endif

#ifdef SERVICE_CONTROL_NETBINDREMOVE
   if(rb_respond_to(self, rb_intern("service_netbindremove"))){
      rb_hash_aset(EventHookHash, INT2NUM(SERVICE_CONTROL_NETBINDREMOVE),
         rb_ary_new3(2, self, INT2NUM(rb_intern("service_netbindremove"))));
   }
#endif

#ifdef SERVICE_CONTROL_NETBINDENABLE
   if(rb_respond_to(self, rb_intern("service_netbindenable"))){
      rb_hash_aset(EventHookHash, INT2NUM(SERVICE_CONTROL_NETBINDENABLE),
         rb_ary_new3(2, self, INT2NUM(rb_intern("service_netbindenable"))));
   }
#endif

#ifdef SERVICE_CONTROL_NETBINDDISABLE
   if(rb_respond_to(self, rb_intern("service_netbinddisable"))){
      rb_hash_aset(EventHookHash, INT2NUM(SERVICE_CONTROL_NETBINDDISABLE),
         rb_ary_new3(2, self, INT2NUM(rb_intern("service_netbinddisable"))));
   }
#endif

   // Call init here so that an init failure never even tries to start
   // the service. That means init methods must be quick, because the
   // SCM receives no START_PENDING messages while init is running.
   // This may be fixed later.
   if(rb_respond_to(self, rb_intern("service_init")))
      rb_funcall(self, rb_intern("service_init"),0);

   // Create the event to signal the service to start.
   hStartEvent = CreateEvent(NULL, TRUE, FALSE, NULL);

   if(hStartEvent == NULL)
      rb_raise(cDaemonError, ErrorDescription(GetLastError()));

   // Create the event to signal the service to stop.
   hStopEvent = CreateEvent(NULL, TRUE, FALSE, NULL);

   if(hStopEvent == NULL)
      rb_raise(cDaemonError, ErrorDescription(GetLastError()));

   // Create the event to signal the service that stop has completed
   hStopCompletedEvent = CreateEvent(NULL, TRUE, FALSE, NULL);

   if(hStopCompletedEvent == NULL)
      rb_raise(cDaemonError, ErrorDescription(GetLastError()));

   // Create Thread for service main
   hThread = CreateThread(NULL, 0, ThreadProc, 0, 0, &ThreadId);

   // Note: CreateThread returns NULL on failure, not INVALID_HANDLE_VALUE.
   if(hThread == NULL)
      rb_raise(cDaemonError, ErrorDescription(GetLastError()));

   events[0] = hThread;
   events[1] = hStartEvent;

   // wait for Service_Main function to either start the service OR terminate
   while((index = WaitForMultipleObjects(2,events,FALSE,1000)) == WAIT_TIMEOUT)
   {
   }

   // thread exited, so the show is off
   if(index == WAIT_OBJECT_0)
      rb_raise(cDaemonError, "Service_Main thread exited abnormally");

   // from this point onward, stopevent must be triggered!

   // Create the green thread to poll for Service_Ctrl events
   rb_thread_create(Ruby_Service_Ctrl, (void *)self);

   result = rb_protect(daemon_mainloop_protect, self, &status);

   // service_main raised an exception
   if(status){
      daemon_mainloop_ensure(self);
      rb_jump_tag(status);
   }

   // service_main exited cleanly
   return daemon_mainloop_ensure(self);
}
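daemon_mainloop_protect and daemon_mainloop_ensure are not shown. Judging from the rb_protect / rb_jump_tag pattern above and the "stopevent must be triggered" note, plausible sketches (assumed shapes, not the gem's exact code) are:

/* Sketch only: inferred from the rb_protect call and the requirement that
 * hStopEvent be signaled on every exit path; the real code may differ. */
static VALUE daemon_mainloop_protect(VALUE self)
{
   // Run the user's service body; any exception propagates to rb_protect.
   return rb_funcall(self, rb_intern("service_main"), 0);
}

static VALUE daemon_mainloop_ensure(VALUE self)
{
   // Signal the service to stop and wait for the worker thread to finish.
   SetEvent(hStopEvent);
   WaitForSingleObject(hThread, INFINITE);
   CloseHandle(hThread);
   return self;
}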
Example No. 15
/* Temporary fix for
 * https://github.com/GoogleCloudPlatform/google-cloud-ruby/issues/899.
 * Transports in idle channels can get destroyed. Normally c-core reconnects,
 * but in grpc-ruby, core never gets a thread until an RPC is made, because
 * ruby only calls c-core's "completion_queue_pluck" API.
 * This uses a global background thread that calls "completion_queue_next"
 * on registered "watch_channel_connectivity_state" calls, so that c-core
 * can reconnect if needed when there aren't any RPCs.
 * TODO(apolcyn) remove this when core handles new RPCs on dead connections.
 */
static void start_poll_channels_loop() {
  channel_polling_cq = grpc_completion_queue_create(NULL);
  gpr_mu_init(&global_connection_polling_mu);
  abort_channel_polling = 0;
  rb_thread_create(run_poll_channels_loop, NULL);
}