Example #1
int start_native_gui(wxe_data *sd)
{
  int res;
  wxe_status_m = erl_drv_mutex_create((char *) "wxe_status_m");
  wxe_status_c = erl_drv_cond_create((char *)"wxe_status_c");

  wxe_batch_locker_m = erl_drv_mutex_create((char *)"wxe_batch_locker_m");
  wxe_batch_locker_c = erl_drv_cond_create((char *)"wxe_batch_locker_c");
  init_caller = driver_connected(sd->port); 

  if((res = erl_drv_thread_create((char *)"wxwidgets",
				  &wxe_thread,wxe_main_loop,(void *) sd->pdl,NULL)) == 0) {
    erl_drv_mutex_lock(wxe_status_m);
    for(;wxe_status == WXE_NOT_INITIATED;) {
      erl_drv_cond_wait(wxe_status_c, wxe_status_m);
    }
    erl_drv_mutex_unlock(wxe_status_m);
    return wxe_status;
  } else {
    wxString msg;
    msg.Printf(wxT("Erlang failed to create wxe-thread %d\r\n"), res);
    send_msg("error", &msg);
    return -1;
  }
}
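
Every example on this page follows the same discipline: erl_drv_cond_wait() atomically releases the mutex, sleeps, and re-acquires the mutex before it returns, so the guarded condition must be re-checked in a loop after every wakeup. A minimal self-contained sketch of that pattern (the ready_flag/mark_ready names are illustrative, not taken from the examples here):

#include "erl_driver.h"

static ErlDrvMutex *ready_m;      /* protects ready_flag                */
static ErlDrvCond  *ready_c;      /* signalled when ready_flag changes  */
static int          ready_flag = 0;

static void init_ready_sync(void)
{
    ready_m = erl_drv_mutex_create((char *)"ready_m");
    ready_c = erl_drv_cond_create((char *)"ready_c");
}

/* Waiter: block until another thread sets ready_flag. */
static void wait_until_ready(void)
{
    erl_drv_mutex_lock(ready_m);
    while (!ready_flag) {                      /* re-check: wakeups may be spurious */
        erl_drv_cond_wait(ready_c, ready_m);   /* unlocks, sleeps, relocks          */
    }
    erl_drv_mutex_unlock(ready_m);
}

/* Signaller: flip the flag under the mutex, then wake the waiter(s). */
static void mark_ready(void)
{
    erl_drv_mutex_lock(ready_m);
    ready_flag = 1;
    erl_drv_cond_signal(ready_c);   /* erl_drv_cond_broadcast() if several waiters */
    erl_drv_mutex_unlock(ready_m);
}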
Example #2
// erl_drv_mutex_lock(wxe_batch_locker_m) should have been called before
// entering this function, and the mutex should be released afterwards.
int WxeApp::dispatch(wxList * batch, int blevel, int list_type) 
{
  int ping = 0;
  // erl_drv_mutex_lock(wxe_batch_locker_m);  must be locked already
  while(true) 
    {
      if (batch->size() > 0) {
	for( wxList::compatibility_iterator node = batch->GetFirst();
	     node;
	     node = batch->GetFirst())
	  {
	    wxeCommand *event = (wxeCommand *)node->GetData();
	    batch->Erase(node);
	    switch(event->op) {
	    case WXE_BATCH_END:
	      {--blevel; }
	      break;
	    case WXE_BATCH_BEGIN:
	      {blevel++; }
	      break;
	    case WXE_DEBUG_PING:
	      // When in the debugger we don't want to hang waiting for a BATCH_END
	      // that never comes, because a breakpoint has been hit.
	      ping++;
	      if(ping > 2) 
		blevel = 0;
	      break;
	    case WXE_CB_RETURN:
	      // erl_drv_mutex_unlock(wxe_batch_locker_m); should be called after
	      // whatever cleaning is necessary
	      memcpy(cb_buff, event->buffer, event->len);
	      return blevel;
	    default:
	      erl_drv_mutex_unlock(wxe_batch_locker_m);	      
	      if(event->op < OPENGL_START) {
		// fprintf(stderr, "  c %d (%d) \r\n", event->op, blevel);
		wxe_dispatch(*event);
	      } else {
		gl_dispatch(event->op,event->buffer,event->caller,event->bin);
	      }
	      erl_drv_mutex_lock(wxe_batch_locker_m);
	      break;
	    }
	    delete event;
	  }
      } else {
	if((list_type == WXE_STORED) || (blevel <= 0 && list_type == WXE_NORMAL)) {
	  // erl_drv_mutex_unlock(wxe_batch_locker_m); should be called after
	  // whatever cleaning is necessary
	  return blevel;
	}
	// sleep until something happens
	//fprintf(stderr, "%s:%d sleep %d %d %d %d \r\n", __FILE__, __LINE__, batch->size(), callback_returned, blevel, is_callback);fflush(stderr);
	wxe_batch_caller++;
	while(batch->size() == 0) {
	  erl_drv_cond_wait(wxe_batch_locker_c, wxe_batch_locker_m);
	}
      }
    }
}
Example #3
void WxeApp::dispatch_cb(wxList * batch, wxList * temp, ErlDrvTermData process) {
  int callback_returned = 0;
  while(true) {
    if (batch->size() > 0) {
      for( wxList::compatibility_iterator node = batch->GetFirst();
	   node;
	   node = batch->GetFirst())
	{
	  wxeCommand *event = (wxeCommand *)node->GetData();
	  wxeMemEnv *memenv = getMemEnv(event->port);
	  batch->Erase(node);
	  if(event->caller == process ||  // Callbacks from the CB process only
	     event->op == WXE_CB_START || // Allow recursive event callbacks
	     // Allow connect_cb during a CB, i.e. msg from wxe_server.
	     (memenv && event->caller == memenv->owner))
	    {
	      switch(event->op) {
	      case WXE_BATCH_END:
	      case WXE_BATCH_BEGIN:
	      case WXE_DEBUG_PING:
		break;
	      case WXE_CB_RETURN:
		memcpy(cb_buff, event->buffer, event->len);
		callback_returned = 1;
		return;
	      case WXE_CB_START:
		// From CB start on, accept messages from the CB process only
		process = event->caller;
		break;
	      default:
		erl_drv_mutex_unlock(wxe_batch_locker_m);
		if(event->op < OPENGL_START) {
		  // fprintf(stderr, "  cb %d \r\n", event->op);
		  wxe_dispatch(*event);
		} else {
		  gl_dispatch(event->op,event->buffer,event->caller,event->bin);
		}
		erl_drv_mutex_lock(wxe_batch_locker_m);
		if(callback_returned)
		  return;
		break;
	      }
	      delete event;
	    } else {
	    // fprintf(stderr, "  sav %d \r\n", event->op);
	    temp->Append(event);
	  }
	}
    } else {
      if(callback_returned) {
	return;
      }
      // sleep until something happens
      //fprintf(stderr, "%s:%d sleep %d %d %d %d \r\n", __FILE__, __LINE__, batch->size(), callback_returned, blevel, is_callback);fflush(stderr);
      while(batch->size() == 0) {
	erl_drv_cond_wait(wxe_batch_locker_c, wxe_batch_locker_m);
      }
    }
  }
}
Example #4
static void innostore_drv_stop(ErlDrvData handle)
{
    PortState* state = (PortState*)handle;

    // Grab the worker lock, in case we have a job running
    erl_drv_mutex_lock(state->worker_lock);

    // Signal the shutdown and wait until the current operation has completed
    state->shutdown_flag = 1;
    erl_drv_cond_signal(state->worker_cv);

    while (state->op)
    {
        erl_drv_cond_wait(state->worker_cv, state->worker_lock);
    }

    // If the port state is not marked as READY, close the cursor and abort the txn
    if (state->port_state != STATE_READY)
    {
        ib_cursor_close(state->cursor);
        ib_trx_rollback(state->txn);
    }

    // No pending jobs and we have the lock again -- join our worker thread
    erl_drv_cond_signal(state->worker_cv);
    erl_drv_mutex_unlock(state->worker_lock);
    erl_drv_thread_join(state->worker, 0);

    // Cleanup
    erl_drv_cond_destroy(state->worker_cv);
    erl_drv_mutex_destroy(state->worker_lock);
    driver_free(handle);
}
Example #5
void bdberl_tpool_stop(TPool* tpool)
{
    LOCK(tpool);

    // Set the shutdown flag and broadcast a notification
    tpool->shutdown = 1;
    erl_drv_cond_broadcast(tpool->work_cv);

    // Clean out the queue of pending jobs -- invoke their cleanup function

    // Wait until active_threads hits zero
    while (tpool->active_threads > 0)
    {
        erl_drv_cond_wait(tpool->work_cv, tpool->lock);
    }
    
    // Join up with all the workers
    int i = 0;
    for (i = 0; i < tpool->thread_count; i++)
    {
        erl_drv_thread_join(tpool->threads[i], 0);
    }

    // Cleanup 
    erl_drv_cond_destroy(tpool->work_cv);
    erl_drv_cond_destroy(tpool->cancel_cv);
    driver_free(tpool->threads);
    UNLOCK(tpool);
    erl_drv_mutex_destroy(tpool->lock);
    driver_free(tpool);
}
Example #6
int start_native_gui(wxe_data *sd)
{
  int res;
  wxe_status_m = erl_drv_mutex_create((char *) "wxe_status_m");
  wxe_status_c = erl_drv_cond_create((char *)"wxe_status_c");

  wxe_batch_locker_m = erl_drv_mutex_create((char *)"wxe_batch_locker_m");
  wxe_batch_locker_c = erl_drv_cond_create((char *)"wxe_batch_locker_c");
  init_caller = driver_connected(sd->port_handle);

#ifdef __DARWIN__
  res = erl_drv_steal_main_thread((char *)"wxwidgets",
				  &wxe_thread,wxe_main_loop,(void *) sd->pdl,NULL);
#else
  ErlDrvThreadOpts *opts = erl_drv_thread_opts_create((char *)"wx thread");
  opts->suggested_stack_size = 8192;
  res = erl_drv_thread_create((char *)"wxwidgets",
			      &wxe_thread,wxe_main_loop,(void *) sd->pdl,opts);
  erl_drv_thread_opts_destroy(opts);
#endif
  if(res == 0) {
    erl_drv_mutex_lock(wxe_status_m);
    for(;wxe_status == WXE_NOT_INITIATED;) {
      erl_drv_cond_wait(wxe_status_c, wxe_status_m);
    }
    erl_drv_mutex_unlock(wxe_status_m);
    return wxe_status;
  } else {
    wxString msg;
    msg.Printf(wxT("Erlang failed to create wxe-thread %d\r\n"), res);
    send_msg("error", &msg);
    return -1;
  }
}
Example #7
static void* bdberl_tpool_main(void* arg)
{
    TPool* tpool = (TPool*)arg;

    LOCK(tpool);

    tpool->active_threads++;

    while(1)
    {
        // Check for shutdown...
        if (tpool->shutdown)
        {
            tpool->active_threads--;
            erl_drv_cond_broadcast(tpool->work_cv);
            UNLOCK(tpool);
            return 0;
        }

        // Get the next job
        TPoolJob* job = next_job(tpool);
        if (job)
        {
            // Unlock to avoid blocking others
            UNLOCK(tpool);

            // Invoke the function
            (*(job->main_fn))(job->arg);

            // Relock
            LOCK(tpool);

            // Mark the job as not running (important for cancellation to know it's done)
            job->running = 0;

            // If the job was cancelled, signal the cancellation cv so that anyone waiting on the
            // job knows it's complete
            if (job->canceled)
            {
                erl_drv_cond_broadcast(tpool->cancel_cv);
            }
        
            // Cleanup the job (remove from active list, free, etc.)
            cleanup_job(tpool, job);
        }
        else
        {
            // Wait for a job to come available then jump back to top of loop
            erl_drv_cond_wait(tpool->work_cv, tpool->lock);
        }
    }

    return 0;
}
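
The submit side of this thread pool is not among the examples on this page, but the worker loop above implies its shape: enqueue the job and signal work_cv while holding the pool lock, so a worker sleeping in erl_drv_cond_wait() wakes and picks it up. A hedged sketch only; enqueue_pending_job() is a placeholder, not bdberl's actual API:

// Hypothetical submit path, inferred from the worker loop above.
static void tpool_submit_sketch(TPool* tpool, TPoolJob* job)
{
    LOCK(tpool);

    if (!tpool->shutdown)
    {
        // Make the job visible to next_job() ...
        enqueue_pending_job(tpool, job);   // placeholder, not a real bdberl function

        // ... and wake one worker blocked in erl_drv_cond_wait()
        erl_drv_cond_signal(tpool->work_cv);
    }

    UNLOCK(tpool);
}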
Example #8
void bdberl_tpool_cancel(TPool* tpool, TPoolJob* job)
{
    LOCK(tpool);

    // Remove the job from the pending queue 
    if (remove_pending_job(tpool, job))
    {
        // Job was removed from pending -- unlock and notify the job that it got canceled
        UNLOCK(tpool);

        if (job->cancel_fn)
        {
            (*(job->cancel_fn))(job->arg);
        }

        // Delete the job structure
        driver_free(job);
        return;
    }

    // Job not in the pending queue -- check the active queue. 
    if (is_active_job(tpool, job))
    {
        // Job is currently active -- mark it as cancelled (so we get notified) and wait for it
        job->canceled = 1;
        while (job->running)
        {
            erl_drv_cond_wait(tpool->cancel_cv, tpool->lock);
        }

        // Job is no longer running and should now be considered dead. Cleanup is handled by 
        // the worker.
        UNLOCK(tpool);
        return;
    }

    // Job was neither active nor pending -- it must have gotten run/cleaned up while we
    // were waiting on the thread pool lock. Regardless, it's now done/gone and the cancel
    // is a success.
    UNLOCK(tpool);
}
Example #9
static void* innostore_worker(void* arg)
{
    PortState* state = (PortState*)arg;
    erl_drv_mutex_lock(state->worker_lock);
    while (1)
    {
        //
        // NOTE: Holds the worker lock for the duration of the loop !!
        //
        if (state->shutdown_flag)
        {
            driver_free(state->work_buffer);
            state->work_buffer = 0;
            if (state->op != 0)
            {
                send_error_atom(state, "stopping");
                state->op = 0;
            }
            erl_drv_cond_signal(state->worker_cv);
            erl_drv_mutex_unlock(state->worker_lock);
            break;
        }

        if (state->op)
        {
            state->op(state);
            state->op = 0;
            driver_free(state->work_buffer);
            state->work_buffer = 0;
        }
        else
        {
            erl_drv_cond_wait(state->worker_cv, state->worker_lock);
        }
    }
    return 0;
}
Example #10
void enif_cond_wait(ErlNifCond *cnd, ErlNifMutex *mtx) { erl_drv_cond_wait(cnd,mtx); }
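
Example #10 is just a thin forwarder: the NIF condition API maps directly onto the driver one, so the same wait-in-a-loop discipline applies to enif_cond_wait(). A hedged sketch on the NIF side; the queue counter and function names are illustrative, not part of erl_nif:

#include "erl_nif.h"

static ErlNifMutex *q_lock;
static ErlNifCond  *q_cond;
static int          q_len = 0;     /* illustrative shared state */

static void q_init(void)
{
    q_lock = enif_mutex_create((char *)"q_lock");
    q_cond = enif_cond_create((char *)"q_cond");
}

static void consumer_take(void)
{
    enif_mutex_lock(q_lock);
    while (q_len == 0) {               /* always re-test the predicate */
        enif_cond_wait(q_cond, q_lock);
    }
    q_len--;
    enif_mutex_unlock(q_lock);
}

static void producer_put(void)
{
    enif_mutex_lock(q_lock);
    q_len++;
    enif_cond_signal(q_cond);
    enif_mutex_unlock(q_lock);
}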
Example #11
int
erts_cond_wait(erl_cond_t cnd, erl_mutex_t mtx)
{
    erl_drv_cond_wait((ErlDrvCond *) cnd, (ErlDrvMutex *) mtx);
    return 0;
}
Example #12
void WxeApp::dispatch_cb(wxeFifo * batch, wxeFifo * temp, ErlDrvTermData process) {
  wxeCommand *event;
  erl_drv_mutex_lock(wxe_batch_locker_m);
  while(true) {
    while((event = batch->Get()) != NULL) {
      erl_drv_mutex_unlock(wxe_batch_locker_m);
      wxeMemEnv *memenv = getMemEnv(event->port);
      // fprintf(stderr, "  Ev %d %lu\r\n", event->op, event->caller);
      if(event->caller == process ||  // Callbacks from the CB process only
	 event->op == WXE_CB_START || // Event callback start changes the process
	 event->op == WXE_CB_DIED ||  // Event callback process died
	 // Allow connect_cb during a CB, i.e. msg from wxe_server.
	 (memenv && event->caller == memenv->owner)) {
	switch(event->op) {
	case -1:
	case WXE_BATCH_END:
	case WXE_BATCH_BEGIN:
	case WXE_DEBUG_PING:
	  break;
	case WXE_CB_RETURN:
	  if(event->len > 0) {
	    cb_buff = (char *) driver_alloc(event->len);
	    memcpy(cb_buff, event->buffer, event->len);
	  }  // fall through to WXE_CB_DIED
	case WXE_CB_DIED:
	  event->Delete();
	  return;
	case WXE_CB_START:
	  // From CB start on, accept messages from the CB process only
	  process = event->caller;
	  break;
	default:
	  size_t start=temp->m_n;
	  if(event->op < OPENGL_START) {
	    // fprintf(stderr, "  cb %d \r\n", event->op);
	    wxe_dispatch(*event);
	  } else {
	    gl_dispatch(event->op,event->buffer,event->caller,event->bin);
	  }
	  if(temp->m_n > start) {
	    erl_drv_mutex_lock(wxe_batch_locker_m);
	    // We have recursed into dispatch_cb and messages for this
	    // callback may have been saved on the temp list; move them
	    // back to the original list
	    for(unsigned int i=start; i < temp->m_n; i++) {
	      wxeCommand *ev = &temp->m_q[(temp->m_first+i) % temp->m_max];
	      if(ev->caller == process) {
		batch->Append(ev);
	      }
	    }
	    erl_drv_mutex_unlock(wxe_batch_locker_m);
	  }
	  break;
	}
	event->Delete();
      } else {
	// fprintf(stderr, "  save %d %lu\r\n", event->op, event->caller);
	temp->Append(event);
      }
      erl_drv_mutex_lock(wxe_batch_locker_m);
    }
    // sleep until something happens
    // fprintf(stderr, "%s:%d sleep %d %d\r\n", __FILE__, __LINE__,
    //         batch->m_n, temp->m_n);fflush(stderr);
    wxe_needs_signal = 1;
    while(batch->m_n == 0) {
      erl_drv_cond_wait(wxe_batch_locker_c, wxe_batch_locker_m);
    }
    wxe_needs_signal = 0;
  }
}
Example #13
int WxeApp::dispatch(wxeFifo * batch, int blevel, int list_type)
{
  int ping = 0;
  wxeCommand *event;
  if(list_type == WXE_NORMAL) erl_drv_mutex_lock(wxe_batch_locker_m);
  while(true) {
    while((event = batch->Get()) != NULL) {
      if(list_type == WXE_NORMAL) erl_drv_mutex_unlock(wxe_batch_locker_m);
      switch(event->op) {
      case -1:
	break;
      case WXE_BATCH_END:
	{--blevel; }
	break;
      case WXE_BATCH_BEGIN:
	{blevel++; }
	break;
      case WXE_DEBUG_PING:
	// When in the debugger we don't want to hang waiting for a BATCH_END
	// that never comes, because a breakpoint has been hit.
	ping++;
	if(ping > 2)
	  blevel = 0;
	break;
      case WXE_CB_RETURN:
	if(event->len > 0) {
	  cb_buff = (char *) driver_alloc(event->len);
	  memcpy(cb_buff, event->buffer, event->len);
	}
	event->Delete();
	return blevel;
      default:
	if(event->op < OPENGL_START) {
	  // fprintf(stderr, "  c %d (%d) \r\n", event->op, blevel);
	  wxe_dispatch(*event);
	} else {
	  gl_dispatch(event->op,event->buffer,event->caller,event->bin);
	}
	break;
      }
      event->Delete();
      if(list_type == WXE_NORMAL) erl_drv_mutex_lock(wxe_batch_locker_m);
    }
    if(list_type == WXE_STORED)
      return blevel;
    if(blevel <= 0) { // list_type == WXE_NORMAL
      if(wxe_queue->m_old) {
	driver_free(wxe_queue->m_old);
	wxe_queue->m_old = NULL;
      }
      erl_drv_mutex_unlock(wxe_batch_locker_m);
      return blevel;
    }
    // sleep until something happens
    //fprintf(stderr, "%s:%d sleep %d %d\r\n", __FILE__, __LINE__, batch->m_n, blevel);fflush(stderr);
    wxe_needs_signal = 1;
    while(batch->m_n == 0) {
      erl_drv_cond_wait(wxe_batch_locker_c, wxe_batch_locker_m);
    }
    wxe_needs_signal = 0;
  }
}