Example No. 1
void
lookup_remote_credentials(NiceAgent* agent, guint stream_id) {
  gint retval;

  gchar lookup_cmd[1024];
  g_snprintf(lookup_cmd, sizeof(lookup_cmd), "./niceexchange.sh 0 %s lookup dummy", remote_hostname);
  if(is_caller)
    lookup_cmd[18] = '1'; /* offset 18 is the "0" role argument; flip it to "1" on the caller side */

  gchar *stdout;
  gchar *stderr;
  retval = execute_sync(lookup_cmd, NULL, &stdout, &stderr);
  if(retval != 0) {
    g_critical("niceexchange lookup returned a non-zero return value (%i)!", retval);
    if(stderr != NULL)
      g_critical("This was written to stderr:\n%s", stderr);
    g_free(stdout);
    g_free(stderr);
 
    g_main_loop_unref(gloop);
    g_object_unref(agent);

    exit(1);
  }
  parse_remote_data(agent, stream_id, 1, stdout, strlen(stdout));
  g_free(stdout);
  g_free(stderr);
}
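
The execute_sync() called here (and in Examples No. 2 and No. 8) is not a GLib API but a helper of the test program that runs a shell command to completion and hands back its exit status plus captured stdout/stderr. Below is a minimal sketch of what such a helper might look like, built on g_spawn_command_line_sync(); the name, signature, and return convention are assumptions, and the stdin-feeding variant used for publishing would additionally need g_spawn_async_with_pipes() to write to the child's standard input.

#include <glib.h>

/* Hypothetical sketch only; the real helper may differ.
 * stdin_data is ignored here (see note above). */
static gint
execute_sync(const gchar *cmdline, const gchar *stdin_data,
             gchar **out_stdout, gchar **out_stderr)
{
  gint wait_status = -1;
  GError *error = NULL;

  (void) stdin_data;

  /* Run the command to completion, capturing stdout/stderr into newly
   * allocated strings that the caller releases with g_free(). */
  if(!g_spawn_command_line_sync(cmdline, out_stdout, out_stderr,
                                &wait_status, &error)) {
    g_critical("failed to spawn '%s': %s", cmdline, error->message);
    g_error_free(error);
    return -1;
  }

  /* wait_status is the raw waitpid()-style status; a real implementation
   * would map it to the child's exit code, e.g. with
   * g_spawn_check_wait_status(). */
  return wait_status;
}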
Example No. 2
void
publish_local_credentials(NiceAgent* agent, guint stream_id) {
  gint retval;

  // publish local credentials
  gchar publish_cmd[1024];
  g_snprintf(publish_cmd, sizeof(publish_cmd), "./niceexchange.sh 0 %s publish dummy", remote_hostname);
  if(is_caller)
    publish_cmd[18] = '1'; /* offset 18 is the "0" role argument; flip it to "1" on the caller side */

  gchar *stdin;
  local_credentials_to_string(agent, stream_id, 1, &stdin);

  retval = execute_sync(publish_cmd, stdin, NULL, NULL);
  if(retval != 0) {
    g_critical("niceexchange publish returned a non-zero return value (%i)!", retval);
 
    g_main_loop_unref(gloop);
    g_object_unref(agent);

    exit(1);
  }
  g_free(stdin);
  g_debug("published local credentials\n");
}
Example No. 3
//queue up a received datagram for eventual handling via IDA's execute_sync mechanism
//call no sdk functions other than execute_sync
void disp_request_t::queueBuffer(Buffer *b) {
   if (buffers.enqueue(b)) {
      //only invoke execute_sync if the buffer just added was at the head of the queue
      //in theory this allows multiple datagrams to get queued for handling
      //in a single execute_sync callback
      execute_sync(*this, MFF_WRITE);
   }
}
Example No. 4
void sdl_window_info::notify_changed()
{
	auto wp = std::make_unique<worker_param>(std::static_pointer_cast<sdl_window_info>(shared_from_this()));
	if (SDL_ThreadID() == main_threadid)
	{
		execute_async_wait(&notify_changed_wt, std::move(wp));
	}
	else
		execute_sync(&notify_changed_wt, std::move(wp));
}
Example No. 5
void sdl_window_info::notify_changed()
{
	if (SDL_ThreadID() == main_threadid)
	{
		execute_async_wait(&notify_changed_wt, worker_param(this));
	}
	else
		execute_sync(&notify_changed_wt, worker_param(this));
}
Example No. 6
//------------------------------------------------------------------------
static int py_execute_sync(PyObject *py_callable, int reqf)
{
  PYW_GIL_CHECK_LOCKED_SCOPE();
  int rc = -1;
  // Callable?
  if ( PyCallable_Check(py_callable) )
  {
    struct py_exec_request_t : exec_request_t
    {
      ref_t py_callable;
      virtual int idaapi execute()
      {
        PYW_GIL_GET;
        newref_t py_result(PyObject_CallFunctionObjArgs(py_callable.o, NULL));
        int ret = py_result == NULL || !PyInt_Check(py_result.o)
                ? -1
                : PyInt_AsLong(py_result.o);
        // if the requesting thread decided not to wait for the request to
        // complete, we have to self-destroy, nobody else will do it
        if ( (code & MFF_NOWAIT) != 0 )
          delete this;
        return ret;
      }
      py_exec_request_t(PyObject *pyc)
      {
        // No need to GIL-ensure here, since this is created
        // within the py_execute_sync() scope.
        py_callable = borref_t(pyc);
      }
      virtual ~py_exec_request_t()
      {
        // Need to GIL-ensure here, since this might be called
        // from the main thread.
        PYW_GIL_GET;
        py_callable = ref_t(); // Release callable
      }
    };
    py_exec_request_t *req = new py_exec_request_t(py_callable);

    // Release the GIL before executing: if this runs in a non-main thread,
    // execute_sync() below blocks on req.sem while the main thread might
    // itself be waiting for the GIL, which would deadlock.
    Py_BEGIN_ALLOW_THREADS;
    rc = execute_sync(*req, reqf);
    Py_END_ALLOW_THREADS;
    // destroy the request once it is finished. exception: NOWAIT requests
    // will be handled in the future, so do not destroy them yet!
    if ( (reqf & MFF_NOWAIT) == 0 )
      delete req;
  }
  return rc;
}
Example No. 7
//queue up a received datagram for eventual handling via IDA's execute_sync mechanism
//call no sdk functions other than execute_sync
void disp_request_t::queueObject(json_object *obj) {
   bool call_exec = false;
   qmutex_lock(mtx);
   objects.push_back(obj);
   call_exec = objects.size() == 1;
   qmutex_unlock(mtx);

   if (call_exec) {
      //only invoke execute_sync if the object just added was at the head of the queue
      //in theory this allows multiple datagrams to get queued for handling
      //in a single execute_sync callback
      execute_sync(*this, MFF_WRITE);
   }
}
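
Examples No. 3 and No. 7 only show the producer side of the queue. The matching consumer is the execute() callback that IDA invokes in its main thread for the request passed to execute_sync(). Here is a minimal sketch of such a callback, assuming the member names from Example No. 7 (mtx, and objects as a vector-like container) and a hypothetical handleObject() helper; it is not the original implementation.

//Hypothetical sketch: drain every datagram queued so far in one callback,
//as the comments above describe. handleObject() is an assumed helper that
//does the real per-datagram work; it is not part of the original code.
int idaapi disp_request_t::execute() {
   qmutex_lock(mtx);
   while (!objects.empty()) {
      json_object *obj = objects.front();
      objects.erase(objects.begin());
      //release the lock while handling so new datagrams can be queued
      qmutex_unlock(mtx);
      handleObject(obj);
      qmutex_lock(mtx);
   }
   qmutex_unlock(mtx);
   return 0;
}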
Example No. 8
void
unpublish_local_credentials(NiceAgent* agent, guint stream_id) {
  gint retval;

  g_debug("unpublishing local credentials\n");
  gchar unpublish_cmd[1024];
  g_snprintf(unpublish_cmd, sizeof(unpublish_cmd), "./niceexchange.sh 0 %s unpublish dummy", remote_hostname);
  if(is_caller)
    unpublish_cmd[18] = '1'; /* offset 18 is the "0" role argument; flip it to "1" on the caller side */

  retval = execute_sync(unpublish_cmd, NULL, NULL, NULL);
  if(retval != 0) {
    g_critical("niceexchange unpublish returned a non-zero return value (%i)!", retval);
 
    g_main_loop_unref(gloop);
    g_object_unref(agent);

    exit(1);
  }
  g_debug("unpublish local credentials done\n");
}