Code Example #1
File: obj-store.c Project: hangshisitu/seafile
int
seaf_obj_store_async_write (struct SeafObjStore *obj_store,
                            guint32 writer_id,
                            const char *obj_id,
                            const void *obj_data,
                            int data_len,
                            gboolean need_sync)
{
    AsyncTask *task = g_new0 (AsyncTask, 1);
    GError *error = NULL;

    task->rw_id = writer_id;
    memcpy (task->obj_id, obj_id, 41);
    task->data = g_memdup (obj_data, data_len);
    task->len = data_len;
    task->need_sync = need_sync;

    g_thread_pool_push (obj_store->write_tpool, task, &error);
    if (error) {
        seaf_warning ("Failed to start async write of %s.\n", obj_id);
        /* don't leak the error or the task on failure */
        g_clear_error (&error);
        g_free (task->data);
        g_free (task);
        return -1;
    }

    return 0;
}
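For context, the worker on the other end of write_tpool receives each AsyncTask pushed above; a minimal sketch, assuming the GFunc below is what was registered at pool creation (the function name and the write step are assumptions, not seafile's actual code):

static void
write_tpool_worker (gpointer data, gpointer user_data)
{
    AsyncTask *task = data;

    /* Hypothetical: write task->data (task->len bytes) to the backend
     * under task->obj_id, syncing if task->need_sync is set. */

    /* ... backend write goes here ... */

    /* release the memory allocated in seaf_obj_store_async_write() */
    g_free (task->data);
    g_free (task);
}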
Code Example #2
File: fileops_trash.c Project: CannedFish/dde
void fileops_empty_trash ()
{
    if (pool == NULL) {
        pool = g_thread_pool_new(_empty_trash_job, NULL, -1, FALSE, NULL);
        atexit(destroy_thread_pool);
    }
    GList* trash_list = NULL;

    GVolumeMonitor* vol_monitor = g_volume_monitor_get ();
    GList* mount_list = g_volume_monitor_get_mounts (vol_monitor);
    g_object_unref (vol_monitor);

    //iterate through all mounts
    GList* l;
    for (l = mount_list; l != NULL; l = l->next)
    {
        trash_list = g_list_concat (trash_list,
                                    _get_trash_dirs_for_mount (l->data));
    }
    g_list_free_full (mount_list, g_object_unref);
    //prepend the top-level trash: location to the list
    trash_list = g_list_prepend (trash_list,
                                 g_file_new_for_uri ("trash:"));

    g_thread_pool_push(pool, trash_list, NULL);
}
Code Example #3
File: run.c Project: BackupTheBerlios/upwatch-svn
void add_probe(gpointer key, gpointer value, gpointer user_data)
{
  GThreadPool *gtpool = (GThreadPool *) user_data;

  g_thread_pool_push(gtpool, value, NULL);
//probe(value, user_data);
}
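The (key, value, user_data) signature matches GHFunc, so this callback is presumably driven by g_hash_table_foreach over a table of probes; a hypothetical call site (the probes table name is an assumption):

/* Hypothetical: queue every probe in the hash table onto the pool. */
g_hash_table_foreach (probes, add_probe, gtpool);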
Code Example #4
static void
test_thread_idle_time (void)
{
  guint limit = 50;
  guint interval = 10000;
  gint i;

  idle_pool = g_thread_pool_new (test_thread_idle_time_entry_func, 
				 NULL, 
				 MAX_THREADS,
				 FALSE,
				 NULL);

  g_thread_pool_set_max_unused_threads (MAX_UNUSED_THREADS);  
  g_thread_pool_set_max_idle_time (interval); 

  g_assert (g_thread_pool_get_max_unused_threads () == MAX_UNUSED_THREADS);   
  g_assert (g_thread_pool_get_max_idle_time () == interval);

  for (i = 0; i < limit; i++) {
    g_thread_pool_push (idle_pool, GUINT_TO_POINTER (i + 1), NULL);
    DEBUG_MSG (("[idle] ===> pushed new thread with id:%d, "
		"number of threads:%d, unprocessed:%d",
		i + 1,
		g_thread_pool_get_num_threads (idle_pool),
		g_thread_pool_unprocessed (idle_pool)));
  }

  g_timeout_add ((interval - 1000),
		 test_thread_idle_timeout, 
		 GUINT_TO_POINTER (interval));
}
Code Example #5
static gboolean
g_threaded_socket_service_incoming (GSocketService    *service,
                                    GSocketConnection *connection,
                                    GObject           *source_object)
{
  GThreadedSocketService *threaded;
  GThreadedSocketServiceData *data;

  threaded = G_THREADED_SOCKET_SERVICE (service);

  data = g_slice_new (GThreadedSocketServiceData);
  data->service = g_object_ref (service);
  data->connection = g_object_ref (connection);
  if (source_object)
    data->source_object = g_object_ref (source_object);
  else
    data->source_object = NULL;

  G_LOCK (job_count);
  if (++threaded->priv->job_count == threaded->priv->max_threads)
    g_socket_service_stop (service);
  G_UNLOCK (job_count);

  g_thread_pool_push (threaded->priv->thread_pool, data, NULL);

  return FALSE;
}
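For balance, the worker that consumes these jobs must undo the bookkeeping above. A sketch of what GIO's worker does, reconstructed from the locking here rather than quoted verbatim from gthreadedsocketservice.c:

static void
g_threaded_socket_service_func (gpointer job, gpointer user_data)
{
  GThreadedSocketServiceData *data = job;
  GThreadedSocketService *threaded = G_THREADED_SOCKET_SERVICE (data->service);

  /* ... emit the "run" signal so user code handles data->connection ... */

  G_LOCK (job_count);
  /* If the push above stopped the service because it hit max_threads,
   * finishing this job frees a slot, so accepting may resume. */
  if (threaded->priv->job_count-- == threaded->priv->max_threads)
    g_socket_service_start (G_SOCKET_SERVICE (data->service));
  G_UNLOCK (job_count);

  if (data->source_object)
    g_object_unref (data->source_object);
  g_object_unref (data->connection);
  g_object_unref (data->service);
  g_slice_free (GThreadedSocketServiceData, data);
}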
Code Example #6
File: CMiniCacheTest.c Project: tanec/fcache
int main(int argc, char **argv)
{
    if (argc != 3) {
        fprintf(stderr, "usage: %s threadnum datanum\n", argv[0]);
    } else {
        int tn = 0, dn = 0, i;

        tn = atoi(argv[1]);
        dn = atoi(argv[2]);

        if (tn > 0) {
            cache = CMiniCache_alloc(400000, 500000, 400000, 5000, dfree);

            g_thread_init(NULL);
            GThreadPool *tp = g_thread_pool_new(
                                  test, &dn, tn, FALSE, NULL);
            if (tp == NULL) {
                /* errno is not set here, so perror() would mislead */
                fprintf(stderr, "can not initialize thread pool!\n");
                exit(1);
            }

            int iarr[tn];
            for (i = 0; i < tn; i++) {
                iarr[i] = i + 1;
                g_thread_pool_push(tp, &(iarr[i]), NULL);
                run++;
            }

            /* busy-wait until the workers have decremented run to zero */
            while (run > 0);
            printf("done.\n");
        }
    }
    return ret;
}
Code Example #7
DpPopulation*dp_evaluation_population_init(DpEvaluationCtrl*hevalctrl, int size, double noglobal_eps)
{
	DpPopulation*pop;
	int i, istart = 0;
	//gboolean immediate_stop = FALSE;
	gboolean immediate_stop = TRUE;
	//gboolean wait_finish = TRUE;
	gboolean wait_finish = FALSE;
	GError *gerror = NULL;
	GMainContext *gcontext = g_main_context_default();
	gulong microseconds = G_USEC_PER_SEC / 1000;
	pop = dp_population_new(size, hevalctrl->eval->size, hevalctrl->eval_target->size, hevalctrl->eval_target->precond_size, hevalctrl->seed);
	if ( noglobal_eps == 0 ) {
		dp_evaluation_individ_set(hevalctrl, pop->individ[0]);
		pop->individ[0]->user_data = dp_target_eval_get_user_data(hevalctrl->eval_target);
		istart = 1;
		pop->individ[0]->cost = G_MAXDOUBLE;
	}
	for ( i = istart; i < size; i++) {
		dp_evaluation_individ_scramble(hevalctrl, pop->individ[i], noglobal_eps);
		pop->individ[i]->user_data = dp_target_eval_get_user_data(hevalctrl->eval_target);
		pop->individ[i]->cost = G_MAXDOUBLE;
	}
#ifdef MPIZE
/* MPI initialization steps */
	int world_id = 0, world_count = 1;
	MPI_Comm_size(MPI_COMM_WORLD, &world_count);
	MPI_Comm_rank(MPI_COMM_WORLD, &world_id);
	int ind_per_node = (int)ceil((double)pop->size / world_count);
	int ind_per_last_node = pop->size - ind_per_node * (world_count - 1);
	dp_population_mpi_distribute(pop, world_id, world_count);
#endif
	if ( hevalctrl->eval_max_threads > 0 ) {
		hevalctrl->gthreadpool = g_thread_pool_new ((GFunc) dp_evaluation_population_init_func, (gpointer) hevalctrl, hevalctrl->eval_max_threads, hevalctrl->exclusive, &gerror);
		if ( gerror != NULL ) {
			g_error("%s", gerror->message);
		}
		for ( i = pop->slice_a; i < pop->slice_b; i++) {
			g_thread_pool_push (hevalctrl->gthreadpool, (gpointer)(pop->individ[i]), &gerror);
			if ( gerror != NULL ) {
				g_error("%s", gerror->message);
			}
		}
		while(g_thread_pool_unprocessed (hevalctrl->gthreadpool) > 0) {
			g_main_context_iteration(gcontext, FALSE);
			g_usleep (microseconds);
		}
		g_thread_pool_free (hevalctrl->gthreadpool, immediate_stop, wait_finish);
	} else {
		for ( i = pop->slice_a; i < pop->slice_b; i++) {
			dp_evaluation_population_init_func ((gpointer)(pop->individ[i]), (gpointer) hevalctrl);
		}
	}
#ifdef MPIZE
	dp_population_mpi_gather(pop, world_id, world_count);
#endif
	dp_population_update(pop, 0, pop->size);
	return pop;
}
Code Example #8
File: dirwatch.c Project: tklengyel/drakvuf
static inline void start(struct start_drakvuf* start, char* sample)
{
    if ( shutting_down || !start || !sample )
        return;

    start->input = g_strdup(sample);
    g_thread_pool_push(pool, start, NULL);
}
Code Example #9
File: tpm.c Project: 01org/qemu-lite
void tpm_backend_thread_end(TPMBackendThread *tbt)
{
    if (tbt->pool) {
        g_thread_pool_push(tbt->pool, (gpointer)TPM_BACKEND_CMD_END, NULL);
        g_thread_pool_free(tbt->pool, FALSE, TRUE);
        tbt->pool = NULL;
    }
}
Code Example #10
File: tpm.c Project: 01org/qemu-lite
void tpm_backend_thread_create(TPMBackendThread *tbt,
                               GFunc func, gpointer user_data)
{
    if (!tbt->pool) {
        tbt->pool = g_thread_pool_new(func, user_data, 1, TRUE, NULL);
        g_thread_pool_push(tbt->pool, (gpointer)TPM_BACKEND_CMD_INIT, NULL);
    }
}
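Read together with tpm_backend_thread_end in the previous example, the intended lifecycle is: create a single-threaded, exclusive pool (which serializes commands in queue order), push work, then tear down. A rough usage sketch; the worker name and the caller's setup are assumptions:

TPMBackendThread tbt = { .pool = NULL };

tpm_backend_thread_create (&tbt, my_tpm_worker, backend); /* queues TPM_BACKEND_CMD_INIT */
/* ... push further commands onto tbt.pool as they arrive ... */
tpm_backend_thread_end (&tbt); /* queues TPM_BACKEND_CMD_END, drains, frees */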
Code Example #11
File: vm.c Project: jgraef/PUSH
void push_vm_run(push_vm_t *vm, push_t *push) {
  g_return_if_fail(vm != NULL);

  g_mutex_lock(vm->mutex);
  vm->processes = g_list_prepend(vm->processes, push);
  g_thread_pool_push(vm->threads, push, NULL);
  g_mutex_unlock(vm->mutex);
}
Code Example #12
void
test_nfs_multi_thread_start(void)
{
	int i;

	for(i = 0; i < 11; i++) {
		g_thread_pool_push(nfs_test_threads, dummy1, NULL);
	}
}
Code Example #13
File: f_ssl.c Project: dgyuri92/libmongo-client
void 
test_func_mongo_ssl_multithread (void)
{
  // 1. Many threads sharing the same context previously set up
  GThreadPool *thread_pool = g_thread_pool_new (ssl_query_thread, config.ssl_settings, THREAD_POOL_SIZE, TRUE, NULL);
  guint i;
  for (i = 0; i < THREAD_POOL_SIZE; ++i)
    g_thread_pool_push (thread_pool, config.ssl_settings, NULL);

  g_thread_pool_free (thread_pool, FALSE, TRUE);
  // 2. Many threads sharing the same context each manipulating the context
  srand (time (NULL));
  thread_pool = g_thread_pool_new (ssl_ping_thread, config.ssl_settings, THREAD_POOL_SIZE, TRUE, NULL);
  for (i = 0; i < THREAD_POOL_SIZE; ++i)
    g_thread_pool_push (thread_pool, config.ssl_settings, NULL);

  g_thread_pool_free (thread_pool, FALSE, TRUE);
}
Code Example #14
void
disk_mgr_input(nkn_task_id_t id)
{
	struct nkn_task *ntask = nkn_task_get_task(id);

	glob_dm_input ++;
	assert(ntask);
	nkn_task_set_state(id, TASK_STATE_EVENT_WAIT);
	g_thread_pool_push(old_dm_disk_thread_pool, ntask, NULL);
}
Code Example #15
File: render.c Project: shizeeg/zathura
bool
render_page(render_thread_t* render_thread, zathura_page_t* page)
{
  if (render_thread == NULL || page == NULL || render_thread->pool == NULL || render_thread->about_to_close == true) {
    return false;
  }

  g_thread_pool_push(render_thread->pool, page, NULL);
  return true;
}
Code Example #16
File: pool.c Project: piotras/MDTs
int
main (int argc, char **argv)
{
	midgard_init ();	

	MidgardConfig *config = midgard_config_new ();
	midgard_config_read_file_at_path (config, "/tmp/test_SQLITE.conf", NULL);

	MidgardConnection *mgd = midgard_connection_new ();
	midgard_connection_open_config (mgd, config);

	GThreadPool *pool = g_thread_pool_new (pool_func, (gpointer) mgd, 10, TRUE, NULL);

	//midgard_storage_create_base_storage (mgd);
	//midgard_storage_create (mgd,"midgard_snippetdir");
	
	g_print ("START OPERATIONS \n");

	MidgardObject *obj = midgard_object_new (mgd, "midgard_snippetdir", NULL);
	g_thread_pool_push (pool, (gpointer) obj, NULL);

	MidgardObject *obja = midgard_object_new (mgd, "midgard_snippetdir", NULL);
	g_thread_pool_push (pool, (gpointer) obja, NULL);

	MidgardObject *objb = midgard_object_new (mgd, "midgard_snippetdir", NULL);
	g_thread_pool_push (pool, (gpointer) objb, NULL);
	
	MidgardObject *objc = midgard_object_new (mgd, "midgard_snippetdir", NULL);
	g_thread_pool_push (pool, (gpointer) objc, NULL);
	
	MidgardObject *objd = midgard_object_new (mgd, "midgard_snippetdir", NULL);
	g_thread_pool_push (pool, (gpointer) objd, NULL);

	g_print ("END OPERATIONS \n");

	g_print ("THREADS REMAIN (%d) \n", g_thread_pool_unprocessed (pool));	

	g_thread_pool_free (pool, FALSE, TRUE);

	return 0;
}
Code Example #17
File: emc_server.c Project: regit/nufw
static void emc_server_accept_cb (struct ev_loop *loop, ev_io *w, int revents)
{
	struct emc_tls_server_context *ctx;
	struct sockaddr_storage sockaddr;
	struct sockaddr_in *sockaddr4 = (struct sockaddr_in *) &sockaddr;
	struct sockaddr_in6 *sockaddr6 = (struct sockaddr_in6 *) &sockaddr;
	struct in6_addr addr;
	unsigned int len_inet = sizeof sockaddr;
	nussl_session *nussl_sess;
	int socket;
	int sport;
	char address[INET6_ADDRSTRLEN];
	struct emc_client_context *client_ctx = NULL;

	ctx = w->data;

	nussl_sess = nussl_session_accept(ctx->nussl);
	if (nussl_sess == NULL) {
		log_printf(DEBUG_LEVEL_WARNING, "Error while accepting new connection: %s",
				nussl_get_error(ctx->nussl));
		return;
	}

	if (nussl_session_getpeer(nussl_sess, (struct sockaddr *) &sockaddr, &len_inet) != NUSSL_OK)
	{
		log_printf(DEBUG_LEVEL_WARNING, "WARNING New client connection failed during nussl_session_getpeer(): %s", nussl_get_error(ctx->nussl));
		free(nussl_sess);
		return;
	}

	socket = nussl_session_get_fd(nussl_sess);

	/* Extract client address (convert it to IPv6 if it's IPv4) */
	if (sockaddr6->sin6_family == AF_INET) {
		ipv4_to_ipv6(sockaddr4->sin_addr, &addr);
		sport = ntohs(sockaddr4->sin_port);
	} else {
		addr = sockaddr6->sin6_addr;
		sport = ntohs(sockaddr6->sin6_port);
	}

	format_ipv6(&addr, address, sizeof(address), NULL);
	log_printf(DEBUG_LEVEL_DEBUG, "DEBUG emc: user connection attempt from %s",
			address);

	client_ctx = malloc(sizeof(struct emc_client_context));
	if (client_ctx == NULL) {
		free(nussl_sess);
		return;
	}
	client_ctx->nussl = nussl_sess;
	strncpy(client_ctx->address, address, sizeof(client_ctx->address));
	/* strncpy does not guarantee NUL termination */
	client_ctx->address[sizeof(client_ctx->address) - 1] = '\0';
	client_ctx->tls_server_ctx = ctx;
	client_ctx->state = EMC_CLIENT_STATE_HANDSHAKE;

	g_thread_pool_push(server_ctx->pool_tls_handshake, client_ctx, NULL);
}
Code Example #18
File: hippo-http.c Project: manoj-makkuboy/magnetism
void
hippo_http_get(const char   *url,
               HippoHttpFunc func,
               void         *data)
{
    Task *task;
    GError *error;
    
    if (active_task_count == 0) {
        GIOChannel *pipe_read_channel;
        
        g_debug("Global http subsystem init");
        
        if (pipe(pipe_fds) < 0) {
            /* should not happen in any reasonable scenario... */
            GString *str;  
            g_warning("Could not create pipe: %s", strerror(errno));
            str = g_string_new("Failed to create pipe");
            (* func)(NULL, str, data);
            g_string_free(str, TRUE);
            return;
        }

        pipe_read_channel = g_io_channel_unix_new(pipe_fds[READ_END]);
        pipe_io_watch = g_io_add_watch(pipe_read_channel, G_IO_IN, pipe_read_callback, NULL);
        g_io_channel_unref(pipe_read_channel);
        
        /* not passing in the SSL flag, we don't need SSL 
         * and we'd have to do openssl's special thread setup
         * whatever that is, according to curl docs 
         */
        curl_global_init(0);

        error = NULL;
        pool = g_thread_pool_new(do_task, NULL, 8, FALSE, &error);
        if (pool == NULL) {
            g_error("Can't create thread pool: %s", error->message);
            g_error_free(error); /* not reached */
        }        
    }

    g_debug("Starting new http GET task for '%s'", url);

    task = task_new(url, func, data);

    error = NULL;
    g_thread_pool_push(pool, task, &error);
    if (error != NULL) {
        g_error("Can't create a new thread: %s", error->message);
        g_error_free(error); /* not reached */
    }
    
    active_task_count += 1;
}
Code Example #19
File: utilities.c Project: icedawn/rmlint
/* wrapper for g_thread_pool_push with error reporting */
bool rm_util_thread_pool_push(GThreadPool *pool, gpointer data) {
    GError *error = NULL;
    g_thread_pool_push(pool, data, &error);
    if(error != NULL) {
        rm_log_error_line("Unable to push thread to pool %p: %s", pool, error->message);
        g_error_free(error);
        return false;
    } else {
        return true;
    }
}
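A minimal usage sketch for the wrapper, assuming a hypothetical worker process_file and payload file (neither appears in the snippet):

GThreadPool *pool = g_thread_pool_new (process_file, NULL, 4, FALSE, NULL);

if (!rm_util_thread_pool_push (pool, file)) {
    /* the push failed and was already logged; fall back to the caller's
     * own error handling, e.g. processing the item synchronously */
    process_file (file, NULL);
}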
Code Example #20
static void
test_thread_sort (gboolean sort)
{
  GThreadPool *pool;
  guint limit;
  guint max_threads;
  gint i;

  limit = MAX_THREADS * 10;

  if (sort) {
    max_threads = 1;
  } else {
    max_threads = MAX_THREADS;
  }

  /* It is important that we only have a maximum of 1 thread for this
   * test since the results cannot be guaranteed to be sorted if > 1.
   *
   * Threads are scheduled by the operating system and are executed at
   * random. It cannot be assumed that threads are executed in the
   * order they are created. This was discussed in bug #334943.
   */
  
  pool = g_thread_pool_new (test_thread_sort_entry_func, 
			    GINT_TO_POINTER (sort), 
			    max_threads, 
			    FALSE,
			    NULL);

  g_thread_pool_set_max_unused_threads (MAX_UNUSED_THREADS); 

  if (sort) {
    g_thread_pool_set_sort_function (pool, 
				     test_thread_sort_compare_func,
				     GUINT_TO_POINTER (69));
  }
  
  for (i = 0; i < limit; i++) {
    guint id;

    id = g_random_int_range (1, limit) + 1;
    g_thread_pool_push (pool, GUINT_TO_POINTER (id), NULL);
    DEBUG_MSG (("%s ===> pushed new thread with id:%d, number "
		"of threads:%d, unprocessed:%d",
		sort ? "[  sorted]" : "[unsorted]", 
		id, 
		g_thread_pool_get_num_threads (pool),
		g_thread_pool_unprocessed (pool)));
  }

  g_assert (g_thread_pool_get_max_threads (pool) == max_threads);
  g_assert (g_thread_pool_get_num_threads (pool) == g_thread_pool_get_max_threads (pool));
}
Code Example #21
File: gck-call.c Project: bhull2010/mate-keyring
void
_gck_call_async_go (GckCall *call)
{
	g_assert (GCK_IS_CALL (call));

	/* To keep things balanced, process at least one completed event */
	process_completed(GCK_CALL_GET_CLASS (call));

	g_assert (GCK_CALL_GET_CLASS (call)->thread_pool);
	g_thread_pool_push (GCK_CALL_GET_CLASS (call)->thread_pool, call, NULL);
}
Code Example #22
File: gfuse-loop.c Project: madbob/FSter
static gboolean manage_fuse_mt (GIOChannel *source, GIOCondition condition, gpointer data)
{
    int res;
    char *buf;
    size_t bufsize;
    struct fuse *fuse;
    struct fuse_session *se;
    struct fuse_chan *ch;
    GThreadPool *pool;
    GError *error;
    ThreadsData *info;

    fuse = (struct fuse*) data;

    error = NULL;
    pool = g_thread_pool_new (manage_request, fuse, -1, FALSE, &error);
    if (pool == NULL) {
        g_warning ("Unable to start thread pool: %s", error->message);
        g_error_free (error);
        return FALSE;
    }

    se = fuse_get_session (fuse);
    ch = fuse_session_next_chan (se, NULL);
    bufsize = fuse_chan_bufsize (ch);

    while (1) {
        buf = (char*) malloc (bufsize);
        res = fuse_chan_recv (&ch, buf, bufsize);

        if (res == -EINTR) {
            free (buf);
            continue;
        }
        else if (res <= 0) {
            free (buf);
            break;
        }

        info = do_threads_data (buf, res);

        error = NULL;
        g_thread_pool_push (pool, info, &error);
        if (error != NULL) {
            g_warning ("Unable to start processing request: %s", error->message);
            g_error_free (error);
            free_threads_data (info);
        }
    }

    g_thread_pool_free (pool, TRUE, TRUE);
    return FALSE;
}
Code Example #23
#ifdef __SYMBIAN32__
EXPORT_C
#endif
gboolean
gst_task_pause (GstTask * task)
{
  GstTaskState old;

  g_return_val_if_fail (GST_IS_TASK (task), FALSE);

  GST_DEBUG_OBJECT (task, "Pausing task %p", task);

  GST_OBJECT_LOCK (task);
  if (G_UNLIKELY (GST_TASK_GET_LOCK (task) == NULL))
    goto no_lock;

  old = task->state;
  task->state = GST_TASK_PAUSED;
  switch (old) {
    case GST_TASK_STOPPED:
    {
      GstTaskClass *tclass;

      if (task->running)
        break;

      gst_object_ref (task);
      task->running = TRUE;

      tclass = GST_TASK_GET_CLASS (task);

      g_static_mutex_lock (&pool_lock);
      g_thread_pool_push (tclass->pool, task, NULL);
      g_static_mutex_unlock (&pool_lock);
      break;
    }
    case GST_TASK_PAUSED:
      break;
    case GST_TASK_STARTED:
      break;
  }
  GST_OBJECT_UNLOCK (task);

  return TRUE;

  /* ERRORS */
no_lock:
  {
    GST_WARNING_OBJECT (task, "pausing task without a lock");
    GST_OBJECT_UNLOCK (task);
    g_warning ("pausing task without a lock");
    return FALSE;
  }
}
Code Example #24
File: httpserver.c Project: i4tv/gstreamill
static gpointer listen_thread (gpointer data)
{
    HTTPServer *http_server = (HTTPServer *)data;
    struct epoll_event event_list[kMaxRequests];
    gint n, i;

    for (;;) {
        n = epoll_wait (http_server->epollfd, event_list, kMaxRequests, -1);
        if (n == -1) {
            GST_WARNING ("epoll_wait error %s", g_strerror (errno));
            continue;
        }
        for (i = 0; i < n; i++) {
            RequestData *request_data;

            if (event_list[i].data.ptr == NULL) {
                /* new request arrived */
                accept_socket (http_server);
                continue;
            }

            request_data = *(RequestData **)(event_list[i].data.ptr);
            g_mutex_lock (&(request_data->events_mutex));
            request_data->events |= event_list[i].events;
            g_mutex_unlock (&(request_data->events_mutex));

            /* push to thread pool queue */
            if ((event_list[i].events & EPOLLIN) && (request_data->status == HTTP_CONNECTED)) {
                GError *err = NULL;

                GST_DEBUG ("event on sock %d events %d", request_data->sock, request_data->events);
                request_data->status = HTTP_REQUEST;
                g_thread_pool_push (http_server->thread_pool, event_list[i].data.ptr, &err);
                if (err != NULL) {
                    GST_FIXME ("Thread pool push error %s", err->message);
                    g_error_free (err);
                }
            } 

            if (event_list[i].events & (EPOLLOUT | EPOLLIN | EPOLLHUP | EPOLLERR)) {
                if ((request_data->status == HTTP_BLOCK) || (request_data->status == HTTP_REQUEST)) {
                    g_mutex_lock (&(http_server->block_queue_mutex));
                    g_cond_signal (&(http_server->block_queue_cond));
                    g_mutex_unlock (&(http_server->block_queue_mutex));
                }
            }

            GST_DEBUG ("event on sock %d events %s", request_data->sock, epoll_event_string (event_list[i]));
        }
    }

    return NULL;
}
Code Example #25
File: tasklet.c Project: Sciumo/lighttpd2
void li_tasklet_push(liTaskletPool* pool, liTaskletRunCB run, liTaskletFinishedCB finished, gpointer data) {
	liTasklet *t = g_slice_new0(liTasklet);
	t->run_cb = run;
	t->finished_cb = finished;
	t->data = data;

	if (NULL != pool->threadpool) {
		g_thread_pool_push(pool->threadpool, t, NULL);
	} else {
		run_tasklet(t, pool);
	}
}
Code Example #26
File: interface.c Project: fossology/fossology
/**
 * Given a new socket, this will create the interface connection structure.
 *
 * @param conn    The socket that this interface is connected to
 * @param threads Thread pool handling sockets
 * @return the newly allocated and populated interface connection
 */
static interface_connection* interface_conn_init(
    GSocketConnection* conn, GThreadPool* threads)
{
  interface_connection* inter = g_new0(interface_connection, 1);

  inter->conn = conn;
  inter->istr = g_io_stream_get_input_stream((GIOStream*)inter->conn);
  inter->ostr = g_io_stream_get_output_stream((GIOStream*)inter->conn);
  g_thread_pool_push(threads, inter, NULL);

  return inter;
}
Code Example #27
File: grustna.c Project: sp3d/grust
static void
add_call_minder (RustCallData *call_data)
{
  static GOnce pool_once = G_ONCE_INIT;

  GThreadPool *pool;

  g_once (&pool_once, create_call_minder_pool, NULL);
  pool = pool_once.retval;

  g_thread_pool_push (pool, call_data, NULL);
}
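The create_call_minder_pool callback referenced above is not shown in this snippet; a plausible sketch, assuming the pool's worker is a minder function that services one RustCallData per task (call_minder_func is a hypothetical name):

/* Runs exactly once under g_once; the return value becomes pool_once.retval. */
static gpointer
create_call_minder_pool (gpointer data)
{
  return g_thread_pool_new ((GFunc) call_minder_func,
                            NULL,
                            -1,     /* no thread limit */
                            FALSE,  /* shared, not exclusive */
                            NULL);
}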
Code Example #28
File: dirwatch.c Project: aoshiken/drakvuf
static void prepare(char *sample, struct start_drakvuf *start)
{
    if (!sample && !start)
        return;

    domid_t cloneID = 0;
    char *clone_name = NULL;
    int threadid;

    if (!start)
        threadid = find_thread();
    else
        threadid = start->threadid;

    while(threadid<0) {
        printf("Waiting for a thread to become available..\n");
        sleep(1);
        threadid = find_thread();
    }

    printf("Making clone %i to run %s in thread %u\n", threadid+1, sample ? sample : start->input, threadid);
    make_clone(xen, &cloneID, threadid+1, &clone_name);

    while(!clone_name || !cloneID) {
        printf("Clone creation failed, trying again\n");
        free(clone_name);
        clone_name = NULL;
        cloneID = 0;

        make_clone(xen, &cloneID, threadid+1, &clone_name);
    }

    //g_mutex_lock(&prepare_lock);
    //uint64_t shared = xen_memshare(xen, domID, cloneID);
    //printf("Shared %"PRIu64" pages\n", shared);
    //g_mutex_unlock(&prepare_lock);

    if(!start && sample) {
        start = g_malloc0(sizeof(struct start_drakvuf));
        start->input = sample;
        start->threadid = threadid;
        g_mutex_init(&start->timer_lock);
    }

    start->cloneID = cloneID;
    start->clone_name = clone_name;

    if(sample) {
        g_thread_pool_push(pool, start, NULL);
    }
}
Code Example #29
File: shd-engine.c Project: anupam-das/shadow
/*
 * check all nodes, moving events that are within the execute window
 * from their mailbox into their priority queue for execution. all
 * nodes that have executable events are placed in the thread pool and
 * processed by a worker thread.
 *
 * @warning multiple threads are running as soon as the first node is
 * pushed into the thread pool
 */
static SimulationTime _engine_syncEvents(Engine* engine, GList* nodeList) {
	/* we want to return the minimum time of all events, in case we can
	 * fast-forward the next time window */
	SimulationTime minEventTime = 0;
	gboolean isMinEventTimeInitiated = FALSE;

	/* iterate the list of nodes by stepping through the items */
	GList* item = g_list_first(nodeList);
	while(item) {
		Node* node = item->data;

		/* peek mail from mailbox to check that its in our time window */
		Event* event = node_peekMail(node);

		if(event) {
			/* the first event is used to track the min event time of all nodes */
			if(isMinEventTimeInitiated) {
				minEventTime = MIN(minEventTime, event->time);
			} else {
				minEventTime = event->time;
				isMinEventTimeInitiated = TRUE;
			}
			while(event && (event->time < engine->executeWindowEnd) &&
					(event->time < engine->endTime)) {
				g_assert(event->time >= engine->executeWindowStart);

				/* this event now becomes a task that a worker will execute */
				node_pushTask(node, node_popMail(node));

				/* get the next event, if any */
				event = node_peekMail(node);
			}
		}

		/* see if this node actually has work for a worker */
		guint numTasks = node_getNumTasks(node);
		if(numTasks > 0) {
			/* we just added another node that must be processed */
			g_atomic_int_inc(&(engine->protect.nNodesToProcess));

			/* now let the worker handle all the node's events */
			g_thread_pool_push(engine->workerPool, node, NULL);
		}

		/* get the next node, if any */
		item = g_list_next(item);
	}

	/* it's OK if it wasn't initiated, because we have a min time jump override */
	return minEventTime;
}
Code Example #30
static void
do_in_thread(GThreadPool *pool, ThreadData *data)
{
    int notify_fds[2];
    GError *error = NULL;

    if (pipe(notify_fds) != 0)
        rb_sys_fail("failed to create a pipe to synchronize threaded operation");

    data->errno_on_write = 0;
    data->notify_read_fd = notify_fds[0];
    data->notify_write_fd = notify_fds[1];
    g_thread_pool_push(pool, data, &error);
    if (error) {
        close(notify_fds[0]);
        close(notify_fds[1]);
        RAISE_GERROR(error);
    }

    rb_thread_wait_fd(notify_fds[0]);

#define BUFFER_SIZE 512
    if (data->errno_on_write == 0) {
        char buf[NOTIFY_MESSAGE_SIZE];
        ssize_t read_size;
        int saved_errno = 0;
        read_size = read(notify_fds[0], buf, NOTIFY_MESSAGE_SIZE);
        if (read_size != NOTIFY_MESSAGE_SIZE) {
            saved_errno = errno;
        }

        close(notify_fds[0]);
        close(notify_fds[1]);

        if (saved_errno != 0) {
            char buffer[BUFFER_SIZE];
            snprintf(buffer, BUFFER_SIZE - 1,
                     "failed to read notify pipe on %s", data->context);
            errno = saved_errno;
            rb_sys_fail(buffer);
        }
    } else {
        char buffer[BUFFER_SIZE];
        /* mirror the success path: release the pipe before raising */
        close(notify_fds[0]);
        close(notify_fds[1]);
        snprintf(buffer, BUFFER_SIZE - 1,
                 "failed to write notify pipe on %s", data->context);
        errno = data->errno_on_write;
        rb_sys_fail(buffer);
    }
#undef BUFFER_SIZE
}