/* Initialize a freshly allocated #AgsThreadPool: reset flags and
 * counters, create the creation/pull/return synchronization
 * primitives, and pre-spawn the default number of returnable threads
 * onto the unused-thread list. */
void
ags_thread_pool_init(AgsThreadPool *thread_pool)
{
  AgsThread *returnable;
  GList *unused_list;
  guint nth;

  /* flags and limits */
  g_atomic_int_set(&(thread_pool->flags), 0);

  g_atomic_int_set(&(thread_pool->max_unused_threads),
                   AGS_THREAD_POOL_DEFAULT_MAX_UNUSED_THREADS);
  g_atomic_int_set(&(thread_pool->max_threads),
                   AGS_THREAD_POOL_DEFAULT_MAX_THREADS);

  thread_pool->thread = (pthread_t *) malloc(sizeof(pthread_t));

  /* creation mutex and condition */
  thread_pool->creation_mutex = (pthread_mutex_t *) malloc(sizeof(pthread_mutex_t));
  pthread_mutex_init(thread_pool->creation_mutex, NULL);

  thread_pool->creation_cond = (pthread_cond_t *) malloc(sizeof(pthread_cond_t));
  pthread_cond_init(thread_pool->creation_cond, NULL);

  /* counters */
  g_atomic_int_set(&(thread_pool->n_threads),
                   g_atomic_int_get(&(thread_pool->max_unused_threads)));
  g_atomic_int_set(&(thread_pool->newly_pulled), 0);
  g_atomic_int_set(&(thread_pool->queued), 0);

  thread_pool->parent = NULL;

  /* pre-spawn the pool's returnable threads */
  unused_list = NULL;

  for(nth = 0; nth < g_atomic_int_get(&(thread_pool->max_unused_threads)); nth++){
    returnable = (AgsThread *) ags_returnable_thread_new(thread_pool);
    unused_list = g_list_prepend(unused_list, returnable);
  }

  g_atomic_pointer_set(&(thread_pool->returnable_thread), unused_list);
  g_atomic_pointer_set(&(thread_pool->running_thread), NULL);

  /* pull mutex */
  thread_pool->pull_mutex = (pthread_mutex_t *) malloc(sizeof(pthread_mutex_t));
  pthread_mutex_init(thread_pool->pull_mutex, NULL);

  /* return mutex and condition */
  thread_pool->return_mutex = (pthread_mutex_t *) malloc(sizeof(pthread_mutex_t));
  pthread_mutex_init(thread_pool->return_mutex, NULL);

  thread_pool->return_cond = (pthread_cond_t *) malloc(sizeof(pthread_cond_t));
  pthread_cond_init(thread_pool->return_cond, NULL);
}
/** * ags_thread_pool_pull: * @thread_pool: the #AgsThreadPool * * Pull a previously instantiated #AgsReturnableThread. Note this * function may block until a new thread is available. * * Returns: a new #AgsThread * * Since: 0.4 */ AgsThread* ags_thread_pool_pull(AgsThreadPool *thread_pool) { AgsReturnableThread *returnable_thread; GList *list, *tmplist; guint max_threads, n_threads; auto void ags_thread_pool_pull_running(); void ags_thread_pool_pull_running(){ g_atomic_int_inc(&(thread_pool->newly_pulled)); do{ pthread_mutex_lock(thread_pool->creation_mutex); if((AGS_THREAD_POOL_READY & (g_atomic_int_get(&(thread_pool->flags)))) != 0){ pthread_cond_signal(thread_pool->creation_cond); } pthread_mutex_unlock(thread_pool->creation_mutex); list = g_atomic_pointer_get(&(thread_pool->returnable_thread)); while(list != NULL){ returnable_thread = AGS_RETURNABLE_THREAD(list->data); if((AGS_RETURNABLE_THREAD_IN_USE & (g_atomic_int_get(&(returnable_thread->flags)))) == 0){ pthread_mutex_lock(thread_pool->creation_mutex); tmplist = g_atomic_pointer_get(&(thread_pool->returnable_thread)); g_atomic_pointer_set(&(thread_pool->returnable_thread), g_list_remove(tmplist, returnable_thread)); pthread_mutex_unlock(thread_pool->creation_mutex); pthread_mutex_lock(thread_pool->pull_mutex); tmplist = g_atomic_pointer_get(&(thread_pool->running_thread)); g_atomic_pointer_set(&(thread_pool->running_thread), g_list_prepend(tmplist, returnable_thread)); pthread_mutex_unlock(thread_pool->pull_mutex); break; } list = list->next; } }while(list == NULL); }
/* Atomically publish a new user control region on the HLFS control
 * structure so concurrent readers never observe a torn pointer.
 * Always succeeds and returns 0. */
int
hlfs_set_user_ctrl_region(struct hlfs_ctrl *ctrl, CTRL_REGION_T *ctrl_region)
{
    g_atomic_pointer_set(&ctrl->ctrl_region, ctrl_region);

    return 0;
}
/* Lazily allocate the GRecMutex backing a GStaticRecMutex using
 * double-checked locking: a fast atomic read first, then a re-check
 * under the global g_static_mutex lock before allocating, so exactly
 * one GRecMutex is ever created per GStaticRecMutex. */
static GRecMutex *
g_static_rec_mutex_get_rec_mutex_impl (GStaticRecMutex* mutex)
{
  GRecMutex *result;

  if (!g_thread_supported ())
    return NULL;

  result = g_atomic_pointer_get (&mutex->mutex.mutex);

  if (!result)
    {
      G_LOCK (g_static_mutex);

      /* re-read under the lock; another thread may have won the race */
      result = (GRecMutex *) mutex->mutex.mutex;

      if (!result)
        {
          result = g_slice_new (GRecMutex);
          g_rec_mutex_init (result);
          /* publish only after the mutex is fully initialized */
          g_atomic_pointer_set (&mutex->mutex.mutex, result);
        }

      G_UNLOCK (g_static_mutex);
    }

  return result;
}
/* Attach @ipc to the already-connected socket @sock: initialize its
 * receive state, wire the socket into the GLib main loop via a
 * GIOChannel, publish the channel, and register the endpoint in the
 * global endpoint table. */
void
ipc_endpoint_connect_to_socket(ipc_endpoint_t *ipc, int sock)
{
    g_assert(ipc);
    g_assert(ipc->status == IPC_ENDPOINT_DISCONNECTED);

    ipc_recv_state_t *recv = &ipc->recv_state;
    recv->queued_ipcs = g_ptr_array_new();

    /* raw, unbuffered binary channel over the socket */
    GIOChannel *chan = g_io_channel_unix_new(sock);
    g_io_channel_set_encoding(chan, NULL, NULL);
    g_io_channel_set_buffered(chan, FALSE);

    recv->watch_in_id = g_io_add_watch(chan, G_IO_IN, (GIOFunc)ipc_recv, ipc);
    recv->watch_hup_id = g_io_add_watch(chan, G_IO_HUP, (GIOFunc)ipc_hup, ipc);

    /* Atomically publish ipc->channel. On the web extension thread,
     * logging spawns a message send thread which could otherwise
     * observe a channel that exists (after g_io_channel_unix_new())
     * but is not yet fully configured. */
    g_atomic_pointer_set(&ipc->channel, chan);
    ipc->status = IPC_ENDPOINT_CONNECTED;

    if (!endpoints)
        endpoints = g_ptr_array_sized_new(1);

    /* the endpoint must never already be registered */
    g_assert(!g_ptr_array_remove_fast(endpoints, ipc));
    g_ptr_array_add(endpoints, ipc);
}
/**
 * free the data-structures for a event-thread
 *
 * joins the event-thread, closes notification-pipe and free's the event-base
 */
void chassis_event_thread_free(chassis_event_thread_t *thread) {
	if (!thread) return;

	/* wait for the worker thread to finish before tearing anything down */
	if (thread->thr) g_thread_join(thread->thr);

	/* close both ends of the notification pipe; -1 marks "not open" */
	if (thread->notify_receive_fd != -1) {
		event_del(&(thread->notify_fd_event));
		closesocket(thread->notify_receive_fd);
	}
	if (thread->notify_send_fd != -1) {
		closesocket(thread->notify_send_fd);
	}

	/* we don't want to free the global event-base */
	/* NOTE(review): `thread->thr != NULL` is used here as the marker for
	 * "this thread owns its event-base" — presumably only the main
	 * thread has thr == NULL; verify against the allocation site. */
	if (thread->thr != NULL && thread->event_base) event_base_free(thread->event_base);

	/* drain and free any connections still queued for this thread */
	network_mysqld_con* con = NULL;
	while ((con = g_async_queue_try_pop(thread->event_queue))) {
		network_mysqld_con_free(con);
	}

	/* clear the "waiting" statistic before the queue goes away */
	g_atomic_pointer_set(&(thread->thread_status_var.thread_stat[THREAD_STAT_EVENT_WAITING]), 0);

	g_async_queue_unref(thread->event_queue);

	g_rw_lock_clear(&thread->connection_lock);

	if (thread->connection_list != NULL) {
		g_list_free_full(thread->connection_list, (GDestroyNotify)network_mysqld_con_free);
	}

	g_free(thread);
}
/**
 * g_static_mutex_get_mutex:
 * @mutex: a #GStaticMutex.
 *
 * For some operations (like g_cond_wait()) you must have a #GMutex
 * instead of a #GStaticMutex. This function will return the
 * corresponding #GMutex for @mutex.
 *
 * Returns: the #GMutex corresponding to @mutex.
 *
 * Deprecated: 2.32: Just use a #GMutex
 */
GMutex *
g_static_mutex_get_mutex_impl (GStaticMutex* mutex)
{
  GMutex *result;

  if (!g_thread_supported ())
    return NULL;

  /* double-checked locking: fast atomic read, then re-check under the
   * global lock so only one GMutex is ever created for this slot */
  result = g_atomic_pointer_get (&mutex->mutex);

  if (!result)
    {
      G_LOCK (g_static_mutex);

      /* re-read under the lock; another thread may have created it */
      result = mutex->mutex;

      if (!result)
        {
          result = g_mutex_new ();
          /* publish only after the mutex is fully constructed */
          g_atomic_pointer_set (&mutex->mutex, result);
        }

      G_UNLOCK (g_static_mutex);
    }

  return result;
}
/**
 * g_datalist_init:
 * @datalist: a pointer to a pointer to a datalist.
 *
 * Resets the datalist to %NULL. It does not free any memory or call
 * any destroy functions.
 **/
void
g_datalist_init (GData **datalist)
{
  g_return_if_fail (datalist != NULL);

  /* atomic store so concurrent readers never see a torn pointer */
  g_atomic_pointer_set (datalist, NULL);
}
/**
 * @brief Stop the fill thread of a resource
 *
 * @param resource The resource to stop the fill thread of
 *
 * This function takes care of stopping the fill thread for a
 * resource, either for pausing it or before freeing it. It's
 * accomplished by first setting @ref Resource::fill_pool attribute to
 * NULL, then it stops the pool, dropping further threads and waiting
 * for the last one to complete.
 */
static void r_stop_fill(Resource *resource)
{
    GThreadPool *pool;

    /* Read atomically: every other access to fill_pool in this file
     * goes through g_atomic_pointer_get()/g_atomic_pointer_set(), so
     * this read must use the same primitives to avoid a data race. */
    pool = g_atomic_pointer_get(&resource->fill_pool);

    if ( pool != NULL ) {
        /* clear the pointer first so fill threads see the shutdown,
         * then wait for queued work to drain */
        g_atomic_pointer_set(&resource->fill_pool, NULL);
        g_thread_pool_free(pool, true, true);
    }
}
/* Run @job repeatedly until ev_job_run() reports completion or the
 * job's cancellable is triggered, tracking the currently executing
 * job in the `running_job` global. */
static void
ev_job_thread (EvJob *job)
{
	gboolean keep_running = TRUE;

	ev_debug_message (DEBUG_JOBS, "%s", EV_GET_TYPE_NAME (job));

	while (keep_running) {
		if (g_cancellable_is_cancelled (job->cancellable)) {
			keep_running = FALSE;
		} else {
			/* publish before running so observers can see the active job */
			g_atomic_pointer_set (&running_job, job);
			keep_running = ev_job_run (job);
		}
	}

	g_atomic_pointer_set (&running_job, NULL);
}
/* Allocate and initialize an empty work-stealing deque.
 *
 * Returns the new deque, or NULL if the allocation fails (the
 * original code dereferenced the unchecked malloc result, which is
 * undefined behavior on out-of-memory). */
ws_deque *
ws_queue_build()
{
    /* sizeof *q instead of sizeof(ws_deque): stays correct if the
     * pointer's type ever changes */
    ws_deque *q = (ws_deque *) malloc(sizeof *q);

    if (q == NULL)
        return NULL;

    q->bottom = 0;
    g_atomic_pointer_set(&(q->top), (long int) 0);
    q->active_array = ca_build(LOG_INITIAL_SIZE);

    return q;
}
/**
 * @brief Resume a paused (or non-standard) non-live resource
 *
 * @param resource The resource to resume
 *
 * Creates a fresh single-thread pool running r_read_cb and publishes
 * it as @ref Resource::fill_pool so the queue can be filled again.
 * Does nothing if a fill pool is already active.
 */
void r_resume(Resource *resource)
{
    GThreadPool *new_pool;

    /* already running — nothing to do */
    if ( g_atomic_pointer_get(&resource->fill_pool) != NULL )
        return;

    new_pool = g_thread_pool_new(r_read_cb, resource, 1, true, NULL);

    /* publish atomically; readers use g_atomic_pointer_get() */
    g_atomic_pointer_set(&resource->fill_pool, new_pool);
}
/* GObject constructor enforcing a process-wide singleton: the first
 * construction is cached in `singleton`; later calls add a reference
 * to the cached instance instead of constructing a new one.
 *
 * NOTE(review): the check-then-set on `singleton` is not atomic as a
 * whole, so two threads constructing concurrently could each build an
 * instance — presumably construction is confined to one thread or
 * serialized elsewhere (e.g. by GOnce); verify against callers. */
static GObject *
constructor(GType type, guint n, GObjectConstructParam *params)
{
	GObject *self = g_atomic_pointer_get(&singleton);
	if (!self) {
		/* chain up to actually build the instance, then cache it */
		self = G_OBJECT_CLASS(log4g_log_manager_parent_class)->
			constructor(type, n, params);
		g_atomic_pointer_set(&singleton, self);
	} else {
		g_object_ref(self);
	}
	return self;
}
/* Worker thread entry for the MH mailbox: wait until the spawner has
 * published our GThread handle (or we are told to stop), run one mail
 * check if still running, then clear the handle to signal that the
 * thread has finished. */
static gpointer
mh_main_thread( gpointer data )
{
    XfceMailwatchMHMailbox *mh = XFCE_MAILWATCH_MH_MAILBOX( data );

    /* spin until mh->thread is set by the spawning thread */
    for( ;; ) {
        if( g_atomic_pointer_get( &mh->thread ) )
            break;
        if( !g_atomic_int_get( &mh->running ) )
            break;
        g_thread_yield();
    }

    if( g_atomic_int_get( &mh->running ) )
        mh_check_mail( mh );

    /* mark the thread as finished */
    g_atomic_pointer_set( &mh->thread, NULL );

    return NULL;
}
/* Worker thread entry for the mbox mailbox: wait until the spawner
 * has published our GThread handle (or we are told to stop), run one
 * mail check if still running, then clear the handle to signal that
 * the thread has finished. */
static gpointer
mbox_check_mail_thread( gpointer data )
{
    XfceMailwatchMboxMailbox *mbox = XFCE_MAILWATCH_MBOX_MAILBOX( data );

    /* spin until mbox->thread is set by the spawning thread */
    for( ;; ) {
        if( g_atomic_pointer_get( &mbox->thread ) )
            break;
        if( !g_atomic_int_get( &mbox->running ) )
            break;
        g_thread_yield();
    }

    if( g_atomic_int_get( &mbox->running ) )
        mbox_check_mail( mbox );

    /* mark the thread as finished */
    g_atomic_pointer_set( &mbox->thread, NULL );

    return NULL;
}
/* Worker thread entry for the maildir mailbox: wait until the spawner
 * has published our GThread handle (or we are told to stop), run one
 * mail check if still running, then clear the handle to signal that
 * the thread has finished. */
static gpointer
maildir_main_thread( gpointer data )
{
    XfceMailwatchMaildirMailbox *maildir = data;

    DBG( "-->>" );

    /* spin until maildir->thread is set by the spawning thread */
    for( ;; ) {
        if( g_atomic_pointer_get( &maildir->thread ) )
            break;
        if( !g_atomic_int_get( &maildir->running ) )
            break;
        g_thread_yield();
    }

    if( g_atomic_int_get( &maildir->running ) )
        maildir_check_mail( maildir );

    /* mark the thread as finished */
    g_atomic_pointer_set( &maildir->thread, NULL );

    return NULL;
}
/**
 * iris_set_default_work_scheduler:
 * @scheduler: An #IrisScheduler
 *
 * Installs @scheduler as the default work scheduler, taking a
 * reference on it and dropping the reference held on the previously
 * installed scheduler (if any).
 */
void
iris_set_default_work_scheduler (IrisScheduler *new_scheduler)
{
	IrisScheduler *previous;

	g_return_if_fail (new_scheduler != NULL);

	G_LOCK (default_work_scheduler);

	previous = g_atomic_pointer_get (&default_work_scheduler);

	/* ref before publishing so the pointer never holds an unowned object */
	g_object_ref (new_scheduler);
	g_atomic_pointer_set (&default_work_scheduler, new_scheduler);

	G_UNLOCK (default_work_scheduler);

	/* drop our reference on the old scheduler outside the lock */
	if (previous != NULL)
		g_object_unref ((gpointer)previous);
}
/**
 * g_once_init_leave:
 * @location: location of a static initializable variable containing 0
 * @result: new non-0 value for *@value_location
 *
 * Counterpart to g_once_init_enter(). Expects a location of a static
 * 0-initialized initialization variable, and an initialization value
 * other than 0. Sets the variable to the initialization value, and
 * releases concurrent threads blocking in g_once_init_enter() on this
 * initialization variable.
 *
 * Since: 2.14
 */
void
(g_once_init_leave) (volatile void *location,
                     gsize          result)
{
  volatile gsize *value_location = location;

  /* the location must still be unset, the init value must be non-zero
   * (zero means "not initialized"), and some thread must actually be
   * mid-initialization (registered in g_once_init_list) */
  g_return_if_fail (g_atomic_pointer_get (value_location) == NULL);
  g_return_if_fail (result != 0);
  g_return_if_fail (g_once_init_list != NULL);

  /* publish the initialized value first, then remove this location
   * from the in-progress list and wake all waiters in
   * g_once_init_enter() */
  g_atomic_pointer_set (value_location, result);
  g_mutex_lock (&g_once_mutex);
  g_once_init_list = g_slist_remove (g_once_init_list, (void*) value_location);
  g_cond_broadcast (&g_once_cond);
  g_mutex_unlock (&g_once_mutex);
}
/* Transient-mode worker loop: drain @queue as fast as possible, then
 * detach from the scheduler and yield the thread back to the
 * scheduler manager. */
static void
iris_thread_worker_transient (IrisThread *thread,
                              IrisQueue *queue)
{
	IrisThreadWork *thread_work = NULL;
	GTimeVal tv_timeout = {0,0};
	gboolean remove_work;

	iris_debug (IRIS_DEBUG_THREAD);

	/* The transient mode worker is responsible for helping finish off as
	 * many of the work items as fast as possible.  It is not responsible
	 * for asking for more helpers, just processing work items.  When done
	 * processing work items, it will yield itself back to the scheduler
	 * manager.
	 */

	do {
		/* bounded pop: give up after POP_WAIT_TIMEOUT so an idle
		 * worker eventually exits the loop */
		g_get_current_time (&tv_timeout);
		g_time_val_add (&tv_timeout, POP_WAIT_TIMEOUT);
		thread_work = iris_queue_timed_pop_or_close (queue, &tv_timeout);

		if (thread_work != NULL) {
			/* try to claim the item with a CAS on `taken`; if some
			 * other worker already claimed it, we may only free it
			 * once it is also flagged for removal */
			if (!g_atomic_int_compare_and_exchange(&thread_work->taken, FALSE, TRUE)) {
				remove_work = g_atomic_int_get (&thread_work->remove);
				if (!remove_work)
					continue;
			} else
				remove_work = g_atomic_int_get (&thread_work->remove);

			/* run only items not flagged for removal */
			if (!remove_work)
				iris_thread_work_run (thread_work);

			iris_thread_work_free (thread_work);
		}
	} while (thread_work != NULL);

	/* Remove the thread from the scheduler (if it's not already removed us due
	 * to being in finalization), and yield our thread back to the scheduler manager */
	if (g_atomic_int_get (&thread->scheduler->in_finalize) == FALSE)
		iris_scheduler_remove_thread (thread->scheduler, thread);
	g_atomic_pointer_set (&thread->scheduler, NULL);

	iris_scheduler_manager_yield (thread);
}
/* Periodic GLib timeout callback: spawn a background thread to check
 * the maildir for new mail, unless the previous checker thread is
 * still running.  Always returns TRUE to keep the timeout armed. */
static gboolean
maildir_check_mail_timeout( gpointer data )
{
    XfceMailwatchMaildirMailbox *maildir = XFCE_MAILWATCH_MAILDIR_MAILBOX( data );
    GThread *checker;

    /* a non-NULL handle means the previous check has not finished */
    if( g_atomic_pointer_get( &maildir->thread ) ) {
        xfce_mailwatch_log_message( maildir->mailwatch,
                                    XFCE_MAILWATCH_MAILBOX( maildir ),
                                    XFCE_MAILWATCH_LOG_WARNING,
                                    _( "Previous thread hasn't exited yet, not checking mail this time." ) );
        return TRUE;
    }

    /* unjoinable thread; the worker clears maildir->thread on exit */
    checker = g_thread_create( maildir_main_thread, maildir, FALSE, NULL );
    g_atomic_pointer_set( &maildir->thread, checker );

    return TRUE;
}
/**
 * g_static_resource_fini:
 * @static_resource: pointer to a static #GStaticResource
 *
 * Finalized a GResource initialized by g_static_resource_init().
 *
 * This is normally used by code generated by
 * <link linkend="glib-compile-resources">glib-compile-resources</link>
 * and is not typically used by other code.
 *
 * Since: 2.32
 **/
void
g_static_resource_fini (GStaticResource *static_resource)
{
  GResource *resource;

  g_rw_lock_writer_lock (&resources_lock);

  /* make sure any lazily registered resources are actually registered
   * before we attempt to unregister this one */
  register_lazy_static_resources_unlocked ();

  resource = g_atomic_pointer_get (&static_resource->resource);
  if (resource)
    {
      /* clear the cached pointer first so no one re-uses the resource
       * while it is being unregistered and released */
      g_atomic_pointer_set (&static_resource->resource, NULL);
      g_resources_unregister_unlocked (resource);
      g_resource_unref (resource);
    }

  g_rw_lock_writer_unlock (&resources_lock);
}
/**
 * @brief Pause a running non-live resource
 *
 * @param resource The resource to pause
 *
 * This function stops the "fill thread" for the resource, when it is
 * not shared among clients (i.e.: it's not a live resource).
 *
 * It removes @ref Resource::fill_pool and sets it to NULL to stop the
 * thread running.
 *
 * @note This function will lock the @ref Resource::lock mutex.
 */
void r_pause(Resource *resource)
{
    GThreadPool *pool;

    /* Don't even try to pause a live source! */
    if ( resource->demuxer->source == LIVE_SOURCE )
        return;

    g_mutex_lock(resource->lock);

    /* Check "already paused" under the lock and with an atomic read:
     * the original unlocked, plain read of fill_pool raced with a
     * concurrent pause/stop, which could lead to freeing the same
     * pool twice. */
    pool = g_atomic_pointer_get(&resource->fill_pool);
    if ( pool == NULL ) {
        /* we paused already */
        g_mutex_unlock(resource->lock);
        return;
    }

    /* clear the pointer first so fill threads see the shutdown, then
     * drain and free the pool */
    g_atomic_pointer_set(&resource->fill_pool, NULL);
    g_thread_pool_free(pool, true, true);

    g_mutex_unlock(resource->lock);
}
/* Lua binding: register a foreign package's lock hooks and switch the
 * interpreter's state mutex over to the shared package lock so both
 * sides serialize on the same mutex. */
static int
core_registerlock (lua_State *L)
{
  void (*set_lock_functions)(GCallback, GCallback);
  LgiStateMutex *mutex;
  GRecMutex *wait_on;
  unsigned i;

  /* Get registration function. */
  luaL_checktype (L, 1, LUA_TLIGHTUSERDATA);
  set_lock_functions = lua_touserdata (L, 1);
  luaL_argcheck (L, set_lock_functions != NULL, 1, "NULL function");

  /* Check, whether this package was already registered. */
  for (i = 0; i < G_N_ELEMENTS (package_lock_register)
       && package_lock_register[i] != set_lock_functions; i++)
    {
      if (package_lock_register[i] == NULL)
	{
	  /* Register our package lock functions. */
	  package_lock_register[i] = set_lock_functions;
	  set_lock_functions (package_lock_enter, package_lock_leave);
	  break;
	}
    }

  /* Switch our statelock to actually use packagelock. */
  lua_pushlightuserdata (L, &call_mutex);
  lua_rawget (L, LUA_REGISTRYINDEX);
  mutex = lua_touserdata (L, -1);
  wait_on = g_atomic_pointer_get (&mutex->mutex);
  if (wait_on != &package_mutex)
    {
      /* hand-over-hand: acquire the package lock before publishing it
       * as the active mutex, then release the mutex we held before */
      g_rec_mutex_lock (&package_mutex);
      g_atomic_pointer_set (&mutex->mutex, &package_mutex);
      g_rec_mutex_unlock (wait_on);
    }
  return 0;
}
/* Atomically detach the whole list of lazily registered static
 * resources and register each entry.  Must be called with
 * resources_lock held (hence "_unlocked" suffix on the helpers). */
static void
register_lazy_static_resources_unlocked (void)
{
  GStaticResource *list;

  /* swap the list head with NULL via CAS so entries added
   * concurrently are never lost */
  do
    list = lazy_register_resources;
  while (!g_atomic_pointer_compare_and_exchange (&lazy_register_resources, list, NULL));

  while (list != NULL)
    {
      GBytes *bytes = g_bytes_new_static (list->data, list->data_len);
      GResource *resource = g_resource_new_from_data (bytes, NULL);
      if (resource)
	{
	  g_resources_register_unlocked (resource);
	  /* publish the resource on the static entry only after it has
	   * been successfully created and registered */
	  g_atomic_pointer_set (&list->resource, resource);
	}
      g_bytes_unref (bytes);

      list = list->next;
    }
}
/* Render @surface into the Wayland window: export the surface as a
 * PRIME dmabuf, wrap it in a wl_buffer via wl_drm (NV12 or ARGB8888
 * only), attach/damage/commit the wl_surface, and track the in-flight
 * frame via priv->last_frame and priv->num_frames_pending.  Returns
 * FALSE on any failure (unsupported format, missing wl_drm, buffer
 * creation or frame allocation failure).  Display lock is held around
 * every Wayland protocol call.
 * NOTE(review): the #ifdef/#else/#endif directives below were
 * collapsed onto one line by the formatter; they must each be on
 * their own line for this to compile — restore formatting from VCS. */
static gboolean gst_mfx_window_wayland_render (GstMfxWindow * window, GstMfxSurface * surface, const GstMfxRectangle * src_rect, const GstMfxRectangle * dst_rect) { GstMfxWindowWaylandPrivate *const priv = GST_MFX_WINDOW_WAYLAND_GET_PRIVATE (window); GstMfxDisplayWaylandPrivate *const display_priv = GST_MFX_DISPLAY_WAYLAND_GET_PRIVATE (GST_MFX_WINDOW_DISPLAY (window)); struct wl_display *const display = GST_MFX_DISPLAY_HANDLE (GST_MFX_WINDOW_DISPLAY (window)); GstMfxPrimeBufferProxy *buffer_proxy; struct wl_buffer *buffer; FrameState *frame; guintptr fd = 0; guint32 drm_format = 0; gint offsets[3] = { 0 }, pitches[3] = { 0 }, num_planes = 0, i = 0; VaapiImage *vaapi_image; buffer_proxy = gst_mfx_prime_buffer_proxy_new_from_surface (surface); if (!buffer_proxy) return FALSE; fd = GST_MFX_PRIME_BUFFER_PROXY_HANDLE (buffer_proxy); vaapi_image = gst_mfx_prime_buffer_proxy_get_vaapi_image (buffer_proxy); num_planes = vaapi_image_get_plane_count (vaapi_image); if ((dst_rect->height != src_rect->height) || (dst_rect->width != src_rect->width)) { #ifdef USE_WESTON_4_0 if (priv->wp_viewport) { wp_viewport_set_destination (priv->wp_viewport, dst_rect->width, dst_rect->height); } #else if (priv->viewport) { wl_viewport_set_destination (priv->viewport, dst_rect->width, dst_rect->height); } #endif } for (i = 0; i < num_planes; i++) { offsets[i] = vaapi_image_get_offset (vaapi_image, i); pitches[i] = vaapi_image_get_pitch (vaapi_image, i); } if (GST_VIDEO_FORMAT_NV12 == vaapi_image_get_format (vaapi_image)) { drm_format = WL_DRM_FORMAT_NV12; } else if (GST_VIDEO_FORMAT_BGRA == vaapi_image_get_format (vaapi_image)) { drm_format = WL_DRM_FORMAT_ARGB8888; } if (!drm_format) goto error; if (!display_priv->drm) goto error; GST_MFX_DISPLAY_LOCK (GST_MFX_WINDOW_DISPLAY (window)); buffer = wl_drm_create_prime_buffer (display_priv->drm, fd, src_rect->width, src_rect->height, drm_format, offsets[0], pitches[0], offsets[1], pitches[1], offsets[2], pitches[2]); GST_MFX_DISPLAY_UNLOCK 
(GST_MFX_WINDOW_DISPLAY (window)); if (!buffer) { GST_ERROR ("No wl_buffer created\n"); goto error; } frame = frame_state_new (window); if (!frame) goto error; g_atomic_pointer_set (&priv->last_frame, frame); g_atomic_int_inc (&priv->num_frames_pending); GST_MFX_DISPLAY_LOCK (GST_MFX_WINDOW_DISPLAY (window)); wl_surface_attach (priv->surface, buffer, 0, 0); wl_surface_damage (priv->surface, 0, 0, dst_rect->width, dst_rect->height); if (priv->opaque_region) { wl_surface_set_opaque_region (priv->surface, priv->opaque_region); wl_region_destroy (priv->opaque_region); priv->opaque_region = NULL; } wl_proxy_set_queue ((struct wl_proxy *) buffer, priv->event_queue); wl_buffer_add_listener (buffer, &frame_buffer_listener, frame); frame->callback = wl_surface_frame (priv->surface); wl_callback_add_listener (frame->callback, &frame_callback_listener, frame); wl_surface_commit (priv->surface); wl_display_flush (display); GST_MFX_DISPLAY_UNLOCK (GST_MFX_WINDOW_DISPLAY (window)); vaapi_image_unref (vaapi_image); gst_mfx_prime_buffer_proxy_unref (buffer_proxy); return TRUE; error: { vaapi_image_unref (vaapi_image); gst_mfx_prime_buffer_proxy_unref (buffer_proxy); return FALSE; } }
/* Launch the AGS test engine: fetch the main loop, task thread and
 * thread pool from the application context, queue the polling and
 * task threads for startup, then start the audio loop and pool.  In
 * multi-threaded mode it waits (condition-variable handshake on
 * start_mutex/start_cond) for the audio loop, starts and waits for
 * the GUI thread, and optionally queues the autosave thread per
 * config; in single-threaded mode everything is re-parented under a
 * newly created AgsSingleThread and started as one thread tree.
 * NOTE(review): the "\0" suffixes inside string literals are
 * redundant (C literals are already NUL-terminated) but kept — they
 * are runtime strings and match the config keys used elsewhere. */
void ags_test_launch(gboolean single_thread) { AgsThread *audio_loop, *polling_thread, *gui_thread, *task_thread; AgsThreadPool *thread_pool; AgsConfig *config; GList *start_queue; g_object_get(ags_application_context, "config", &config, "main-loop", &audio_loop, "task-thread", &task_thread, NULL); g_object_get(task_thread, "thread-pool", &thread_pool, NULL); polling_thread = ags_thread_find_type(audio_loop, AGS_TYPE_POLLING_THREAD); gui_thread = ags_thread_find_type(audio_loop, AGS_TYPE_GUI_THREAD); /* start engine */ pthread_mutex_lock(audio_loop->start_mutex); start_queue = NULL; start_queue = g_list_prepend(start_queue, polling_thread); start_queue = g_list_prepend(start_queue, task_thread); // start_queue = g_list_prepend(start_queue, // gui_thread); g_atomic_pointer_set(&(audio_loop->start_queue), start_queue); pthread_mutex_unlock(audio_loop->start_mutex); /* start audio loop and thread pool*/ ags_thread_start(audio_loop); ags_thread_pool_start(thread_pool); if(!single_thread){ /* wait for audio loop */ pthread_mutex_lock(audio_loop->start_mutex); if(g_atomic_int_get(&(audio_loop->start_wait)) == TRUE){ g_atomic_int_set(&(audio_loop->start_done), FALSE); while(g_atomic_int_get(&(audio_loop->start_wait)) == TRUE && g_atomic_int_get(&(audio_loop->start_done)) == FALSE){ pthread_cond_wait(audio_loop->start_cond, audio_loop->start_mutex); } } pthread_mutex_unlock(audio_loop->start_mutex); /* start gui thread */ ags_thread_start(gui_thread); /* wait for gui thread */ pthread_mutex_lock(gui_thread->start_mutex); if(g_atomic_int_get(&(gui_thread->start_done)) == FALSE){ g_atomic_int_set(&(gui_thread->start_wait), TRUE); while(g_atomic_int_get(&(gui_thread->start_done)) == FALSE){ g_atomic_int_set(&(gui_thread->start_wait), TRUE); pthread_cond_wait(gui_thread->start_cond, gui_thread->start_mutex); } } pthread_mutex_unlock(gui_thread->start_mutex); g_atomic_int_set(&(AGS_XORG_APPLICATION_CONTEXT(ags_application_context)->gui_ready), 1); /* autosave thread */ 
if(!g_strcmp0(ags_config_get_value(config, AGS_CONFIG_GENERIC, "autosave-thread\0"), "true\0")){ pthread_mutex_lock(audio_loop->start_mutex); start_queue = g_atomic_pointer_get(&(audio_loop->start_queue)); start_queue = g_list_prepend(start_queue, task_thread); g_atomic_pointer_set(&(audio_loop->start_queue), start_queue); pthread_mutex_unlock(audio_loop->start_mutex); } }else{ AgsSingleThread *single_thread; /* single thread */ single_thread = ags_single_thread_new((GObject *) ags_sound_provider_get_soundcard(AGS_SOUND_PROVIDER(ags_application_context))->data); /* add known threads to single_thread */ ags_thread_add_child(AGS_THREAD(single_thread), audio_loop); /* autosave thread */ if(!g_strcmp0(ags_config_get_value(config, AGS_CONFIG_GENERIC, "autosave-thread\0"), "true\0")){ pthread_mutex_lock(audio_loop->start_mutex); start_queue = g_atomic_pointer_get(&(audio_loop->start_queue)); start_queue = g_list_prepend(start_queue, task_thread); g_atomic_pointer_set(&(audio_loop->start_queue), start_queue); pthread_mutex_unlock(audio_loop->start_mutex); } /* start thread tree */ ags_thread_start((AgsThread *) single_thread); } }
/* Launch the AGS test engine and load @filename.  With the
 * "simple-file" config enabled it opens an AgsSimpleFile, performs
 * the same engine/GUI startup handshake as ags_test_launch() (queue
 * polling+task threads, start audio loop and pool, condition-wait for
 * audio loop then GUI thread), and finally appends an
 * AgsSimpleFileRead task to parse the file asynchronously; in
 * single-thread mode it instead reads the file synchronously via
 * AgsFile.  On open errors the file error is shown and the
 * application context is asked to quit. */
void ags_test_launch_filename(gchar *filename, gboolean single_thread) { AgsThread *audio_loop, *polling_thread, *gui_thread, *task_thread; AgsThreadPool *thread_pool; AgsConfig *config; GList *start_queue; /* get threads, thread pool and config */ g_object_get(ags_application_context, "config", &config, "main-loop", &audio_loop, "task-thread", &task_thread, NULL); g_object_get(task_thread, "thread-pool", &thread_pool, NULL); polling_thread = ags_thread_find_type(audio_loop, AGS_TYPE_POLLING_THREAD); gui_thread = ags_thread_find_type(audio_loop, AGS_TYPE_GUI_THREAD); /* open file */ if(g_strcmp0(ags_config_get_value(config, AGS_CONFIG_GENERIC, "simple-file\0"), "false\0")){ AgsSimpleFile *simple_file; AgsSimpleFileRead *simple_file_read; GError *error; simple_file = (AgsSimpleFile *) g_object_new(AGS_TYPE_SIMPLE_FILE, "application-context\0", ags_application_context, "filename\0", filename, NULL); error = NULL; ags_simple_file_open(simple_file, &error); if(error != NULL){ ags_test_show_file_error(filename, error); ags_application_context_quit(ags_application_context); } /* start engine */ pthread_mutex_lock(audio_loop->start_mutex); start_queue = NULL; start_queue = g_list_prepend(start_queue, polling_thread); start_queue = g_list_prepend(start_queue, task_thread); // start_queue = g_list_prepend(start_queue, // gui_thread); g_atomic_pointer_set(&(audio_loop->start_queue), start_queue); pthread_mutex_unlock(audio_loop->start_mutex); /* start audio loop and thread pool */ ags_thread_start(audio_loop); ags_thread_pool_start(thread_pool); if(!single_thread){ /* wait for audio loop */ pthread_mutex_lock(audio_loop->start_mutex); if(g_atomic_int_get(&(audio_loop->start_wait)) == TRUE){ g_atomic_int_set(&(audio_loop->start_done), FALSE); while(g_atomic_int_get(&(audio_loop->start_wait)) == TRUE && g_atomic_int_get(&(audio_loop->start_done)) == FALSE){ pthread_cond_wait(audio_loop->start_cond, audio_loop->start_mutex); } } pthread_mutex_unlock(audio_loop->start_mutex); /* 
start gui thread */ ags_thread_start(gui_thread); /* wait for gui thread */ pthread_mutex_lock(gui_thread->start_mutex); if(g_atomic_int_get(&(gui_thread->start_done)) == FALSE){ g_atomic_int_set(&(gui_thread->start_wait), TRUE); while(g_atomic_int_get(&(gui_thread->start_done)) == FALSE){ g_atomic_int_set(&(gui_thread->start_wait), TRUE); pthread_cond_wait(gui_thread->start_cond, gui_thread->start_mutex); } } pthread_mutex_unlock(gui_thread->start_mutex); /* autosave thread */ if(!g_strcmp0(ags_config_get_value(config, AGS_CONFIG_GENERIC, "autosave-thread\0"), "true\0")){ pthread_mutex_lock(audio_loop->start_mutex); start_queue = g_atomic_pointer_get(&(audio_loop->start_queue)); start_queue = g_list_prepend(start_queue, task_thread); g_atomic_pointer_set(&(audio_loop->start_queue), start_queue); pthread_mutex_unlock(audio_loop->start_mutex); } /* now start read task */ simple_file_read = ags_simple_file_read_new(simple_file); ags_task_thread_append_task((AgsTaskThread *) task_thread, (AgsTask *) simple_file_read); }else{ AgsFile *file; GError *error; file = g_object_new(AGS_TYPE_FILE, "application-context\0", ags_application_context, "filename\0", filename, NULL); error = NULL; ags_file_open(file, &error); if(error != NULL){ ags_test_show_file_error(filename, error); ags_application_context_quit(ags_application_context); } ags_file_read(file); ags_file_close(file); } } }
/* minirpc ping handler: atomically record the request message in
 * `last_request` for later completion and tell the runtime the reply
 * is deferred. */
mrpc_status_t
do_ping(void *conn_data, struct mrpc_message *msg)
{
	g_atomic_pointer_set(&last_request, msg);

	return MINIRPC_PENDING;
}
/* Render @surface into the Wayland window.  Tries to export the VA
 * surface as a wl_buffer directly; if cropping/scaling is required or
 * the driver rejects the direct path, it first converts through VPP.
 * Waits for the previous frame's redraw, then attaches, damages and
 * commits the surface, tracking the in-flight frame in
 * priv->last_frame / priv->num_frames_pending.  Returns FALSE on
 * conversion/buffer/sync failure.  The display lock is held around
 * every libva and Wayland protocol call. */
static gboolean gst_vaapi_window_wayland_render (GstVaapiWindow * window, GstVaapiSurface * surface, const GstVaapiRectangle * src_rect, const GstVaapiRectangle * dst_rect, guint flags) { GstVaapiWindowWaylandPrivate *const priv = GST_VAAPI_WINDOW_WAYLAND_GET_PRIVATE (window); GstVaapiDisplay *const display = GST_VAAPI_OBJECT_DISPLAY (window); struct wl_display *const wl_display = GST_VAAPI_OBJECT_NATIVE_DISPLAY (window); struct wl_buffer *buffer; FrameState *frame; guint width, height, va_flags; VAStatus status; gboolean need_vpp = FALSE; /* Check that we don't need to crop source VA surface */ gst_vaapi_surface_get_size (surface, &width, &height); if (src_rect->x != 0 || src_rect->y != 0) need_vpp = TRUE; if (src_rect->width != width || src_rect->height != height) need_vpp = TRUE; /* Check that we don't render to a subregion of this window */ if (dst_rect->x != 0 || dst_rect->y != 0) need_vpp = TRUE; if (dst_rect->width != window->width || dst_rect->height != window->height) need_vpp = TRUE; /* Try to construct a Wayland buffer from VA surface as is (without VPP) */ if (!need_vpp) { GST_VAAPI_OBJECT_LOCK_DISPLAY (window); va_flags = from_GstVaapiSurfaceRenderFlags (flags); status = vaGetSurfaceBufferWl (GST_VAAPI_DISPLAY_VADISPLAY (display), GST_VAAPI_OBJECT_ID (surface), va_flags & (VA_TOP_FIELD | VA_BOTTOM_FIELD), &buffer); GST_VAAPI_OBJECT_UNLOCK_DISPLAY (window); if (status == VA_STATUS_ERROR_FLAG_NOT_SUPPORTED) need_vpp = TRUE; else if (!vaapi_check_status (status, "vaGetSurfaceBufferWl()")) return FALSE; } /* Try to construct a Wayland buffer with VPP */ if (need_vpp) { if (priv->use_vpp) { GstVaapiSurface *const vpp_surface = vpp_convert (window, surface, src_rect, dst_rect, flags); if (G_UNLIKELY (!vpp_surface)) need_vpp = FALSE; else { surface = vpp_surface; width = window->width; height = window->height; } } GST_VAAPI_OBJECT_LOCK_DISPLAY (window); status = vaGetSurfaceBufferWl (GST_VAAPI_DISPLAY_VADISPLAY (display), GST_VAAPI_OBJECT_ID (surface), 
VA_FRAME_PICTURE, &buffer); GST_VAAPI_OBJECT_UNLOCK_DISPLAY (window); if (!vaapi_check_status (status, "vaGetSurfaceBufferWl()")) return FALSE; } /* Wait for the previous frame to complete redraw */ if (!gst_vaapi_window_wayland_sync (window)) { wl_buffer_destroy (buffer); return !priv->sync_failed; } frame = frame_state_new (window); if (!frame) return FALSE; g_atomic_pointer_set (&priv->last_frame, frame); g_atomic_int_inc (&priv->num_frames_pending); if (need_vpp && priv->use_vpp) { frame->surface = surface; frame->surface_pool = gst_vaapi_video_pool_ref (priv->surface_pool); } /* XXX: attach to the specified target rectangle */ GST_VAAPI_OBJECT_LOCK_DISPLAY (window); wl_surface_attach (priv->surface, buffer, 0, 0); wl_surface_damage (priv->surface, 0, 0, width, height); if (priv->opaque_region) { wl_surface_set_opaque_region (priv->surface, priv->opaque_region); wl_region_destroy (priv->opaque_region); priv->opaque_region = NULL; } wl_proxy_set_queue ((struct wl_proxy *) buffer, priv->event_queue); wl_buffer_add_listener (buffer, &frame_buffer_listener, frame); frame->callback = wl_surface_frame (priv->surface); wl_callback_add_listener (frame->callback, &frame_callback_listener, frame); wl_surface_commit (priv->surface); wl_display_flush (wl_display); GST_VAAPI_OBJECT_UNLOCK_DISPLAY (window); return TRUE; }
/* Check the maildir's "new" subdirectory for mail, guarded by
 * maildir->mutex.  Only rescans when the directory mtime advanced
 * since the last check; counts entries (bailing out mid-scan every 25
 * entries if `running` was cleared, clearing the thread handle before
 * the early return) and reports the count via
 * xfce_mailwatch_signal_new_messages().  Errors from stat()/g_dir_open()
 * are logged, not fatal. */
static void maildir_check_mail( XfceMailwatchMaildirMailbox *maildir ) { gchar *path = NULL; struct stat st; DBG( "-->>" ); g_mutex_lock( maildir->mutex ); if ( !maildir->path || !*(maildir->path) ) { goto out; } path = g_build_filename( maildir->path, "new", NULL ); if ( stat( path, &st ) < 0 ) { xfce_mailwatch_log_message( maildir->mailwatch, XFCE_MAILWATCH_MAILBOX( maildir ), XFCE_MAILWATCH_LOG_ERROR, _( "Failed to get status of file %s: %s" ), path, g_strerror( errno ) ); goto out; } if ( !S_ISDIR( st.st_mode ) ) { xfce_mailwatch_log_message( maildir->mailwatch, XFCE_MAILWATCH_MAILBOX( maildir ), XFCE_MAILWATCH_LOG_ERROR, _( "%s is not a directory. Is %s really a valid maildir?" ), path, maildir->path ); goto out; } if ( st.st_mtime > maildir->mtime ) { GDir *dir; GError *error = NULL; dir = g_dir_open( path, 0, &error ); if ( dir ) { int count_new = 0; const gchar *entry; while ( ( entry = g_dir_read_name( dir ) ) ) { count_new++; /* only check every 25 entries */ if( !( count_new % 25 ) ) { if( !g_atomic_int_get( &maildir->running ) ) { g_dir_close( dir ); g_atomic_pointer_set( &maildir->thread, NULL ); return; } } } g_dir_close( dir ); xfce_mailwatch_signal_new_messages( maildir->mailwatch, (XfceMailwatchMailbox *) maildir, count_new ); } else { xfce_mailwatch_log_message( maildir->mailwatch, XFCE_MAILWATCH_MAILBOX( maildir ), XFCE_MAILWATCH_LOG_ERROR, "%s", error->message ); g_error_free( error ); } maildir->mtime = st.st_mtime; } out: g_mutex_unlock( maildir->mutex ); if ( path ) { g_free( path ); } DBG( "<<--" ); }