static gboolean setup_transport_agents() { g_print("Setting up transport agents\n"); // LEFT left_transport_agent = owr_transport_agent_new(FALSE); g_assert(OWR_IS_TRANSPORT_AGENT(left_transport_agent)); owr_transport_agent_set_local_port_range(left_transport_agent, 5000, 5999); owr_transport_agent_add_local_address(left_transport_agent, "127.0.0.1"); // RIGHT right_transport_agent = owr_transport_agent_new(TRUE); g_assert(OWR_IS_TRANSPORT_AGENT(right_transport_agent)); owr_transport_agent_set_local_port_range(right_transport_agent, 5000, 5999); owr_transport_agent_add_local_address(right_transport_agent, "127.0.0.1"); left_session = owr_data_session_new(TRUE); right_session = owr_data_session_new(FALSE); g_object_set(left_session, "sctp-local-port", 5000, "sctp-remote-port", 5000, NULL); g_object_set(right_session, "sctp-local-port", 5000, "sctp-remote-port", 5000, NULL); g_signal_connect(left_session, "on-new-candidate", G_CALLBACK(got_candidate), right_session); g_signal_connect(right_session, "on-new-candidate", G_CALLBACK(got_candidate), left_session); owr_transport_agent_add_session(left_transport_agent, OWR_SESSION(left_session)); owr_transport_agent_add_session(right_transport_agent, OWR_SESSION(right_session)); if (wait_for_dtls) { gboolean peer_certificate_received; GAsyncQueue *msg_queue = g_async_queue_new(); g_signal_connect(left_session, "notify::dtls-peer-certificate", G_CALLBACK(on_dtls_peer_certificate), msg_queue); g_signal_connect(right_session, "notify::dtls-peer-certificate", G_CALLBACK(on_dtls_peer_certificate), msg_queue); g_print("waiting for dtls handshake to complete\n"); peer_certificate_received = !!g_async_queue_timeout_pop(msg_queue, 5000000); peer_certificate_received &= !!g_async_queue_timeout_pop(msg_queue, 5000000); g_async_queue_unref(msg_queue); if (!peer_certificate_received) { g_print("dtls handshake timed out\n"); return FALSE; } g_print("dtls handshake to completed\n"); } return TRUE; }
/*
 * GtkClipboard "get" callback: fetches clipboard data from the RDP server
 * and stores it into @selection_data.
 *
 * Sends a CliprdrChannel_DataRequest event over the FreeRDP cliprdr
 * channel for the format matching the requested GDK target, then blocks
 * up to one second on rfi->clipboard_queue for the reply — presumably
 * pushed by the channel event handler elsewhere; TODO confirm producer.
 *
 * NOTE(review): a fresh clipboard_queue is allocated on every call and
 * never unref'd, and the popped @data is not freed in the text branch —
 * these look like leaks; confirm ownership semantics before changing.
 */
void remmina_rdp_cliprdr_request_data(GtkClipboard *clipboard, GtkSelectionData *selection_data,
	guint info, RemminaProtocolWidget* gp)
{
	GdkAtom target;
	gpointer data;
	RDP_CB_DATA_REQUEST_EVENT* event;
	rfContext* rfi = GET_DATA(gp);

	target = gtk_selection_data_get_target(selection_data);
	rfi->format = remmina_rdp_cliprdr_get_format_from_gdkatom(target);
	rfi->clipboard_queue = g_async_queue_new();

	/* Request Clipboard data of the server */
	event = (RDP_CB_DATA_REQUEST_EVENT*) freerdp_event_new(CliprdrChannel_Class,
		CliprdrChannel_DataRequest, NULL, NULL);
	event->format = rfi->format;
	freerdp_channels_send_event(rfi->instance->context->channels, (wMessage*) event);

	/* wait up to one second for the server's response */
	data = g_async_queue_timeout_pop(rfi->clipboard_queue, 1000000);
	if (data != NULL)
	{
		if (info == CB_FORMAT_PNG || info == CB_FORMAT_DIB || info == CB_FORMAT_JPEG)
		{
			/* image formats arrive as a GdkPixbuf; the setter copies it */
			gtk_selection_data_set_pixbuf(selection_data, data);
			g_object_unref(data);
		}
		else
		{
			gtk_selection_data_set_text(selection_data, data, -1);
		}
	}
}
/** * gst_vaapi_encoder_get_buffer_with_timeout: * @encoder: a #GstVaapiEncoder * @out_codedbuf_proxy_ptr: the next coded buffer as a #GstVaapiCodedBufferProxy * @timeout: the number of microseconds to wait for the coded buffer, at most * * Upon successful return, *@out_codedbuf_proxy_ptr contains the next * coded buffer as a #GstVaapiCodedBufferProxy. The caller owns this * object, so gst_vaapi_coded_buffer_proxy_unref() shall be called * after usage. Otherwise, @GST_VAAPI_DECODER_STATUS_ERROR_NO_BUFFER * is returned if no coded buffer is available so far (timeout). * * The parent frame is available as a #GstVideoCodecFrame attached to * the user-data anchor of the output coded buffer. Ownership of the * frame is transferred to the coded buffer. * * Return value: a #GstVaapiEncoderStatus */ GstVaapiEncoderStatus gst_vaapi_encoder_get_buffer_with_timeout (GstVaapiEncoder * encoder, GstVaapiCodedBufferProxy ** out_codedbuf_proxy_ptr, guint64 timeout) { GstVaapiEncPicture *picture; GstVaapiCodedBufferProxy *codedbuf_proxy; codedbuf_proxy = g_async_queue_timeout_pop (encoder->codedbuf_queue, timeout); if (!codedbuf_proxy) return GST_VAAPI_ENCODER_STATUS_NO_BUFFER; /* Wait for completion of all operations and report any error that occurred */ picture = gst_vaapi_coded_buffer_proxy_get_user_data (codedbuf_proxy); if (!gst_vaapi_surface_sync (picture->surface)) goto error_invalid_buffer; gst_vaapi_coded_buffer_proxy_set_user_data (codedbuf_proxy, gst_video_codec_frame_ref (picture->frame), (GDestroyNotify) gst_video_codec_frame_unref); if (out_codedbuf_proxy_ptr) *out_codedbuf_proxy_ptr = gst_vaapi_coded_buffer_proxy_ref (codedbuf_proxy); gst_vaapi_coded_buffer_proxy_unref (codedbuf_proxy); return GST_VAAPI_ENCODER_STATUS_SUCCESS; /* ERRORS */ error_invalid_buffer: { GST_ERROR ("failed to encode the frame"); gst_vaapi_coded_buffer_proxy_unref (codedbuf_proxy); return GST_VAAPI_ENCODER_STATUS_ERROR_INVALID_SURFACE; } }
/*!
  \brief The thread which pops stuff off the RTV queue and processes them
  \param data is the pointer to the queue to pop entries off of
  \returns NULL
  */
G_MODULE_EXPORT void *rtv_subscriber(gpointer data)
{
	GAsyncQueue *queue = (GAsyncQueue *)data;
	static GMutex *mutex = NULL;
	LibreEMS_Packet *packet = NULL;

	ENTER();
	/* the shared pause mutex is owned by global_data; bail if missing */
	mutex = (GMutex *)DATA_GET(global_data,"rtv_subscriber_mutex");
	if (!mutex)
		g_thread_exit(0);
	while (!DATA_GET(global_data,"rtv_subscriber_thread_exit"))
	{
		/* Wait up to 0.25 seconds for thread to exit */
		packet = (LibreEMS_Packet *)g_async_queue_timeout_pop(queue,250000);
		/* NOTE(review): unlock with no visible matching lock in this
		 * function — the lock is presumably taken elsewhere to pause this
		 * thread; confirm against the locker before touching this. */
		g_mutex_unlock(mutex);
		if (packet)
		{
			/* bump the good-read counter, then hand the payload off */
			DATA_SET(global_data,"rt_goodread_count",GINT_TO_POINTER((GINT)DATA_GET(global_data,"rt_goodread_count")+1));
			process_rt_vars_f(packet->data+packet->payload_base_offset,packet->payload_length);
			libreems_packet_cleanup(packet);
		}
	}
	g_thread_exit(0);
	/* unreachable after g_thread_exit, kept for the ENTER/EXIT pairing */
	EXIT();
	return NULL;
}
/*
 * Runs the prenegotiated data channel test: creates one negotiated
 * channel with the same id on each session and, when @wait_until_ready
 * is set, waits (up to 5 s per side) for both channels to report a
 * ready-state change before exchanging messages.
 *
 * Returns TRUE when the message exchange succeeds, FALSE on timeout or
 * test failure.
 */
static gboolean run_prenegotiated_channel_test(gboolean wait_until_ready)
{
    OwrDataChannel *left;
    OwrDataChannel *right;
    guint id = channel_id++ * 2;

    g_print("\n >>> Running prenegotiated channels test\n\n");

    /* ordered, max_packet_life_time, max_retransmits, protocol, negotiated, id, label */
    left = owr_data_channel_new(FALSE, 5000, -1, "OTP", TRUE, id, "prenegotiated");
    right = owr_data_channel_new(FALSE, 5000, -1, "OTP", TRUE, id, "prenegotiated");
    owr_data_session_add_data_channel(left_session, left);
    owr_data_session_add_data_channel(right_session, right);

    if (wait_until_ready) {
        GAsyncQueue *msg_queue = g_async_queue_new();
        gboolean data_channels_ready;

        g_signal_connect(left, "notify::ready-state", G_CALLBACK(on_ready_state), msg_queue);
        g_signal_connect(right, "notify::ready-state", G_CALLBACK(on_ready_state), msg_queue);

        g_print("waiting for data channels to become ready\n");
        /* one pop per channel, 5 second timeout each */
        data_channels_ready = !!g_async_queue_timeout_pop(msg_queue, 5000000);
        data_channels_ready &= !!g_async_queue_timeout_pop(msg_queue, 5000000);

        /* Fix: disconnect the notify handlers before releasing the queue,
         * otherwise a later ready-state change would push into a freed
         * queue (same pattern as run_datachannel_test). */
        g_signal_handlers_disconnect_by_data(left, msg_queue);
        g_signal_handlers_disconnect_by_data(right, msg_queue);
        g_async_queue_unref(msg_queue);

        if (!data_channels_ready) {
            g_print("data channel setup timed out\n");
            return FALSE;
        }
        g_print("data channels are now ready, running test\n");
    } else {
        g_print("data channels are expected to be ready immediately, running test\n");
    }

    return run_datachannel_test("prenegotiated", left, right);
}
/*
 * Exchanges two text and two binary messages between the given pair of
 * data channels and waits (up to 5 s per message) for all four to be
 * delivered back through the shared inbox queue.
 *
 * Returns TRUE when every expected message arrived, FALSE otherwise.
 */
static gboolean run_datachannel_test(const gchar *label, OwrDataChannel *left, OwrDataChannel *right)
{
    GAsyncQueue *inbox = g_async_queue_new();
    const gint expected_message_count = 4;
    const gchar *payload;
    gchar *incoming;
    gboolean all_received;
    int received;

    g_print("[%s] starting\n", label);

    /* all four handlers funnel received messages into the same queue */
    g_signal_connect(left, "on-data", G_CALLBACK(on_data), inbox);
    g_signal_connect(right, "on-data", G_CALLBACK(on_data), inbox);
    g_signal_connect(left, "on-binary-data", G_CALLBACK(on_binary_data), inbox);
    g_signal_connect(right, "on-binary-data", G_CALLBACK(on_binary_data), inbox);

    g_print("[%s] sending messages\n", label);

    owr_data_channel_send(left, "text: left->right");
    owr_data_channel_send(right, "text: right->left");
    payload = "binary: left->right";
    owr_data_channel_send_binary(left, (const guint8 *) payload, strlen(payload));
    payload = "binary: right->left";
    owr_data_channel_send_binary(right, (const guint8 *) payload, strlen(payload));

    g_print("[%s] expecting messages\n", label);

    for (received = 0; received < expected_message_count; received++) {
        incoming = g_async_queue_timeout_pop(inbox, 5000000);
        if (!incoming) {
            g_print("[%s] *** timeout while waiting for message\n", label);
            break;
        }
        g_print("[%s] received message: %s\n", label, incoming);
        g_free(incoming);
    }

    /* detach the handlers before the queue goes away */
    g_signal_handlers_disconnect_by_data(left, inbox);
    g_signal_handlers_disconnect_by_data(right, inbox);
    g_async_queue_unref(inbox);

    all_received = received >= expected_message_count;
    if (all_received)
        g_print("[%s] Success, ", label);
    else
        g_print("[%s] Failure, ", label);
    g_print("received %d / %d messages\n", received, expected_message_count);

    return all_received;
}
/*
 * Rendering thread main loop: repeatedly waits (1 s at a time) for the
 * next decoded frame on app->decoder_queue and hands it to
 * renderer_process(); stops on cancellation or on a processing failure.
 */
static gpointer
renderer_thread (gpointer data)
{
  App *const app = data;

  g_print ("Render thread started\n");

  for (;;) {
    RenderFrame *frame;

    if (app->render_thread_cancel)
      break;

    frame = g_async_queue_timeout_pop (app->decoder_queue, 1000000);
    if (frame && !renderer_process (app, frame))
      break;
  }
  return NULL;
}
/*
 * arv_stream_timeout_pop_buffer:
 * @stream: an #ArvStream
 * @timeout: wait time in microseconds
 *
 * Pops the next buffer from the stream's output queue, waiting at most
 * @timeout microseconds.  Returns NULL on timeout or if @stream is not a
 * valid #ArvStream.  With GLib >= 2.32 this maps directly onto
 * g_async_queue_timeout_pop(); older GLib falls back to the deprecated
 * g_async_queue_timed_pop() with an absolute end time.
 */
ArvBuffer *
arv_stream_timeout_pop_buffer (ArvStream *stream, guint64 timeout)
{
#if GLIB_CHECK_VERSION(2,32,0)
	g_return_val_if_fail (ARV_IS_STREAM (stream), NULL);

	return g_async_queue_timeout_pop (stream->priv->output_queue, timeout);
#else
	GTimeVal end_time;

	g_return_val_if_fail (ARV_IS_STREAM (stream), NULL);

	/* convert the relative microsecond timeout to an absolute deadline */
	g_get_current_time (&end_time);
	g_time_val_add (&end_time, timeout);

	return g_async_queue_timed_pop (stream->priv->output_queue, &end_time);
#endif
}
/*
 * Pops the next decoded frame from the decoder's frame queue.  A
 * positive @timeout waits up to that many microseconds; zero performs a
 * non-blocking try-pop.  Returns NULL when no frame is available.
 */
static inline GstVideoCodecFrame *
pop_frame (GstVaapiDecoder * decoder, guint64 timeout)
{
  GstVideoCodecFrame *out;
  GstVaapiSurfaceProxy *sp;

  out = (timeout > 0) ?
      g_async_queue_timeout_pop (decoder->frames, timeout) :
      g_async_queue_try_pop (decoder->frames);
  if (out == NULL)
    return NULL;

  sp = out->user_data;
  GST_DEBUG ("pop frame %d (surface 0x%08x)", out->system_frame_number,
      (sp ? (guint32) GST_VAAPI_SURFACE_PROXY_SURFACE_ID (sp) : VA_INVALID_ID));
  return out;
}
static gpointer _gq2zmq_worker (struct _gq2zmq_ctx_s *ctx) { while (ctx->running (_gq2zmq_has_pending (ctx))) { gchar *tmp = (gchar*) g_async_queue_timeout_pop (ctx->queue, G_TIME_SPAN_SECOND); if (tmp && !_forward_event (ctx->zpush, tmp)) break; } for (;;) { /* manage what remains in the GQueue */ gchar *tmp = g_async_queue_try_pop (ctx->queue); if (!tmp || !_forward_event (ctx->zpush, tmp)) break; } zmq_send (ctx->zpush, "EOF", 0, 0); GRID_INFO ("Thread stopping [NOTIFY-GQ2ZMQ]"); return ctx; }
// TODO: performance should be tuned later: frame rate and consistent static gboolean idle_func(gpointer data) { FskGtkWindow win = (FskGtkWindow)data; gpointer thread_data = g_async_queue_timeout_pop(win->queue, ASYNC_TIMEOUT); if(thread_data) { if(win == thread_data) { if(GTK_IS_WIDGET(win->window)) gtk_widget_queue_draw(win->window); } else { //Show the dialog GtkWidget *dialog = thread_data; gtk_dialog_run(GTK_DIALOG(dialog)); // Model window gtk_widget_destroy(dialog); } } return TRUE; //TRUE means will process again (loop) }
/*!
  \brief Attempts to retrieve a packet from the named queue if provided,
  or from the "queue" variable within the object otherwise
  \param object is a gconstpointer to an object
  \param queue_name is the name of the queue to pull the packet from
  \returns a pointer to a LibreEMS_Packet structure, or NULL if no packet
  arrived within the 5 second window
  */
G_MODULE_EXPORT LibreEMS_Packet * retrieve_packet(gconstpointer *object,const gchar * queue_name)
{
	GAsyncQueue *queue = NULL;
	LibreEMS_Packet *result = NULL;

	ENTER();
	g_return_val_if_fail(object,NULL);

	/* named queues live in global_data; the fallback queue hangs off the
	   object itself under the "queue" key */
	queue = queue_name
		? (GAsyncQueue *)DATA_GET(global_data,queue_name)
		: (GAsyncQueue *)DATA_GET(object,"queue");
	if (queue)
		result = (LibreEMS_Packet *)g_async_queue_timeout_pop(queue,5000000);
	EXIT();
	return result;
}
/*
 * Runs the requested (in-band) channel test: creates a non-negotiated
 * channel on one session and waits (up to 5 s) for the peer session to
 * announce the matching requested channel, then runs the message
 * exchange.  Direction is selected by @left_to_right.
 *
 * Returns TRUE when the exchange succeeds, FALSE on timeout or failure.
 */
static gboolean run_requested_channel_test(gboolean left_to_right)
{
    GAsyncQueue *msg_queue = g_async_queue_new();
    OwrDataChannel *left;
    OwrDataChannel *right;
    OwrDataSession *session1;
    OwrDataSession *session2;
    guint id = channel_id++ * 2 + (left_to_right ? 0 : 1);

    if (left_to_right) {
        session1 = left_session;
        session2 = right_session;
    } else {
        session1 = right_session;
        session2 = left_session;
    }

    g_print("\n >>> Running requested channel test\n\n");

    g_signal_connect(session2, "on-data-channel-requested",
        G_CALLBACK(on_data_channel_requested), msg_queue);

    /* ordered, max_packet_life_time, max_retransmits, protocol, negotiated, id, label */
    left = owr_data_channel_new(FALSE, 5000, -1, "OTP", FALSE, id, "requested");
    owr_data_session_add_data_channel(session1, left);

    right = g_async_queue_timeout_pop(msg_queue, 5000000);

    /* Fix: disconnect the handler before dropping the queue so a later
     * channel request cannot push into a freed queue (same pattern as
     * run_datachannel_test). */
    g_signal_handlers_disconnect_by_data(session2, msg_queue);
    g_async_queue_unref(msg_queue);

    if (!right) {
        g_print("requested: timeout while waiting for data channel\n");
        return FALSE;
    }

    owr_data_session_add_data_channel(session2, right);

    return run_datachannel_test("requested", left, right);
}
static struct network_client_s * get_next_client(struct network_server_s *srv) { return g_async_queue_timeout_pop(srv->queue_events, 1000000); }
/*
 * Runs the message loop attached to the current thread.  Tasks live in
 * two queues: int_q (sorted by deadline) and async_q (cross-thread
 * mailbox drained into int_q).  @nested allows re-entry from a running
 * loop; @increase_depth bumps ml->depth for the duration of this run.
 * Returns PP_OK, or an error when the loop is not attached to this
 * thread, the resource is bad, or a non-nested run is already active.
 */
int32_t
ppb_message_loop_run_int(PP_Resource message_loop, int nested, int increase_depth)
{
    if (this_thread_message_loop != message_loop) {
        trace_error("%s, not attached to current thread\n", __func__);
        return PP_ERROR_WRONG_THREAD;
    }

    struct pp_message_loop_s *ml = pp_resource_acquire(message_loop, PP_RESOURCE_MESSAGE_LOOP);
    if (!ml) {
        trace_error("%s, bad resource\n", __func__);
        return PP_ERROR_BADRESOURCE;
    }

    // prevent nested loops
    if (!nested && ml->running) {
        trace_error("%s, trying to run nested loop without declaring as nested\n", __func__);
        pp_resource_release(message_loop);
        return PP_ERROR_INPROGRESS;
    }

    /* remember running/teardown so a nested run can restore them on exit */
    struct {
        int running;
        int teardown;
    } saved_state = {
        .running = ml->running,
        .teardown = ml->teardown,
    };

    ml->running = 1;
    ml->teardown = 0;
    if (increase_depth)
        ml->depth++;

    int teardown = 0;
    int destroy_ml = 0;
    int depth = ml->depth;
    pp_resource_ref(message_loop);
    /* cache the queues so the loop body can run without holding the resource */
    GAsyncQueue *async_q = ml->async_q;
    GQueue *int_q = ml->int_q;
    pp_resource_release(message_loop);

    while (1) {
        struct timespec now;
        struct message_loop_task_s *task = g_queue_peek_head(int_q);
        gint64 timeout = 1000 * 1000;   /* default wait: one second, in microseconds */

        if (task) {
            clock_gettime(CLOCK_REALTIME, &now);
            timeout = (task->when.tv_sec - now.tv_sec) * 1000 * 1000 +
                      (task->when.tv_nsec - now.tv_nsec) / 1000;
            if (timeout <= 0) {
                // remove task from the queue
                g_queue_pop_head(int_q);

                // check if depth is correct
                if (task->depth > 0 && task->depth < depth) {
                    // wrong, reschedule it a bit later
                    task->when = add_ms(now, 10);
                    g_queue_insert_sorted(int_q, task, time_compare_func, NULL);
                    continue;
                }

                if (task->terminate) {
                    if (depth > 1) {
                        // exit at once, all remaining task will be processed by outer loop
                        g_slice_free(struct message_loop_task_s, task);
                        break;
                    }
                    // it's the outermost loop, we should wait for all tasks to be run
                    ml = pp_resource_acquire(message_loop, PP_RESOURCE_MESSAGE_LOOP);
                    if (ml) {
                        ml->teardown = 1;
                        teardown = 1;
                        destroy_ml = task->should_destroy_ml;
                        pp_resource_release(message_loop);
                    }
                    g_slice_free(struct message_loop_task_s, task);
                    continue;
                }

                // run task
                const struct PP_CompletionCallback ccb = task->ccb;
                if (ccb.func) {
                    ccb.func(ccb.user_data, task->result_to_pass);
                }

                // free task
                g_slice_free(struct message_loop_task_s, task);
                continue;   // run cycle again
            }
        } else if (teardown) {
            // teardown, no tasks in queue left
            break;
        }

        /* pull newly-posted tasks from other threads into the sorted queue */
        task = g_async_queue_timeout_pop(async_q, timeout);
        if (task)
            g_queue_insert_sorted(int_q, task, time_compare_func, NULL);
    }

    // mark thread as non-running
    ml = pp_resource_acquire(message_loop, PP_RESOURCE_MESSAGE_LOOP);
    if (ml) {
        if (increase_depth)
            ml->depth--;
        ml->running = 0;
        if (nested) {
            /* restore the outer run's state */
            ml->running = saved_state.running;
            ml->teardown = saved_state.teardown;
        }
        pp_resource_release(message_loop);
    }
    pp_resource_unref(message_loop);
    if (destroy_ml)
        pp_resource_unref(message_loop);
    return PP_OK;
}

/*
 * Posts @callback to be run on the loop's thread after @delay_ms, with
 * @result_to_pass forwarded to it.  @depth pins the task to a particular
 * nesting level (0 = any).  Fails once the loop has entered teardown.
 */
int32_t
ppb_message_loop_post_work_with_result(PP_Resource message_loop,
                                       struct PP_CompletionCallback callback, int64_t delay_ms,
                                       int32_t result_to_pass, int depth)
{
    if (callback.func == NULL) {
        trace_error("%s, callback.func == NULL\n", __func__);
        return PP_ERROR_BADARGUMENT;
    }

    struct pp_message_loop_s *ml = pp_resource_acquire(message_loop, PP_RESOURCE_MESSAGE_LOOP);
    if (!ml) {
        trace_error("%s, bad resource\n", __func__);
        return PP_ERROR_BADRESOURCE;
    }

    if (ml->running && ml->teardown) {
        // message loop is in a teardown state
        pp_resource_release(message_loop);
        trace_error("%s, quit request received, no additional work could be posted\n", __func__);
        return PP_ERROR_FAILED;
    }

    struct message_loop_task_s *task = g_slice_alloc0(sizeof(*task));

    task->result_to_pass = result_to_pass;
    task->ccb = callback;
    task->depth = depth;

    // calculate absolute time callback should be run at
    clock_gettime(CLOCK_REALTIME, &task->when);
    task->when.tv_sec += delay_ms / 1000;
    task->when.tv_nsec += (delay_ms % 1000) * 1000 * 1000;
    /* normalize tv_nsec into [0, 1e9) */
    while (task->when.tv_nsec >= 1000 * 1000 * 1000) {
        task->when.tv_sec += 1;
        task->when.tv_nsec -= 1000 * 1000 * 1000;
    }

    g_async_queue_push(ml->async_q, task);
    pp_resource_release(message_loop);
    return PP_OK;
}

/* Convenience wrapper: post work with PP_OK result at any depth. */
int32_t
ppb_message_loop_post_work(PP_Resource message_loop, struct PP_CompletionCallback callback,
                           int64_t delay_ms)
{
    return ppb_message_loop_post_work_with_result(message_loop, callback, delay_ms, PP_OK, 0);
}

/*
 * Posts a terminate task targeted at nesting level @depth.  When
 * @should_destroy is set, the final resource reference is dropped after
 * the loop winds down.
 */
int32_t
ppb_message_loop_post_quit_depth(PP_Resource message_loop, PP_Bool should_destroy, int depth)
{
    struct pp_message_loop_s *ml = pp_resource_acquire(message_loop, PP_RESOURCE_MESSAGE_LOOP);
    if (!ml) {
        trace_error("%s, bad resource\n", __func__);
        return PP_ERROR_BADRESOURCE;
    }

    struct message_loop_task_s *task = g_slice_alloc0(sizeof(*task));

    task->terminate = 1;
    task->depth = depth;
    task->should_destroy_ml = should_destroy;
    task->result_to_pass = PP_OK;
    clock_gettime(CLOCK_REALTIME, &task->when);   // run as early as possible

    g_async_queue_push(ml->async_q, task);
    pp_resource_release(message_loop);
    return PP_OK;
}

/* Posts a quit aimed at the loop's current nesting depth. */
int32_t
ppb_message_loop_post_quit(PP_Resource message_loop, PP_Bool should_destroy)
{
    int depth = ppb_message_loop_get_depth(message_loop);
    return ppb_message_loop_post_quit_depth(message_loop, should_destroy, depth);
}
/*!
  \brief thread_dispatcher() runs continuously as a thread listening to the
  io_data_queue and running handlers as messages come in. After they are done it
  passes the message back to the gui via the dispatch_queue for further gui
  handling (for things that can't run in a thread context)
  \param data is unused
  */
G_MODULE_EXPORT void *thread_dispatcher(gpointer data)
{
	GThread * repair_thread = NULL;
	Serial_Params *serial_params = NULL;
	Io_Message *message = NULL;
	GAsyncQueue *io_data_queue = NULL;
	CmdLineArgs *args = NULL;
	void *(*network_repair_thread)(gpointer data) = NULL;
	void *(*serial_repair_thread)(gpointer data) = NULL;
	/* GTimer *clock;*/

	ENTER();
	/* pull shared state and repair-thread entry points out of global_data */
	io_data_queue = (GAsyncQueue *)DATA_GET(global_data,"io_data_queue");
	serial_params = (Serial_Params *)DATA_GET(global_data,"serial_params");
	get_symbol("serial_repair_thread",(void **)&serial_repair_thread);
	args = (CmdLineArgs *)DATA_GET(global_data,"args");
	if (args->network_mode)
		get_symbol("network_repair_thread",(void **)&network_repair_thread);
	g_return_val_if_fail(args,NULL);
	g_return_val_if_fail(io_data_queue,NULL);
	g_return_val_if_fail(serial_params,NULL);
	g_async_queue_ref(io_data_queue);
	/* clock = g_timer_new();*/
	/* Endless Loop, wait for message, process and repeat... */
	while (TRUE)
	{
		if (DATA_GET(global_data,"thread_dispatcher_exit"))
		{
			/* NOTE(review): label with no visible goto in this function —
			 * presumably jumped to from elsewhere or historical; confirm
			 * before removing. */
fast_exit:
			/* drain queue and exit thread */
			while ((message = (Io_Message *)g_async_queue_try_pop(io_data_queue)) != NULL)
				dealloc_io_message(message);
			g_async_queue_unref(io_data_queue);
			EXIT();
			g_thread_exit(0);
		}
		/* wait up to one second for the next IO message */
		message = (Io_Message *)g_async_queue_timeout_pop(io_data_queue,1000000);
		if (!message) /* NULL message */
		{
			MTXDBG(THREADS|IO_MSG,_("No message received...\n"));
			continue;
		}
		else
			MTXDBG(THREADS|IO_MSG,_("MESSAGE ARRIVED on IO queue...\n"));

		/* link looks down (not offline, and either unconnected or port
		   closed): kick off the appropriate repair thread and wait for it */
		if ((!DATA_GET(global_data,"offline")) && (((!DATA_GET(global_data,"connected")) && (serial_params->open)) || (!(serial_params->open))))
		{
			/*printf("somehow somethign went wrong, connected is %i, offline is %i, serial_params->open is %i\n",DATA_GET(global_data,"connected"),DATA_GET(global_data,"offline"),serial_params->open);*/
			if (args->network_mode)
			{
				MTXDBG(THREADS,_("LINK DOWN, Initiating NETWORK repair thread!\n"));
				repair_thread = g_thread_new("Network Repair thread",network_repair_thread,NULL);
			}
			else
			{
				MTXDBG(THREADS,_("LINK DOWN, Initiating serial repair thread!\n"));
				repair_thread = g_thread_new("Serial Repair thread",serial_repair_thread,NULL);
			}
			g_thread_join(repair_thread);
		}
		/* repair failed and we're not offline: tell the gui and drop the message */
		if ((!serial_params->open) && (!DATA_GET(global_data,"offline")))
		{
			MTXDBG(THREADS,_("LINK DOWN, Can't process requested command, aborting call\n"));
			thread_update_logbar("comm_view","warning",g_strdup("Disconnected Serial Link. 
Check Communications link/cable...\n"),FALSE,FALSE);
			thread_update_widget("titlebar",MTX_TITLE,g_strdup("Disconnected link, check Communications tab..."));
			message->status = FALSE;
			continue;
		}
		/* dispatch on command type */
		switch ((CmdType)message->command->type)
		{
			case FUNC_CALL:
				if (!message->command->function)
					MTXDBG(CRITICAL|THREADS,_("CRITICAL ERROR, function \"%s()\" is not found!!\n"),message->command->func_call_name);
				else
				{
					/*printf("Calling FUNC_CALL, function \"%s()\" \n",message->command->func_call_name);*/
					message->status = message->command->function(message->command,message->command->func_call_arg);
					/* if (!result) message->command->defer_post_functions=TRUE; */
				}
				break;
			case WRITE_CMD:
				/*g_timer_start(clock);*/
				message->status = write_data(message);
				/* a failed write means the link is gone */
				if (!message->status)
					DATA_SET(global_data,"connected",GINT_TO_POINTER(FALSE));
				/*printf("Write command elapsed time %f\n",g_timer_elapsed(clock,NULL));*/
				if (message)
				{
					if (message->command)
					{
						if (message->command->helper_function)
						{
							message->command->helper_function(message, message->command->helper_func_arg);
						}
					}
				}
				/*printf("Write command with post function time %f\n",g_timer_elapsed(clock,NULL));*/
				break;
			case NULL_CMD:
				/*printf("null_cmd, just passing thru\n");*/
				break;
			default:
				MTXDBG(THREADS|CRITICAL,_("Hit default case, this SHOULD NOT HAPPEN it's a bug, notify author! \n"));
				break;
		}
		/* If set to defer post functions, it means they were passed via a function fall, thus dealloc it here., Otherwise push up the queue to the postfunction dispatcher */
		if (message->command->defer_post_functions)
			dealloc_io_message(message);
		else
			g_idle_add(process_pf_message,message);
	}
	EXIT();
	return 0;
}
/*
 * Dedicated EGL display thread: initializes the EGL display (unless it
 * was wrapped from an existing one), queries and logs vendor/version/
 * client-API strings, records the supported API bits, then services
 * messages from display->gl_queue until cancelled.  Broadcasts
 * gl_thread_ready after validation, after each executed message and on
 * exit so waiters can observe progress.  Returns NULL.
 */
static gpointer
egl_display_thread (gpointer data)
{
  EglDisplay *const display = data;
  EGLDisplay gl_display = display->base.handle.p;
  EGLint major_version, minor_version;
  gchar **gl_apis, **gl_api;

  /* wrapped displays were initialized by their creator; skip init */
  if (!display->base.is_wrapped) {
    gl_display = display->base.handle.p = eglGetDisplay (gl_display);
    if (!gl_display)
      goto error;
    if (!eglInitialize (gl_display, &major_version, &minor_version))
      goto error;
  }

  display->gl_vendor_string =
      g_strdup (eglQueryString (gl_display, EGL_VENDOR));
  display->gl_version_string =
      g_strdup (eglQueryString (gl_display, EGL_VERSION));
  display->gl_apis_string =
      g_strdup (eglQueryString (gl_display, EGL_CLIENT_APIS));

  GST_INFO ("EGL vendor: %s", display->gl_vendor_string);
  GST_INFO ("EGL version: %s", display->gl_version_string);
  GST_INFO ("EGL client APIs: %s", display->gl_apis_string);

  /* translate the space-separated API list into gl_apis bit flags */
  gl_apis = g_strsplit (display->gl_apis_string, " ", 0);
  if (!gl_apis)
    goto error;
  for (gl_api = gl_apis; *gl_api != NULL; gl_api++) {
    const GlVersionInfo *const vinfo =
        gl_version_info_lookup_by_api_name (*gl_api);
    if (vinfo)
      display->gl_apis |= vinfo->gl_api_bit;
  }
  g_strfreev (gl_apis);
  if (!display->gl_apis)
    goto error;

  display->base.is_valid = TRUE;
  g_cond_broadcast (&display->gl_thread_ready);

  /* message pump: poll the queue in 100 ms slices until cancelled */
  while (!display->gl_thread_cancel) {
    EglMessage *const msg =
        g_async_queue_timeout_pop (display->gl_queue, 100000);
    if (msg) {
      if (msg->base.is_valid) {
        msg->func (msg->args);
        msg->base.is_valid = FALSE;
        g_cond_broadcast (&display->gl_thread_ready);
      }
      egl_object_unref (msg);
    }
  }

done:
  if (gl_display != EGL_NO_DISPLAY && !display->base.is_wrapped)
    eglTerminate (gl_display);
  display->base.handle.p = NULL;
  g_cond_broadcast (&display->gl_thread_ready);
  return NULL;

  /* ERRORS */
error:
  {
    display->base.is_valid = FALSE;
    goto done;
  }
}
/*
 * Parks the calling worker thread on the global unused-thread queue and
 * waits for a pool to claim it.  Returns the claiming pool, or NULL when
 * the thread should die (too many unused threads, idle timeout, or a
 * kill request).  The wakeup_thread_marker is a sentinel used to make
 * parked threads re-read the global limits; a marker seen twice with the
 * same serial is relayed onward so every parked thread eventually wakes.
 */
static GRealThreadPool*
g_thread_pool_wait_for_new_pool (void)
{
  GRealThreadPool *pool;
  gint local_wakeup_thread_serial;
  guint local_max_unused_threads;
  gint local_max_idle_time;
  gint last_wakeup_thread_serial;
  gboolean have_relayed_thread_marker = FALSE;

  /* snapshot the global tunables; refreshed on wakeup markers below */
  local_max_unused_threads = g_atomic_int_get (&max_unused_threads);
  local_max_idle_time = g_atomic_int_get (&max_idle_time);
  last_wakeup_thread_serial = g_atomic_int_get (&wakeup_thread_serial);

  g_atomic_int_inc (&unused_threads);

  do
    {
      if (g_atomic_int_get (&unused_threads) >= local_max_unused_threads)
        {
          /* If this is a superfluous thread, stop it. */
          pool = NULL;
        }
      else if (local_max_idle_time > 0)
        {
          /* If a maximal idle time is given, wait for the given time. */
          DEBUG_MSG (("thread %p waiting in global pool for %f seconds.",
                      g_thread_self (), local_max_idle_time / 1000.0));

          /* max_idle_time is in milliseconds, the pop timeout in microseconds */
          pool = g_async_queue_timeout_pop (unused_thread_queue,
                                            local_max_idle_time * 1000);
        }
      else
        {
          /* If no maximal idle time is given, wait indefinitely. */
          DEBUG_MSG (("thread %p waiting in global pool.", g_thread_self ()));
          pool = g_async_queue_pop (unused_thread_queue);
        }

      if (pool == wakeup_thread_marker)
        {
          local_wakeup_thread_serial = g_atomic_int_get (&wakeup_thread_serial);
          if (last_wakeup_thread_serial == local_wakeup_thread_serial)
            {
              if (!have_relayed_thread_marker)
                {
                  /* If this wakeup marker has been received for
                   * the second time, relay it.
                   */
                  DEBUG_MSG (("thread %p relaying wakeup message to "
                              "waiting thread with lower serial.",
                              g_thread_self ()));

                  g_async_queue_push (unused_thread_queue, wakeup_thread_marker);
                  have_relayed_thread_marker = TRUE;

                  /* If a wakeup marker has been relayed, this thread
                   * will get out of the way for 100 microseconds to
                   * avoid receiving this marker again.
                   */
                  g_usleep (100);
                }
            }
          else
            {
              /* a new serial: honour any pending kill request first */
              if (g_atomic_int_add (&kill_unused_threads, -1) > 0)
                {
                  pool = NULL;
                  break;
                }

              DEBUG_MSG (("thread %p updating to new limits.",
                          g_thread_self ()));

              local_max_unused_threads = g_atomic_int_get (&max_unused_threads);
              local_max_idle_time = g_atomic_int_get (&max_idle_time);
              last_wakeup_thread_serial = local_wakeup_thread_serial;

              have_relayed_thread_marker = FALSE;
            }
        }
    }
  while (pool == wakeup_thread_marker);

  g_atomic_int_add (&unused_threads, -1);

  return pool;
}
/*
 * Exercises OwrBus / OwrMessageOrigin reference counting: posted
 * messages must not keep the bus alive, but must keep their origin alive
 * until the message has been delivered.  Each stage posts messages, drops
 * a reference and asserts the expected liveness via a GWeakRef.
 * NOTE(review): `queue` is never unref'd — harmless in a test, but worth
 * confirming it is intentional.
 */
static void test_refcounting()
{
    OwrBus *bus;
    OwrMessageOrigin *origin;
    GWeakRef weak_ref;
    GAsyncQueue *queue;

    queue = g_async_queue_new();

    /* stage 1: a pending message must not keep the bus alive */
    bus = owr_bus_new();
    owr_bus_set_message_callback(bus, on_message, queue, NULL);
    origin = mock_origin_new();
    owr_bus_add_message_origin(bus, origin);
    g_weak_ref_init(&weak_ref, bus);
    OWR_POST_STATS(origin, TEST, NULL);
    g_object_unref(bus);
    /* this should finalize the bus, pending messages should not keep it alive */
    assert_weak_ref(&weak_ref, FALSE, __LINE__);
    g_weak_ref_clear(&weak_ref);

    /* stage 2: a pending message must keep its origin alive until delivered */
    bus = owr_bus_new();
    owr_bus_set_message_callback(bus, on_message, queue, NULL);
    owr_bus_add_message_origin(bus, origin);
    g_weak_ref_init(&weak_ref, origin);
    OWR_POST_STATS(origin, TEST, NULL);
    g_object_unref(origin);
    /* the origin should be kept alive though */
    g_object_ref(origin);
    assert_weak_ref(&weak_ref, TRUE, __LINE__);
    g_object_unref(origin);
    g_assert(g_async_queue_timeout_pop(queue, G_USEC_PER_SEC));
    g_usleep(1000); /* messages are cleaned up after all callbacks have happened, so wait a bit more */
    assert_weak_ref(&weak_ref, FALSE, __LINE__); /* but be cleaned up after the message was handled */
    g_weak_ref_clear(&weak_ref);

    /* same as previous tests, but with message filter */
    origin = mock_origin_new();
    owr_bus_add_message_origin(bus, origin);
    g_weak_ref_init(&weak_ref, origin);
    g_object_set(bus, "message-type-mask", OWR_MESSAGE_TYPE_STATS, NULL);
    OWR_POST_STATS(origin, TEST, NULL);
    OWR_POST_EVENT(origin, TEST, NULL);
    OWR_POST_ERROR(origin, TEST, NULL);
    g_object_unref(origin);
    g_object_ref(origin);
    assert_weak_ref(&weak_ref, TRUE, __LINE__);
    g_object_unref(origin);
    g_assert(g_async_queue_timeout_pop(queue, G_USEC_PER_SEC));
    g_usleep(1000);
    assert_weak_ref(&weak_ref, FALSE, __LINE__);
    g_weak_ref_clear(&weak_ref);

    /* stage 4: filtered-out messages must not keep the bus alive either */
    origin = mock_origin_new();
    owr_bus_add_message_origin(bus, origin);
    g_weak_ref_init(&weak_ref, bus);
    OWR_POST_STATS(origin, TEST, NULL);
    OWR_POST_EVENT(origin, TEST, NULL);
    OWR_POST_ERROR(origin, TEST, NULL);
    g_object_unref(bus);
    assert_weak_ref(&weak_ref, FALSE, __LINE__);
    g_weak_ref_clear(&weak_ref);
}
/*
 * Source pad task loop for the SCReAM queue element.  One iteration:
 * ask the controller to approve transmissions when due, push every
 * already-approved packet downstream, then wait on the incoming queue
 * (bounded by the controller's next-approve delay) for a new RTP or
 * RTCP item and feed it to the controller.
 */
static void gst_scream_queue_srcpad_loop(GstScreamQueue *self)
{
    GstScreamDataQueueItem *item;
    GstScreamDataQueueRtpItem *rtp_item;
    GstScreamStream *stream;
    guint stream_id;
    guint64 time_now_us, time_until_next_approve = 0;
    GstBuffer *buffer;

    time_now_us = get_gst_time_us(self);
    if (G_UNLIKELY(time_now_us == 0)) {
        goto end;
    }

    if (time_now_us >= self->next_approve_time) {
        time_until_next_approve = gst_scream_controller_approve_transmits(self->scream_controller,
            time_now_us);
    } else {
        GST_LOG_OBJECT(self, "Time is %" G_GUINT64_FORMAT ", waiting %" G_GUINT64_FORMAT,
            time_now_us, self->next_approve_time);
    }

    /* Send all approved packets */
    while (!gst_data_queue_is_empty(self->approved_packets)) {
        if (G_UNLIKELY(!gst_data_queue_pop(self->approved_packets,
            (GstDataQueueItem **)&rtp_item))) {
            GST_WARNING_OBJECT(self, "Failed to pop from approved packets queue. Flushing?");
            goto end; /* flushing */
        }

        buffer = GST_BUFFER(((GstDataQueueItem *)rtp_item)->object);
        gst_pad_push(self->src_pad, buffer);

        GST_LOG_OBJECT(self, "pushing: pt = %u, seq: %u, pass: %u",
            rtp_item->rtp_pt, rtp_item->rtp_seq, self->pass_through);

        if (rtp_item->adapted) {
            /* NOTE(review): tmp_time is guint while the accumulator is
             * guint64 — presumably the controller's return fits in 32
             * bits; confirm before changing. */
            guint tmp_time;
            stream_id = ((GstScreamDataQueueItem *)rtp_item)->rtp_ssrc;
            tmp_time = gst_scream_controller_packet_transmitted(self->scream_controller, stream_id,
                rtp_item->rtp_payload_size, rtp_item->rtp_seq, time_now_us);
            time_until_next_approve = MIN(time_until_next_approve, tmp_time);
        }
        g_slice_free(GstScreamDataQueueRtpItem, rtp_item);
    }
    self->next_approve_time = time_now_us + time_until_next_approve;

    GST_LOG_OBJECT(self, "Popping or waiting %" G_GUINT64_FORMAT, time_until_next_approve);
    item = (GstScreamDataQueueItem *)g_async_queue_timeout_pop(self->incoming_packets,
        time_until_next_approve);
    if (!item) {
        goto end;
    }

    stream_id = item->rtp_ssrc;
    if (item->type == GST_SCREAM_DATA_QUEUE_ITEM_TYPE_RTP) {
        GstScreamDataQueueRtpItem *rtp_item = (GstScreamDataQueueRtpItem *)item;
        stream = get_stream(self, item->rtp_ssrc, rtp_item->rtp_pt);
        if (!stream) {
            /* no adaptation for this stream: approve immediately */
            rtp_item->adapted = FALSE;
            GST_LOG_OBJECT(self, "!adapted, approving: pt = %u, seq: %u, pass: %u",
                rtp_item->rtp_pt, rtp_item->rtp_seq, self->pass_through);
            gst_data_queue_push(self->approved_packets, (GstDataQueueItem *)item);
        } else {
            /* queue on the stream and inform the controller */
            gst_atomic_queue_push(stream->packet_queue, rtp_item);
            stream->enqueued_payload_size += rtp_item->rtp_payload_size;
            stream->enqueued_packets++;
            rtp_item->adapted = TRUE;
            self->next_approve_time = 0;
            gst_scream_controller_new_rtp_packet(self->scream_controller, stream_id,
                rtp_item->rtp_ts, rtp_item->enqueued_time, stream->enqueued_payload_size,
                rtp_item->rtp_payload_size);
        }
    } else { /* item->type == GST_SCREAM_DATA_QUEUE_ITEM_TYPE_RTCP */
        GstScreamDataQueueRtcpItem *rtcp_item = (GstScreamDataQueueRtcpItem *)item;
        gst_scream_controller_incoming_feedback(self->scream_controller, stream_id, time_now_us,
            rtcp_item->timestamp, rtcp_item->highest_seq, rtcp_item->n_loss, rtcp_item->n_ecn,
            rtcp_item->qbit);
        ((GstDataQueueItem *)item)->destroy(item);
    }

end:
    return;
}
/*!
  \brief Simple communication test. First listens briefly for a streaming
  ECU (basic datalog packets); if none is heard, assembles and sends an
  Interface Version request packet and waits for the response, or a
  timeout (250 ms per phase). Sets the "connected" flag in global_data
  to reflect the outcome.
  \returns TRUE on success, FALSE on failure
  */
G_MODULE_EXPORT gboolean comms_test(void)
{
	GAsyncQueue *queue = NULL;
	LibreEMS_Packet *packet = NULL;
	gint len = 0;
	/* Packet sends back Interface Version */
	/* START, Header, Payload ID H, Payload ID L, CKsum, STOP */
	guint8 *buf = NULL;	/* Raw packet */
	guint8 pkt[INTERFACE_VERSION_REQ_PKT_LEN];
	gint tmit_len = 0;

	ENTER();
	Serial_Params *serial_params = NULL;
	serial_params = (Serial_Params *)DATA_GET(global_data,"serial_params");

	MTXDBG(SERIAL_RD,_("Entered...\n"));
	if (!serial_params)
	{
		EXIT();
		return FALSE;
	}
	/* Fix: the previous lookup of "packet_queue" here was a dead store
	 * (immediately overwritten below); the unused locals "cond" and
	 * "res" were removed as well. */
	queue = g_async_queue_new();
	/* Phase 1: listen briefly for an ECU already streaming datalogs */
	register_packet_queue(PAYLOAD_ID,queue,RESPONSE_BASIC_DATALOG);
	packet = (LibreEMS_Packet *)g_async_queue_timeout_pop(queue,250000);
	deregister_packet_queue(PAYLOAD_ID,queue,RESPONSE_BASIC_DATALOG);
	if (packet)
	{
		MTXDBG(SERIAL_RD,_("Found streaming ECU!!\n"));
		g_async_queue_unref(queue);
		libreems_packet_cleanup(packet);
		DATA_SET(global_data,"connected",GINT_TO_POINTER(TRUE));
		EXIT();
		return TRUE;
	}
	else
	{	/* Assume ECU is in non-streaming mode, try and probe it */
		gint sum = 0;
		MTXDBG(SERIAL_RD,_("Requesting LibreEMS Interface Version\n"));
		register_packet_queue(PAYLOAD_ID,queue,RESPONSE_INTERFACE_VERSION);
		pkt[HEADER_IDX] = 0;
		pkt[H_PAYLOAD_IDX] = (REQUEST_INTERFACE_VERSION & 0xff00 ) >> 8;
		pkt[L_PAYLOAD_IDX] = (REQUEST_INTERFACE_VERSION & 0x00ff );
		/* simple additive checksum over everything before the last byte */
		for (gint i=0;i<INTERFACE_VERSION_REQ_PKT_LEN-1;i++)
			sum += pkt[i];
		pkt[INTERFACE_VERSION_REQ_PKT_LEN-1] = sum;
		buf = finalize_packet((guint8 *)&pkt,INTERFACE_VERSION_REQ_PKT_LEN,&tmit_len);
		if (!write_wrapper_f(serial_params->fd, buf, tmit_len, &len))
		{
			g_free(buf);
			deregister_packet_queue(PAYLOAD_ID,queue,RESPONSE_INTERFACE_VERSION);
			g_async_queue_unref(queue);
			EXIT();
			return FALSE;
		}
		g_free(buf);
		packet = (LibreEMS_Packet *)g_async_queue_timeout_pop(queue,250000);
		deregister_packet_queue(PAYLOAD_ID,queue,RESPONSE_INTERFACE_VERSION);
		g_async_queue_unref(queue);
		if (packet)
		{
			MTXDBG(SERIAL_RD,_("Found via probing!!\n"));
			libreems_packet_cleanup(packet);
			DATA_SET(global_data,"connected",GINT_TO_POINTER(TRUE));
			EXIT();
			return TRUE;
		}
	}
	DATA_SET(global_data,"connected",GINT_TO_POINTER(FALSE));
	MTXDBG(SERIAL_RD,_("No device found...\n"));
	EXIT();
	return FALSE;
}