static void process_video_events(void) { fs_mutex_lock(g_video_event_mutex); int count = g_queue_get_length(g_video_event_queue); for (int i = 0; i < count; i++) { int event = FS_POINTER_TO_INT(g_queue_pop_tail(g_video_event_queue)); if (event == FS_ML_VIDEO_EVENT_GRAB_INPUT) { fs_ml_set_input_grab(true); } else if (event == FS_ML_VIDEO_EVENT_UNGRAB_INPUT) { fs_ml_set_input_grab(false); } else if (event == FS_ML_VIDEO_EVENT_SHOW_CURSOR) { fs_ml_show_cursor(1, 1); } else if (event == FS_ML_VIDEO_EVENT_HIDE_CURSOR) { fs_ml_show_cursor(0, 1); } else if (event == FS_ML_VIDEO_EVENT_TOGGLE_FULLSCREEN) { fs_ml_toggle_fullscreen(); } else if (event == FS_ML_VIDEO_EVENT_ENABLE_FULLSCREEN) { fs_ml_set_fullscreen(true); } else if (event == FS_ML_VIDEO_EVENT_DISABLE_FULLSCREEN) { fs_ml_set_fullscreen(false); } } fs_mutex_unlock(g_video_event_mutex); }
/* Shut down every host assigned to this scheduler's policy.
 *
 * Historical context (kept from the original author): applications must be
 * freed before any hosts, because freeing an application may close() sockets
 * that still need host information. Letting a single thread free everything
 * was tried and does not work — e.g. each tor plugin instance counts the
 * hosts it created and frees its openssl structs when that many hosts are
 * freed, so hosts must be freed by the worker that owns them. */
static void _scheduler_stopHosts(Scheduler* scheduler) {
    if(!scheduler->policy->getAssignedHosts) {
        return;
    }

    GQueue* assigned = scheduler->policy->getAssignedHosts(scheduler->policy);
    if(!assigned) {
        return;
    }

    guint count = g_queue_get_length(assigned);
    message("starting to shut down %u hosts", count);
    worker_freeHosts(assigned);
    message("%u hosts are shut down", count);
}
/* Cancel one outstanding request of the given size class (big or small
 * thumbnail) for every entry in @dir's loader queue. Entries that end up
 * with no requesters of either size are removed; if the queue becomes
 * empty, the loader itself is torn down.
 *
 * dir:    the directory whose thumbnail loader (if any) is affected.
 * is_big: TRUE to cancel a big-thumbnail request, FALSE for small. */
void vfs_thumbnail_loader_cancel_all_requests( VFSDir* dir, gboolean is_big )
{
    GList* l;
    VFSThumbnailLoader* loader;
    ThumbnailRequest* req;

    if( G_UNLIKELY( (loader=dir->thumbnail_loader) ) )
    {
        /* lock out the loader task while we mutate its queue */
        vfs_async_task_lock( loader->task );
        /* g_debug( "TRY TO CANCEL REQUESTS!!" ); */
        for( l = loader->queue->head; l; )
        {
            req = (ThumbnailRequest*)l->data;
            /* drop one requester of the selected size class */
            --req->n_requests[ is_big ? LOAD_BIG_THUMBNAIL : LOAD_SMALL_THUMBNAIL ];

            if( req->n_requests[0] <= 0 && req->n_requests[1] <= 0 ) /* nobody needs this */
            {
                /* save the successor before the link is freed */
                GList* next = l->next;
                /* NOTE(review): only the list link is freed here; req itself
                 * is not released — confirm whether ThumbnailRequest leaks or
                 * is owned elsewhere. */
                g_queue_delete_link( loader->queue, l );
                l = next;
            }
            else
                l = l->next;
        }

        if( g_queue_get_length( loader->queue ) == 0 )
        {
            /* g_debug( "FREE LOADER IN vfs_thumbnail_loader_cancel_all_requests!" ); */
            /* unlock before freeing: vfs_thumbnail_loader_free destroys the
             * task that owns the lock */
            vfs_async_task_unlock( loader->task );
            loader->dir->thumbnail_loader = NULL;
            vfs_thumbnail_loader_free( loader );
            return;
        }
        vfs_async_task_unlock( loader->task );
    }
}
/** * anjuta_shell_remove_widget: * @shell: A #AnjutaShell interface * @widget: The widget to remove * @error: Error propagation object * * Removes the widget from shell. The widget should have been added before * with #anjuta_shell_add_widget. */ void anjuta_shell_remove_widget (AnjutaShell *shell, GtkWidget *widget, GError **error) { GQueue *queue; gboolean found_in_queue; g_return_if_fail (shell != NULL); g_return_if_fail (ANJUTA_IS_SHELL (shell)); g_return_if_fail (widget != NULL); g_return_if_fail (GTK_IS_WIDGET (widget)); /* If there is a queue, remove widgets from it */ found_in_queue = FALSE; queue = g_object_get_data (G_OBJECT (shell), "__widget_queue"); if (queue) { gint i; for (i = g_queue_get_length(queue) - 1; i >= 0; i--) { WidgetQueueData *qd; qd = g_queue_peek_nth (queue, i); if (qd->widget == widget) { g_queue_remove (queue, qd); on_widget_data_free (qd); found_in_queue = TRUE; break; } } } if (!found_in_queue) ANJUTA_SHELL_GET_IFACE (shell)->remove_widget (shell, widget, error); }
/* Randomly permute the elements of @queue in place using a Fisher-Yates
 * shuffle driven by the scheduler's RNG. Does nothing for NULL or for
 * queues with fewer than two elements. */
static void _scheduler_shuffleQueue(Scheduler* scheduler, GQueue* queue) {
    if(queue == NULL) {
        return;
    }

    guint length = g_queue_get_length(queue);

    /* BUG FIX: with 0 elements, `length-1` underflows (guint) so the shuffle
     * loop would index far out of bounds, and the VLA below would have size 0
     * (undefined behavior). With 1 element there is nothing to shuffle. */
    if(length < 2) {
        return;
    }

    /* convert queue to array */
    gpointer array[length];
    for(guint i = 0; i < length; i++) {
        array[i] = g_queue_pop_head(queue);
    }

    /* we now should have moved all elements from the queue to the array */
    utility_assert(g_queue_is_empty(queue));

    /* shuffle array - Fisher-Yates shuffle */
    for(guint i = 0; i < length-1; i++) {
        /* pick a uniform offset j in [0, length-i) */
        gdouble randomFraction = random_nextDouble(scheduler->random);
        gdouble maxRange = (gdouble) length-i;
        guint j = (guint)floor(randomFraction * maxRange);
        /* handle edge case if we got 1.0 as a double */
        if(j == length-i) {
            j--;
        }
        gpointer temp = array[i];
        array[i] = array[i+j];
        array[i+j] = temp;
    }

    /* reload the queue with the newly shuffled ordering */
    for(guint i = 0; i < length; i++) {
        g_queue_push_tail(queue, array[i]);
    }
}
/* Move URIs from the outstanding queue into the processing queue (and hand
 * them to rhythmdb) until the processing queue reaches PROCESSING_LIMIT or
 * the outstanding queue runs dry. Does nothing once the job is cancelled.
 * Must be called with the job lock held. */
static void
maybe_start_more (RhythmDBImportJob *job)
{
	if (g_cancellable_is_cancelled (job->priv->cancel)) {
		return;
	}

	for (;;) {
		char *next_uri;

		if (g_queue_get_length (job->priv->processing) >= PROCESSING_LIMIT) {
			return;
		}

		next_uri = g_queue_pop_head (job->priv->outstanding);
		if (next_uri == NULL) {
			return;
		}

		/* ownership of the string moves to the processing queue */
		g_queue_push_tail (job->priv->processing, next_uri);

		rhythmdb_add_uri_with_types (job->priv->db,
					     next_uri,
					     job->priv->entry_type,
					     job->priv->ignore_type,
					     job->priv->error_type);
	}
}
/* Callback invoked for every captured DeckLink video frame. Converts the
 * capture timestamp to the external clock, then (unless flushing) wraps the
 * frame in a CaptureFrame with its timecode and appends it to
 * self->current_frames, dropping the oldest queued frames once buffer_size
 * is reached. Takes a ref on @frame; signals the streaming thread. */
static void
gst_decklink_video_src_got_frame (GstElement * element,
    IDeckLinkVideoInputFrame * frame, GstDecklinkModeEnum mode,
    GstClockTime capture_time, GstClockTime capture_duration, guint hours,
    guint minutes, guint seconds, guint frames, BMDTimecodeFlags bflags)
{
  GstDecklinkVideoSrc *self = GST_DECKLINK_VIDEO_SRC_CAST (element);

  GST_LOG_OBJECT (self, "Got video frame at %" GST_TIME_FORMAT,
      GST_TIME_ARGS (capture_time));

  /* rewrite capture_time/duration into the pipeline's clock domain */
  gst_decklink_video_src_convert_to_external_clock (self, &capture_time,
      &capture_duration);

  GST_LOG_OBJECT (self, "Actual timestamp %" GST_TIME_FORMAT,
      GST_TIME_ARGS (capture_time));

  g_mutex_lock (&self->lock);
  if (!self->flushing) {
    CaptureFrame *f;
    const GstDecklinkMode *bmode;
    GstVideoTimeCodeFlags flags = GST_VIDEO_TIME_CODE_FLAGS_NONE;
    guint field_count = 0;

    /* bound the queue: drop oldest frames past buffer_size */
    while (g_queue_get_length (&self->current_frames) >= self->buffer_size) {
      f = (CaptureFrame *) g_queue_pop_head (&self->current_frames);
      GST_WARNING_OBJECT (self, "Dropping old frame at %" GST_TIME_FORMAT,
          GST_TIME_ARGS (f->capture_time));
      capture_frame_free (f);
    }

    f = (CaptureFrame *) g_malloc0 (sizeof (CaptureFrame));
    f->frame = frame;
    f->capture_time = capture_time;
    f->capture_duration = capture_duration;
    f->mode = mode;
    f->format = frame->GetPixelFormat ();

    /* derive timecode flags/field count from the capture mode */
    bmode = gst_decklink_get_mode (mode);
    if (bmode->interlaced) {
      flags =
          (GstVideoTimeCodeFlags) (flags |
          GST_VIDEO_TIME_CODE_FLAGS_INTERLACED);
      if (bflags & bmdTimecodeFieldMark)
        field_count = 2;
      else
        field_count = 1;
    }
    if (bflags & bmdTimecodeIsDropFrame)
      flags =
          (GstVideoTimeCodeFlags) (flags |
          GST_VIDEO_TIME_CODE_FLAGS_DROP_FRAME);
    f->tc =
        gst_video_time_code_new (bmode->fps_n, bmode->fps_d, NULL, flags,
        hours, minutes, seconds, frames, field_count);

    /* keep the frame alive while it sits in our queue */
    frame->AddRef ();
    g_queue_push_tail (&self->current_frames, f);
    g_cond_signal (&self->cond);
  }
  g_mutex_unlock (&self->lock);
}
/* Toggle the "favorite" flag of the executable targeted by the popup,
 * persist the change in the DB, and re-sort the executable into its new
 * position in the executables list (favorites first, then alphabetical,
 * case-insensitively). Finally closes the popup. */
static void meh_screen_popup_favorite_toggle(App* app, Screen* screen) {
	g_assert(app != NULL);
	g_assert(screen != NULL);

	PopupData* data = meh_screen_popup_get_data(screen);
	ExecutableListData* exec_list_data = meh_exec_list_get_data(data->src_screen);

	/* updates the value of the executable */
	gboolean new_value = data->executable->favorite == 1 ? FALSE : TRUE;
	if (meh_db_set_executable_favorite(app->db, data->executable, new_value)) {
		data->executable->favorite = new_value;
	}

	/* re-position the executable in the executables list if necessary */
	if (g_queue_get_length(exec_list_data->executables) > 1) {
		int prev_selected = exec_list_data->selected_executable;
		unsigned int i = 0;

		/* retrieves the one which will move in the list */
		Executable* to_move = g_queue_pop_nth(exec_list_data->executables,
		                                      exec_list_data->selected_executable);

		/* find the good position for the moved executable */
		for (i = 0; i < g_queue_get_length(exec_list_data->executables); i++) {
			gboolean exit = FALSE;
			Executable* ex = g_queue_peek_nth(exec_list_data->executables, i);

			/* if favorite, ensure to stay in the favorite zone */
			if (new_value == TRUE) {
				if (ex->favorite == FALSE) {
					exit = TRUE;
				}
			}

			/* BUG FIX: the original passed g_utf8_strlen(ex->display_name, -1)
			 * as the byte length to g_utf8_strup() — a CHARACTER count (wrong
			 * unit, truncates multi-byte names) and, for `second`, the length
			 * of the WRONG string entirely. -1 means "NUL-terminated", which
			 * is correct for both. */
			gchar* first = g_utf8_strup(ex->display_name, -1);
			gchar* second = g_utf8_strup(data->executable->display_name, -1);

			if (g_utf8_collate(first, second) > 0) {
				if (new_value == TRUE && ex->favorite == TRUE) {
					exit = TRUE;
				} else if (new_value == FALSE && ex->favorite == FALSE) {
					exit = TRUE;
				}
			}

			g_free(first);
			g_free(second);

			if (exit) {
				break;
			}
		}

		/* re-add it to the good position (NULL sibling appends at the tail) */
		GList* after = g_queue_peek_nth_link(exec_list_data->executables, i);
		g_queue_insert_before(exec_list_data->executables, after, to_move);

		/* notify the screen of the new selected executable */
		exec_list_data->selected_executable = i;

		/* redraw the executables list texts */
		meh_exec_list_refresh_executables_widget(app, data->src_screen);

		/* move and redraw the selection */
		meh_exec_list_after_cursor_move(app, data->src_screen, prev_selected);
	}

	/* finally close the popup */
	meh_screen_popup_close(screen);
}
/* Switch playback to the previous track, honoring shuffle and repeat.
 * Computes the previous track index n (-1 meaning "no track"/stop when not
 * repeating past the first track) and jumps to it via queue_goto().
 *
 * notif: when TRUE, emit a queue-change notification afterwards. */
void queue_prev(gboolean notif) {
    int n, p;
    int len = g_queue_get_length(&g_queue);

    g_debug("Switching to previous track (current track: %d).", g_current_track);

    if (g_repeat && len == 1) {
        /* Easy case: replay the same track */
        queue_seek(0);
        if (notif) queue_notify();
        return;
    }

    if (g_shuffle) {
        /* Possible cases: g_repeat, g_current_track == -1, g_shuffle_first == -1 */
        if (g_current_track == -1) {
            if (g_shuffle_first == -1) {
                /* Pick a random track */
                n = g_random_int_range(0, len);

                /* Set the next one to be the first shuffle track... */
                p = g_queue_index(&g_shuffle_queue, GINT_TO_POINTER(n));
                if (p == -1)
                    g_error("Can't find last track in shuffle queue");
                p = (p+1) % len;
                g_shuffle_first = GPOINTER_TO_INT(g_queue_peek_nth(&g_shuffle_queue, p));
            }
            else {
                /* Find the track that comes just before the first shuffle track */
                p = g_queue_index(&g_shuffle_queue, GINT_TO_POINTER(g_shuffle_first));
                if (p == -1)
                    g_error("Can't find first track in shuffle queue");
                /* BUG FIX: (p-1) % len is negative when p == 0 (C99 truncated
                 * division), which made g_queue_peek_nth() return NULL. Add
                 * len before taking the modulus, as the branch below does. */
                p = (p + len - 1) % len;
                n = GPOINTER_TO_INT(g_queue_peek_nth(&g_shuffle_queue, p));
            }
        }
        else {
            if (g_shuffle_first == -1) {
                g_warning("g_shuffle_first == -1 in goto_prev()");
                g_shuffle_first = g_current_track;
            }

            /* Is this the first track in non-repeat mode? */
            if ((g_current_track == g_shuffle_first) && !g_repeat) {
                n = -1;
            }
            else {
                /* Find the index of the current track in the shuffle queue */
                p = g_queue_index(&g_shuffle_queue, GINT_TO_POINTER(g_current_track));
                if (p == -1)
                    g_error("Can't find current track in shufflequeue");

                /* Find the previous track in the shuffle queue */
                p = (p+len-1) % len;
                n = GPOINTER_TO_INT(g_queue_peek_nth(&g_shuffle_queue, p));
            }
        }
    }
    else {
        n = g_current_track - 1;
        /* BUG FIX: `n %= len` keeps a negative result in C99 (e.g. -1 % len
         * == -1), so "previous" from the first track never wrapped to the
         * last track in repeat mode. Add len before the modulus. */
        if (g_repeat)
            n = (n + len) % len;
    }

    queue_goto(FALSE, n, FALSE);
    if (notif) queue_notify();
}
/* Return the number of entries currently in the global queue. */
guint queue_get_length(void)
{
    guint count = g_queue_get_length(queue);
    return count;
}
/* Slot-based simulation of a two-flow scheduling algorithm.
 *
 * For each of sim_time slots: enqueue the slot's arrivals into per-flow
 * GOOD/BAD queues (GOOD when the packet was overheard), apply the service
 * controls (a priority-ordered list of evacuation rules plus anti-idleness
 * fallbacks), record the largest consecutively-ACKed id and the backlog per
 * flow, and abort early (marking *stable = FALSE) if either flow's backlog
 * of un-ACKed ids exceeds thres.
 *
 * sim_time:       number of slots to simulate.
 * service_rates:  per-flow service rates; the flow with the larger rate is FAST.
 * num_arrivals:   per-flow, per-slot arrival counts.
 * overheard:      per-flow, per-packet flags selecting the GOOD queue.
 * total_arrivals: per-flow totals, used to size the TCP checklists.
 * max_ID_ack:     out, per-flow per-slot largest consecutive ACKed id.
 * backlog:        out, per-flow per-slot backlog (GOOD+BAD queue lengths).
 * thres:          stability threshold on un-ACKed backlog.
 * stable:         out (may be NULL); set FALSE if the threshold is exceeded.
 *
 * Uses file-scope state: queues[flow][GOOD|BAD] and TCPchecklist[flow]. */
void detalgo_1(long int sim_time, int service_rates[2], long int *num_arrivals[2], gboolean *overheard[2], long int total_arrivals[2], long int *max_ID_ack[2], long int *backlog[2], long int thres, gboolean *stable){
    long int i,j,k;
    int c[2];                       /* NOTE(review): never used */
    long int lastID[2] = {0,0};     /* highest packet id issued per flow */
    long int g[2],b[2],r[2];        /* GOOD lengths, BAD lengths, rates */
    int FAST,SLOW;
    long int ID;
    int debug = 0;
    int mg,Mg;                      /* NOTE(review): assigned below but never read */
    int counter;
    int flow;

    /* Initializations */
    r[0] = service_rates[0];
    r[1] = service_rates[1];
    /* FAST is the flow with the strictly larger (or equal goes to FLOW_1) rate */
    FAST = service_rates[FLOW_1] <= service_rates[FLOW_2]?FLOW_2:FLOW_1;
    /* NOTE(review): fmod() on integers; (FAST+1)%2 would be the usual form */
    SLOW = fmod(FAST+1,2);
    if (stable!=NULL) {*stable = TRUE; }

    /* Initialize TCPchecklist library. */
    TCPchecklist_init();

    /* Create TCP checklists */
    TCPchecklist[FLOW_1] = TCPchecklist_create(total_arrivals[FLOW_1]);
    TCPchecklist[FLOW_2] = TCPchecklist_create(total_arrivals[FLOW_2]);

    /* Begin simulation */
    for (i = 0; i<sim_time; i++){
        /* Get new arrivals */
        for (k = 0; k<2; k++){ /* flow loop */
            for (j = 0; j<num_arrivals[k][i]; j++){ /* arrival loop */
                lastID[k]++;
                ID = lastID[k];
                /* overheard packets go to the GOOD queue, others to BAD */
                if (overheard[k][ID-1]){
                    g_queue_push_tail(queues[k][GOOD],GINT_TO_POINTER(ID));
                } else {
                    g_queue_push_tail(queues[k][BAD],GINT_TO_POINTER(ID));
                }
            }
        }

        if(debug){
            printf("slot %ld\n",i);
            printf("after arrivals\n");
            print_queues();
            printf("************************************************\n");
        }

        /* Rename variables for easier reference. */
        g[0] = g_queue_get_length(queues[0][GOOD]);
        g[1] = g_queue_get_length(queues[1][GOOD]);
        b[0] = g_queue_get_length(queues[0][BAD]);
        b[1] = g_queue_get_length(queues[1][BAD]);

        /* Take controls: first matching rule wins */
        if (g[FAST] >= r[SLOW] && g[SLOW] >= r[SLOW]){
            /* both GOOD queues can sustain the slow rate: serve both */
            evac( queues[FAST][GOOD],TCPchecklist[FAST],r[SLOW]);
            evac( queues[SLOW][GOOD],TCPchecklist[SLOW],r[SLOW]);
        } else if (b[FAST] >= r[FAST] && b[SLOW] >= r[SLOW]){
            /* both BAD queues are full enough: serve the one whose head
             * packet is older (smaller id) */
            k = (GPOINTER_TO_INT(g_queue_peek_head(queues[FAST][BAD])) < GPOINTER_TO_INT(g_queue_peek_head(queues[SLOW][BAD]))) ? FAST : SLOW;
            evac( queues[k][BAD],TCPchecklist[k],r[k]);
        } else if (b[SLOW] >= r[SLOW]){
            evac( queues[SLOW][BAD],TCPchecklist[SLOW],r[SLOW]);
        } else if (b[FAST] >= r[FAST]){
            evac( queues[FAST][BAD],TCPchecklist[FAST],r[FAST]);
        } else if (b[SLOW] >= r[SLOW]){
            /* NOTE(review): duplicate of the b[SLOW] branch above — this arm
             * is unreachable dead code; confirm intent. */
            evac( queues[SLOW][BAD],TCPchecklist[SLOW],r[SLOW]);
        } else if (g[SLOW] >= r[SLOW]){
            evac( queues[SLOW][GOOD],TCPchecklist[SLOW],r[SLOW]);
        } else if (g[FAST] >= r[FAST]){
            evac( queues[FAST][GOOD],TCPchecklist[FAST],r[FAST]);
        }
        /* anti-idleness controls */
        else{
            /* Determine what flow can send the most packets. */
            mg = MIN(g[SLOW],g[FAST]);
            Mg = MAX(g[SLOW],g[FAST]);
            counter=0;
            if ( MIN(r[SLOW],g[SLOW]+g[FAST] + b[SLOW] + b[FAST] ) >= MIN(r[FAST],g[FAST]+b[FAST]) ) {
                /* drain up to r[SLOW] packets, one per iteration, re-reading
                 * the queue lengths each time */
                while(counter<r[SLOW]){
                    g[0] = g_queue_get_length(queues[0][GOOD]);
                    g[1] = g_queue_get_length(queues[1][GOOD]);
                    b[0] = g_queue_get_length(queues[0][BAD]);
                    b[1] = g_queue_get_length(queues[1][BAD]);

                    /* Evacuate the g+g. */
                    if (g[FLOW_1] && g[FLOW_2]){
                        evac( queues[FAST][GOOD],TCPchecklist[FAST],1);
                        evac( queues[SLOW][GOOD],TCPchecklist[SLOW],1);
                        counter++;
                        continue;
                    }
                    /* Evacuate any bad packets. */
                    if (b[FAST] && b[SLOW] ){
                        /* Choose the queue with the largest backlog. */
                        flow = (MAX(b[FAST],b[SLOW]) == b[SLOW])?SLOW:FAST;
                        evac( queues[flow][BAD],TCPchecklist[flow],1);
                        counter++;
                        continue;
                    }
                    if (b[SLOW]){
                        evac( queues[SLOW][BAD],TCPchecklist[SLOW],1);
                        counter++;
                        continue;
                    }
                    if (b[FAST]){
                        evac( queues[FAST][BAD],TCPchecklist[FAST],1);
                        counter++;
                        continue;
                    }
                    /* g controls */
                    if (g[SLOW]){
                        evac( queues[SLOW][GOOD],TCPchecklist[SLOW],1);
                        counter++;
                        continue;
                    }
                    if (g[FAST]){
                        evac( queues[FAST][GOOD],TCPchecklist[FAST],1);
                        counter++;
                        continue;
                    }
                    break; /* all queues empty */
                }
            } else {
                /* the FAST flow alone can send more: drain up to r[FAST] */
                while(counter<r[FAST]){
                    g[FAST] = g_queue_get_length(queues[FAST][GOOD]);
                    b[FAST] = g_queue_get_length(queues[FAST][BAD]);
                    /* First evacuate the fast bad packets. */
                    if( b[FAST]){
                        evac(queues[FAST][BAD],TCPchecklist[FAST],1);
                        counter++;
                        continue;
                    }
                    /* Then evacuate the fast good packets. */
                    if(g[FAST]){
                        evac(queues[FAST][GOOD],TCPchecklist[FAST],1);
                        counter++;
                        continue;
                    }
                    break;
                }
            }
        }

        if(debug){
            printf("after control\n");
            print_queues();
            printf("************************************************\n");}

        /* Get greatest ACKed ID for this slot. */
        max_ID_ack[FAST][i] = TCPchecklist_get_largest_consecutive_ACK_id(TCPchecklist[FAST]);
        max_ID_ack[SLOW][i] = TCPchecklist_get_largest_consecutive_ACK_id(TCPchecklist[SLOW]);

        /* Check if system is stable. Otherwise stop*/
        if (stable!=NULL && (lastID[FLOW_1]-max_ID_ack[FLOW_1][i] > thres || lastID[FLOW_2]-max_ID_ack[FLOW_2][i] > thres) ){
            *stable = FALSE;
            break;
        }

        /* Rename variables for easier reference. */
        g[0] = g_queue_get_length(queues[0][GOOD]);
        g[1] = g_queue_get_length(queues[1][GOOD]);
        b[0] = g_queue_get_length(queues[0][BAD]);
        b[1] = g_queue_get_length(queues[1][BAD]);

        /* Compute current backlog. */
        backlog[FLOW_1][i] = g[FLOW_1]+ b[FLOW_1];
        backlog[FLOW_2][i] = g[FLOW_2]+ b[FLOW_2];

        if(debug){
            printf("checklists largest ACK\n");
            printf("flow 1: %ld\nflow 2: %ld\n",max_ID_ack[FLOW_1][i],max_ID_ack[FLOW_2][i]);
            printf("************************************************\n");
            printf("give values\n");
            /* interactive pause in debug mode */
            scanf(" %ld ",&k);}
    }

    /* Finalize TCPchecklist. */
    TCPchecklist_finit();

    /* Evacuate the queue remnants. */
    g_queue_clear(queues[FLOW_1][GOOD]);
    g_queue_clear(queues[FLOW_1][BAD]);
    g_queue_clear(queues[FLOW_2][GOOD]);
    g_queue_clear(queues[FLOW_2][BAD]);
}
/* Lexer callback: accumulate one JSON token into the parser's token queue,
 * tracking brace/bracket nesting. When a complete (balanced) expression, a
 * lexical error, or end-of-input is seen, parse the accumulated tokens and
 * emit the result (or the error) via parser->emit, then reset state.
 * Enforces MAX_TOKEN_SIZE / MAX_TOKEN_COUNT / MAX_NESTING limits. */
void json_message_process_token(JSONLexer *lexer, GString *input,
                                JSONTokenType type, int x, int y)
{
    JSONMessageParser *parser = container_of(lexer, JSONMessageParser, lexer);
    QObject *json = NULL;
    Error *err = NULL;
    JSONToken *token;

    switch (type) {
    case JSON_LCURLY:
        parser->brace_count++;
        break;
    case JSON_RCURLY:
        parser->brace_count--;
        break;
    case JSON_LSQUARE:
        parser->bracket_count++;
        break;
    case JSON_RSQUARE:
        parser->bracket_count--;
        break;
    case JSON_ERROR:
        error_setg(&err, "JSON parse error, stray '%s'", input->str);
        goto out_emit;
    case JSON_END_OF_INPUT:
        if (g_queue_is_empty(&parser->tokens)) {
            return;
        }
        json = json_parser_parse(&parser->tokens, parser->ap, &err);
        goto out_emit;
    default:
        break;
    }

    /*
     * Security consideration, we limit total memory allocated per object
     * and the maximum recursion depth that a message can force.
     */
    if (parser->token_size + input->len + 1 > MAX_TOKEN_SIZE) {
        error_setg(&err, "JSON token size limit exceeded");
        goto out_emit;
    }
    if (g_queue_get_length(&parser->tokens) + 1 > MAX_TOKEN_COUNT) {
        error_setg(&err, "JSON token count limit exceeded");
        goto out_emit;
    }
    if (parser->bracket_count + parser->brace_count > MAX_NESTING) {
        error_setg(&err, "JSON nesting depth limit exceeded");
        goto out_emit;
    }

    token = json_token(type, x, y, input);
    parser->token_size += input->len;
    g_queue_push_tail(&parser->tokens, token);

    /* BUG FIX: the original tested `parser->bracket_count >= 0` twice; the
     * first test must be `parser->brace_count >= 0`. Without it, input with
     * a stray '}' inside an open array (negative brace count, positive
     * bracket count) kept accumulating tokens instead of being parsed and
     * rejected immediately. */
    if ((parser->brace_count > 0 || parser->bracket_count > 0)
        && parser->brace_count >= 0 && parser->bracket_count >= 0) {
        /* Input incomplete, keep accumulating */
        return;
    }

    json = json_parser_parse(&parser->tokens, parser->ap, &err);

out_emit:
    /* Reset tokenizer state and hand the result (or error) to the consumer */
    parser->brace_count = 0;
    parser->bracket_count = 0;
    json_message_free_tokens(parser);
    parser->token_size = 0;
    parser->emit(parser->opaque, json, err);
}
/* GstBin message handler: reacts to splitmuxsink fragment notifications to
 * maintain the HLS playlist. On fragment-opened it records the new segment
 * location and start time; on fragment-closed it appends a playlist entry
 * with the fragment duration, rewrites the playlist, and deletes segment
 * files that have rotated out of the playlist window. On EOS it finalizes
 * the playlist. Always chains up to the parent handler. */
static void
gst_hls_sink2_handle_message (GstBin * bin, GstMessage * message)
{
  GstHlsSink2 *sink = GST_HLS_SINK2_CAST (bin);

  switch (message->type) {
    case GST_MESSAGE_ELEMENT:
    {
      const GstStructure *s = gst_message_get_structure (message);

      if (message->src == GST_OBJECT_CAST (sink->splitmuxsink)) {
        if (gst_structure_has_name (s, "splitmuxsink-fragment-opened")) {
          /* a new fragment started: remember where it lives and when */
          g_free (sink->current_location);
          sink->current_location =
              g_strdup (gst_structure_get_string (s, "location"));
          gst_structure_get_clock_time (s, "running-time",
              &sink->current_running_time_start);
        } else if (gst_structure_has_name (s, "splitmuxsink-fragment-closed")) {
          GstClockTime running_time;
          gchar *entry_location;

          /* NOTE(review): assumes a fragment-opened message always preceded
           * this one (current_location non-NULL and matching) — confirm. */
          g_assert (strcmp (sink->current_location,
                  gst_structure_get_string (s, "location")) == 0);

          gst_structure_get_clock_time (s, "running-time", &running_time);

          GST_INFO_OBJECT (sink, "COUNT %d", sink->index);
          /* playlist entries are relative to playlist_root when set */
          if (sink->playlist_root == NULL) {
            entry_location = g_path_get_basename (sink->current_location);
          } else {
            gchar *name = g_path_get_basename (sink->current_location);
            entry_location = g_build_filename (sink->playlist_root, name, NULL);
            g_free (name);
          }

          /* duration = fragment end minus recorded start */
          gst_m3u8_playlist_add_entry (sink->playlist, entry_location,
              NULL, running_time - sink->current_running_time_start,
              sink->index++, FALSE);
          g_free (entry_location);

          gst_hls_sink2_write_playlist (sink);

          /* rotate: remember this segment and delete any segment files that
           * no longer appear in the playlist window */
          g_queue_push_tail (&sink->old_locations,
              g_strdup (sink->current_location));
          while (g_queue_get_length (&sink->old_locations) >
              g_queue_get_length (sink->playlist->entries)) {
            gchar *old_location = g_queue_pop_head (&sink->old_locations);
            g_remove (old_location);
            g_free (old_location);
          }
        }
      }
      break;
    }
    case GST_MESSAGE_EOS:{
      /* mark the playlist finished (#EXT-X-ENDLIST) and write it out */
      sink->playlist->end_list = TRUE;
      gst_hls_sink2_write_playlist (sink);
      break;
    }
    default:
      break;
  }

  GST_BIN_CLASS (parent_class)->handle_message (bin, message);
}
/* Callback invoked for every captured DeckLink audio packet. Derives the
 * output timestamp from the paired video source's clock mapping when one
 * exists (honoring its no-signal-drop and skip-first-time settings),
 * otherwise uses the raw capture time. Unless flushing, wraps the packet in
 * a CapturePacket and appends it to self->current_packets, dropping the
 * oldest packets once buffer_size is reached. Takes a ref on @packet and
 * signals the streaming thread. */
static void
gst_decklink_audio_src_got_packet (GstElement * element,
    IDeckLinkAudioInputPacket * packet, GstClockTime capture_time,
    GstClockTime packet_time, gboolean no_signal)
{
  GstDecklinkAudioSrc *self = GST_DECKLINK_AUDIO_SRC_CAST (element);
  GstClockTime timestamp;

  GST_LOG_OBJECT (self,
      "Got audio packet at %" GST_TIME_FORMAT " / %" GST_TIME_FORMAT
      ", no signal %d", GST_TIME_ARGS (capture_time),
      GST_TIME_ARGS (packet_time), no_signal);

  g_mutex_lock (&self->input->lock);
  if (self->input->videosrc) {
    GstDecklinkVideoSrc *videosrc =
        GST_DECKLINK_VIDEO_SRC_CAST (gst_object_ref (self->input->videosrc));

    /* mirror the video source's drop-no-signal policy for audio */
    if (videosrc->drop_no_signal_frames && no_signal) {
      g_mutex_unlock (&self->input->lock);
      return;
    }

    if (videosrc->first_time == GST_CLOCK_TIME_NONE)
      videosrc->first_time = packet_time;

    /* honor the configured skip window at the start of capture */
    if (videosrc->skip_first_time > 0
        && packet_time - videosrc->first_time < videosrc->skip_first_time) {
      GST_DEBUG_OBJECT (self,
          "Skipping frame as requested: %" GST_TIME_FORMAT " < %"
          GST_TIME_FORMAT, GST_TIME_ARGS (packet_time),
          GST_TIME_ARGS (videosrc->skip_first_time + videosrc->first_time));
      g_mutex_unlock (&self->input->lock);
      return;
    }

    /* either pass the stream time through, or map it onto the pipeline
     * clock using the video source's current calibration */
    if (videosrc->output_stream_time)
      timestamp = packet_time;
    else
      timestamp = gst_clock_adjust_with_calibration (NULL, packet_time,
          videosrc->current_time_mapping.xbase,
          videosrc->current_time_mapping.b, videosrc->current_time_mapping.num,
          videosrc->current_time_mapping.den);
  } else {
    /* no paired video source: fall back to the raw capture time */
    timestamp = capture_time;
  }
  g_mutex_unlock (&self->input->lock);

  GST_LOG_OBJECT (self, "Converted times to %" GST_TIME_FORMAT,
      GST_TIME_ARGS (timestamp));

  g_mutex_lock (&self->lock);
  if (!self->flushing) {
    CapturePacket *p;

    /* bound the queue: drop oldest packets past buffer_size */
    while (g_queue_get_length (&self->current_packets) >= self->buffer_size) {
      p = (CapturePacket *) g_queue_pop_head (&self->current_packets);
      GST_WARNING_OBJECT (self, "Dropping old packet at %" GST_TIME_FORMAT,
          GST_TIME_ARGS (p->timestamp));
      capture_packet_free (p);
    }

    p = (CapturePacket *) g_malloc0 (sizeof (CapturePacket));
    p->packet = packet;
    p->timestamp = timestamp;
    p->no_signal = no_signal;
    /* keep the packet alive while it sits in our queue */
    packet->AddRef ();
    g_queue_push_tail (&self->current_packets, p);
    g_cond_signal (&self->cond);
  }
  g_mutex_unlock (&self->lock);
}
/* Funcion loadDispatcher
 * Dispatcher thread body: repeatedly pops messages from the shared async
 * queue. A message whose destination equals its protocol plugin's own
 * address has arrived and is appended to the message log; otherwise the
 * routing table is consulted for a matching (protocol, destination-pattern)
 * entry and the message is forwarded through the selected plugin.
 *
 * data:   a ThreadData* carrying the plugin datalist and the message queue.
 * return: always NULL (thread exit value).
 */
gpointer loadDispatcher (gpointer data) {
    ThreadData* tData;
    Message* msg;
    Plugin* plugin;
    RoutingEntry* entry;
    GData** dPlugins;
    GAsyncQueue* qMessages;
    gint tableLength, i;
    GError* ioError;
    gchar* record;
    gchar* sendError;

    tData = data;
    dPlugins = tData->dPlugins;
    qMessages = tData->qMessages;
    tableLength = g_queue_get_length(routingTable);

    /* Now the dispatcher is fully functional. */
    g_debug("Dispatcher up & running");

    /* Keeps sending data */
    while (dPlugins != NULL) {
        /* Gets a new message to dispatch.
         * Trying to pop a message is actually more efficient than
         * search for at least one element in the queue.
         * */
        if ((msg = (Message*)g_async_queue_try_pop(qMessages)) != NULL) {
            /* Chooses a default plugin using the original message protocol. */
            plugin = g_datalist_get_data(dPlugins, msg->proto);

            if (g_str_equal(msg->dest, plugin->pluginAddress())) {
                /* Message reached its destination: log it to disk.
                 * BUG FIX: the original compared the GIOStatus results against
                 * (G_IO_STATUS_ERROR | G_IO_STATUS_AGAIN) — a bitwise OR of
                 * enum values — which mislabeled every successful write as a
                 * failure; it also leaked the g_strconcat() buffer. */
                ioError = NULL;
                record = g_strconcat(msg->proto, DELIMITER, msg->src, DELIMITER, msg->data, EOL, NULL);
                if ((g_io_channel_write_chars(msgLog, record, -1, NULL, &ioError) == G_IO_STATUS_NORMAL)
                    && (g_io_channel_flush(msgLog, &ioError) == G_IO_STATUS_NORMAL)) {
                    g_debug(MSGWRITEONLOG);
                } else {
                    if (ioError != NULL) {
                        g_warning("%s: %s", CANNOTWRITEDATA, ioError->message);
                        g_error_free(ioError);
                    } else {
                        g_warning("%s: %s", CANNOTWRITEDATA, NOERRORAVAILABLE);
                    }
                }
                g_free(record);
            } else {
                /* Search for a route in the route table; first matching entry
                 * overrides the default plugin. */
                i = 0;
                while (i < tableLength) {
                    entry = g_queue_peek_nth(routingTable, i);
                    if (g_str_equal(entry->msgProto, msg->proto)
                        && g_pattern_match_string(entry->msgAddrPattern, msg->dest)) {
                        plugin = g_datalist_get_data(dPlugins, entry->destProto);
                        i = tableLength; /* stop scanning */
                    } else {
                        i++;
                    }
                }

                /* BUG FIX: the original passed an UNINITIALIZED gchar** to
                 * pluginSend() and then dereferenced it — undefined behavior.
                 * Use a NULL-initialized gchar* and pass its address. */
                sendError = NULL;
                if (!plugin->pluginSend((gpointer)msg->dest, (gpointer)msg, &sendError)) {
                    g_warning("%s: %s", CANNOTSENDDATA,
                              sendError != NULL ? sendError : NOERRORAVAILABLE);
                }
            }
        } else {
            /* queue empty: back off briefly */
            g_usleep(WAITPERIOD);
        }
    }

    g_debug("End dispatching process");
    return (NULL);
}
/* Lexer callback: append one JSON token to the parser's token queue while
 * tracking brace/bracket nesting. Emits the accumulated token list to
 * parser->emit when an expression completes, or emits an error indication
 * (a fresh empty list) on a lexical error or when the size/count/nesting
 * limits are exceeded; otherwise returns and keeps accumulating. */
static void json_message_process_token(JSONLexer *lexer, GString *input, JSONTokenType type, int x, int y)
{
    JSONMessageParser *parser = container_of(lexer, JSONMessageParser, lexer);
    JSONToken *token;
    GQueue *tokens;

    /* update nesting depth counters */
    switch (type) {
    case JSON_LCURLY:
        parser->brace_count++;
        break;
    case JSON_RCURLY:
        parser->brace_count--;
        break;
    case JSON_LSQUARE:
        parser->bracket_count++;
        break;
    case JSON_RSQUARE:
        parser->bracket_count--;
        break;
    default:
        break;
    }

    /* copy the lexeme into a freshly allocated, NUL-terminated token
     * (JSONToken has a trailing flexible char array) */
    token = g_malloc(sizeof(JSONToken) + input->len + 1);
    token->type = type;
    memcpy(token->str, input->str, input->len);
    token->str[input->len] = 0;
    token->x = x;
    token->y = y;

    parser->token_size += input->len;

    g_queue_push_tail(parser->tokens, token);

    if (type == JSON_ERROR) {
        /* lexical error: discard everything and signal the parser */
        goto out_emit_bad;
    } else if (parser->brace_count < 0 ||
               parser->bracket_count < 0 ||
               (parser->brace_count == 0 &&
                parser->bracket_count == 0)) {
        /* balanced (or over-closed) expression: hand it to the parser */
        goto out_emit;
    } else if (parser->token_size > MAX_TOKEN_SIZE ||
               g_queue_get_length(parser->tokens) > MAX_TOKEN_COUNT ||
               parser->bracket_count + parser->brace_count > MAX_NESTING) {
        /* Security consideration, we limit total memory allocated per object
         * and the maximum recursion depth that a message can force. */
        goto out_emit_bad;
    }

    /* expression still open and within limits: keep accumulating */
    return;

out_emit_bad:
    /*
     * Clear out token list and tell the parser to emit an error
     * indication by passing it a NULL list
     */
    json_message_free_tokens(parser);
out_emit:
    /* send current list of tokens to parser and reset tokenizer */
    parser->brace_count = 0;
    parser->bracket_count = 0;
    /* parser->emit takes ownership of parser->tokens. Remove our own
     * reference to parser->tokens before handing it out to parser->emit. */
    tokens = parser->tokens;
    parser->tokens = g_queue_new();
    parser->emit(parser, tokens);
    parser->token_size = 0;
}
/* Construct and start a TorFlowManager from command-line arguments.
 *
 * Expects exactly 9 argv entries: program name, v3bw output path, pause
 * time, worker count, slice size, node capacity fraction, Tor control port,
 * SOCKS port, and a comma-separated "name:port" list of file servers.
 * Parses the file servers, sets up the main and controller epoll
 * descriptors, starts the base controller and the per-worker probers
 * (assigning file servers round-robin), and returns the manager.
 *
 * Returns NULL on bad argument count or worker count; most other failures
 * are currently g_assert()ed (see the TODO markers) rather than reported.
 */
TorFlowManager* torflowmanager_new(gint argc, gchar* argv[], ShadowLogFunc slogf, ShadowCreateCallbackFunc scbf) {
    g_assert(slogf);
    g_assert(scbf);

    if(argc != 9) {
        slogf(SHADOW_LOG_LEVEL_WARNING, __FUNCTION__, USAGE);
        return NULL;
    }

    /* argv[0] is the 'program name' and should be ignored */
    gchar* v3bwPath = argv[1];
    gint pausetime = atoi(argv[2]);

    gint numWorkers = atoi(argv[3]);
    if(numWorkers < 1) {
        slogf(SHADOW_LOG_LEVEL_WARNING, __FUNCTION__,
                "Invalid number of torflow workers (%d). torflow will not operate.", numWorkers);
        return NULL;
    }

    gint slicesize = atoi(argv[4]);
    gdouble nodeCap = atof(argv[5]);

    /* ports are parsed in host order and stored in network order */
    gint hostControlPort = atoi(argv[6]);
    g_assert(hostControlPort <= G_MAXUINT16); // TODO log error instead
    in_port_t netControlPort = htons((in_port_t)hostControlPort);

    gint hostSocksPort = atoi(argv[7]);
    g_assert(hostSocksPort <= G_MAXUINT16); // TODO log error instead
    in_port_t netSocksPort = htons((in_port_t)hostSocksPort);

    /* get file server infos: argv[8] is "name:port[,name:port...]" */
    GQueue* fileservers = g_queue_new();
    gchar** fsparts = g_strsplit(argv[8], ",", 0);
    gchar* fspart = NULL;
    for(gint i = 0; (fspart = fsparts[i]) != NULL; i++) {
        gchar** parts = g_strsplit(fspart, ":", 0);
        g_assert(parts[0] && parts[1]);

        /* the server domain name */
        gchar* name = parts[0];

        /* port in host order */
        gchar* hostFilePortStr = parts[1];
        gint hostFilePort = atoi(hostFilePortStr);
        g_assert(hostFilePort <= G_MAXUINT16); // TODO log error instead
        in_port_t netFilePort = htons((in_port_t)hostFilePort);

        TorFlowFileServer* fs = torflowfileserver_new(name, netFilePort);
        g_assert(fs);

        g_queue_push_tail(fileservers, fs);
        g_strfreev(parts);

        slogf(SHADOW_LOG_LEVEL_INFO, __FUNCTION__,
                "parsed file server %s at %s:%u",
                torflowfileserver_getName(fs),
                torflowfileserver_getHostIPStr(fs),
                ntohs(torflowfileserver_getNetPort(fs)));
    }
    g_strfreev(fsparts);
    g_assert(g_queue_get_length(fileservers) > 0); // TODO log error instead

    /* use epoll to asynchronously watch events for all of our sockets */
    gint mainEpollDescriptor = epoll_create(1);
    g_assert(mainEpollDescriptor > 0); // TODO log error instead

    TorFlowManager* tfm = g_new0(TorFlowManager, 1);
    tfm->slogf = slogf;
    tfm->scbf = scbf;
    tfm->workers = numWorkers;
    tfm->ed = mainEpollDescriptor;
    tfm->slicesize = slicesize;

    /* relays are owned by this table and freed via _torflowmanager_freeRelay */
    tfm->AllRelaysByFingerprint = g_hash_table_new_full(g_str_hash, g_str_equal, NULL, (GDestroyNotify)_torflowmanager_freeRelay);
    tfm->currentSlices = g_queue_new();

    /* now start our controller to fetch descriptors; its epoll fd is
     * watched by the main epoll fd */
    tfm->baseED = epoll_create(1);
    g_assert(tfm->baseED > 0); // TODO log error
    torflowutil_epoll(tfm->ed, tfm->baseED, EPOLL_CTL_ADD, EPOLLIN, tfm->slogf);

    TorFlowEventCallbacks handlers;
    memset(&handlers, 0, sizeof(TorFlowEventCallbacks));
    handlers.onBootstrapComplete = (BootstrapCompleteFunc) _torflowmanager_onBootstrapComplete;
    handlers.onDescriptorsReceived = (DescriptorsReceivedFunc) _torflowmanager_onDescriptorsReceived;
    torflowbase_init(&tfm->_base, &handlers, slogf, scbf, netControlPort, tfm->baseED, 0);
    torflowbase_start(&tfm->_base);

    /* helper to manage stat reports and create v3bw files */
    tfm->tfa = torflowaggregator_new(slogf, v3bwPath, nodeCap);

    /* workers that will probe the relays, keyed by their epoll descriptor */
    tfm->probers = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL, (GDestroyNotify) torflowbase_free);

    for(gint i = 1; i <= numWorkers; i++) {
        /* get the next fileserver */
        TorFlowFileServer* probeFileServer = g_queue_pop_head(fileservers);

        TorFlowProber* prober = torflowprober_new(slogf, scbf, tfm, i, numWorkers,
                pausetime, netControlPort, netSocksPort, probeFileServer);
        g_assert(prober); // TODO log error instead

        /* make sure we watch the prober events on our main epoll */
        gint proberED = torflow_getEpollDescriptor((TorFlow*)prober);
        torflowutil_epoll(tfm->ed, proberED, EPOLL_CTL_ADD, EPOLLIN, tfm->slogf);

        /* store the prober by its unique epoll descriptor */
        g_hash_table_replace(tfm->probers, GINT_TO_POINTER(proberED), prober);

        /* reuse the file server in round robin fashion */
        g_queue_push_tail(fileservers, probeFileServer);
    }

    /* the used file servers have been reffed by the probers;
     * the rest will be safely freed */
    g_queue_free_full(fileservers, (GDestroyNotify)torflowfileserver_unref);

    tfm->slogf(SHADOW_LOG_LEVEL_MESSAGE, __FUNCTION__,
            "started torflow with %i workers on control port %i and socks port %i",
            numWorkers, hostControlPort, hostSocksPort);

    return tfm;
}
/* Consume consensus descriptor lines from @descriptorLines (freeing each
 * line) and create/update TorFlowRelay entries in tfm->AllRelaysByFingerprint:
 * 'r' lines identify a relay (nickname + base64 identity, converted to
 * base16), 's' lines set its Running/Fast/Exit flags, and 'w' lines set its
 * bandwidth. Returns the total number of known relays. */
static guint _torflowmanager_parseAndStoreRelays(TorFlowManager* tfm, GQueue* descriptorLines) {
    TorFlowRelay* currentRelay = NULL;

    while(descriptorLines != NULL && g_queue_get_length(descriptorLines) > 0) {
        gchar* line = g_queue_pop_head(descriptorLines);
        if(!line) continue;

        switch(line[0]) {
            case 'r': {
                /* "r <nickname> <identity-base64> ..." */
                gchar** parts = g_strsplit(line, " ", 4);
                GString* id64 = g_string_new(parts[2]);
                id64 = g_string_append_c(id64, '=');
                GString* id = torflowutil_base64ToBase16(id64);

                currentRelay = g_hash_table_lookup(tfm->AllRelaysByFingerprint, id->str);
                if(!currentRelay) {
                    /* new relay: the table keys off id->str, which stays
                     * alive inside currentRelay->identity */
                    currentRelay = g_new0(TorFlowRelay, 1);
                    currentRelay->identity = id;
                    currentRelay->nickname = g_string_new(parts[1]);
                    g_hash_table_replace(tfm->AllRelaysByFingerprint, id->str, currentRelay);
                } else {
                    /* BUG FIX: the relay already owns an equal identity
                     * string; the freshly converted one was leaked before */
                    g_string_free(id, TRUE);
                }
                g_string_free(id64, TRUE);
                g_strfreev(parts);

                tfm->slogf(SHADOW_LOG_LEVEL_DEBUG, tfm->_base.id,
                        "now getting descriptor for relay %s", currentRelay->nickname->str);
                break;
            }
            case 's': {
                /* BUG FIX: guard against an 's' line arriving before any 'r'
                 * line, which previously dereferenced NULL */
                if(!currentRelay) {
                    break;
                }
                if(g_strstr_len(line, -1, " Running")) {
                    currentRelay->isRunning = TRUE;
                } else {
                    currentRelay->isRunning = FALSE;
                }
                if(g_strstr_len(line, -1, " Fast")) {
                    currentRelay->isFast = TRUE;
                } else {
                    currentRelay->isFast = FALSE;
                }
                if(g_strstr_len(line, -1, " Exit")) {
                    currentRelay->isExit = TRUE;
                    /* BadExit relays are treated as not running */
                    if(g_strstr_len(line, -1, " BadExit")) {
                        currentRelay->isRunning = FALSE;
                    }
                } else {
                    currentRelay->isExit = FALSE;
                }
                break;
            }
            case 'w': {
                /* BUG FIX: guard both a missing preceding 'r' line and a 'w'
                 * line without a "Bandwidth=" token (atoi on NULL+10 was UB) */
                const gchar* bwstr = currentRelay ? g_strstr_len(line, -1, "Bandwidth=") : NULL;
                if(bwstr) {
                    currentRelay->descriptorBandwidth = atoi(bwstr + 10);
                    /* normally we would use advertised BW, but that is not available */
                    currentRelay->advertisedBandwidth = currentRelay->descriptorBandwidth;
                }
                break;
            }
            case '.': //meaningless; squelch
                break;
            default:
                tfm->slogf(SHADOW_LOG_LEVEL_MESSAGE, tfm->_base.id,
                        "don't know what to do with response '%s'", line);
                break;
        }

        g_free(line);
    }

    return g_hash_table_size(tfm->AllRelaysByFingerprint);
}
int dispatch(void * data, fdsession_t * fdsess, bool cache_flag) { btlsw_proto_t * pkg = reinterpret_cast<btlsw_proto_t *>(data); uint32_t len = pkg->len; uint16_t cmd = pkg->cmd; uint32_t seq = pkg->seq; int fd = fdsess->fd; uint32_t uid = pkg->id; TRACE_TLOG("dispatch[%u] sender=%u, fd=%u, seq=%u, len=%u, cache_flag=%d", cmd, uid, fd, seq, len, cache_flag); c_online *p_online = get_online_by_fd(fd); if (cmd == btlsw_online_register_cmd) { if (p_online) { /* 注册时, 发现通过fd连过来的online, 重复发来注册的包 */ ERROR_TLOG("dup reg online, fd=%d, olip=0x%X, u=%u", fdsess->fd, fdsess->remote_ip, uid); return -1; } uint32_t online_id = *((uint32_t *)pkg->body); p_online = new c_online(fdsess, online_id); add_online(p_online); DEBUG_TLOG("reg_online, fd=%d, olip=0x%X, olid=%u", fdsess->fd, fdsess->remote_ip, online_id); return 0; } if (!p_online) { /* 到此, 无论是新注册, 还是注册后协议, * 都应该有 online 了, 找不到就是有问题 */ ERROR_TLOG("nofound p_online by fd=%d, olip=0x%X, cmd=%u, u=%u", fd, fdsess->remote_ip, cmd, uid); return -1; } c_player *p_player = p_online->get_player(uid); if (cmd == btlsw_player_enter_hall_cmd) { if (p_player) { DEBUG_TLOG("player re_enter_hall, lastinfo: u=%u, role_tm=%u, olid=%u", p_player->m_role_tm, p_player->m_server_id, p_player->m_server_id); destroy_player(p_player); p_player = NULL; } uint32_t online_id = *((uint32_t *)pkg->body); uint32_t role_tm = *((uint32_t *)(pkg->body + 4)); p_player = alloc_player(p_online, uid, online_id, role_tm); } if (!p_player) { ERROR_TLOG("nofound c_player, cmd=%u, u=%u, olid=%u", cmd, uid, p_online->m_id); return -1; } if (cache_flag && p_player->m_waitcmd) { if (g_queue_get_length(p_player->m_pkg_queue) < MAX_CACHE_PKG) { DEBUG_TLOG("cache a pkg u=%u, cmd=%u, wcmd=%u", p_player->m_id, cmd, p_player->m_waitcmd); cache_a_pkg(p_player, pkg, len); return 0; } else { WARN_TLOG("too many cache pkg, u=%u, cmd=%u, wcmd=%u", p_player->m_id, cmd, p_player->m_waitcmd); return 0; } } p_player->m_waitcmd = cmd; p_player->m_seq = seq; p_player->m_ret = 0; 
p_player->m_last_pkg_time = get_now_tv()->tv_sec; bind_proto_cmd_t * p_cmd = NULL; if (0 != find_btlsw_cmd_bind(cmd, &p_cmd)) { ERROR_TLOG("btl sw cmdid not existed: %u", cmd); return 0; } uint32_t body_len = len - sizeof(btlsw_proto_t); bool read_ret = p_cmd->p_in->read_from_buf_ex((char *)data + sizeof(btlsw_proto_t), body_len); if (!read_ret) { ERROR_TLOG("read_from_buf_ex error cmd=%u, u=%u", cmd, p_player->m_id); return -1; } int cmd_ret = p_cmd->func(p_player, p_cmd->p_in, p_cmd->p_out, NULL); return cmd_ret; }
/* Redraws the whole listbox widget: one text row per visible line, with the
 * currently selected row highlighted, plus an optional scrollbar.
 * focused selects the hot-focus color for the selection when TRUE. */
static void
listbox_draw (WListbox * l, gboolean focused)
{
    Widget *w = WIDGET (l);
    const WDialog *h = w->owner;
    const gboolean disabled = (w->options & W_DISABLED) != 0;
    const int normalc = disabled ? DISABLED_COLOR : h->color[DLG_COLOR_NORMAL];
    /* *INDENT-OFF* */
    int selc = disabled
        ? DISABLED_COLOR
        : focused ? h->color[DLG_COLOR_HOT_FOCUS] : h->color[DLG_COLOR_FOCUS];
    /* *INDENT-ON* */
    int length = 0;
    GList *le = NULL;
    int pos;
    int i;
    int sel_line = -1;          /* screen row of the selected entry, -1 = not yet seen */

    if (l->list != NULL)
    {
        length = g_queue_get_length (l->list);
        /* start iterating at the first visible entry */
        le = g_queue_peek_nth_link (l->list, (guint) l->top);
    }

    /* pos = (le == NULL) ? 0 : g_list_position (l->list, le); */
    pos = (le == NULL) ? 0 : l->top;

    for (i = 0; i < w->lines; i++)
    {
        const char *text = "";

        /* Display the entry */
        if (pos == l->pos && sel_line == -1)
        {
            sel_line = i;
            tty_setcolor (selc);
        }
        else
            tty_setcolor (normalc);

        widget_move (l, i, 1);

        /* rows past the end of the list are painted with empty text */
        if (l->list != NULL && le != NULL && (i == 0 || pos < length))
        {
            WLEntry *e = LENTRY (le->data);
            text = e->text;
            le = g_list_next (le);
            pos++;
        }

        tty_print_string (str_fit_to_term (text, w->cols - 2, J_LEFT_FIT));
    }

    /* remember where the cursor should sit for later positioning */
    l->cursor_y = sel_line;

    /* scrollbar only when the list does not fit in the widget */
    if (l->scrollbar && length > w->lines)
    {
        tty_setcolor (normalc);
        listbox_drawscroll (l);
    }
}
/**
 * gst_bus_timed_pop_filtered:
 * @bus: a #GstBus to pop from
 * @timeout: a timeout in nanoseconds, or GST_CLOCK_TIME_NONE to wait forever
 * @types: message types to take into account, GST_MESSAGE_ANY for any type
 *
 * Get a message from the bus whose type matches the message type mask @types,
 * waiting up to the specified timeout (and discarding any messages that do not
 * match the mask provided).
 *
 * If @timeout is 0, this function behaves like gst_bus_pop_filtered(). If
 * @timeout is #GST_CLOCK_TIME_NONE, this function will block forever until a
 * matching message was posted on the bus.
 *
 * Returns: a #GstMessage matching the filter in @types, or NULL if no matching
 * message was found on the bus until the timeout expired.
 * The message is taken from the bus and needs to be unreffed with
 * gst_message_unref() after usage.
 *
 * MT safe.
 *
 * Since: 0.10.15
 */
GstMessage *
gst_bus_timed_pop_filtered (GstBus * bus, GstClockTime timeout,
    GstMessageType types)
{
  GstMessage *message;
  GTimeVal *timeval, abstimeout;
  gboolean first_round = TRUE;

  g_return_val_if_fail (GST_IS_BUS (bus), NULL);
  g_return_val_if_fail (types != 0, NULL);

  g_mutex_lock (bus->queue_lock);

  while (TRUE) {
    GST_LOG_OBJECT (bus, "have %d messages", g_queue_get_length (bus->queue));

    /* drain queued messages, unreffing those that do not match the mask */
    while ((message = g_queue_pop_head (bus->queue))) {
      GST_DEBUG_OBJECT (bus, "got message %p, %s, type mask is %u",
          message, GST_MESSAGE_TYPE_NAME (message), (guint) types);
      if ((GST_MESSAGE_TYPE (message) & types) != 0) {
        /* exit the loop, we have a message */
        goto beach;
      } else {
        GST_DEBUG_OBJECT (bus, "discarding message, does not match mask");
        gst_message_unref (message);
        message = NULL;
      }
    }

    /* no need to wait, exit loop */
    if (timeout == 0)
      break;

    if (timeout == GST_CLOCK_TIME_NONE) {
      /* wait forever */
      timeval = NULL;
    } else if (first_round) {
      /* nanoseconds -> microseconds for g_time_val_add() */
      glong add = timeout / 1000;

      if (add == 0)
        /* no need to wait */
        break;

      /* make timeout absolute */
      g_get_current_time (&abstimeout);
      g_time_val_add (&abstimeout, add);
      timeval = &abstimeout;
      first_round = FALSE;
      GST_DEBUG_OBJECT (bus, "blocking for message, timeout %ld", add);
    } else {
      /* calculated the absolute end time already, no need to do it again */
      GST_DEBUG_OBJECT (bus, "blocking for message, again");
      timeval = &abstimeout;    /* fool compiler */
    }
    /* releases queue_lock while waiting; a timeout ends the loop with
     * message == NULL from the drain loop above */
    if (!g_cond_timed_wait (bus->priv->queue_cond, bus->queue_lock, timeval)) {
      GST_INFO_OBJECT (bus, "timed out, breaking loop");
      break;
    } else {
      GST_INFO_OBJECT (bus, "we got woken up, recheck for message");
    }
  }

beach:
  g_mutex_unlock (bus->queue_lock);

  return message;
}
/* Main select()-based accept/serve loop for the HTTP server session.
 * Accepts new connections on the listener, reads and parses requests on
 * client sockets, dispatches GET/HEAD/POST handlers, and closes the least
 * recently used connection on idle timeout. Does not return in normal use. */
void server(session_t* session)
{
    gchar **lines, **tokens, **chunks;
    char verb[VERB_SIZE], resource[RESOURCE_SIZE], protocol[PROTOCOL_SIZE];
    char buffer[BUFFER_SIZE];
    int selectStatus, currentReadFd, readBytes;
    struct timeval timer;

    session->q = g_queue_new();

    /* Setup read file descriptor set for select() */
    fd_set reader;
    FD_ZERO(&reader);
    session->read_fds = newFdSet();

    /* Keep track of largest file descriptor */
    session->listener = createSocket(session->server);
    session->maxFd = session->listener;
    session->connections = g_hash_table_new (g_direct_hash, g_direct_equal);
    FD_SET(session->listener, &session->read_fds);

    /* Main Loop */
    for (;;) {
        reader = session->read_fds;
        /* bugfix: select() may modify the timeout; reset it every iteration
         * so each wait is a full 30 seconds */
        timer.tv_sec = 30;
        timer.tv_usec = 0;

        selectStatus = select(session->maxFd + 1, &reader, NULL, NULL, &timer);
        if (selectStatus == -1) {
            fprintf(stderr, "Select failed\n");
            exit(1);
        }
        /* Handle timeouts: close the least recently active connection */
        else if (selectStatus == 0) {
            if (g_queue_get_length(session->q) > 0) {
                session->maxFd = closeSocket(GPOINTER_TO_INT(g_queue_pop_tail(session->q)), session);
                continue;
            }
        }

        /* There's something to read */
        for (currentReadFd = 0; currentReadFd <= session->maxFd; currentReadFd++) {
            if (!FD_ISSET(currentReadFd, &reader)) {
                continue;
            }
            if (currentReadFd == session->listener) {
                newConnection(session);
                continue;
            }

            memset(buffer, '\0', BUFFER_SIZE);
            readBytes = recv(currentReadFd, buffer, BUFFER_SIZE - 1, 0);
            if (readBytes <= 0) {
                closeSocket(currentReadFd, session);
                continue;
            }

            chunks = g_strsplit(buffer, "\r\n\r\n", 2);
            lines = g_strsplit(chunks[0] != NULL ? chunks[0] : "", "\r\n", 20);
            /* bugfix: guard against an empty/malformed request before
             * splitting the request line */
            if (lines[0] == NULL) {
                g_strfreev(lines);
                g_strfreev(chunks);
                closeSocket(currentReadFd, session);
                continue;
            }
            tokens = g_strsplit(lines[0], " ", 3);
            /* bugfix: a request line with fewer than 3 tokens previously
             * caused strlen(NULL) — undefined behavior */
            if (tokens[0] == NULL || tokens[1] == NULL || tokens[2] == NULL) {
                g_strfreev(tokens);
                g_strfreev(lines);
                g_strfreev(chunks);
                closeSocket(currentReadFd, session);
                continue;
            }

            /* bugfix: strncpy(dst, src, strlen(src)) put no bound on the
             * destination buffer; snprintf is bounded and NUL-terminates */
            snprintf(verb, sizeof verb, "%s", tokens[0]);
            snprintf(resource, sizeof resource, "%s", tokens[1]);
            snprintf(protocol, sizeof protocol, "%s", tokens[2]);

            setSessionHeaders(session, lines);
            setSessionVerb(session, verb);
            if (session->verb == VERB_HEAD || session->verb == VERB_GET) {
                logToFile(session, currentReadFd, resource, verb, 200);
                handleRequest(session, currentReadFd, resource, NULL);
            }
            else if (session->verb == VERB_POST) {
                logToFile(session, currentReadFd, resource, verb, 200);
                /* chunks[1] is the request body (may be NULL) */
                handleRequest(session, currentReadFd, resource, chunks[1]);
            }
            else {
                logToFile(session, currentReadFd, resource, verb, 200);
            }

            gchar* connection = (gchar *) g_hash_table_lookup(session->headers, "Connection");
            /* move this fd to the head of the LRU queue */
            g_queue_remove(session->q, GINT_TO_POINTER(currentReadFd));
            g_queue_push_head(session->q, GINT_TO_POINTER(currentReadFd));
            if ((g_strcmp0(protocol, "HTTP/1.0") == 0 && g_strcmp0(connection, "keep-alive") != 0) ||
                (g_strcmp0(connection, "close") == 0)) {
                closeSocket(currentReadFd, session);
            }

            /* bugfix: the g_strsplit() results were leaked every request.
             * NOTE(review): 'lines' is intentionally still leaked here —
             * setSessionHeaders() may retain pointers into it; confirm its
             * ownership semantics before freeing it as well. */
            g_strfreev(tokens);
            g_strfreev(chunks);
        }
    }
    g_hash_table_destroy(session->connections);
    FD_ZERO(&session->read_fds);
    close(session->listener);
}
/* Given an end tag spanning [start, end), scans backwards for the matching
 * opening element, keeping a queue of still-unmatched end-tag names.
 * On success returns TRUE with found_element_start/found_element_end set to
 * the matching start tag; returns FALSE if the tags are unbalanced or the
 * input is not an end tag. */
gboolean
ide_xml_find_opening_element (const GtkTextIter *start, const GtkTextIter *end,
                              GtkTextIter *found_element_start, GtkTextIter *found_element_end)
{
  IdeXmlElementTagType tag_type;
  GQueue *element_queue;
  guint element_queue_length = 0;
  gchar *element_name = NULL;

  g_return_val_if_fail (found_element_start != NULL, FALSE);
  g_return_val_if_fail (found_element_end != NULL, FALSE);

  /* only end tags have an opening element to search for */
  tag_type = ide_xml_get_element_tag_type (start, end);
  if (tag_type != IDE_XML_ELEMENT_TAG_END)
    return FALSE;

  element_name = ide_xml_get_element_name (start, end);
  if (element_name == NULL)
    return FALSE;

  /* queue of end-tag names still awaiting their matching start tag;
   * the queue owns the strings (freed on pop or in the final free_full) */
  element_queue = g_queue_new();
  g_queue_push_head(element_queue, element_name);

  while (g_queue_get_length (element_queue) > 0 &&
         ide_xml_find_previous_element (start, found_element_start, found_element_end))
    {
      tag_type = ide_xml_get_element_tag_type (found_element_start, found_element_end);
      if (tag_type == IDE_XML_ELEMENT_TAG_END)
        {
          /* another nested end tag: push it so it gets matched first */
          element_name = ide_xml_get_element_name (found_element_start, found_element_end);
          if (element_name != NULL)
            g_queue_push_head(element_queue, element_name);
        }
      else if (tag_type == IDE_XML_ELEMENT_TAG_START)
        {
          element_name = ide_xml_get_element_name (found_element_start, found_element_end);
          if (element_name != NULL)
            {
              /* a start tag matches the most recent unmatched end tag */
              if(g_strcmp0 (g_queue_peek_head(element_queue), element_name) == 0)
                {
                  g_free (g_queue_pop_head (element_queue));
                  g_free (element_name);
                }
              /*Unbalanced element.Stop parsing*/
              else
                {
                  g_free (element_name);
                  goto completed;
                }
            }
        }
      /* continue scanning backwards from the element just found */
      start = found_element_start;
    }

completed:
  /* an empty queue means every end tag found its opener, and
   * found_element_start/found_element_end point at the match */
  element_queue_length = g_queue_get_length (element_queue);
  g_queue_free_full (element_queue, g_free);

  return element_queue_length > 0 ? FALSE : TRUE;
}
/* Launcher-exit callback for a pkg-config scan: normalizes the collected
 * cflags, notifies the optional parseable callback, hands the package over
 * to the engine queue (scanning immediately when the engine is idle), then
 * retires this single-scan request and advances to the next package. */
static void
on_pkg_config_exit (AnjutaLauncher * launcher, int child_pid, int exit_status,
                    gulong time_taken_in_seconds, gpointer user_data)
{
	SingleScanData *scan_data = (SingleScanData *) user_data;
	SymbolDBSystem *sdbs = scan_data->sdbs;
	SymbolDBSystemPriv *priv = sdbs->priv;
	GList *cflags = NULL;

	/* detach this handler first so it cannot fire again */
	g_signal_handlers_disconnect_by_func (launcher, on_pkg_config_exit,
	                                      user_data);

	if (scan_data->contents != NULL && strlen (scan_data->contents) > 0)
		cflags = sdb_system_get_normalized_cflags (scan_data->contents);

	/* notify the caller-supplied parseable callback, if any */
	if (scan_data->parseable_cb != NULL)
	{
		DEBUG_PRINT ("%s", "on_pkg_config_exit parseable activated");
		scan_data->parseable_cb (sdbs, cflags == NULL ? FALSE : TRUE,
		                         scan_data->parseable_data);
	}

	/* hand the package to the engine when a scan was requested and we
	 * actually obtained cflags */
	if (scan_data->engine_scan == TRUE && cflags != NULL)
	{
		EngineScanData *es_data = g_new0 (EngineScanData, 1);
		es_data->sdbs = sdbs;
		es_data->cflags = cflags;
		es_data->package_name = g_strdup (scan_data->package_name);
		es_data->special_abort_scan = FALSE;

		/* a non-empty queue means the engine is already working; in that
		 * case we only enqueue and let scan-end processing pick it up */
		gboolean engine_busy = g_queue_get_length (priv->engine_queue) > 0;
		if (engine_busy)
		{
			DEBUG_PRINT ("pushing on engine queue [length %d] %s",
			             g_queue_get_length (priv->engine_queue),
			             es_data->package_name);
			g_queue_push_tail (priv->engine_queue, es_data);
		}
		else
		{
			/* the queued entry itself marks the engine as working */
			DEBUG_PRINT ("scanning with engine queue [length %d] %s",
			             g_queue_get_length (priv->engine_queue),
			             es_data->package_name);
			g_queue_push_tail (priv->engine_queue, es_data);
			sdb_system_do_engine_scan (sdbs, es_data);
		}
	}

	/* retire this request: pop it from the queue and free it */
	g_queue_remove (priv->sscan_queue, scan_data);
	destroy_single_scan_data (scan_data);

	/* proceed with another scan */
	sdb_system_do_scan_next_package (sdbs);
}
/* Submits a list of SMS PDUs to the transmit queue.
 * flags may expose the message on D-Bus; uuid (optional out) receives the
 * entry's UUID; cb (optional) is invoked once the entry is queued.
 * Returns 0 on success, -ENOMEM or -EINVAL on failure. */
int __ofono_sms_txq_submit(struct ofono_sms *sms, GSList *list,
				unsigned int flags, struct ofono_uuid *uuid,
				ofono_sms_txq_queued_cb_t cb, void *data)
{
	struct message *m = NULL;
	struct tx_queue_entry *entry;

	entry = tx_queue_entry_new(list, flags);
	if (entry == NULL)
		return -ENOMEM;

	if (flags & OFONO_SMS_SUBMIT_FLAG_EXPOSE_DBUS) {
		m = message_create(&entry->uuid, sms->atom);
		if (m == NULL)
			goto err;

		/* NOTE(review): on this failure path only 'entry' is destroyed;
		 * 'm' appears to be leaked — confirm whether message_create's
		 * result needs explicit destruction here */
		if (message_dbus_register(m) == FALSE)
			goto err;

		message_set_data(m, entry);

		g_hash_table_insert(sms->messages, &entry->uuid, m);
	}

	/* concatenated (multi-part) messages consume a reference number;
	 * the counter wraps from 65536 back to 1 */
	if (list->next != NULL) {
		if (sms->ref == 65536)
			sms->ref = 1;
		else
			sms->ref = sms->ref + 1;
	}

	entry->id = sms->tx_counter++;

	g_queue_push_tail(sms->txq, entry);

	/* queue length 1 means the queue was idle: kick off transmission */
	if (sms->registered && g_queue_get_length(sms->txq) == 1)
		sms->tx_source = g_timeout_add(0, tx_next, sms);

	if (uuid)
		memcpy(uuid, &entry->uuid, sizeof(*uuid));

	if (flags & OFONO_SMS_SUBMIT_FLAG_EXPOSE_DBUS) {
		const char *uuid_str;
		unsigned char i;

		uuid_str = ofono_uuid_to_str(&entry->uuid);

		/* persist each PDU so the message survives a restart */
		for (i = 0; i < entry->num_pdus; i++) {
			struct pending_pdu *pdu;

			pdu = &entry->pdus[i];

			sms_tx_backup_store(sms->imsi, entry->id, entry->flags,
						uuid_str, i, pdu->pdu,
						pdu->pdu_len, pdu->tpdu_len);
		}
	}

	if (cb)
		cb(sms, &entry->uuid, data);

	if (m && (flags & OFONO_SMS_SUBMIT_FLAG_EXPOSE_DBUS))
		message_emit_added(m, OFONO_MESSAGE_MANAGER_INTERFACE);

	return 0;

err:
	tx_queue_entry_destroy(entry);

	return -EINVAL;
}
/* Sends one output message on a reliable TCP nice-socket.
 * Tries an immediate send when the queue is empty; queues the remainder on
 * partial send or EWOULDBLOCK. When a backlog already exists the message is
 * always queued (after dropping one droppable packet if the queue is full).
 * Returns the full message length on success/queued, or -1 on prior error. */
static gssize
socket_send_message (NiceSocket *sock, const NiceOutputMessage *message)
{
  TcpPriv *priv = sock->priv;
  gssize ret;
  GError *gerr = NULL;
  gsize message_len;

  /* Don't try to access the socket if it had an error, otherwise we risk a
   * crash with SIGPIPE (Broken pipe) */
  if (priv->error)
    return -1;

  message_len = output_message_get_size (message);

  /* First try to send the data, don't send it later if it can be sent now
   * this way we avoid allocating memory on every send */
  if (g_queue_is_empty (&priv->send_queue)) {
    ret = g_socket_send_message (sock->fileno, NULL, message->buffers,
        message->n_buffers, NULL, 0, G_SOCKET_MSG_NONE, NULL, &gerr);
    if (ret < 0) {
      if (g_error_matches (gerr, G_IO_ERROR, G_IO_ERROR_WOULD_BLOCK) ||
          g_error_matches (gerr, G_IO_ERROR, G_IO_ERROR_FAILED)) {
        /* Queue the message and send it later. */
        add_to_be_sent (sock, message, 0, message_len, FALSE);
        /* report the message as fully accepted even though it is queued */
        ret = message_len;
      }
      g_error_free (gerr);
    } else if ((gsize) ret < message_len) {
      /* Partial send: queue the unsent tail starting at offset ret. */
      add_to_be_sent (sock, message, ret, message_len, TRUE);
      ret = message_len;
    }
  } else {
    /* FIXME: This dropping will break http/socks5/etc
     * We probably need a way to the upper layer to control reliability */
    /* If the queue is too long, drop whatever packets we can. */
    if (g_queue_get_length (&priv->send_queue) >= MAX_QUEUE_LENGTH) {
      guint peek_idx = 0;
      struct to_be_sent *tbs = NULL;

      /* find and free the first entry marked droppable (at most one) */
      while ((tbs = g_queue_peek_nth (&priv->send_queue, peek_idx)) != NULL) {
        if (tbs->can_drop) {
          tbs = g_queue_pop_nth (&priv->send_queue, peek_idx);
          free_to_be_sent (tbs);
          break;
        } else {
          peek_idx++;
        }
      }
    }

    /* Queue the message and send it later. */
    add_to_be_sent (sock, message, 0, message_len, FALSE);
    ret = message_len;
  }

  return ret;
}
/* Executes one keyboard command against the listbox: movement, paging,
 * deletion of the current entry, or full history cleanup.
 * Returns MSG_HANDLED when the command was recognized, MSG_NOT_HANDLED
 * otherwise (or when the list is empty). */
static cb_ret_t
listbox_execute_cmd (WListbox * l, unsigned long command)
{
    Widget *w = WIDGET (l);
    cb_ret_t result = MSG_HANDLED;
    int total;
    int step;

    /* nothing to operate on */
    if (l->list == NULL || g_queue_is_empty (l->list))
        return MSG_NOT_HANDLED;

    switch (command)
    {
    case CK_Up:
        listbox_back (l);
        break;
    case CK_Down:
        listbox_fwd (l);
        break;
    case CK_Top:
        listbox_select_first (l);
        break;
    case CK_Bottom:
        listbox_select_last (l);
        break;
    case CK_PageUp:
        /* move up one page (widget height minus one), stopping at the top */
        for (step = 0; step < w->lines - 1 && l->pos > 0; step++)
            listbox_back (l);
        break;
    case CK_PageDown:
        /* move down one page, stopping at the last entry */
        total = g_queue_get_length (l->list);
        for (step = 0; step < w->lines - 1 && l->pos < total - 1; step++)
            listbox_fwd (l);
        break;
    case CK_Delete:
        if (l->deletable)
        {
            gboolean at_last_entry, view_past_end;

            total = g_queue_get_length (l->list);
            at_last_entry = (l->pos + 1 >= total);
            view_past_end = (l->top + w->lines >= total);
            listbox_remove_current (l);
            /* scroll up one line when the removal empties the last row */
            if ((l->top > 0) && (at_last_entry || view_past_end))
                l->top--;
        }
        break;
    case CK_Clear:
        if (l->deletable && mc_global.widget.confirm_history_cleanup
            /* TRANSLATORS: no need to translate 'DialogTitle', it's just a context prefix */
            && (query_dialog (Q_ ("DialogTitle|History cleanup"),
                              _("Do you want clean this history?"),
                              D_ERROR, 2, _("&Yes"), _("&No")) == 0))
            listbox_remove_list (l);
        break;
    default:
        result = MSG_NOT_HANDLED;
    }

    return result;
}
/* Processes the line just entered in the Python shell: records it in the
 * bounded command history, accumulates multi-line blocks (lines ending in
 * ':' or '\\', or indented lines inside a block), and runs the command when
 * the block is complete, then re-writes the prompt. */
static void
parasite_python_shell_process_line(GtkWidget *python_shell)
{
    ParasitePythonShellPrivate *priv =
        PARASITE_PYTHON_SHELL_GET_PRIVATE(python_shell);

    char *command = parasite_python_shell_get_input(python_shell);
    size_t command_len = strlen(command);
    char last_char;

    parasite_python_shell_append_text(PARASITE_PYTHON_SHELL(python_shell),
                                      "\n", NULL);

    if (*command != '\0')
    {
        /* Save this command in the history. */
        g_queue_push_head(priv->history, command);
        priv->cur_history_item = NULL;

        if (g_queue_get_length(priv->history) > MAX_HISTORY_LENGTH)
            g_free(g_queue_pop_tail(priv->history));
    }

    /* bugfix: the previous MAX(0, strlen(command) - 1) underflowed for an
     * empty command (strlen returns size_t, so 0 - 1 wraps to SIZE_MAX and
     * wins the unsigned MAX), causing an out-of-bounds read */
    last_char = (command_len > 0) ? command[command_len - 1] : '\0';

    if (last_char == ':' || last_char == '\\' ||
        (priv->in_block && g_ascii_isspace(command[0])))
    {
        printf("in block.. %c, %d, %d\n",
               last_char, priv->in_block, g_ascii_isspace(command[0]));

        /* This is a multi-line expression */
        if (priv->pending_command == NULL)
            priv->pending_command = g_string_new(command);
        else
            g_string_append(priv->pending_command, command);

        g_string_append_c(priv->pending_command, '\n');

        if (last_char == ':')
            priv->in_block = TRUE;
    }
    else
    {
        if (priv->pending_command != NULL)
        {
            g_string_append(priv->pending_command, command);
            g_string_append_c(priv->pending_command, '\n');

            /* We're not actually leaking this. It's in the history. */
            command = g_string_free(priv->pending_command, FALSE);
        }

        parasite_python_run(command,
                            parasite_python_shell_log_stdout,
                            parasite_python_shell_log_stderr,
                            python_shell);
        /* NOTE(review): an empty command never enters the history and is
         * never freed here — looks like a small leak; confirm ownership of
         * parasite_python_shell_get_input's return value */

        if (priv->pending_command != NULL)
        {
            /* Now do the cleanup. */
            g_free(command);
            priv->pending_command = NULL;
            priv->in_block = FALSE;
        }
    }

    parasite_python_shell_write_prompt(python_shell);
}
int ddb_gvfs_scandir (const char *dir, struct dirent ***namelist, int (*selector) (const struct dirent *), int (*cmp) (const struct dirent **, const struct dirent **)) { GQueue *file_list = g_queue_new (); GQueue *dir_list = g_queue_new (); g_queue_push_head (dir_list, g_file_new_for_uri (dir)); GFile *gdir; while ((gdir = g_queue_pop_head (dir_list)) != NULL) { GFileEnumerator *file_enumerator = g_file_enumerate_children (gdir, G_FILE_ATTRIBUTE_STANDARD_NAME, G_FILE_QUERY_INFO_NOFOLLOW_SYMLINKS, NULL, NULL); if (file_enumerator == NULL) { g_object_unref (gdir); continue; } GFileInfo *file_info; GFile *child_file; while ((file_info = g_file_enumerator_next_file (file_enumerator, NULL, NULL)) != NULL) { child_file = g_file_get_child (gdir, g_file_info_get_name (file_info)); g_object_unref (file_info); if (g_file_query_file_type (child_file, G_FILE_QUERY_INFO_NONE, NULL) == G_FILE_TYPE_DIRECTORY) g_queue_push_head (dir_list, child_file); else { g_queue_push_tail (file_list, g_file_get_uri (child_file)); g_object_unref (child_file); } } g_file_enumerator_close (file_enumerator, NULL, NULL); g_object_unref (file_enumerator); g_object_unref (gdir); } g_queue_free (dir_list); int num_files = 0; *namelist = malloc (sizeof(void *) * g_queue_get_length (file_list)); char *fname; while ((fname = g_queue_pop_head (file_list)) != NULL) { struct dirent entry; strncpy (entry.d_name, fname, sizeof(entry.d_name) - 1); entry.d_name[sizeof(entry.d_name) - 1] = '\0'; if (selector == NULL || (selector && selector(&entry))) { (*namelist)[num_files] = calloc (1, sizeof (struct dirent)); strcpy ((*namelist)[num_files]->d_name, entry.d_name); num_files++; } g_free (fname); } g_queue_free (file_list); return num_files; }
/* Flushes TCP state after window/buffer changes: moves throttled outgoing
 * packets into the socket output buffer while they fit in the send window
 * and buffer space, delivers any now-in-order received packets to the user
 * input buffer, and raises EOF status when either side has closed. */
static void _tcp_flush(TCP* tcp) {
	MAGIC_ASSERT(tcp);

	/* make sure our information is up to date */
	_tcp_updateReceiveWindow(tcp);
	_tcp_updateSendWindow(tcp);

	/* flush packets that can now be sent to socket */
	while(g_queue_get_length(tcp->throttledOutput) > 0) {
		/* get the next throttled packet, in sequence order */
		Packet* packet = g_queue_pop_head(tcp->throttledOutput);

		/* break out if we have no packets left */
		if(!packet) {
			break;
		}

		guint length = packet_getPayloadLength(packet);

		if(length > 0) {
			PacketTCPHeader header;
			packet_getTCPHeader(packet, &header);

			/* we cant send it if our window is too small */
			gboolean fitsInWindow = (header.sequence < (tcp->send.unacked + tcp->send.window)) ? TRUE : FALSE;

			/* we cant send it if we dont have enough space */
			gboolean fitsInBuffer = (length <= socket_getOutputBufferSpace(&(tcp->super))) ? TRUE : FALSE;

			if(!fitsInBuffer || !fitsInWindow) {
				/* we cant send the packet yet; put it back in order and stop */
				g_queue_push_head(tcp->throttledOutput, packet);
				break;
			} else {
				/* we will send: store length in virtual retransmission buffer
				 * so we can reduce buffer space consumed when we receive the ack */
				_tcp_addRetransmit(tcp, header.sequence, length);
			}
		}

		/* packet is sendable, we removed it from out buffer */
		tcp->throttledOutputLength -= length;

		/* update TCP header to our current advertised window and acknowledgement */
		packet_updateTCP(packet, tcp->receive.next, tcp->receive.window);

		/* keep track of the last things we sent them */
		tcp->send.lastAcknowledgement = tcp->receive.next;
		tcp->send.lastWindow = tcp->receive.window;

		/* socket will queue it ASAP */
		gboolean success = socket_addToOutputBuffer(&(tcp->super), packet);

		/* we already checked for space, so this should always succeed */
		g_assert(success);
	}

	/* any packets now in order can be pushed to our user input buffer */
	while(g_queue_get_length(tcp->unorderedInput) > 0) {
		Packet* packet = g_queue_pop_head(tcp->unorderedInput);

		PacketTCPHeader header;
		packet_getTCPHeader(packet, &header);

		if(header.sequence == tcp->receive.next) {
			/* move from the unordered buffer to user input buffer */
			gboolean fitInBuffer = socket_addToInputBuffer(&(tcp->super), packet);

			if(fitInBuffer) {
				tcp->unorderedInputLength -= packet_getPayloadLength(packet);
				(tcp->receive.next)++;
				continue;
			}
		}

		/* we could not buffer it because its out of order or we have no space */
		g_queue_push_head(tcp->unorderedInput, packet);
		break;
	}

	/* check if user needs an EOF signal */
	gboolean wantsEOF = ((tcp->flags & TCPF_LOCAL_CLOSED) || (tcp->flags & TCPF_REMOTE_CLOSED)) ? TRUE : FALSE;
	if(wantsEOF) {
		/* if anyone closed, can't send anymore */
		tcp->error |= TCPE_SEND_EOF;

		/* user has received everything up to the close point: signal EOF once */
		if((tcp->receive.next >= tcp->receive.end) && !(tcp->flags & TCPF_EOF_SIGNALED)) {
			/* user needs to read a 0 so it knows we closed */
			tcp->error |= TCPE_RECEIVE_EOF;
			descriptor_adjustStatus((Descriptor*)tcp, DS_READABLE, TRUE);
		}
	}
}