void handle_init(void) { my_window = window_create(); week_day_layer = text_layer_create(GRect(0, 10, 144, 42)); text_layer_set_background_color(week_day_layer, GColorClear); layer_add_child(window_get_root_layer(my_window), text_layer_get_layer(week_day_layer)); day_of_month_layer = text_layer_create(GRect(0,42,144,95)); text_layer_set_background_color(day_of_month_layer, GColorClear); layer_add_child(window_get_root_layer(my_window), text_layer_get_layer(day_of_month_layer)); month_layer = text_layer_create(GRect(0,110,144,58)); text_layer_set_background_color(month_layer, GColorClear); layer_add_child(window_get_root_layer(my_window), text_layer_get_layer(month_layer)); time_t now = time(NULL); set_strings(localtime(&now)); render(); inverter_layer = inverter_layer_create(GRect(0, 0, 144, 168)); layer_add_child(window_get_root_layer(my_window), inverter_layer_get_layer(inverter_layer)); window_stack_push(my_window, true); tick_timer_service_subscribe(DAY_UNIT, handle_time_change); }
/*
 * Tick callback: rebuilds the date strings from the supplied broken-down
 * time and redraws the layers. `units` is unused; the subscription
 * granularity already limits how often this fires.
 */
void handle_time_change(struct tm *t, TimeUnits units)
{
  set_strings(t);
  render();
}
/*
 * Probes the audio stream produced by `source` and copies the detected
 * stream properties into the Java-side GstHeader object `header`.
 *
 * The stream is fed through a decodebin inside a temporary pipeline that
 * is only brought to PAUSED - enough for GStreamer to run typefinding and
 * pre-roll so the properties become available, without actually playing.
 *
 * Returns JNI_TRUE when the properties were extracted and written into
 * `header`, JNI_FALSE on any failure. The caller retains ownership of
 * `env`/`header`; `source` is absorbed into (and destroyed with) the
 * temporary pipeline.
 *
 * Fix vs. previous revision: gst_bin_get_by_name() returns a NEW
 * reference, so the `typefind` element must be unreffed after use - it
 * was previously leaked on every call. Cleanup is now goto-based so each
 * resource is released on exactly one path.
 */
static jboolean
process_audio (GstElement *source, JNIEnv *env, jobject header)
{
  /* will contain the properties we need to put into the given GstHeader */
  AudioProperties *properties = NULL;

  /* GStreamer elements */
  GstElement *pipeline = NULL;
  GstElement *decoder = NULL;
  GstElement *typefind = NULL;

  GstStateChangeReturn res;
  jboolean result = JNI_FALSE;

  properties = (AudioProperties *) g_malloc0 (sizeof (AudioProperties));
  if (properties == NULL)
    {
      return result;
    }
  reset_properties (properties);

  /*
   * create the decoder element, this will decode the stream and retrieve
   * its properties.
   * We connect a signal to this element, to be informed when it is done
   * in decoding the stream and to get the needed informations about the
   * audio file.
   */
  decoder = gst_element_factory_make ("decodebin", "decoder");
  if (decoder == NULL)
    {
      goto out_properties;
    }

  /* now, we create a pipeline and fill it with the other elements */
  pipeline = gst_pipeline_new ("pipeline");
  if (pipeline == NULL)
    {
      /* decoder not yet owned by the pipeline, release it ourselves */
      gst_object_unref (GST_OBJECT (decoder));
      goto out_properties;
    }

  g_signal_connect (decoder, "new-decoded-pad",
                    G_CALLBACK (new_decoded_pad), pipeline);
  g_signal_connect (G_OBJECT (decoder), "element-added",
                    G_CALLBACK (element_added), properties);

  /*
   * we get the typefind from the decodebin to catch the additional properties
   * that the decodebin does not expose to us
   */
  typefind = gst_bin_get_by_name (GST_BIN (decoder), "typefind");
  if (typefind != NULL)
    {
      /*
       * NOTE: the above is not a typo, we can live without the typefind,
       * just, our stream detection will not be as accurate as we would.
       * Anyway, if this fails, there is some problem, probabily a memory
       * error.
       */
      g_signal_connect (G_OBJECT (typefind), "have-type",
                        G_CALLBACK (typefind_callback), properties);

      /* gst_bin_get_by_name() returned a new reference - drop it here,
       * the decodebin keeps the element alive for the pipeline's life. */
      gst_object_unref (GST_OBJECT (typefind));
    }

  /* the pipeline takes ownership of source and decoder from here on */
  gst_bin_add_many (GST_BIN (pipeline), source, decoder, NULL);
  gst_element_link (source, decoder);

  /*
   * now, we set the pipeline playing state to pause and traverse it
   * to get the info we need.
   */
  res = gst_element_set_state (pipeline, GST_STATE_PAUSED);
  if (res == GST_STATE_CHANGE_FAILURE)
    {
      goto out_pipeline;
    }

  /* block until the state change completes (or fails) */
  res = gst_element_get_state (pipeline, NULL, NULL, GST_CLOCK_TIME_NONE);
  if (res != GST_STATE_CHANGE_SUCCESS)
    {
      goto out_pipeline;
    }

  if (fill_info (decoder, properties))
    {
      result = set_strings (env, properties, header);
    }

  /* free stuff */
out_pipeline:
  gst_element_set_state (pipeline, GST_STATE_NULL);
  gst_object_unref (GST_OBJECT (pipeline));
out_properties:
  free_properties (properties);

  return result;
}