static struct node *cache_lookup(const char *path) { return (struct node *) g_hash_table_lookup(cache.table, path); }
/*
 * Synchronize the local playlists selected for syncing onto the media player
 * device: remove every playlist currently on the device, then re-create each
 * enabled, non-empty local playlist using the device's own copies of the
 * matching entries. No-op if the source class lacks playlist support.
 */
static void
sync_playlists (RBMediaPlayerSource *source)
{
	RBMediaPlayerSourcePrivate *priv = MEDIA_PLAYER_SOURCE_GET_PRIVATE (source);
	RBMediaPlayerSourceClass *klass = RB_MEDIA_PLAYER_SOURCE_GET_CLASS (source);
	RBPlaylistManager *playlist_manager;
	RBShell *shell;
	GHashTable *device;
	GList *all_playlists;
	GList *l;

	/* Playlist support is optional for media player source subclasses. */
	if (klass->impl_add_playlist == NULL || klass->impl_remove_playlists == NULL) {
		rb_debug ("source class doesn't support playlists");
		return;
	}

	/* build an updated device contents map, so we can find the device entries
	 * corresponding to the entries in the local playlists.
	 */
	device = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, (GDestroyNotify)rhythmdb_entry_unref);
	rb_media_player_source_get_entries (source, SYNC_CATEGORY_MUSIC, device);

	/* remove all playlists from the device, then add the synced playlists. */
	klass->impl_remove_playlists (source);

	/* get all local playlists */
	g_object_get (source, "shell", &shell, NULL);
	g_object_get (shell, "playlist-manager", &playlist_manager, NULL);
	all_playlists = rb_playlist_manager_get_playlists (playlist_manager);
	g_object_unref (playlist_manager);
	g_object_unref (shell);

	for (l = all_playlists; l != NULL; l = l->next) {
		char *name;
		RBSource *playlist_source = RB_SOURCE (l->data);
		RhythmDBQueryModel *model;
		GList *tracks = NULL;
		GtkTreeIter iter;

		/* is this playlist selected for syncing? */
		g_object_get (playlist_source, "name", &name, NULL);
		if (rb_sync_settings_group_enabled (priv->sync_settings, SYNC_CATEGORY_MUSIC, name) == FALSE) {
			rb_debug ("not syncing playlist %s", name);
			g_free (name);
			continue;
		}

		/* match playlist entries to entries on the device */
		g_object_get (playlist_source, "base-query-model", &model, NULL);
		if (gtk_tree_model_get_iter_first (GTK_TREE_MODEL (model), &iter) == FALSE) {
			rb_debug ("not syncing empty playlist %s", name);
			g_free (name);
			g_object_unref (model);
			continue;
		}

		do {
			char *trackid;
			RhythmDBEntry *entry;
			RhythmDBEntry *device_entry;

			entry = rhythmdb_query_model_iter_to_entry (model, &iter);
			/* entries are correlated across local DB and device by a
			 * device-independent track UUID */
			trackid = rb_sync_state_make_track_uuid (entry);

			device_entry = g_hash_table_lookup (device, trackid);
			if (device_entry != NULL) {
				tracks = g_list_prepend (tracks, device_entry);
			} else {
				rb_debug ("unable to find entry on device for track %s (id %s)",
					  rhythmdb_entry_get_string (entry, RHYTHMDB_PROP_LOCATION),
					  trackid);
			}
			g_free (trackid);
		} while (gtk_tree_model_iter_next (GTK_TREE_MODEL (model), &iter));

		/* prepend + reverse preserves the playlist's original order */
		tracks = g_list_reverse (tracks);

		/* transfer the playlist to the device */
		rb_debug ("syncing playlist %s", name);
		klass->impl_add_playlist (source, name, tracks);

		g_free (name);
		g_list_free (tracks);
		g_object_unref (model);
	}

	/* value destroy-notify unrefs the device entries collected above */
	g_hash_table_destroy (device);
}
static gboolean is_valid_tcp (const gchar *address_entry, GHashTable *key_value_pairs, GError **error) { gboolean ret; GList *keys; GList *l; const gchar *host; const gchar *port; const gchar *family; gint port_num; gchar *endp; ret = FALSE; keys = NULL; host = NULL; port = NULL; family = NULL; keys = g_hash_table_get_keys (key_value_pairs); for (l = keys; l != NULL; l = l->next) { const gchar *key = l->data; if (g_strcmp0 (key, "host") == 0) host = g_hash_table_lookup (key_value_pairs, key); else if (g_strcmp0 (key, "port") == 0) port = g_hash_table_lookup (key_value_pairs, key); else if (g_strcmp0 (key, "family") == 0) family = g_hash_table_lookup (key_value_pairs, key); else { g_set_error (error, G_IO_ERROR, G_IO_ERROR_INVALID_ARGUMENT, _("Unsupported key `%s' in address entry `%s'"), key, address_entry); goto out; } } if (port != NULL) { port_num = strtol (port, &endp, 10); if ((*port == '\0' || *endp != '\0') || port_num < 0 || port_num >= 65536) { g_set_error (error, G_IO_ERROR, G_IO_ERROR_INVALID_ARGUMENT, _("Error in address `%s' - the port attribute is malformed"), address_entry); goto out; } } if (family != NULL && !(g_strcmp0 (family, "ipv4") == 0 || g_strcmp0 (family, "ipv6") == 0)) { g_set_error (error, G_IO_ERROR, G_IO_ERROR_INVALID_ARGUMENT, _("Error in address `%s' - the family attribute is malformed"), address_entry); goto out; } if (host != NULL) { /* TODO: validate host */ } ret= TRUE; out: g_list_free (keys); return ret; }
/**
 * snmp_bc_logsrc2rid:
 * @handle: Pointer to handler's data.
 * @src: Log's "Source" field string.
 * @resinfo: Location to store HPI mapping data for resource
 * @ovr_flags: Override flags
 *
 * Translates error log's "Source" field into an HPI resource ID
 * and stores HPI mapping info needed by other routines in
 * @resinfo. Assume "Source" field text is in the following format:
 *
 *   "BLADE_0x" - map to blade x RID
 *   "SWITCH_x" - map to switch x RID
 *
 * All other "Source" field text strings are mapped to the
 * Chassis's resource ID.
 *
 * @ovr_flags is used to indicate exception cases. The only one
 * currently is to indicate if the resource is an expansion card.
 *
 * Return values:
 * SA_OK - normal case.
 * SA_ERR_HPI_INVALID_PARAMS - @handle, @src, or @resinfo is NULL.
 * SA_ERR_HPI_INTERNAL_ERROR - "Source" string or entity path cannot be processed.
 **/
static SaErrorT snmp_bc_logsrc2rid(struct oh_handler_state *handle,
				   gchar *src,
				   LogSource2ResourceT *resinfo,
				   unsigned short ovr_flags)
{
	int rpt_index;
	guint loc;
	gchar **src_parts = NULL, *endptr = NULL, *root_tuple;
	SaErrorT err;
	SaHpiBoolT isblade, isexpansioncard, ischassis, isswitch;
	SaHpiEntityPathT ep, ep_root;
	SaHpiEntityTypeT entity_type;
	struct snmp_bc_sensor *array_ptr;

	if (!handle || !src || !resinfo) {
		dbg("Invalid parameter.");
		return(SA_ERR_HPI_INVALID_PARAMS);
	}

	/* Find top-level chassis entity path */
	ep_init(&ep);
	ep_init(&ep_root);
	root_tuple = (gchar *)g_hash_table_lookup(handle->config, "entity_root");
	/* FIX: guard against a missing "entity_root" config entry; previously
	 * a NULL root_tuple was passed straight to string2entitypath(). */
	if (root_tuple == NULL) {
		dbg("Cannot find configured entity_root.");
		return(SA_ERR_HPI_INTERNAL_ERROR);
	}
	string2entitypath(root_tuple, &ep_root);

	/* Assume chassis location/type unless another resource type is discovered */
	loc = ep_root.Entry[0].EntityLocation;
	entity_type = ep_root.Entry[0].EntityType;

	/* Break down "Source" text string to find source's RPT index and location */
	src_parts = g_strsplit(src, "_", -1);
	if (src_parts == NULL || src_parts[0] == NULL) {
		dbg("Cannot split Source text string.");
		g_strfreev(src_parts);
		return(SA_ERR_HPI_INTERNAL_ERROR);
	}

	/* See if resource is something other than the chassis */
	isblade = isexpansioncard = isswitch = ischassis = SAHPI_FALSE;
	if (!strcmp(src_parts[0], "BLADE")) {
		/* All expansion card events are reported as blade events in Error Log */
		if (ovr_flags & OVR_EXP) {
			isexpansioncard = SAHPI_TRUE;
		} else {
			isblade = SAHPI_TRUE;
		}
	} else {
		if (!strcmp(src_parts[0], "SWITCH")) {
			isswitch = SAHPI_TRUE;
		}
	}

	/* If not the chassis, find the location value from last part of log's source string */
	if (isexpansioncard == SAHPI_TRUE || isblade == SAHPI_TRUE || isswitch == SAHPI_TRUE) {
		/* FIX: a bare "BLADE" or "SWITCH" Source (no "_<loc>" suffix)
		 * previously fed NULL to strtoul() - undefined behavior. */
		if (src_parts[1] == NULL) {
			dbg("Source text string has no location suffix.");
			g_strfreev(src_parts);
			return(SA_ERR_HPI_INTERNAL_ERROR);
		}
		loc = strtoul(src_parts[1], &endptr, 10);
		if (isexpansioncard == SAHPI_TRUE) {
			rpt_index = BC_RPT_ENTRY_BLADE_ADDIN_CARD;
			array_ptr = &snmp_bc_blade_addin_sensors[0];
		} else {
			if (isblade == SAHPI_TRUE) {
				rpt_index = BC_RPT_ENTRY_BLADE;
				array_ptr = &snmp_bc_blade_sensors[0];
			} else {
				rpt_index = BC_RPT_ENTRY_SWITCH_MODULE;
				array_ptr = &snmp_bc_switch_sensors[0];
			}
		}
		entity_type = snmp_rpt_array[rpt_index].rpt.ResourceEntity.Entry[0].EntityType;
	} else {
		ischassis = SAHPI_TRUE;
		rpt_index = BC_RPT_ENTRY_CHASSIS;
		array_ptr = &snmp_bc_chassis_sensors[0];
	}
	g_strfreev(src_parts);

	/* Find rest of Entity Path and calculate RID */
	err = ep_concat(&ep, &snmp_rpt_array[rpt_index].rpt.ResourceEntity);
	if (err) {
		dbg("Cannot concat Entity Path. Error=%s.", oh_lookup_error(err));
		return(SA_ERR_HPI_INTERNAL_ERROR);
	}
	err = ep_concat(&ep, &ep_root);
	if (err) {
		dbg("Cannot concat Entity Path. Error=%s.", oh_lookup_error(err));
		return(SA_ERR_HPI_INTERNAL_ERROR);
	}
	err = set_ep_instance(&ep, entity_type, loc);
	if (err) {
		dbg("Cannot set location. Type=%s; Location=%d; Error=%s.",
		    oh_lookup_entitytype(entity_type), loc, oh_lookup_error(err));
		return(SA_ERR_HPI_INTERNAL_ERROR);
	}

	/* Special case - if Expansion Card set location of parent blade as well */
	if (isexpansioncard == SAHPI_TRUE) {
		err = set_ep_instance(&ep, SAHPI_ENT_SBC_BLADE, loc);
		if (err) {
			dbg("Cannot set location. Type=%s; Location=%d; Error=%s.",
			    oh_lookup_entitytype(SAHPI_ENT_SBC_BLADE), loc, oh_lookup_error(err));
			return(SA_ERR_HPI_INTERNAL_ERROR);
		}
	}

	/* Fill in RID and RPT table info about "Source" */
	resinfo->rpt = rpt_index;
	resinfo->sensor_array_ptr = array_ptr;
	resinfo->ep = ep;
	resinfo->rid = oh_uid_lookup(&ep);
	if (resinfo->rid == 0) {
		dbg("No RID");
		return(SA_ERR_HPI_INTERNAL_ERROR);
	}

	return(SA_OK);
}
/** * g_dbus_error_encode_gerror: * @error: A #GError. * * Creates a D-Bus error name to use for @error. If @error matches * a registered error (cf. g_dbus_error_register_error()), the corresponding * D-Bus error name will be returned. * * Otherwise the a name of the form * `org.gtk.GDBus.UnmappedGError.Quark._ESCAPED_QUARK_NAME.Code_ERROR_CODE` * will be used. This allows other GDBus applications to map the error * on the wire back to a #GError using g_dbus_error_new_for_dbus_error(). * * This function is typically only used in object mappings to put a * #GError on the wire. Regular applications should not use it. * * Returns: A D-Bus error name (never %NULL). Free with g_free(). * * Since: 2.26 */ gchar * g_dbus_error_encode_gerror (const GError *error) { RegisteredError *re; gchar *error_name; g_return_val_if_fail (error != NULL, NULL); /* Ensure that e.g. G_DBUS_ERROR is registered using g_dbus_error_register_error() */ _g_dbus_initialize (); error_name = NULL; G_LOCK (error_lock); re = NULL; if (quark_code_pair_to_re != NULL) { QuarkCodePair pair; pair.error_domain = error->domain; pair.error_code = error->code; g_assert (dbus_error_name_to_re != NULL); /* check invariant */ re = g_hash_table_lookup (quark_code_pair_to_re, &pair); } if (re != NULL) { error_name = g_strdup (re->dbus_error_name); G_UNLOCK (error_lock); } else { const gchar *domain_as_string; GString *s; guint n; G_UNLOCK (error_lock); /* We can't make a lot of assumptions about what domain_as_string * looks like and D-Bus is extremely picky about error names so * hex-encode it for transport across the wire. 
*/ domain_as_string = g_quark_to_string (error->domain); /* 0 is not a domain; neither are non-quark integers */ g_return_val_if_fail (domain_as_string != NULL, NULL); s = g_string_new ("org.gtk.GDBus.UnmappedGError.Quark._"); for (n = 0; domain_as_string[n] != 0; n++) { gint c = domain_as_string[n]; if (g_ascii_isalnum (c)) { g_string_append_c (s, c); } else { guint nibble_top; guint nibble_bottom; g_string_append_c (s, '_'); nibble_top = ((int) domain_as_string[n]) >> 4; nibble_bottom = ((int) domain_as_string[n]) & 0x0f; if (nibble_top < 10) nibble_top += '0'; else nibble_top += 'a' - 10; if (nibble_bottom < 10) nibble_bottom += '0'; else nibble_bottom += 'a' - 10; g_string_append_c (s, nibble_top); g_string_append_c (s, nibble_bottom); } } g_string_append_printf (s, ".Code%d", error->code); error_name = g_string_free (s, FALSE); } return error_name; }
/**
 * Scan a daap server for songs.
 *
 * Looks up (or creates and caches) a login session for host:port in the
 * global login_sessions table, fetches the first database's song list and
 * adds each song to the xform's browse list.
 *
 * Returns TRUE on success, FALSE on login/listing failure (err may be set).
 */
static gboolean
daap_get_urls_from_server (xmms_xform_t *xform, gchar *host, guint port,
                           xmms_error_t *err)
{
	GSList *dbid_list = NULL;
	GSList *song_list = NULL, *song_el;
	cc_item_record_t *db_data;
	xmms_daap_login_data_t *login_data;
	gchar *hash;

	hash = g_strdup_printf ("%s:%u", host, port);

	login_data = g_hash_table_lookup (login_sessions, hash);

	if (!login_data) {
		login_data = g_new0 (xmms_daap_login_data_t, 1);

		login_data->session_id = daap_command_login (host, port, 0, err);
		if (xmms_error_iserror (err)) {
			/* FIX: previously leaked both the key string and the
			 * half-initialized session record on login failure */
			g_free (login_data);
			g_free (hash);
			return FALSE;
		}

		login_data->revision_id = daap_command_update (host, port,
		                                               login_data->session_id,
		                                               0);

		login_data->request_id = 1;
		login_data->logged_in = TRUE;

		/* ownership of 'hash' transfers to the table as the key */
		g_hash_table_insert (login_sessions, hash, login_data);
	} else {
		/* FIX: the table already owns an identical key; the lookup key
		 * was previously leaked on this path */
		g_free (hash);

		login_data->revision_id = daap_command_update (host, port,
		                                               login_data->session_id,
		                                               0);
	}

	dbid_list = daap_command_db_list (host, port, login_data->session_id,
	                                  login_data->revision_id, 0);
	if (!dbid_list) {
		return FALSE;
	}

	/* XXX i've never seen more than one db per server out in the wild,
	 * let's hope that never changes *wink*
	 * just use the first db in the list */
	db_data = (cc_item_record_t *) dbid_list->data;
	song_list = daap_command_song_list (host, port, login_data->session_id,
	                                    login_data->revision_id,
	                                    0, db_data->dbid);

	g_slist_foreach (dbid_list, (GFunc) cc_item_record_free, NULL);
	g_slist_free (dbid_list);

	if (!song_list) {
		return FALSE;
	}

	for (song_el = song_list; song_el; song_el = g_slist_next (song_el)) {
		daap_add_song_to_list (xform, song_el->data);
	}

	g_slist_foreach (song_list, (GFunc) cc_item_record_free, NULL);
	g_slist_free (song_list);

	return TRUE;
}
/**
 * snmp_bc_log2event:
 * @handle: Pointer to handler's data.
 * @logstr: Platform Event Log to be mapped.
 * @event: Pointer to put mapped event.
 * @isdst: Boolean to indicate of DST on on/off.
 * @event_enabled_ptr: Location to store whether the event is enabled.
 *
 * Maps platform error log entries to HPI events.
 *
 * @isdst ("is DayLight Savings Time") parameter is a performance hack.
 * Design assumes the event's timestamp is the time local to the platform itself.
 * So instead of forcing platform accesses for each log entry to determine if
 * DST is in effect, the isdst parameter allows the caller to query the
 * hardware DST info once then make multiple translation calls.
 *
 * Return values:
 * SA_OK - normal case.
 * SA_ERR_HPI_INVALID_PARAMS - @handle, @logstr, @event, @event_enabled_ptr NULL.
 **/
SaErrorT snmp_bc_log2event(struct oh_handler_state *handle,
			   gchar *logstr,
			   SaHpiEventT *event,
			   int isdst,
			   int *event_enabled_ptr)
{
	bc_sel_entry log_entry;
	gchar *recovery_str, *login_str;
	gchar root_str[SNMP_BC_MAX_SEL_ENTRY_LENGTH], search_str[SNMP_BC_MAX_SEL_ENTRY_LENGTH];
	LogSource2ResourceT resinfo;
	SaErrorT err;
	SaHpiBoolT is_recovery_event, is_threshold_event;
	SaHpiEventT working, *event_ptr;
	SaHpiResourceIdT event_rid;
	SaHpiSeverityT event_severity;
	SaHpiTextBufferT thresh_read_value, thresh_trigger_value;
	SaHpiTimeT event_time;
	Xml2EventInfoT *strhash_data;
	struct snmp_bc_hnd *custom_handle;

	if (!handle || !logstr || !event || !event_enabled_ptr) {
		dbg("Invalid parameter.");
		return(SA_ERR_HPI_INVALID_PARAMS);
	}

	/* FIX: previously handle->data was dereferenced in the declaration's
	 * initializer, BEFORE the NULL check above could reject a NULL handle. */
	custom_handle = (struct snmp_bc_hnd *)handle->data;

	memset(&working, 0, sizeof(SaHpiEventT));
	is_recovery_event = is_threshold_event = SAHPI_FALSE;

	/* Parse hardware log entry into its various components */
	err = snmp_bc_parse_sel_entry(handle, logstr, &log_entry);
	if (err) {
		dbg("Cannot parse log entry=%s. Error=%s.", logstr, oh_lookup_error(err));
		return(err);
	}

	/* Find default RID from log's "Source" field */
	err = snmp_bc_logsrc2rid(handle, log_entry.source, &resinfo, 0);
	if (err) {
		dbg("Cannot translate %s to RID. Error=%s", log_entry.source, oh_lookup_error(err));
		return(err);
	}

	/* Set dynamic event fields with default values from the log string.
	   These may be overwritten in the code below */
	event_rid = resinfo.rid;
	event_time = (SaHpiTimeT)mktime(&log_entry.time) * 1000000000;
	event_severity = log_entry.sev;

	/* FIXME:: Do tmp and only set if no errors - really this is a boolean ?? */
	/* Assume event is enabled; unless we find out differently */
	*event_enabled_ptr = 1;

	/**********************************************************************
	 * For some types of events (e.g. thresholds), dynamic data is appended
	 * to some root string. Need to find this root string, since its the
	 * root string which is mapped in the XML to event hash table.
	 **********************************************************************/

	/* Set default search string */
	strncpy(search_str, log_entry.text, SNMP_BC_MAX_SEL_ENTRY_LENGTH);

	/* Discover "recovery" event strings */
	recovery_str = strstr(search_str, EVT_RECOVERY);
	if (recovery_str && (recovery_str == search_str)) {
		is_recovery_event = SAHPI_TRUE;
		memset(search_str, 0, SNMP_BC_MAX_SEL_ENTRY_LENGTH);
		strcpy(search_str, (log_entry.text + strlen(EVT_RECOVERY)));
	}

	/* Adjust "login" event strings - strip username */
	login_str = strstr(log_entry.text, LOG_LOGIN_STRING);
	if (login_str) {
		gchar *id_str = strstr(log_entry.text, LOG_LOGIN_CHAR);
		if (id_str != NULL) {
			memset(search_str, 0, SNMP_BC_MAX_SEL_ENTRY_LENGTH);
			strncpy(search_str, log_entry.text, (id_str - log_entry.text));
			search_str[(id_str - log_entry.text)] = '\0';
		}
	}

	/* Adjust "threshold" event strings */
	if (strstr(log_entry.text, LOG_THRESHOLD_VALUE_STRING)) {
		is_threshold_event = SAHPI_TRUE;
		oh_init_textbuffer(&thresh_read_value);
		oh_init_textbuffer(&thresh_trigger_value);
		err = snmp_bc_parse_threshold_str(search_str, root_str,
						  &thresh_read_value, &thresh_trigger_value);
		if (err) {
			dbg("Cannot parse threshold string=%s.", search_str);
		} else {
			memset(search_str, 0, SNMP_BC_MAX_SEL_ENTRY_LENGTH);
			strcpy(search_str, root_str);
		}
	}

	trace("Search string=%s.", search_str);

	/* See if adjusted root string is in the XML to event hash table */
	strhash_data = (Xml2EventInfoT *)g_hash_table_lookup(bc_xml2event_hash, search_str);
	if (strhash_data) {
		/* Handle strings that have multiple event numbers */
		int dupovrovr = 0;
		if (strhash_data->event_dup) {
			strhash_data = snmp_bc_findevent4dupstr(search_str, strhash_data, &resinfo);
			if (strhash_data == NULL) {
				dbg("Cannot find valid event for duplicate string=%s and RID=%d.",
				    search_str, resinfo.rid);
				if (snmp_bc_map2oem(&working, &log_entry, EVENT_NOT_MAPPED)) {
					dbg("Cannot map to OEM Event %s.", log_entry.text);
					return(SA_ERR_HPI_INTERNAL_ERROR);
				}
				goto DONE;
			}
			if (strhash_data->event_ovr & OVR_RID) {
				dbg("Cannot have RID override on duplicate string=%s.", search_str);
				dupovrovr = 1;
			}
		}

		/* If OVR_SEV, use BCT-level severity calculated in off-line scripts */
		if (strhash_data->event_ovr & OVR_SEV) {
			event_severity = strhash_data->event_sev;
		}

		/* Look to see if event is mapped to an HPI entity */
		event_ptr = (SaHpiEventT *)g_hash_table_lookup(custom_handle->event2hpi_hash_ptr, strhash_data->event);
		if (event_ptr) {
			/* Set static event data defined during resource discovery */
			working = *event_ptr;

			/* Find RID */
			if (strhash_data->event_ovr & OVR_EXP) {
				/* If OVR_EXP, find RID of expansion card */
				err = snmp_bc_logsrc2rid(handle, log_entry.source, &resinfo,
							 strhash_data->event_ovr);
				if (err) {
					dbg("Cannot translate %s to RID. Error=%s.",
					    log_entry.source, oh_lookup_error(err));
					return(err);
				}
				event_rid = resinfo.rid;
			} else {
				/* if OVR_RID, use RID from bc_resources.c */
				/* (unless dup strings have OVR_RID set incorrectly) */
				if ((strhash_data->event_ovr & OVR_RID) && !dupovrovr) {
					event_rid = event_ptr->Source;
				} else {
					/* Restore original RID calculated from error log */
					working.Source = event_rid;
				}
			}

			/* Handle sensor events */
			if (working.EventType == SAHPI_ET_SENSOR) {
				if (is_recovery_event) {
					/* FIXME:: we should read sensors on recovery events to get current
					 * state. Currently recovery state is hardcoded in bc_resources.c
					 * hardcoded recovery states should be removed. Set Optional bits
					 * for current state */
					/* FIXME: Assertion is not right - we shouldn't set here
					   Originally thought this meant sensor in a fail state */
					working.EventDataUnion.SensorEvent.Assertion = SAHPI_FALSE;
				}

				/* Set sensor's current/last state; see if sensor's events are disabled */
				err = snmp_bc_set_previous_event_state(handle, &working,
								       is_recovery_event, event_enabled_ptr);
				if (err) {
					dbg("Cannot set previous state for %s; Error=%s.",
					    log_entry.text, oh_lookup_error(err));
					return(SA_ERR_HPI_INTERNAL_ERROR);
				}
				if (!(*event_enabled_ptr)) {
					return(SA_OK);
				}

				/* Convert threshold strings into event values */
				if (is_threshold_event) {
					/* FIXME:: Do we need to check mib.convert_snmpstr >= 0 */
					/* FIXME:: Need to check IsSupported??? */
					if (oh_encode_sensorreading(&thresh_read_value,
								    working.EventDataUnion.SensorEvent.TriggerReading.Type,
								    &working.EventDataUnion.SensorEvent.TriggerReading)) {
						dbg("Cannot convert trigger reading=%s for text=%s.",
						    thresh_read_value.Data, log_entry.text);
						return(SA_ERR_HPI_INTERNAL_ERROR);
					}
					working.EventDataUnion.SensorEvent.OptionalDataPresent =
						working.EventDataUnion.SensorEvent.OptionalDataPresent | SAHPI_SOD_TRIGGER_READING;

					if (oh_encode_sensorreading(&thresh_trigger_value,
								    working.EventDataUnion.SensorEvent.TriggerThreshold.Type,
								    &working.EventDataUnion.SensorEvent.TriggerThreshold)) {
						dbg("Cannot convert trigger threshold=%s for text=%s.",
						    thresh_trigger_value.Data, log_entry.text);
						return(SA_ERR_HPI_INTERNAL_ERROR);
					}
					working.EventDataUnion.SensorEvent.OptionalDataPresent =
						working.EventDataUnion.SensorEvent.OptionalDataPresent | SAHPI_SOD_TRIGGER_THRESHOLD;
				}
			}
			/* Handle hot-swap events */
			else if (working.EventType == SAHPI_ET_HOTSWAP) {
				err = snmp_bc_set_previous_event_state(handle, &working,
								       is_recovery_event, event_enabled_ptr);
				if (err) {
					dbg("Cannot set previous state for %s; Error=%s",
					    log_entry.text, oh_lookup_error(err));
					return(SA_ERR_HPI_INTERNAL_ERROR);
				}
			} else {
				dbg("Platform doesn't support events of type=%s.",
				    oh_lookup_eventtype(working.EventType));
				return(SA_ERR_HPI_INTERNAL_ERROR);
			}
		} /* End found mapped event */
		else {
			/* Map to OEM Event - Log Not Mapped */
			if (snmp_bc_map2oem(&working, &log_entry, EVENT_NOT_MAPPED)) {
				dbg("Cannot map to OEM Event %s.", log_entry.text);
				return(SA_ERR_HPI_INTERNAL_ERROR);
			}
		}
	} else {
		/* Map to OEM Event - String not in XML to event hash table */
		if (snmp_bc_map2oem(&working, &log_entry, EVENT_NOT_ALERTABLE)) {
			dbg("Cannot map to OEM Event %s.", log_entry.text);
			return(SA_ERR_HPI_INTERNAL_ERROR);
		}
	}

 DONE:
	working.Source = event_rid;
	working.Timestamp = event_time;
	working.Severity = event_severity;
	memcpy((void *)event, (void *)&working, sizeof(SaHpiEventT));

	return(SA_OK);
}
/*
 * D-Bus handler for the legacy application handler's Register method.
 * Registers (or re-registers) a shutdown client for @unit with the Node
 * State Manager. If a client for the unit already exists, it is simply
 * re-registered with the new shutdown mode and timeout; otherwise a new
 * shutdown client/consumer pair is created, exported on the bus and
 * registered with the NSM Consumer. Always returns TRUE (method handled).
 */
static gboolean la_handler_service_handle_register (LAHandler *interface,
                                                    GDBusMethodInvocation *invocation,
                                                    const gchar *unit,
                                                    NSMShutdownType shutdown_mode,
                                                    guint timeout,
                                                    LAHandlerService *service)
{
  ShutdownConsumer *consumer;
  ShutdownClient *client;
  GError *error = NULL;
  const gchar *existing_bus_name;
  const gchar *existing_object_path;
  gchar *bus_name;
  gchar *object_path;

  g_return_val_if_fail (IS_LA_HANDLER (interface), FALSE);
  g_return_val_if_fail (G_IS_DBUS_METHOD_INVOCATION (invocation), FALSE);
  g_return_val_if_fail (unit != NULL && *unit != '\0', FALSE);
  g_return_val_if_fail (LA_HANDLER_IS_SERVICE (service), FALSE);

  /* only NORMAL, FAST or their combination are accepted shutdown modes */
  if (shutdown_mode != NSM_SHUTDOWN_TYPE_NORMAL
      && shutdown_mode != NSM_SHUTDOWN_TYPE_FAST
      && shutdown_mode != (NSM_SHUTDOWN_TYPE_NORMAL | NSM_SHUTDOWN_TYPE_FAST))
    {
      /* the shutdown mode is invalid */
      DLT_LOG (la_handler_context, DLT_LOG_ERROR,
               DLT_STRING ("Failed to register legacy application: "
                           "invalid shutdown mode"), DLT_INT (shutdown_mode));

      /* complete the invocation anyway so the caller is not left waiting */
      la_handler_complete_register (interface, invocation);
      return TRUE;
    }

  /* find out if we have a shutdown client for this unit already */
  client = g_hash_table_lookup (service->units_to_clients, unit);
  if (client != NULL)
    {
      /* there already is a shutdown client for the unit, so simply
       * re-register its client with the new shutdown mode and timeout */

      /* extract information from the client */
      existing_bus_name = shutdown_client_get_bus_name (client);
      existing_object_path = shutdown_client_get_object_path (client);

      /* temporarily store a reference to the legacy app handler service object
       * in the invocation object */
      g_object_set_data_full (G_OBJECT (invocation), "la-handler-service",
                              g_object_ref (service), (GDestroyNotify) g_object_unref);

      /* re-register the shutdown consumer with the NSM Consumer */
      nsm_consumer_call_register_shutdown_client (service->nsm_consumer,
                                                  existing_bus_name, existing_object_path,
                                                  shutdown_mode, timeout, NULL,
                                                  la_handler_service_handle_register_finish,
                                                  invocation);
    }
  else
    {
      /* create a new shutdown client and consumer for the unit */
      /* NOTE: bus_name points at a string literal and is never freed below;
       * only object_path is heap-allocated */
      bus_name = "org.genivi.NodeStartupController1";
      object_path = g_strdup_printf ("%s/%u", service->prefix, service->index);
      client = shutdown_client_new (bus_name, object_path, shutdown_mode, timeout);
      consumer = shutdown_consumer_skeleton_new ();
      shutdown_client_set_consumer (client, consumer);

      /* remember the legacy app handler service object in shutdown client */
      g_object_set_data_full (G_OBJECT (client), "la-handler-service",
                              g_object_ref (service), (GDestroyNotify) g_object_unref);

      /* implement the LifecycleRequest method of the shutdown consumer */
      g_signal_connect (consumer, "handle-lifecycle-request",
                        G_CALLBACK (la_handler_service_handle_consumer_lifecycle_request),
                        client);

      /* associate the shutdown client with the unit name */
      g_hash_table_insert (service->units_to_clients, g_strdup (unit),
                           g_object_ref (client));
      g_hash_table_insert (service->clients_to_units, g_object_ref (client),
                           g_strdup (unit));

      /* export the shutdown consumer on the bus */
      g_dbus_interface_skeleton_export (G_DBUS_INTERFACE_SKELETON (consumer),
                                        service->connection, object_path, &error);
      if (error != NULL)
        {
          /* export failure is logged but registration continues regardless */
          DLT_LOG (la_handler_context, DLT_LOG_ERROR,
                   DLT_STRING ("Failed to export shutdown consumer on the bus:"),
                   DLT_STRING (error->message));
          g_error_free (error);
        }

      /* temporarily store a reference to the legacy app handler service object
       * in the invocation object */
      g_object_set_data_full (G_OBJECT (invocation), "la-handler-service",
                              g_object_ref (service), (GDestroyNotify) g_object_unref);

      /* register the shutdown consumer with the NSM Consumer */
      nsm_consumer_call_register_shutdown_client (service->nsm_consumer,
                                                  bus_name, object_path,
                                                  shutdown_mode, timeout, NULL,
                                                  la_handler_service_handle_register_finish,
                                                  invocation);

      /* free strings and release the shutdown consumer */
      g_free (object_path);
      g_object_unref (consumer);

      /* increment the counter for our shutdown consumer object paths */
      service->index++;
    }

  return TRUE;
}
/*
 * Set (value != NULL) or remove (value == NULL) the attribute @att_name on
 * @ptr under @mgr's lock. An ObjAttrs record may be shared by several
 * objects; when @ptr shares one, it is first given its own private copy
 * (copy-on-write) so the change does not affect the other objects.
 * When @steal_value is TRUE the manager takes ownership of @value,
 * otherwise it stores a copy. @att_name itself is NOT duplicated.
 */
static void
manager_real_set (GdaAttributesManager *mgr, gpointer ptr,
		  const gchar *att_name, GDestroyNotify destroy,
		  const GValue *value, gboolean steal_value)
{
	ObjAttrs *objattrs;

	g_return_if_fail (att_name);
	if (mgr->for_objects)
		g_return_if_fail (G_IS_OBJECT (ptr));

	gda_mutex_lock (mgr->mutex);

	/* pick up the correct ObjAttrs */
	objattrs = g_hash_table_lookup (mgr->obj_hash, ptr);
	if (!objattrs) {
		/* first attribute for @ptr: create a fresh record */
		objattrs = g_new0 (ObjAttrs, 1);
		objattrs->mgr = mgr;
		objattrs->objects = g_slist_prepend (NULL, ptr);
		objattrs->values_hash = g_hash_table_new_full (attname_hash, attname_equal,
							       (GDestroyNotify) attname_free,
							       (GDestroyNotify) gda_value_free);
		g_hash_table_insert (mgr->obj_hash, ptr, objattrs);
		if (mgr->for_objects)
			/* drop the record automatically when the object dies */
			g_object_weak_ref (G_OBJECT (ptr), (GWeakNotify) obj_destroyed_cb, objattrs);
	}

	if (objattrs->objects->next) {
		/* record is shared with other objects:
		 * create another ObjAttrs specifically for @ptr */
		ObjAttrs *objattrs2;
		objattrs2 = g_new0 (ObjAttrs, 1);
		objattrs2->mgr = mgr;
		objattrs2->objects = g_slist_prepend (NULL, ptr);
		objattrs2->values_hash = g_hash_table_new_full (attname_hash, attname_equal,
								(GDestroyNotify) attname_free,
								(GDestroyNotify) gda_value_free);

		/* detach @ptr from the shared record and map it to the new one */
		objattrs->objects = g_slist_remove (objattrs->objects, ptr);
		g_hash_table_remove (mgr->obj_hash, ptr);
		g_hash_table_insert (mgr->obj_hash, ptr, objattrs2);

		if (mgr->for_objects) {
			/* move the weak reference to the new record */
			g_object_weak_unref (G_OBJECT (ptr), (GWeakNotify) obj_destroyed_cb, objattrs);
			g_object_weak_ref (G_OBJECT (ptr), (GWeakNotify) obj_destroyed_cb, objattrs2);
		}

		/* copy the shared record's existing attributes into the new one */
		CopyData cdata;
		cdata.to_mgr = mgr;
		cdata.ptr = ptr;
		g_hash_table_foreach (objattrs->values_hash, (GHFunc) foreach_copy_func, &cdata);

		objattrs = objattrs2;
	}

	/* Actually add the attribute */
	if (value) {
		AttName *attname;
		attname = g_new (AttName, 1);
		attname->mgr = mgr;
		attname->att_name = (gchar*) att_name; /* NOT duplicated */
		attname->att_name_destroy = destroy;

		if (steal_value)
			g_hash_table_insert (objattrs->values_hash, attname, (GValue*) value);
		else
			g_hash_table_insert (objattrs->values_hash, attname, gda_value_copy (value));
	}
	else {
		/* NULL value removes the attribute; stack key is enough for lookup */
		AttName attname;
		attname.att_name = (gchar*) att_name;
		g_hash_table_remove (objattrs->values_hash, &attname);
	}

	/* notify listeners (object mode only) */
	if (mgr->signal_func && mgr->for_objects)
		mgr->signal_func ((GObject*) ptr, att_name, value, mgr->signal_data);

	gda_mutex_unlock (mgr->mutex);
}
/*
 * Handle the loss of a device context identified by @ip_address/@udn.
 * Removes the matching context from the device (which may still be under
 * construction). If it was the last context, the device is deleted or its
 * construction cancelled; if the lost context was the one being used for
 * construction, construction is restarted on a remaining context; otherwise,
 * if the device was subscribed, a re-subscription is scheduled.
 */
static void prv_remove_device(dld_upnp_t *upnp, const gchar *ip_address,
			      const char *udn)
{
	dld_device_t *device;
	unsigned int i;
	dld_device_context_t *context;
	gboolean subscribed;
	gboolean under_construction = FALSE;
	prv_device_new_ct_t *priv_t;
	gboolean construction_ctx = FALSE;
	const dleyna_task_queue_key_t *queue_id;

	DLEYNA_LOG_DEBUG("Enter");

	/* look in the finished-device map first, then among devices
	 * still under construction */
	device = g_hash_table_lookup(upnp->device_udn_map, udn);

	if (!device) {
		priv_t = g_hash_table_lookup(upnp->device_uc_map, udn);

		if (priv_t) {
			device = priv_t->device;
			under_construction = TRUE;
		}
	}

	if (!device) {
		DLEYNA_LOG_WARNING("Device not found. Ignoring");
		goto on_error;
	}

	/* find the context matching the lost IP address */
	for (i = 0; i < device->contexts->len; ++i) {
		context = g_ptr_array_index(device->contexts, i);
		if (!strcmp(context->ip_address, ip_address))
			break;
	}

	if (i < device->contexts->len) {
		subscribed = (context->bms.subscribed);
		if (under_construction)
			/* is the lost context the one construction is running on? */
			construction_ctx = !strcmp(context->ip_address,
						   priv_t->ip_address);

		(void) g_ptr_array_remove_index(device->contexts, i);

		if (device->contexts->len == 0) {
			if (!under_construction) {
				DLEYNA_LOG_DEBUG(
					"Last Context lost. Delete device");

				upnp->lost_device(device->path);
				g_hash_table_remove(upnp->device_udn_map, udn);
			} else {
				DLEYNA_LOG_WARNING(
					"Device under construction. Cancelling");

				dleyna_task_processor_cancel_queue(
							priv_t->queue_id);
			}
		} else if (under_construction && construction_ctx) {
			DLEYNA_LOG_WARNING(
				"Device under construction. Switching context");

			/* Cancel previous contruction task chain */
			g_hash_table_remove(priv_t->upnp->device_uc_map,
					    priv_t->udn);
			dleyna_task_queue_set_finally(
						priv_t->queue_id,
						prv_device_context_switch_end);
			dleyna_task_processor_cancel_queue(priv_t->queue_id);

			/* Create a new construction task chain */
			context = dld_device_get_context(device);
			queue_id = prv_create_device_queue(&priv_t);
			prv_update_device_context(priv_t, upnp, udn, device,
						  context->ip_address, queue_id);

			/* Start tasks from current construction step */
			dld_device_construct(device, context, upnp->connection,
					     upnp->interface_info, queue_id);
		} else if (subscribed && !device->timeout_id) {
			DLEYNA_LOG_DEBUG("Subscribe on new context");

			/* defer re-subscription by one second */
			device->timeout_id = g_timeout_add_seconds(1,
					prv_subscribe_to_service_changes,
					device);
		}
	}

on_error:

	DLEYNA_LOG_DEBUG("Exit");

	return;
}
/* PDB dispatcher for a temporary progress procedure: decodes the progress
 * command from the incoming parameters and forwards it to the vtable
 * registered under this procedure's name in gimp_progress_ht. */
static void
gimp_temp_progress_run (const gchar      *name,
                        gint              nparams,
                        const GimpParam  *param,
                        gint             *nreturn_vals,
                        GimpParam       **return_vals)
{
  static GimpParam  values[2];
  GimpProgressData *pdata;

  *nreturn_vals = 1;
  *return_vals  = values;

  values[0].type          = GIMP_PDB_STATUS;
  values[0].data.d_status = GIMP_PDB_SUCCESS;

  pdata = g_hash_table_lookup (gimp_progress_ht, name);

  if (pdata == NULL)
    {
      g_warning ("Can't find internal progress data");
      values[0].data.d_status = GIMP_PDB_EXECUTION_ERROR;
      return;
    }

  switch ((GimpProgressCommand) param[0].data.d_int32)
    {
    case GIMP_PROGRESS_COMMAND_START:
      pdata->vtable.start (param[1].data.d_string,
                           param[2].data.d_float != 0.0,
                           pdata->data);
      break;

    case GIMP_PROGRESS_COMMAND_END:
      pdata->vtable.end (pdata->data);
      break;

    case GIMP_PROGRESS_COMMAND_SET_TEXT:
      pdata->vtable.set_text (param[1].data.d_string, pdata->data);
      break;

    case GIMP_PROGRESS_COMMAND_SET_VALUE:
      pdata->vtable.set_value (param[2].data.d_float, pdata->data);
      break;

    case GIMP_PROGRESS_COMMAND_PULSE:
      /* fall back to an indeterminate set_value when no pulse hook exists */
      if (pdata->vtable.pulse)
        pdata->vtable.pulse (pdata->data);
      else
        pdata->vtable.set_value (-1, pdata->data);
      break;

    case GIMP_PROGRESS_COMMAND_GET_WINDOW:
      *nreturn_vals  = 2;
      values[1].type = GIMP_PDB_FLOAT;
      if (pdata->vtable.get_window)
        values[1].data.d_float =
          (gdouble) pdata->vtable.get_window (pdata->data);
      else
        values[1].data.d_float = 0;
      break;

    default:
      values[0].data.d_status = GIMP_PDB_CALLING_ERROR;
      break;
    }
}
/* Serialise the entire project held by @parser into parser->doc:
 * property specs, custom properties, phases, calendars, tasks, resource
 * groups, resources and allocations, in that order.  Always returns
 * TRUE (no failure paths are reported). */
static gboolean
mpp_write_project (MrpParser *parser)
{
	xmlNodePtr   node, child, calendars_node;
	GList       *list, *l;
	GList       *assignments = NULL;
	MrpGroup    *default_group = NULL;
	NodeEntry   *entry;
	MrpCalendar *root_calendar;

	node = xmlNewDocNode (parser->doc, NULL, "project", NULL);
	parser->doc->xmlRootNode = node;

	mpp_write_property_specs (parser, node);
	mpp_write_custom_properties (parser, node, MRP_OBJECT (parser->project));
	mpp_write_phases (parser, node);

	/* Write calendars.  The three built-in day types come first, then any
	 * project-defined day types. */
	calendars_node = xmlNewChild (node, NULL, "calendars", NULL);
	child = xmlNewChild (calendars_node, NULL, "day-types", NULL);
	mpp_write_day (parser, child, mrp_day_get_work ());
	mpp_write_day (parser, child, mrp_day_get_nonwork ());
	mpp_write_day (parser, child, mrp_day_get_use_base ());
	for (l = mrp_day_get_all (parser->project); l; l = l->next) {
		mpp_write_day (parser, child, MRP_DAY (l->data));
	}

	/* Get the calendars */
	root_calendar = mrp_project_get_root_calendar (parser->project);
	for (l = mrp_calendar_get_children (root_calendar); l; l = l->next) {
		mpp_write_calendar (parser, calendars_node, l->data);
	}

	/* Write project properties now that we have the calendar id. */
	mpp_write_project_properties (parser, node);

	/* Write tasks.  The root task gets id 0 and maps to the <tasks>
	 * container node so children attach under it. */
	child = xmlNewChild (node, NULL, "tasks",NULL);
	entry = g_new0 (NodeEntry, 1);
	entry->id = 0;
	entry->node = child;
	g_hash_table_insert (parser->task_hash, parser->root_task, entry);

	/* Generate IDs and hash table.  First pass assigns ids, second pass
	 * writes the nodes (children need their parent's node pointer). */
	parser->last_id = 1;
	mrp_project_task_traverse (parser->project,
				   parser->root_task,
				   (MrpTaskTraverseFunc) mpp_hash_insert_task_cb,
				   parser);
	mrp_project_task_traverse (parser->project,
				   parser->root_task,
				   (MrpTaskTraverseFunc) mpp_write_task_cb,
				   parser);

	/* Write resource groups. */
	child = xmlNewChild (node, NULL, "resource-groups",NULL);
	list = mrp_project_get_groups (parser->project);

	/* Generate IDs and hash table.  Note: last_id is reset per section,
	 * so ids are only unique within their section. */
	parser->last_id = 1;
	for (l = list; l; l = l->next) {
		mpp_hash_insert_group (parser, l->data);
	}

	g_object_get (parser->project, "default-group", &default_group, NULL);
	if (default_group) {
		entry = g_hash_table_lookup (parser->group_hash, default_group);
		mpp_xml_set_int (child, "default_group", entry->id);
	}

	for (l = list; l; l = l->next) {
		mpp_write_group (parser, child, l->data);
	}

	/* Write resources.  While hashing resources we also collect a copy of
	 * every resource's assignment list for the allocations section. */
	child = xmlNewChild (node, NULL, "resources",NULL);
	list = mrp_project_get_resources (parser->project);

	/* Generate IDs and hash table. */
	parser->last_id = 1;
	for (l = list; l; l = l->next) {
		GList *r_list;

		mpp_hash_insert_resource (parser, l->data);

		r_list = mrp_resource_get_assignments (MRP_RESOURCE (l->data));
		assignments = g_list_concat (assignments, g_list_copy (r_list));
	}
	for (l = list; l; l = l->next) {
		mpp_write_resource (parser, child, l->data);
	}

	/* Write assignments. */
	child = xmlNewChild (node, NULL, "allocations", NULL);
	for (l = assignments; l; l = l->next) {
		mpp_write_assignment (parser, child, l->data);
	}
	/* Only the list cells were copied; the assignment objects themselves
	 * are owned by their resources. */
	g_list_free (assignments);

	return TRUE;
}
/* Write one <resource> element under @parent for @resource.
 * The resource and group ids are taken from the hash tables that
 * mpp_write_project() populated beforehand; the group attribute is
 * omitted when the resource has no (hashed) group. */
static void
mpp_write_resource (MrpParser   *parser,
		    xmlNodePtr   parent,
		    MrpResource *resource)
{
	xmlNodePtr   node;
	gchar       *name, *short_name, *email;
	gchar       *note;
	gint         type, units;
	gfloat       std_rate; /*, ovt_rate;*/
	NodeEntry   *group_entry;
	NodeEntry   *resource_entry;
	MrpGroup    *group;
	MrpCalendar *calendar;
	gint         id;

	g_return_if_fail (MRP_IS_RESOURCE (resource));

	node = xmlNewChild (parent, NULL, "resource", NULL);

	mrp_object_get (MRP_OBJECT (resource),
			"name", &name,
			"short_name", &short_name,
			"email", &email,
			"type", &type,
			"units", &units,
			"group", &group,
			"cost", &std_rate,
			"note", &note, /* FIX: was the mis-encoded token "¬e" */
			/*"cost-overtime", &ovt_rate,*/
			NULL);

	group_entry = g_hash_table_lookup (parser->group_hash, group);

	/* FIXME: should group really be able to be NULL? Should always
	 * be default group?
	 */
	if (group_entry != NULL) {
		mpp_xml_set_int (node, "group", group_entry->id);
	}

	resource_entry = g_hash_table_lookup (parser->resource_hash, resource);
	mpp_xml_set_int (node, "id", resource_entry->id);

	xmlSetProp (node, "name", name);
	xmlSetProp (node, "short-name", short_name);
	mpp_xml_set_int (node, "type", type);
	mpp_xml_set_int (node, "units", units);
	xmlSetProp (node, "email", email);
	xmlSetProp (node, "note", note);
	mpp_xml_set_float (node, "std-rate", std_rate);
	/*mpp_xml_set_float (node, "ovt-rate", ovt_rate);*/

	calendar = mrp_resource_get_calendar (resource);
	if (calendar) {
		/* Calendar ids are stored directly as ints in the hash;
		 * 0 (not found) means "no calendar attribute". */
		id = GPOINTER_TO_INT (g_hash_table_lookup (parser->calendar_hash,
							   calendar));
		if (id) {
			mpp_xml_set_int (node, "calendar", id);
		}
	}

	mpp_write_custom_properties (parser, node, MRP_OBJECT (resource));

	/* Strings returned by mrp_object_get() are owned by the caller. */
	g_free (name);
	g_free (short_name);
	g_free (email);
	g_free (note);
}
/* Task-traversal callback: write one <task> element for @task under its
 * parent's node (found via parser->task_hash) and record this task's
 * node for its own children.  Returns FALSE so traversal continues. */
static gboolean
mpp_write_task_cb (MrpTask *task, MrpParser *parser)
{
	MrpTask       *parent;
	NodeEntry     *entry;
	xmlNodePtr     node, parent_node;
	gchar         *name;
	gchar         *note;
	mrptime        start, finish, work_start;
	MrpConstraint *constraint;
	gint           duration;
	gint           work;
	gint           complete;
	gint           priority;
	MrpTaskType    type;
	MrpTaskSched   sched;
	GList         *predecessors, *l;

	/* Don't want the root task. */
	if (task == parser->root_task) {
		return FALSE;
	}

	parent = mrp_task_get_parent (task);
	entry = g_hash_table_lookup (parser->task_hash, parent);
	parent_node = entry->node;

	node = xmlNewChild (parent_node, NULL, "task", NULL);

	entry = g_hash_table_lookup (parser->task_hash, task);
	entry->node = node;

	g_object_get (task,
		      "name", &name,
		      "note", &note, /* FIX: was the mis-encoded token "¬e" */
		      "start", &start,
		      "finish", &finish,
		      "duration", &duration,
		      "work", &work,
		      "constraint", &constraint,
		      "percent-complete", &complete,
		      "priority", &priority,
		      "type", &type,
		      "sched", &sched,
		      NULL);

	work_start = mrp_task_get_work_start (task);

	/* Milestones have no extent: collapse them to a point in time. */
	if (type == MRP_TASK_TYPE_MILESTONE) {
		finish = start;
		work = 0;
		duration = 0;
	}

	mpp_xml_set_int (node, "id", entry->id);
	xmlSetProp (node, "name", name);
	xmlSetProp (node, "note", note);
	mpp_xml_set_int (node, "work", work);
	mpp_xml_set_int (node, "duration", duration);
	mpp_xml_set_date (node, "start", start);
	mpp_xml_set_date (node, "end", finish);
	mpp_xml_set_date (node, "work-start", work_start);
	mpp_xml_set_int (node, "percent-complete", complete);
	mpp_xml_set_int (node, "priority", priority);
	mpp_xml_set_task_type (node, "type", type);
	mpp_xml_set_task_sched (node, "scheduling", sched);

	mpp_write_custom_properties (parser, node, MRP_OBJECT (task));

	mpp_write_constraint (node, constraint);

	predecessors = mrp_task_get_predecessor_relations (task);
	if (predecessors != NULL) {
		node = xmlNewChild (node, NULL, "predecessors", NULL);
		for (l = predecessors; l; l = l->next) {
			mpp_write_predecessor (parser, node, l->data);
		}
	}

	g_free (name);
	g_free (note);

	return FALSE;
}
/* method to parse standard tags for each item element */
/* Parse one Atom 1.0 <entry> element into a fresh itemPtr stored in
 * ctxt->item (also returned).  Child elements are dispatched either to a
 * registered namespace handler or, for the Atom namespace, to the parser
 * function looked up in a lazily-built element-name table.
 * NOTE(review): entryElementHash is a function-local static initialised
 * on first call with no locking — presumably parsing is single-threaded;
 * confirm before calling from multiple threads. */
static itemPtr
atom10_parse_entry (feedParserCtxtPtr ctxt, xmlNodePtr cur)
{
	NsHandler *nsh;
	parseItemTagFunc pf;
	atom10ElementParserFunc func;
	static GHashTable *entryElementHash = NULL;

	if (!entryElementHash) {
		entryElementHash = g_hash_table_new (g_str_hash, g_str_equal);

		g_hash_table_insert (entryElementHash, "author", &atom10_parse_entry_author);
		g_hash_table_insert (entryElementHash, "category", &atom10_parse_entry_category);
		g_hash_table_insert (entryElementHash, "content", &atom10_parse_entry_content);
		g_hash_table_insert (entryElementHash, "contributor", &atom10_parse_entry_contributor);
		g_hash_table_insert (entryElementHash, "id", &atom10_parse_entry_id);
		g_hash_table_insert (entryElementHash, "link", &atom10_parse_entry_link);
		g_hash_table_insert (entryElementHash, "published", &atom10_parse_entry_published);
		g_hash_table_insert (entryElementHash, "rights", &atom10_parse_entry_rights);
		/* FIXME: Parse "source" */
		g_hash_table_insert (entryElementHash, "summary", &atom10_parse_entry_summary);
		g_hash_table_insert (entryElementHash, "title", &atom10_parse_entry_title);
		g_hash_table_insert (entryElementHash, "updated", &atom10_parse_entry_updated);
	}

	ctxt->item = item_new ();

	cur = cur->xmlChildrenNode;
	while (cur) {
		/* Skip text nodes, comments and anything without a namespace. */
		if (cur->type != XML_ELEMENT_NODE || cur->name == NULL || cur->ns == NULL) {
			cur = cur->next;
			continue;
		}

		/* Try a namespace handler first, matched by URI then by prefix. */
		if ((cur->ns->href && (nsh = (NsHandler *)g_hash_table_lookup (ns_atom10_ns_uri_table, (gpointer)cur->ns->href))) ||
		    (cur->ns->prefix && (nsh = (NsHandler *)g_hash_table_lookup (atom10_nstable, (gpointer)cur->ns->prefix)))) {
			pf = nsh->parseItemTag;
			if (pf)
				(*pf) (ctxt, cur);
			cur = cur->next;
			continue;
		}

		/* check namespace of this tag */
		if (!cur->ns->href) {
			/* This is an invalid feed... no idea what to do with the current element */
			debug1 (DEBUG_PARSING, "element with no namespace found in atom feed (%s)!", cur->name);
			cur = cur->next;
			continue;
		}

		if (xmlStrcmp(cur->ns->href, ATOM10_NS)) {
			debug1(DEBUG_PARSING, "unknown namespace %s found!", cur->ns->href);
			cur = cur->next;
			continue;
		}
		/* At this point, the namespace must be the Atom 1.0 namespace */

		func = g_hash_table_lookup (entryElementHash, cur->name);
		if (func) {
			(*func) (cur, ctxt, NULL);
		} else {
			debug1 (DEBUG_PARSING, "unknown entry element \"%s\" found", cur->name);
		}

		cur = cur->next;
	}

	/* after parsing we fill the infos into the itemPtr structure */
	ctxt->item->readStatus = FALSE;

	/* Entries with no <published>/<updated> inherit the feed's time. */
	if (0 == ctxt->item->time)
		ctxt->item->time = ctxt->feed->time;

	return ctxt->item;
}
/**
 * pango_win32_font_cache_loadw:
 * @cache: a #PangoWin32FontCache
 * @logfont: a pointer to a LOGFONTW structure describing the font to load.
 *
 * Creates a HFONT from a LOGFONTW. The
 * result may be newly loaded, or it may have been previously
 * stored
 *
 * Return value: The font structure, or %NULL if the font could
 * not be loaded.  In order to free this structure, you must call
 * pango_win32_font_cache_unload().
 *
 * Since: 1.16
 **/
HFONT
pango_win32_font_cache_loadw (PangoWin32FontCache *cache,
			      const LOGFONTW      *lfp)
{
  CacheEntry *entry;
  LOGFONTW lf;
  HFONT hfont;
  int tries;

  g_return_val_if_fail (cache != NULL, NULL);
  g_return_val_if_fail (lfp != NULL, NULL);

  entry = g_hash_table_lookup (cache->forward, lfp);

  if (entry)
    {
      g_atomic_int_inc (&entry->ref_count);
      PING (("increased refcount for cache entry %p: %d", entry->hfont, entry->ref_count));
    }
  else
    {
      BOOL font_smoothing;
      lf = *lfp;

      SystemParametersInfo (SPI_GETFONTSMOOTHING, 0, &font_smoothing, 0);
      /* If on XP or better, try to use ClearType if the global system
       * settings ask for it.
       */
      if (font_smoothing &&
	  (_pango_win32_os_version_info.dwMajorVersion > 5 ||
	   (_pango_win32_os_version_info.dwMajorVersion == 5 &&
	    _pango_win32_os_version_info.dwMinorVersion >= 1)))
	{
	  UINT smoothing_type;

#ifndef SPI_GETFONTSMOOTHINGTYPE
#define SPI_GETFONTSMOOTHINGTYPE 0x200a
#endif
#ifndef FE_FONTSMOOTHINGCLEARTYPE
#define FE_FONTSMOOTHINGCLEARTYPE 2
#endif
#ifndef CLEARTYPE_QUALITY
#define CLEARTYPE_QUALITY 5
#endif
	  SystemParametersInfo (SPI_GETFONTSMOOTHINGTYPE, 0, &smoothing_type, 0);
	  lf.lfQuality = (font_smoothing ?
			  (smoothing_type == FE_FONTSMOOTHINGCLEARTYPE ?
			   CLEARTYPE_QUALITY : ANTIALIASED_QUALITY) :
			  DEFAULT_QUALITY);
	}
      else
	lf.lfQuality = (font_smoothing ? ANTIALIASED_QUALITY : DEFAULT_QUALITY);

      lf.lfCharSet = DEFAULT_CHARSET;

      /* Try the requested face, then (tries == 0) well-known name
       * substitutions, then (tries == 1) a family-only fallback.
       * FIX: a trailing "tries++;" at the bottom of this loop made
       * tries advance by two per iteration (0 -> 2), so the second
       * fallback tier was unreachable; the for-header increment alone
       * is correct. */
      for (tries = 0; ; tries++)
	{
	  PING (("... trying CreateFontIndirect "
		 "height=%ld,width=%ld,escapement=%ld,orientation=%ld,"
		 "weight=%ld,%s%s%s"
		 "charset=%d,outprecision=%d,clipprecision=%d,"
		 "quality=%d,pitchandfamily=%#.02x,facename=\"%S\")",
		 lf.lfHeight, lf.lfWidth, lf.lfEscapement, lf.lfOrientation,
		 lf.lfWeight, (lf.lfItalic ? "italic," : ""),
		 (lf.lfUnderline ? "underline," : ""),
		 (lf.lfStrikeOut ? "strikeout," : ""),
		 lf.lfCharSet, lf.lfOutPrecision, lf.lfClipPrecision,
		 lf.lfQuality, lf.lfPitchAndFamily, lf.lfFaceName));
	  hfont = CreateFontIndirectW (&lf);

	  if (hfont != NULL)
	    {
	      PING (("Success! hfont=%p", hfont));
	      break;
	    }

	  /* If we fail, try some similar fonts often found on Windows. */
	  if (tries == 0)
	    {
	      gchar *p = g_utf16_to_utf8 (lf.lfFaceName, -1, NULL, NULL, NULL);
	      if (!p)
		; /* Nothing */
	      else if (g_ascii_strcasecmp (p, "helvetica") == 0)
		wcscpy (lf.lfFaceName, L"arial");
	      else if (g_ascii_strcasecmp (p, "new century schoolbook") == 0)
		wcscpy (lf.lfFaceName, L"century schoolbook");
	      else if (g_ascii_strcasecmp (p, "courier") == 0)
		wcscpy (lf.lfFaceName, L"courier new");
	      else if (g_ascii_strcasecmp (p, "lucida") == 0)
		wcscpy (lf.lfFaceName, L"lucida sans unicode");
	      else if (g_ascii_strcasecmp (p, "lucidatypewriter") == 0)
		wcscpy (lf.lfFaceName, L"lucida console");
	      else if (g_ascii_strcasecmp (p, "times") == 0)
		wcscpy (lf.lfFaceName, L"times new roman");
	      g_free (p);
	    }
	  else if (tries == 1)
	    {
	      /* Last resort: drop the face name and let GDI pick any
	       * font from a suitable family. */
	      gchar *p = g_utf16_to_utf8 (lf.lfFaceName, -1, NULL, NULL, NULL);
	      if (!p)
		; /* Nothing */
	      else if (g_ascii_strcasecmp (p, "courier") == 0)
		{
		  wcscpy (lf.lfFaceName, L"");
		  lf.lfPitchAndFamily |= FF_MODERN;
		}
	      else if (g_ascii_strcasecmp (p, "times new roman") == 0)
		{
		  wcscpy (lf.lfFaceName, L"");
		  lf.lfPitchAndFamily |= FF_ROMAN;
		}
	      else if (g_ascii_strcasecmp (p, "helvetica") == 0 ||
		       g_ascii_strcasecmp (p, "lucida") == 0)
		{
		  wcscpy (lf.lfFaceName, L"");
		  lf.lfPitchAndFamily |= FF_SWISS;
		}
	      else
		{
		  wcscpy (lf.lfFaceName, L"");
		  lf.lfPitchAndFamily = (lf.lfPitchAndFamily & 0x0F) | FF_DONTCARE;
		}
	      g_free (p);
	    }
	  else
	    break;
	}

      if (!hfont)
	return NULL;

      entry = g_slice_new (CacheEntry);

      entry->logfontw = lf;
      entry->hfont = hfont;

      entry->ref_count = 1;
      entry->mru = NULL;

      g_hash_table_insert (cache->forward, &entry->logfontw, entry);
      g_hash_table_insert (cache->back, entry->hfont, entry);
    }

  if (entry->mru)
    {
      if (cache->mru_count > 1 && entry->mru->prev)
	{
	  /* Move to the head of the mru list */

	  if (entry->mru == cache->mru_tail)
	    {
	      cache->mru_tail = cache->mru_tail->prev;
	      cache->mru_tail->next = NULL;
	    }
	  else
	    {
	      entry->mru->prev->next = entry->mru->next;
	      entry->mru->next->prev = entry->mru->prev;
	    }

	  entry->mru->next = cache->mru;
	  entry->mru->prev = NULL;
	  cache->mru->prev = entry->mru;
	  cache->mru = entry->mru;
	}
    }
  else
    {
      /* The mru list keeps its own reference. */
      g_atomic_int_inc (&entry->ref_count);

      /* Insert into the mru list */

      if (cache->mru_count == CACHE_SIZE)
	{
	  /* Evict the least recently used entry. */
	  CacheEntry *old_entry = cache->mru_tail->data;

	  cache->mru_tail = cache->mru_tail->prev;
	  cache->mru_tail->next = NULL;
	  g_list_free_1 (old_entry->mru);
	  old_entry->mru = NULL;
	  cache_entry_unref (cache, old_entry);
	}
      else
	cache->mru_count++;

      cache->mru = g_list_prepend (cache->mru, entry);
      if (!cache->mru_tail)
	cache->mru_tail = cache->mru;
      entry->mru = cache->mru;
    }

  return entry->hfont;
}
/* reads a Atom feed URL and returns a new channel structure (even if
   the feed could not be read) */
/* Parse the <feed> element at @cur: feed-level children are dispatched
 * to namespace handlers or to the Atom element table; <entry> children
 * are delegated to atom10_parse_entry() and collected in ctxt->items.
 * NOTE(review): feedElementHash is a function-local static built on first
 * call without locking — presumably single-threaded; confirm. */
static void
atom10_parse_feed (feedParserCtxtPtr ctxt, xmlNodePtr cur)
{
	NsHandler		*nsh;
	parseChannelTagFunc	pf;
	atom10ElementParserFunc	func;
	static GHashTable	*feedElementHash = NULL;

	if(!feedElementHash) {
		feedElementHash = g_hash_table_new (g_str_hash, g_str_equal);

		g_hash_table_insert (feedElementHash, "author", &atom10_parse_feed_author);
		g_hash_table_insert (feedElementHash, "category", &atom10_parse_feed_category);
		g_hash_table_insert (feedElementHash, "contributor", &atom10_parse_feed_contributor);
		g_hash_table_insert (feedElementHash, "generator", &atom10_parse_feed_generator);
		g_hash_table_insert (feedElementHash, "icon", &atom10_parse_feed_icon);
		g_hash_table_insert (feedElementHash, "id", &atom10_parse_feed_id);
		g_hash_table_insert (feedElementHash, "link", &atom10_parse_feed_link);
		g_hash_table_insert (feedElementHash, "logo", &atom10_parse_feed_logo);
		g_hash_table_insert (feedElementHash, "rights", &atom10_parse_feed_rights);
		g_hash_table_insert (feedElementHash, "subtitle", &atom10_parse_feed_subtitle);
		g_hash_table_insert (feedElementHash, "title", &atom10_parse_feed_title);
		g_hash_table_insert (feedElementHash, "updated", &atom10_parse_feed_updated);
	}

	/* while(TRUE) + break is used as a single-pass block with early exit. */
	while (TRUE) {
		if (xmlStrcmp (cur->name, BAD_CAST"feed")) {
			g_string_append (ctxt->feed->parseErrors, "<p>Could not find Atom 1.0 header!</p>");
			break;
		}

		/* parse feed contents */
		cur = cur->xmlChildrenNode;
		while (cur) {
			/* Skip non-elements and anything without a namespace. */
			if (!cur->name || cur->type != XML_ELEMENT_NODE || !cur->ns) {
				cur = cur->next;
				continue;
			}

			/* check if supported namespace should handle the current tag
			   by trying to determine a namespace handler */
			nsh = NULL;
			if (cur->ns->href)
				nsh = (NsHandler *)g_hash_table_lookup (ns_atom10_ns_uri_table, (gpointer)cur->ns->href);
			if (cur->ns->prefix && !nsh)
				nsh = (NsHandler *)g_hash_table_lookup (atom10_nstable, (gpointer)cur->ns->prefix);

			if(nsh) {
				pf = nsh->parseChannelTag;
				if(pf)
					(*pf)(ctxt, cur);
				cur = cur->next;
				continue;
			}

			/* check namespace of this tag */
			if (!cur->ns->href) {
				/* This is an invalid feed... no idea what to do with the current element */
				debug1 (DEBUG_PARSING, "element with no namespace found in atom feed (%s)!", cur->name);
				cur = cur->next;
				continue;
			}

			if (xmlStrcmp (cur->ns->href, ATOM10_NS)) {
				debug1 (DEBUG_PARSING, "unknown namespace %s found in atom feed!", cur->ns->href);
				cur = cur->next;
				continue;
			}
			/* At this point, the namespace must be the Atom 1.0 namespace */

			func = g_hash_table_lookup (feedElementHash, cur->name);
			if (func) {
				(*func) (cur, ctxt, NULL);
			} else if (xmlStrEqual (cur->name, BAD_CAST"entry")) {
				ctxt->item = atom10_parse_entry (ctxt, cur);
				if (ctxt->item)
					ctxt->items = g_list_append (ctxt->items, ctxt->item);
			}
			cur = cur->next;
		}

		/* FIXME: Maybe check to see that the required information was actually provided (persuant to the RFC). */
		/* after parsing we fill in the infos into the feedPtr structure */

		break;
	}
}
/* Inspect @mount and, when a usable UUID (real or synthesised) and a
 * mount path are available and the UUID is not already tracked, register
 * it with the TrackerStorage via mount_add_new().  Shadowed mounts and
 * multimedia/blank media are skipped. */
static void
mount_add (TrackerStorage *storage, GMount *mount)
{
	TrackerStoragePrivate *priv;
	GFile *root;
	GVolume *volume;
	gchar *mount_name, *mount_path, *uuid;
	gboolean is_optical = FALSE;
	gboolean is_removable = FALSE;

	/* Get mount name */
	mount_name = g_mount_get_name (mount);

	/* Get root path of the mount */
	root = g_mount_get_root (mount);
	mount_path = g_file_get_path (root);

	g_debug ("Found '%s' mounted on path '%s'", mount_name, mount_path);

	/* Do not process shadowed mounts! */
	if (g_mount_is_shadowed (mount)) {
		g_debug ("  Skipping shadowed mount '%s'", mount_name);
		g_object_unref (root);
		g_free (mount_path);
		g_free (mount_name);
		return;
	}

	priv = TRACKER_STORAGE_GET_PRIVATE (storage);

	/* fstab partitions may not have corresponding
	 * GVolumes, so volume may be NULL */
	volume = g_mount_get_volume (mount);
	if (volume) {
		/* GMount with GVolume */

		/* Try to get UUID from the Volume.
		 * Note that g_volume_get_uuid() is NOT equivalent */
		uuid = g_volume_get_identifier (volume, G_VOLUME_IDENTIFIER_KIND_UUID);

		if (!uuid) {
			gchar *content_type;
			gboolean is_multimedia;
			gboolean is_blank;

			/* Optical discs usually won't have UUID in the GVolume */
			content_type = mount_guess_content_type (mount, &is_optical, &is_multimedia, &is_blank);
			is_removable = TRUE;

			/* We don't index content which is video, music or blank;
			 * otherwise fall back to an MD5 of the mount name as UUID. */
			if (!is_multimedia && !is_blank) {
				uuid = g_compute_checksum_for_string (G_CHECKSUM_MD5,
				                                      mount_name,
				                                      -1);
				g_debug ("  No UUID, generated:'%s' (based on mount name)", uuid);
				g_debug ("  Assuming GVolume has removable media, if wrong report a bug! "
				         "content type is '%s'",
				         content_type);
			} else {
				g_debug ("  Being ignored because mount with volume is music/video/blank "
				         "(content type:%s, optical:%s, multimedia:%s, blank:%s)",
				         content_type,
				         is_optical ? "yes" : "no",
				         is_multimedia ? "yes" : "no",
				         is_blank ? "yes" : "no");
			}

			g_free (content_type);
		} else {
			/* Any other removable media will have UUID in the
			 * GVolume.  Note that this also may include some
			 * partitions in the machine which have GVolumes
			 * associated to the GMounts.  We also check a drive
			 * exists to be sure the device is local. */
			GDrive *drive;

			drive = g_volume_get_drive (volume);

			if (drive) {
				/* We can't mount/unmount system volumes, so tag
				 * them as non removable. */
				is_removable = g_volume_can_mount (volume);
				g_debug ("  Found mount with volume and drive which %s be mounted: "
				         "Assuming it's %s removable, if wrong report a bug!",
				         is_removable ? "can" : "cannot",
				         is_removable ? "" : "not");
				g_object_unref (drive);
			} else {
				/* Note: not sure when this can happen... */
				g_debug ("  Mount with volume but no drive, "
				         "assuming not a removable device, "
				         "if wrong report a bug!");
				is_removable = FALSE;
			}
		}

		g_object_unref (volume);
	} else {
		/* GMount without GVolume.
		 * Note: Never found a case where this g_mount_get_uuid() returns
		 * non-NULL... :-) */
		uuid = g_mount_get_uuid (mount);
		if (!uuid) {
			if (mount_path) {
				gchar *content_type;
				gboolean is_multimedia;
				gboolean is_blank;

				content_type = mount_guess_content_type (mount, &is_optical, &is_multimedia, &is_blank);

				/* Note: for GMounts without GVolume, is_blank should NOT be considered,
				 * as it may give unwanted results... */
				if (!is_multimedia) {
					/* Fall back to an MD5 of the mount path. */
					uuid = g_compute_checksum_for_string (G_CHECKSUM_MD5,
					                                      mount_path,
					                                      -1);
					g_debug ("  No UUID, generated:'%s' (based on mount path)", uuid);
				} else {
					g_debug ("  Being ignored because mount is music/video "
					         "(content type:%s, optical:%s, multimedia:%s)",
					         content_type,
					         is_optical ? "yes" : "no",
					         is_multimedia ? "yes" : "no");
				}

				g_free (content_type);
			} else {
				g_debug ("  Being ignored because mount has no GVolume (i.e. not user mountable) "
				         "and has no mount root path available");
			}
		}
	}

	/* If we got something to be used as UUID, then add the mount
	 * to the TrackerStorage */
	if (uuid && mount_path && !g_hash_table_lookup (priv->mounts_by_uuid, uuid)) {
		g_debug ("  Adding mount point with UUID: '%s', removable: %s, optical: %s, path: '%s'",
		         uuid,
		         is_removable ? "yes" : "no",
		         is_optical ? "yes" : "no",
		         mount_path);
		mount_add_new (storage, uuid, mount_path, mount_name, is_removable, is_optical);
	} else {
		g_debug ("  Skipping mount point with UUID: '%s', path: '%s', already managed: '%s'",
		         uuid ? uuid : "none",
		         mount_path ? mount_path : "none",
		         (uuid && g_hash_table_lookup (priv->mounts_by_uuid, uuid)) ? "yes" : "no");
	}

	g_free (mount_name);
	g_free (mount_path);
	g_free (uuid);
	g_object_unref (root);
}
/* Initialise the DAAP xform for @xform's URL: parse host/port/command
 * from the URL, reuse or create a cached login session, pick the first
 * database on the server and open (but do not read) the stream.
 * Returns TRUE on success, FALSE on any failure.
 * FIX: previously every error path leaked @data, and the "session
 * already cached" path leaked @hash; all error paths now release what
 * was allocated so far. */
static gboolean
xmms_daap_init (xmms_xform_t *xform)
{
	gint dbid;
	GSList *dbid_list = NULL;
	xmms_daap_data_t *data;
	xmms_daap_login_data_t *login_data;
	xmms_error_t err;
	const gchar *url;
	const gchar *metakey;
	gchar *command, *hash;
	guint filesize;

	g_return_val_if_fail (xform, FALSE);

	url = xmms_xform_indata_get_str (xform, XMMS_STREAM_TYPE_URL);
	g_return_val_if_fail (url, FALSE);

	data = g_new0 (xmms_daap_data_t, 1);

	xmms_error_reset (&err);

	if (!get_data_from_url (url, &(data->host), &(data->port), &command, &err)) {
		g_free (data);
		return FALSE;
	}

	hash = g_strdup_printf ("%s:%u", data->host, data->port);

	login_data = g_hash_table_lookup (login_sessions, hash);

	if (!login_data) {
		XMMS_DBG ("creating login data for %s", hash);
		login_data = g_new0 (xmms_daap_login_data_t, 1);

		login_data->request_id = 1;
		login_data->logged_in = TRUE;

		login_data->session_id = daap_command_login (data->host, data->port,
		                                             login_data->request_id,
		                                             &err);
		if (xmms_error_iserror (&err)) {
			g_free (login_data);
			g_free (hash);
			goto cleanup;
		}

		/* The table takes ownership of @hash as the key. */
		g_hash_table_insert (login_sessions, hash, login_data);
	} else {
		/* Session already cached; this key copy is not needed. */
		g_free (hash);
	}

	login_data->revision_id = daap_command_update (data->host, data->port,
	                                               login_data->session_id,
	                                               login_data->request_id);

	dbid_list = daap_command_db_list (data->host, data->port,
	                                  login_data->session_id,
	                                  login_data->revision_id,
	                                  login_data->request_id);
	if (!dbid_list) {
		goto cleanup;
	}

	/* XXX: see XXX in the browse function above */
	dbid = ((cc_item_record_t *) dbid_list->data)->dbid;
	/* want to request a stream, but don't read the data yet */
	data->channel = daap_command_init_stream (data->host, data->port,
	                                          login_data->session_id,
	                                          login_data->revision_id,
	                                          login_data->request_id, dbid,
	                                          command, &filesize);
	if (! data->channel) {
		goto cleanup;
	}
	login_data->request_id++;

	metakey = XMMS_MEDIALIB_ENTRY_PROPERTY_SIZE;
	xmms_xform_metadata_set_int (xform, metakey, filesize);

	xmms_xform_private_data_set (xform, data);

	xmms_xform_outdata_type_add (xform,
	                             XMMS_STREAM_TYPE_MIMETYPE,
	                             "application/octet-stream",
	                             XMMS_STREAM_TYPE_END);

	g_slist_foreach (dbid_list, (GFunc) cc_item_record_free, NULL);
	g_slist_free (dbid_list);
	g_free (command);

	return TRUE;

cleanup:
	/* Release everything acquired so far; the cached login session (if
	 * inserted) intentionally stays in login_sessions for reuse. */
	g_slist_foreach (dbid_list, (GFunc) cc_item_record_free, NULL);
	g_slist_free (dbid_list);
	g_free (command);
	g_free (data->host);
	g_free (data);

	return FALSE;
}
/* D-Bus "notify" callback from the VPN client task: forward the message
 * to the driver's notify() hook and translate the resulting VPN state
 * into provider state changes (bring the interface up, handle restarts,
 * disconnects and auth failures).  Always returns NULL (no reply).
 * NOTE(review): @data from vpn_provider_get_data() is dereferenced
 * without a NULL check in the CONNECT/READY case — presumably a task can
 * only notify after vpn_connect() attached data; confirm. */
static DBusMessage *vpn_notify(struct connman_task *task,
			DBusMessage *msg, void *user_data)
{
	struct vpn_provider *provider = user_data;
	struct vpn_data *data;
	struct vpn_driver_data *vpn_driver_data;
	const char *name;
	int state, index, err;

	data = vpn_provider_get_data(provider);

	name = vpn_provider_get_driver_name(provider);

	if (!name) {
		DBG("Cannot find VPN driver for provider %p", provider);
		vpn_provider_set_state(provider, VPN_PROVIDER_STATE_FAILURE);
		return NULL;
	}

	vpn_driver_data = g_hash_table_lookup(driver_hash, name);
	if (!vpn_driver_data) {
		DBG("Cannot find VPN driver data for name %s", name);
		vpn_provider_set_state(provider, VPN_PROVIDER_STATE_FAILURE);
		return NULL;
	}

	state = vpn_driver_data->vpn_driver->notify(msg, provider);

	DBG("provider %p driver %s state %d", provider, name, state);

	switch (state) {
	case VPN_STATE_CONNECT:
	case VPN_STATE_READY:
		if (data->state == VPN_STATE_READY) {
			/*
			 * This is the restart case, in which case we must
			 * just set the IP address.
			 *
			 * We need to remove first the old address, just
			 * replacing the old address will not work as expected
			 * because the old address will linger in the interface
			 * and not disapper so the clearing is needed here.
			 *
			 * Also the state must change, otherwise the routes
			 * will not be set properly.
			 */
			vpn_provider_set_state(provider,
						VPN_PROVIDER_STATE_CONNECT);

			vpn_provider_clear_address(provider, AF_INET);
			vpn_provider_clear_address(provider, AF_INET6);

			vpn_provider_change_address(provider);
			vpn_provider_set_state(provider,
						VPN_PROVIDER_STATE_READY);
			break;
		}

		index = vpn_provider_get_index(provider);
		/* Reference held for the newlink watch callback. */
		vpn_provider_ref(provider);
		data->watch = vpn_rtnl_add_newlink_watch(index,
						     vpn_newlink, provider);
		err = connman_inet_ifup(index);
		if (err < 0) {
			if (err == -EALREADY)
				/*
				 * So the interface is up already, that is just
				 * great. Unfortunately in this case the
				 * newlink watch might not have been called at
				 * all. We must manually call it here so that
				 * the provider can go to ready state and the
				 * routes are setup properly.
				 */
				vpn_newlink(IFF_UP, 0, provider);
			else
				DBG("Cannot take interface %d up err %d/%s",
					index, -err, strerror(-err));
		}
		break;

	case VPN_STATE_UNKNOWN:
	case VPN_STATE_IDLE:
	case VPN_STATE_DISCONNECT:
	case VPN_STATE_FAILURE:
		vpn_provider_set_state(provider,
					VPN_PROVIDER_STATE_DISCONNECT);
		break;

	case VPN_STATE_AUTH_FAILURE:
		vpn_provider_indicate_error(provider,
					VPN_PROVIDER_ERROR_AUTH_FAILED);
		break;
	}

	return NULL;
}
/* Walk @search_str and its "duplicate" variants (search_str + HPIDUP_STRING
 * + number) looking for the hash entry whose event matches one of the
 * events defined for @resinfo's sensors or resource.  Returns the matching
 * entry, or NULL when none of the duplicates' events belong to this
 * resource.
 * FIX: strncpy() does not NUL-terminate when the source fills the buffer
 * (CERT STR32-C); both copies are now explicitly terminated. */
static Xml2EventInfoT *snmp_bc_findevent4dupstr(gchar *search_str,
						Xml2EventInfoT *strhash_data,
						LogSource2ResourceT *resinfo)
{
        gchar dupstr[SNMP_BC_MAX_SEL_ENTRY_LENGTH];
	Xml2EventInfoT *dupstr_hash_data;
	short strnum;

	strncpy(dupstr, search_str, SNMP_BC_MAX_SEL_ENTRY_LENGTH);
	dupstr[SNMP_BC_MAX_SEL_ENTRY_LENGTH - 1] = '\0';
	dupstr_hash_data = strhash_data;
	strnum = strhash_data->event_dup + 1; /* Original string plus dups */

	while (strnum && (dupstr_hash_data != NULL)) {
		int i,j;
		gchar *normalized_event;

		/* Search sensor array for the duplicate string's event */
		for (i=0; (resinfo->sensor_array_ptr + i)->sensor.Num != 0; i++) {
			for (j=0; (resinfo->sensor_array_ptr + i)->sensor_info.event_array[j].event != NULL; j++) {
				normalized_event = snmp_derive_objid(resinfo->ep,
						     (resinfo->sensor_array_ptr + i)->sensor_info.event_array[j].event);
				if (!strcmp(dupstr_hash_data->event, normalized_event)) {
					g_free(normalized_event);
					return dupstr_hash_data;
				}
				g_free(normalized_event);
			}
		}

		/* Search resource array for the duplicate string's event */
		for (i=0; snmp_rpt_array[resinfo->rpt].res_info.event_array[i].event != NULL; i++) {
			normalized_event = snmp_derive_objid(resinfo->ep,
					     snmp_rpt_array[resinfo->rpt].res_info.event_array[i].event);
			if (!strcmp(dupstr_hash_data->event, normalized_event)) {
				g_free(normalized_event);
				return dupstr_hash_data;
			}
			g_free(normalized_event);
		}

		/* Find next duplicate string */
		strnum--;
		if (strnum) {
			gchar strnum_str[OH_MAX_LOCATION_DIGITS];
			gchar *tmpstr;

			snprintf(strnum_str, OH_MAX_LOCATION_DIGITS, "%d", strnum);
			tmpstr = g_strconcat(search_str, HPIDUP_STRING, strnum_str, NULL);
			strncpy(dupstr, tmpstr, SNMP_BC_MAX_SEL_ENTRY_LENGTH);
			dupstr[SNMP_BC_MAX_SEL_ENTRY_LENGTH - 1] = '\0';
			g_free(tmpstr);

			/* FIXME Fix for RSA */
			dupstr_hash_data = (Xml2EventInfoT *)g_hash_table_lookup(bc_xml2event_hash, dupstr);
			if (dupstr_hash_data == NULL) {
				dbg("Cannot find duplicate string=%s.", dupstr);
			}
		}
	}

	return NULL;
}
/* Start the VPN client task for @provider: allocate per-provider data on
 * first use, create the tun device when the driver needs one, spawn the
 * driver program and hand off to the driver's connect() hook.
 * Returns -EINPROGRESS when the connect was launched, -EISCONN/-EINPROGRESS
 * for already-connected/connecting providers, or a negative errno on
 * failure (in which case all per-provider state is torn down). */
static int vpn_connect(struct vpn_provider *provider,
			vpn_provider_connect_cb_t cb,
			const char *dbus_sender, void *user_data)
{
	struct vpn_data *data = vpn_provider_get_data(provider);
	struct vpn_driver_data *vpn_driver_data;
	const char *name;
	int ret = 0;
	enum vpn_state state = VPN_STATE_UNKNOWN;

	if (data)
		state = data->state;

	DBG("data %p state %d", data, state);

	switch (state) {
	case VPN_STATE_UNKNOWN:
		/* First connect for this provider: allocate its state. */
		data = g_try_new0(struct vpn_data, 1);
		if (!data)
			return -ENOMEM;

		data->provider = vpn_provider_ref(provider);
		data->watch = 0;
		data->flags = 0;
		data->task = NULL;

		vpn_provider_set_data(provider, data);
		/* fall through */
	case VPN_STATE_DISCONNECT:
	case VPN_STATE_IDLE:
	case VPN_STATE_FAILURE:
	case VPN_STATE_AUTH_FAILURE:
		data->state = VPN_STATE_IDLE;
		break;
	case VPN_STATE_CONNECT:
		return -EINPROGRESS;
	case VPN_STATE_READY:
		return -EISCONN;
	}

	name = vpn_provider_get_driver_name(provider);
	if (!name)
		return -EINVAL;

	vpn_driver_data = g_hash_table_lookup(driver_hash, name);

	if (!vpn_driver_data || !vpn_driver_data->vpn_driver) {
		ret = -EINVAL;
		goto exist_err;
	}

	/* Drivers flagged VPN_FLAG_NO_TUN manage their own device. */
	if (vpn_driver_data->vpn_driver->flags != VPN_FLAG_NO_TUN) {
		ret = vpn_create_tun(provider);
		if (ret < 0)
			goto exist_err;
	}

	data->task = connman_task_create(vpn_driver_data->program);

	if (!data->task) {
		ret = -ENOMEM;
		stop_vpn(provider);
		goto exist_err;
	}

	if (connman_task_set_notify(data->task, "notify",
					vpn_notify, provider)) {
		ret = -ENOMEM;
		stop_vpn(provider);
		connman_task_destroy(data->task);
		data->task = NULL;
		goto exist_err;
	}

	ret = vpn_driver_data->vpn_driver->connect(provider, data->task,
						data->if_name, cb,
						dbus_sender, user_data);
	/* -EINPROGRESS from the driver is the normal async outcome. */
	if (ret < 0 && ret != -EINPROGRESS) {
		stop_vpn(provider);
		connman_task_destroy(data->task);
		data->task = NULL;
		goto exist_err;
	}

	DBG("%s started with dev %s",
		vpn_driver_data->provider_driver.name, data->if_name);

	data->state = VPN_STATE_CONNECT;

	return -EINPROGRESS;

exist_err:
	/* Tear down everything this (and any previous) connect attempt
	 * attached to the provider. */
	vpn_provider_set_index(provider, -1);
	vpn_provider_set_data(provider, NULL);
	vpn_provider_unref(data->provider);
	g_free(data->if_name);
	g_free(data);

	return ret;
}
	/* (tail of a setup function that starts before this view) */
	}
	acse_ctx_oid_table = g_hash_table_new(acse_ctx_oid_hash, acse_ctx_oid_equal);
}

/* Remember the OID presented for presentation-context @idx in the
 * file-scoped acse_ctx_oid_table, replacing any earlier mapping for the
 * same context id.  Entries are allocated in wmem file scope, so they
 * live for the duration of the capture file. */
static void
register_ctx_id_and_oid(packet_info *pinfo _U_, guint32 idx, char *oid)
{
	acse_ctx_oid_t *aco, *tmpaco;

	aco=wmem_new(wmem_file_scope(), acse_ctx_oid_t);
	aco->ctx_id=idx;
	aco->oid=wmem_strdup(wmem_file_scope(), oid);

	/* if this ctx already exists, remove the old one first */
	tmpaco=(acse_ctx_oid_t *)g_hash_table_lookup(acse_ctx_oid_table, aco);
	if(tmpaco){
		g_hash_table_remove(acse_ctx_oid_table, tmpaco);
	}
	g_hash_table_insert(acse_ctx_oid_table, aco, aco);
}

/* Look up the OID previously registered for context @idx; returns the
 * stored (wmem-owned) string or NULL if the context is unknown.
 * (The function's closing brace lies beyond this view.) */
static char *
find_oid_by_ctx_id(packet_info *pinfo _U_, guint32 idx)
{
	acse_ctx_oid_t aco, *tmpaco;

	aco.ctx_id=idx;
	tmpaco=(acse_ctx_oid_t *)g_hash_table_lookup(acse_ctx_oid_table, &aco);
	if(tmpaco){
		return tmpaco->oid;
	}
	return NULL;
/* Allocate and initialise a new oh_handler for @handler_config.
 * The plugin named by the "plugin" key must already be loaded; on
 * success the handler gets a unique id which is also stored back into
 * @handler_config under the key "handler-id".  Returns NULL on any
 * failure.
 * FIX: the "handler-id" key was duplicated with strdup() but later
 * released with g_free() (and owned by a GLib hash table); use
 * g_strdup() so allocator and deallocator match. */
static struct oh_handler *new_handler(GHashTable *handler_config)
{       /* Return a new oh_handler instance */
        struct oh_plugin *plugin = NULL;
        struct oh_handler *handler = NULL;
        static unsigned int handler_id = 1;
        unsigned int *hidp;
        char *hid_strp;

        if (!handler_config) {
                dbg("ERROR creating new handler. Invalid parameter.");
                return NULL;
        }

        handler = (struct oh_handler *)g_malloc0(sizeof(struct oh_handler));
        if (!handler) {
                dbg("Out of Memory!");
                return NULL;
        }
        hidp = (unsigned int *)g_malloc(sizeof(unsigned int));
        if (!hidp) {
                dbg("Out of Memory!");
                g_free(handler);
                return NULL;
        }
        hid_strp = g_strdup("handler-id");
        if (!hid_strp) {
                dbg("Out of Memory!");
                g_free(handler);
                g_free(hidp);
                return NULL;
        }

        plugin = oh_get_plugin((char *)g_hash_table_lookup(handler_config,
                                                           "plugin"));
        if(!plugin) {
                dbg("Attempt to create handler for unknown plugin %s",
                    (char *)g_hash_table_lookup(handler_config, "plugin"));
                goto cleanexit;
        }

        /* Initialize handler */
        handler->abi = plugin->abi;
        plugin->handler_count++; /* Increment # of handlers using the plugin */
        oh_release_plugin(plugin);

        /* Serialise id assignment so concurrent creators never share one. */
        g_static_rec_mutex_lock(&oh_handlers.lock);
        handler->id = handler_id++;
        g_static_rec_mutex_unlock(&oh_handlers.lock);
        *hidp = handler->id;
        g_hash_table_insert(handler_config,
                            (gpointer)hid_strp,
                            (gpointer)hidp); /* table now owns hid_strp/hidp */
        handler->plugin_name = (char *)g_hash_table_lookup(handler_config,
                                                           "plugin");
        handler->config = handler_config;
        handler->dids = NULL;
        handler->refcount = 0;
        g_static_rec_mutex_init(&handler->lock);
        g_static_rec_mutex_init(&handler->refcount_lock);

        return handler;

cleanexit:
        g_free(hidp);
        g_free(hid_strp);
        g_free(handler);
        return NULL;
}
static guint find_data_offsets(const gchar *buffer, gsize size, GPtrArray *ezdfile, GError **error) { EZDSection *dataset, *section; GString *grkey; guint required_size = 0; gint ngroups, nchannels, i, j, k; guint ndata = 0; gchar *p; /* Sanity check */ if (!ezdfile->len) { err_NO_DATA(error); return 0; } dataset = (EZDSection*)g_ptr_array_index(ezdfile, 0); if (strcmp(dataset->name, "DataSet")) { g_set_error(error, GWY_MODULE_FILE_ERROR, GWY_MODULE_FILE_ERROR_DATA, _("First section isn't DataSet")); return 0; } if (!(p = g_hash_table_lookup(dataset->meta, "GroupCount")) || (ngroups = atol(p)) <= 0) { err_INVALID(error, _("GroupCount in [DataSet]")); return 0; } /* Scan groups */ grkey = g_string_new(NULL); for (i = 0; i < ngroups; i++) { g_string_printf(grkey, "Gr%d-Count", i); if (!(p = g_hash_table_lookup(dataset->meta, grkey->str))) { g_warning("No count for group %u", i); continue; } if ((nchannels = atol(p)) <= 0) continue; /* Scan channels inside a group, note it's OK there's less channels * than specified */ for (j = 0; j < nchannels; j++) { g_string_printf(grkey, "Gr%d-Ch%d", i, j); if (!(p = g_hash_table_lookup(dataset->meta, grkey->str))) continue; section = NULL; for (k = 1; k < ezdfile->len; k++) { section = (EZDSection*)g_ptr_array_index(ezdfile, k); if (gwy_strequal(section->name, p)) break; } if (!section) { g_warning("Cannot find section for %s", p); continue; } /* Compute data position */ gwy_debug("Data %s at offset %u from data start", grkey->str, required_size); gwy_debug("xres = %d, yres = %d, bpp = %d, z-name = %s", section->xres, section->yres, section->bitdepth, section->zrange.name); if (section->yres < 2) { gwy_debug("Skipping 1D data Gr%d-Ch%d. 
FIXME.", i, j); continue; } ndata++; section->data = buffer + required_size; required_size += section->xres * section->yres * (section->bitdepth/8); if (required_size > size) { g_warning("Truncated file, %s doesn't fit", grkey->str); g_string_free(grkey, TRUE); section->data = NULL; return 0; } section->group = i; section->channel = j; } } g_string_free(grkey, TRUE); if (!ndata) err_NO_DATA(error); return ndata; }
/** * dfu_target_download_element: **/ static gboolean dfu_target_download_element (DfuTarget *target, DfuElement *element, DfuTargetTransferFlags flags, GCancellable *cancellable, GError **error) { DfuTargetPrivate *priv = GET_PRIVATE (target); DfuSector *sector; GBytes *bytes; guint i; guint nr_chunks; guint dfuse_sector_offset = 0; guint last_sector_id = G_MAXUINT; guint old_percentage = G_MAXUINT; guint16 transfer_size = dfu_device_get_transfer_size (priv->device); g_autoptr(GError) error_local = NULL; /* ST uses wBlockNum=0 for DfuSe commands and wBlockNum=1 is reserved */ if (dfu_device_has_dfuse_support (priv->device)) dfuse_sector_offset = 2; /* round up as we have to transfer incomplete blocks */ bytes = dfu_element_get_contents (element); nr_chunks = ceil ((gdouble) g_bytes_get_size (bytes) / (gdouble) transfer_size); if (nr_chunks == 0) { g_set_error_literal (error, DFU_ERROR, DFU_ERROR_INVALID_FILE, "zero-length firmware"); return FALSE; } for (i = 0; i < nr_chunks + 1; i++) { gsize length; gsize offset; guint percentage; g_autoptr(GBytes) bytes_tmp = NULL; /* caclulate the offset into the element data */ offset = i * transfer_size; /* for DfuSe devices we need to handle the erase and setting * the address manually */ if (dfu_device_has_dfuse_support (priv->device)) { /* check the sector with this element address is suitable */ sector = dfu_target_get_sector_for_addr (target, offset); if (sector == NULL) { g_set_error (error, DFU_ERROR, DFU_ERROR_INVALID_DEVICE, "no memory sector at 0x%04x", (guint) offset); return FALSE; } if (!dfu_sector_has_cap (sector, DFU_SECTOR_CAP_WRITEABLE)) { g_set_error (error, DFU_ERROR, DFU_ERROR_INVALID_DEVICE, "memory sector at 0x%04x is not writable", (guint) offset); return FALSE; } /* if it's erasable and not yet blanked */ if (!dfu_sector_has_cap (sector, DFU_SECTOR_CAP_ERASEABLE) && g_hash_table_lookup (priv->sectors_erased, sector) == NULL) { g_debug ("erasing DfuSe address at 0x%04x", (guint) offset); if 
(!dfu_target_erase_address (target, offset, cancellable, error)) return FALSE; g_hash_table_insert (priv->sectors_erased, sector, GINT_TO_POINTER (1)); } /* manually set the sector address */ if (dfu_sector_get_id (sector) != last_sector_id) { g_debug ("setting DfuSe address to 0x%04x", (guint) offset); if (!dfu_target_set_address (target, offset, cancellable, error)) return FALSE; last_sector_id = dfu_sector_get_id (sector); } } /* we have to write one final zero-sized chunk for EOF */ if (i < nr_chunks) { length = g_bytes_get_size (bytes) - offset; if (length > transfer_size) length = transfer_size; bytes_tmp = g_bytes_new_from_bytes (bytes, offset, length); } else { bytes_tmp = g_bytes_new (NULL, 0); } g_debug ("writing #%04x chunk of size %" G_GSIZE_FORMAT, i, g_bytes_get_size (bytes_tmp)); if (!dfu_target_download_chunk (target, i + dfuse_sector_offset, bytes_tmp, cancellable, error)) return FALSE; /* update UI */ percentage = (offset * 100) / g_bytes_get_size (bytes); if (percentage != old_percentage) { g_signal_emit (target, signals[SIGNAL_PERCENTAGE_CHANGED], 0, percentage); } /* give the target a chance to update */ g_usleep (dfu_device_get_download_timeout (priv->device) * 1000); /* getting the status moves the state machine to DNLOAD-IDLE */ if (!dfu_device_refresh (priv->device, cancellable, error)) return FALSE; } /* verify */ if (flags & DFU_TARGET_TRANSFER_FLAG_VERIFY) { GBytes *bytes_tmp; g_autoptr(DfuElement) element_tmp = NULL; element_tmp = dfu_target_upload_element (target, dfu_element_get_address (element), g_bytes_get_size (bytes), cancellable, error); if (element_tmp == NULL) return FALSE; bytes_tmp = dfu_element_get_contents (element_tmp); if (g_bytes_compare (bytes_tmp, bytes) != 0) { g_autofree gchar *bytes_cmp_str = NULL; bytes_cmp_str = _g_bytes_compare_verbose (bytes_tmp, bytes); g_set_error (error, DFU_ERROR, DFU_ERROR_VERIFY_FAILED, "verify failed: %s", bytes_cmp_str); return FALSE; } } return TRUE; }
static gboolean is_valid_unix (const gchar *address_entry, GHashTable *key_value_pairs, GError **error) { gboolean ret; GList *keys; GList *l; const gchar *path; const gchar *tmpdir; const gchar *abstract; ret = FALSE; keys = NULL; path = NULL; tmpdir = NULL; abstract = NULL; keys = g_hash_table_get_keys (key_value_pairs); for (l = keys; l != NULL; l = l->next) { const gchar *key = l->data; if (g_strcmp0 (key, "path") == 0) path = g_hash_table_lookup (key_value_pairs, key); else if (g_strcmp0 (key, "tmpdir") == 0) tmpdir = g_hash_table_lookup (key_value_pairs, key); else if (g_strcmp0 (key, "abstract") == 0) abstract = g_hash_table_lookup (key_value_pairs, key); else { g_set_error (error, G_IO_ERROR, G_IO_ERROR_INVALID_ARGUMENT, _("Unsupported key `%s' in address entry `%s'"), key, address_entry); goto out; } } if (path != NULL) { if (tmpdir != NULL || abstract != NULL) goto meaningless; } else if (tmpdir != NULL) { if (path != NULL || abstract != NULL) goto meaningless; } else if (abstract != NULL) { if (path != NULL || tmpdir != NULL) goto meaningless; } else { g_set_error (error, G_IO_ERROR, G_IO_ERROR_INVALID_ARGUMENT, _("Address `%s' is invalid (need exactly one of path, tmpdir or abstract keys)"), address_entry); goto out; } ret= TRUE; goto out; meaningless: g_set_error (error, G_IO_ERROR, G_IO_ERROR_INVALID_ARGUMENT, _("Meaningless key/value pair combination in address entry `%s'"), address_entry); out: g_list_free (keys); return ret; }
/**
 * process_kqueue_notifications:
 * @gioc: unused.
 * @cond: unused.
 * @data: unused.
 *
 * Processes notifications, coming from the kqueue thread.
 *
 * Reads notifications from the command file descriptor, emits the
 * "changed" event on the appropriate monitor.
 *
 * A typical GIO Channel callback function.
 *
 * Returns: %TRUE (always keeps the watch installed, even on errors)
 **/
static gboolean
process_kqueue_notifications (GIOChannel   *gioc,
                              GIOCondition  cond,
                              gpointer      data)
{
  struct kqueue_notification n;
  kqueue_sub *sub = NULL;
  GFileMonitor *monitor = NULL;
  GFileMonitorEvent mask = 0;

  g_assert (kqueue_socket_pair[0] != -1);
  /* One fixed-size notification record per wakeup. */
  if (!_ku_read (kqueue_socket_pair[0], &n, sizeof (struct kqueue_notification)))
    {
      KH_W ("Failed to read a kqueue notification, error %d", errno);
      return TRUE;
    }

  /* Look up the subscription for the notifying fd; the table is shared
   * with the kqueue thread, so the lookup is done under hash_lock (the
   * lock is released before sub is used — presumably safe because only
   * this thread removes entries; NOTE(review): confirm). */
  G_LOCK (hash_lock);
  sub = (kqueue_sub *) g_hash_table_lookup (subs_hash_table, GINT_TO_POINTER (n.fd));
  G_UNLOCK (hash_lock);
  if (sub == NULL)
    {
      KH_W ("Got a notification for a deleted or non-existing subscription %d", n.fd);
      return TRUE;
    }

  monitor = G_FILE_MONITOR (sub->user_data);
  g_assert (monitor != NULL);

  /* File deleted or access revoked: drop the cached directory listing
   * and move the subscription onto the missing-files list. */
  if (n.flags & (NOTE_DELETE | NOTE_REVOKE))
    {
      if (sub->deps)
        {
          dl_free (sub->deps);
          sub->deps = NULL;
        }
      _km_add_missing (sub);

      if (!(n.flags & NOTE_REVOKE))
        {
          /* Note that NOTE_REVOKE is issued by the kqueue thread
           * on EV_ERROR kevent. In this case, a file descriptor is
           * already closed from the kqueue thread, no need to close
           * it manually */
          _kh_cancel_sub (sub);
        }
    }

  /* Directory contents changed: diff the directory listing ourselves,
   * then strip the write/extend bits so they aren't reported twice. */
  if (sub->is_dir && n.flags & (NOTE_WRITE | NOTE_EXTEND))
    {
      _kh_dir_diff (sub, monitor);
      n.flags &= ~(NOTE_WRITE | NOTE_EXTEND);
    }

  /* Whatever flags remain map to a single GIO event, if any. */
  if (n.flags)
    {
      gboolean done = FALSE;
      mask = convert_kqueue_events_to_gio (n.flags, &done);
      if (done == TRUE)
        {
          GFile *file = g_file_new_for_path (sub->filename);
          g_file_monitor_emit_event (monitor, file, NULL, mask);
          g_object_unref (file);
        }
    }

  return TRUE;
}
/* TODO: Declare an extension point called GDBusTransport (or similar)
 * and move code below to extensions implementing said extension
 * point. That way we can implement a D-Bus transport over X11 without
 * making libgio link to libX11...
 */
/* Connect one parsed D-Bus address entry.
 *
 * Supported transports: "unix" (path or abstract socket, UNIX only),
 * "tcp"/"nonce-tcp" (host/port, with a 16-byte nonce file sent after
 * connecting for nonce-tcp), and the special "autolaunch:" entry which
 * recurses via g_dbus_address_try_connect_one().
 *
 * Returns: (transfer full) a connected #GIOStream, or NULL with @error set.
 */
static GIOStream *
g_dbus_address_connect (const gchar    *address_entry,
                        const gchar    *transport_name,
                        GHashTable     *key_value_pairs,
                        GCancellable   *cancellable,
                        GError        **error)
{
  GIOStream *ret;
  GSocketConnectable *connectable;
  const gchar *nonce_file;

  connectable = NULL;
  ret = NULL;
  nonce_file = NULL;

  /* if (FALSE) lets every transport case below be a uniform "else if",
   * including the first one inside #ifdef. */
  if (FALSE)
    {
    }
#ifdef G_OS_UNIX
  else if (g_strcmp0 (transport_name, "unix") == 0)
    {
      const gchar *path;
      const gchar *abstract;
      path = g_hash_table_lookup (key_value_pairs, "path");
      abstract = g_hash_table_lookup (key_value_pairs, "abstract");
      /* exactly one of "path"/"abstract" must be present */
      if ((path == NULL && abstract == NULL) || (path != NULL && abstract != NULL))
        {
          g_set_error (error,
                       G_IO_ERROR,
                       G_IO_ERROR_INVALID_ARGUMENT,
                       _("Error in address `%s' - the unix transport requires exactly one of the "
                         "keys `path' or `abstract' to be set"),
                       address_entry);
        }
      else if (path != NULL)
        {
          connectable = G_SOCKET_CONNECTABLE (g_unix_socket_address_new (path));
        }
      else if (abstract != NULL)
        {
          connectable = G_SOCKET_CONNECTABLE (g_unix_socket_address_new_with_type (abstract,
                                                                                   -1,
                                                                                   G_UNIX_SOCKET_ADDRESS_ABSTRACT));
        }
      else
        {
          g_assert_not_reached ();
        }
    }
#endif
  else if (g_strcmp0 (transport_name, "tcp") == 0 || g_strcmp0 (transport_name, "nonce-tcp") == 0)
    {
      const gchar *s;
      const gchar *host;
      glong port;
      gchar *endp;
      gboolean is_nonce;

      is_nonce = (g_strcmp0 (transport_name, "nonce-tcp") == 0);

      host = g_hash_table_lookup (key_value_pairs, "host");
      if (host == NULL)
        {
          g_set_error (error,
                       G_IO_ERROR,
                       G_IO_ERROR_INVALID_ARGUMENT,
                       _("Error in address `%s' - the host attribute is missing or malformed"),
                       address_entry);
          goto out;
        }

      /* a missing port means "any", i.e. 0 */
      s = g_hash_table_lookup (key_value_pairs, "port");
      if (s == NULL)
        s = "0";
      port = strtol (s, &endp, 10);
      if ((*s == '\0' || *endp != '\0') || port < 0 || port >= 65536)
        {
          g_set_error (error,
                       G_IO_ERROR,
                       G_IO_ERROR_INVALID_ARGUMENT,
                       _("Error in address `%s' - the port attribute is missing or malformed"),
                       address_entry);
          goto out;
        }

      if (is_nonce)
        {
          nonce_file = g_hash_table_lookup (key_value_pairs, "noncefile");
          if (nonce_file == NULL)
            {
              g_set_error (error,
                           G_IO_ERROR,
                           G_IO_ERROR_INVALID_ARGUMENT,
                           _("Error in address `%s' - the noncefile attribute is missing or malformed"),
                           address_entry);
              goto out;
            }
        }

      /* TODO: deal with family key/value-pair */
      connectable = g_network_address_new (host, port);
    }
  else if (g_strcmp0 (address_entry, "autolaunch:") == 0)
    {
      gchar *autolaunch_address;
      autolaunch_address = get_session_address_platform_specific (error);
      if (autolaunch_address != NULL)
        {
          ret = g_dbus_address_try_connect_one (autolaunch_address, NULL, cancellable, error);
          g_free (autolaunch_address);
          goto out;
        }
      else
        {
          g_prefix_error (error, _("Error auto-launching: "));
        }
    }
  else
    {
      g_set_error (error,
                   G_IO_ERROR,
                   G_IO_ERROR_INVALID_ARGUMENT,
                   _("Unknown or unsupported transport `%s' for address `%s'"),
                   transport_name,
                   address_entry);
    }

  if (connectable != NULL)
    {
      GSocketClient *client;
      GSocketConnection *connection;

      g_assert (ret == NULL);
      client = g_socket_client_new ();
      connection = g_socket_client_connect (client,
                                            connectable,
                                            cancellable,
                                            error);
      g_object_unref (connectable);
      g_object_unref (client);
      if (connection == NULL)
        goto out;

      ret = G_IO_STREAM (connection);

      if (nonce_file != NULL)
        {
          gchar nonce_contents[16 + 1];
          size_t num_bytes_read;
          FILE *f;

          /* be careful to read only 16 bytes - we also check that the file is only 16 bytes long */
          f = fopen (nonce_file, "rb");
          if (f == NULL)
            {
              g_set_error (error,
                           G_IO_ERROR,
                           G_IO_ERROR_INVALID_ARGUMENT,
                           _("Error opening nonce file `%s': %s"),
                           nonce_file,
                           g_strerror (errno));
              g_object_unref (ret);
              ret = NULL;
              goto out;
            }
          /* asking for 17 bytes distinguishes "exactly 16" from "longer" */
          num_bytes_read = fread (nonce_contents,
                                  sizeof (gchar),
                                  16 + 1,
                                  f);
          if (num_bytes_read != 16)
            {
              if (num_bytes_read == 0)
                {
                  g_set_error (error,
                               G_IO_ERROR,
                               G_IO_ERROR_INVALID_ARGUMENT,
                               _("Error reading from nonce file `%s': %s"),
                               nonce_file,
                               g_strerror (errno));
                }
              else
                {
                  g_set_error (error,
                               G_IO_ERROR,
                               G_IO_ERROR_INVALID_ARGUMENT,
                               _("Error reading from nonce file `%s', expected 16 bytes, got %d"),
                               nonce_file,
                               (gint) num_bytes_read);
                }
              g_object_unref (ret);
              ret = NULL;
              fclose (f);
              goto out;
            }
          fclose (f);

          /* the nonce must be written to the stream before any D-Bus traffic */
          if (!g_output_stream_write_all (g_io_stream_get_output_stream (ret),
                                          nonce_contents,
                                          16,
                                          NULL,
                                          cancellable,
                                          error))
            {
              g_prefix_error (error, _("Error writing contents of nonce file `%s' to stream:"), nonce_file);
              g_object_unref (ret);
              ret = NULL;
              goto out;
            }
        }
    }

 out:

  return ret;
}
/*!
 * \internal
 * \brief Create a new remote stonith op
 * \param client, the local stonith client id that initiated the operation
 * \param request, The request from the client that started the operation
 * \param peer, Is this operation owned by another stonith peer? Operations
 * owned by other peers are stored on all the stonith nodes, but only the
 * owner executes the operation. All the nodes get the results to the operation
 * once the owner finishes executing it.
 *
 * \return the new (or already-existing) remote_fencing_op_t, registered in
 *         remote_op_list; NULL only if a peer-owned request lacks an op id.
 */
void *
create_remote_stonith_op(const char *client, xmlNode * request, gboolean peer)
{
    remote_fencing_op_t *op = NULL;
    xmlNode *dev = get_xpath_object("//@" F_STONITH_TARGET, request, LOG_TRACE);

    /* Lazily create the global op table; ops are freed by free_remote_op
     * when removed/replaced. */
    if (remote_op_list == NULL) {
        remote_op_list = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, free_remote_op);
    }

    /* If this operation is owned by another node, check to make
     * sure we haven't already created this operation.
     */
    if (peer && dev) {
        const char *op_id = crm_element_value(dev, F_STONITH_REMOTE_OP_ID);

        CRM_CHECK(op_id != NULL, return NULL);

        op = g_hash_table_lookup(remote_op_list, op_id);
        if (op) {
            crm_debug("%s already exists", op_id);
            return op;
        }
    }

    op = calloc(1, sizeof(remote_fencing_op_t));

    crm_element_value_int(request, F_STONITH_TIMEOUT, (int *)&(op->base_timeout));

    /* Peer-owned ops reuse the owner's id so all nodes key the same op;
     * locally-generated ops get a fresh uuid. */
    if (peer && dev) {
        op->id = crm_element_value_copy(dev, F_STONITH_REMOTE_OP_ID);
    } else {
        op->id = crm_generate_uuid();
    }

    /* Table keys point into op->id; free_remote_op owns the value. */
    g_hash_table_replace(remote_op_list, op->id, op);
    CRM_LOG_ASSERT(g_hash_table_lookup(remote_op_list, op->id) != NULL);
    crm_trace("Created %s", op->id);

    op->state = st_query;
    op->replies_expected = fencing_active_peers();
    /* NOTE(review): dev may be NULL here for local requests — this relies
     * on crm_element_value_copy() tolerating a NULL xmlNode; confirm. */
    op->action = crm_element_value_copy(dev, F_STONITH_ACTION);
    op->originator = crm_element_value_copy(dev, F_STONITH_ORIGIN);
    op->delegate = crm_element_value_copy(dev, F_STONITH_DELEGATE); /* May not be set */
    op->created = time(NULL);

    if (op->originator == NULL) {
        /* Local or relayed request */
        op->originator = strdup(stonith_our_uname);
    }

    CRM_LOG_ASSERT(client != NULL);
    if (client) {
        op->client_id = strdup(client);
    }
    op->client_name = crm_element_value_copy(request, F_STONITH_CLIENTNAME);

    op->target = crm_element_value_copy(dev, F_STONITH_TARGET);
    op->request = copy_xml(request);    /* TODO: Figure out how to avoid this */
    crm_element_value_int(request, F_STONITH_CALLOPTS, (int *)&(op->call_options));
    crm_element_value_int(request, F_STONITH_CALLID, (int *)&(op->client_callid));

    crm_trace("%s new stonith op: %s - %s of %s for %s",
              (peer && dev) ? "Recorded" : "Generated", op->id, op->action, op->target,
              op->client_name);

    /* Targets given as corosync node ids are expanded to host names once. */
    if (op->call_options & st_opt_cs_nodeid) {
        int nodeid = crm_atoi(op->target, NULL);
        crm_node_t *node = crm_get_peer(nodeid, NULL);

        /* Ensure the conversion only happens once */
        op->call_options &= ~st_opt_cs_nodeid;

        if (node && node->uname) {
            free(op->target);
            op->target = strdup(node->uname);
        } else {
            crm_warn("Could not expand nodeid '%s' into a host name (%p)", op->target, node);
        }
    }

    /* check to see if this is a duplicate operation of another in-flight operation */
    merge_duplicates(op);

    return op;
}