/* GObject::constructed — install the default layer stack (a window layer,
 * then a cursor layer, both bound to this recorder) and chain up to the
 * parent class if it implements constructed. */
static void
byzanz_recorder_constructed (GObject *object)
{
  ByzanzRecorder *recorder = BYZANZ_RECORDER (object);
  GObjectClass *parent = G_OBJECT_CLASS (byzanz_recorder_parent_class);

  g_sequence_append (recorder->layers,
                     g_object_new (BYZANZ_TYPE_LAYER_WINDOW, "recorder", recorder, NULL));
  g_sequence_append (recorder->layers,
                     g_object_new (BYZANZ_TYPE_LAYER_CURSOR, "recorder", recorder, NULL));

  if (parent->constructed != NULL)
    parent->constructed (object);
}
/* Parses power supplies on the current system.
 *
 * Scans SYS_ACPI_PATH for power-supply entries, reads each entry's type
 * file and dispatches AC adapters and batteries to their parsers, filling
 * ps->ac_list and ps->bat_list in place.
 *
 * Returns @ps (never NULL input expected — TODO confirm with callers).
 */
extern power_supply* power_supply_parse(power_supply* ps)
{
    GDir* dir = NULL;
    const gchar* entry_name;
    GString* filename = g_string_sized_new(STRING_LEN);
    guint len = 0;
    gchar* contents = NULL;

    if (g_file_test(SYS_ACPI_PATH, G_FILE_TEST_IS_DIR)) {
        dir = g_dir_open(SYS_ACPI_PATH, 0, NULL);
        if (dir != NULL) {
            while ((entry_name = g_dir_read_name(dir)) != NULL) {
                /* Build "<SYS_ACPI_PATH><entry>/" and remember its length so
                 * the trailing file name can be swapped below. */
                g_string_append(filename, SYS_ACPI_PATH);
                g_string_append(filename, entry_name);
                g_string_append_c(filename, G_DIR_SEPARATOR);
                len = filename->len;
                g_string_append(filename, SYS_ACPI_TYPE_FILE);
                /* BUG FIX: the result of g_file_get_contents() was never
                 * checked; on a read failure `contents` stayed uninitialized
                 * and was handed to strcmp() — undefined behavior.  Only
                 * dispatch when the type file was actually read. */
                if (g_file_test(filename->str, G_FILE_TEST_IS_REGULAR)
                    && g_file_get_contents(filename->str, &contents, NULL, NULL)) {
                    /* Replace the type file name with the uevent file name;
                     * the parsers take ownership of the duplicated path. */
                    g_string_truncate(filename, len);
                    g_string_append(filename, SYS_ACPI_UEVENT_FILE);
                    if (strcmp(SYS_ACPI_TYPE_AC, contents) == 0) {
                        ac* supply = ac_new(g_strdup(filename->str));
                        ac_parse(supply);
                        g_sequence_append(ps->ac_list, supply);
                    } else if (strcmp(SYS_ACPI_TYPE_BAT, contents) == 0) {
                        bat* supply = bat_new(g_strdup(filename->str));
                        bat_parse(supply);
                        g_sequence_append(ps->bat_list, supply);
                    } else {
                        g_fprintf(stderr, "unsupported power supply type %s", contents);
                    }
                    g_free(contents);
                    contents = NULL;
                }
                g_string_truncate(filename, 0);
            }
            g_dir_close(dir);
        }
    }
    g_string_free(filename, TRUE);

    if (DEBUG) {
        g_sequence_foreach(ps->ac_list, &ac_print, NULL);
        g_sequence_foreach(ps->bat_list, &bat_print, NULL);
    }
    return ps;
}
/* Default add-object implementation: inserts @object into the manager's
 * sequence (sorted when a sort function is set, appended otherwise),
 * records it in the id->data hash table, and notifies listeners via the
 * objects-changed bookkeeping and the OBJECT_ADDED signal.  Adding an
 * object whose id is already present is a no-op. */
static void
photos_base_manager_default_add_object (PhotosBaseManager *self, GObject *object)
{
  PhotosBaseManagerPrivate *priv;
  PhotosBaseManagerObjectData *object_data;
  GSequenceIter *seq_iter;
  const gchar *id;
  guint position;

  priv = photos_base_manager_get_instance_private (self);

  id = photos_filterable_get_id (PHOTOS_FILTERABLE (object));
  if (photos_base_manager_get_object_by_id (self, id) != NULL)
    return; /* already tracked — nothing to do */

  if (priv->sort_func != NULL)
    {
      seq_iter = g_sequence_insert_sorted (priv->sequence,
                                           g_object_ref (object),
                                           priv->sort_func,
                                           priv->sort_data);
      position = g_sequence_iter_get_position (seq_iter);
    }
  else
    {
      /* unsorted: the new object lands at the end */
      position = photos_base_manager_get_objects_count (self);
      seq_iter = g_sequence_append (priv->sequence, g_object_ref (object));
    }

  object_data = photos_base_manager_object_data_new (object, seq_iter);
  g_hash_table_insert (priv->objects, g_strdup (id), object_data);

  photos_base_manager_objects_changed (self, position, 0, 1);
  g_signal_emit (self, signals[OBJECT_ADDED], 0, object);
}
/* Append a (key, value) tag to @node; both strings are copied, so the
 * caller keeps ownership of its arguments. */
void xml_node_add_tag(XmlNode *node, char *key, char *value)
{
    XmlKeyValue *pair = g_slice_alloc(sizeof(XmlKeyValue));

    pair->key = g_strdup(key);
    pair->value = g_strdup(value);
    g_sequence_append(node->tags, pair);
}
/* * Used by Extreme Binning. */ static struct segment* segment_file_defined(struct chunk *c) { static struct segment* tmp; /* * For file-defined segmenting, * the end is not a new segment. */ if (tmp == NULL) tmp = new_segment(); if (c == NULL) return tmp; g_sequence_append(tmp->chunks, c); if (CHECK_CHUNK(c, CHUNK_FILE_END)) { struct segment* ret = tmp; tmp = NULL; return ret; } else if (CHECK_CHUNK(c, CHUNK_FILE_START)) { return NULL; } else { /* a normal chunk */ tmp->chunk_num++; return NULL; } }
/* * Used by SiLo and Block Locality Caching. */ static struct segment* segment_fixed(struct chunk * c) { static struct segment* tmp; if (tmp == NULL) tmp = new_segment(); if (c == NULL) /* The end of stream */ return tmp; g_sequence_append(tmp->chunks, c); if (CHECK_CHUNK(c, CHUNK_FILE_START) || CHECK_CHUNK(c, CHUNK_FILE_END)) /* FILE_END */ return NULL; /* a normal chunk */ tmp->chunk_num++; if (tmp->chunk_num == destor.index_segment_algorithm[1]) { /* segment boundary */ struct segment* ret = tmp; tmp = NULL; return ret; } return NULL; }
/* Route a new file: when hidden files are not shown and the file is
 * hidden, park it in the model's hidden sequence; otherwise surface it
 * through the normal file-created path. */
static void _fm_folder_model_add_file(FmFolderModel* model, FmFileInfo* file)
{
    gboolean keep_hidden = !model->show_hidden && fm_file_info_is_hidden(file);

    if (keep_hidden)
        g_sequence_append(model->hidden, fm_folder_item_new(file));
    else
        fm_folder_model_file_created(model, file);
}
/* Populate the editor tree with one row per hit object under @hitobjects,
 * mirror each object into @ho_seq, then sort the sequence by end offset. */
static void load_hit_objects(osux_beatmap *beatmap, GtkTreeStore *tree_store,
                             GSequence *ho_seq, GtkTreeIter *hitobjects)
{
    unsigned i;

    for (i = 0; i < beatmap->hitobject_count; ++i) {
        osux_hitobject *ho = &beatmap->hitobjects[i];
        GtkTreeIter row;
        char *type_label;

        /* human-readable (translated) label for the object kind */
        switch (HIT_OBJECT_TYPE(ho)) {
        case HITOBJECT_CIRCLE:  type_label = _("Circle");       break;
        case HITOBJECT_SLIDER:  type_label = _("Slider");       break;
        case HITOBJECT_SPINNER: type_label = _("Spinner");      break;
        case HITOBJECT_HOLD:    type_label = _("Hold");         break;
        default:                type_label = _("Invalid type"); break;
        }

        gtk_tree_store_append(tree_store, &row, hitobjects);
        gtk_tree_store_set(tree_store, &row,
                           COL_OFFSET, ho->offset,
                           COL_TYPE, type_label,
                           COL_DETAILS, ho->details,
                           COL_OBJECT, ho, -1);
        g_sequence_append(ho_seq, ho);
    }
    g_sequence_sort(ho_seq, &sort_object_end_offset, NULL);
}
/* Insert @value under @key.  Returns FALSE (and frees the rejected key and
 * value via the configured free funcs) when the key already exists.
 * Otherwise the value is placed in the item sequence — sorted when a sort
 * function is set, appended at the end when not — and the resulting
 * position is optionally reported through @position. */
gboolean
egg_sorted_hash_insert (EggSortedHash *hash,
                        gpointer       key,
                        gpointer       value,
                        guint         *position)
{
  GSequenceIter *seq_iter;

  if (g_hash_table_contains (hash->iters, key))
    {
      /* duplicate: we still own key/value, so dispose of them */
      if (hash->key_free_func)
        hash->key_free_func (key);
      if (hash->value_free_func)
        hash->value_free_func (value);
      return FALSE;
    }

  seq_iter = (hash->sort_func != NULL)
      ? g_sequence_insert_sorted (hash->items, value,
                                  hash->sort_func, hash->sort_func_data)
      : g_sequence_append (hash->items, value);

  g_hash_table_insert (hash->iters, key, seq_iter);

  if (position != NULL)
    *position = g_sequence_iter_get_position (seq_iter);

  return TRUE;
}
/* Wrap a single string in a freshly allocated GSequence.  The sequence has
 * no element destructor, so it does NOT take ownership of @data. */
static GSequence* _sequence_new (gchar *data)
{
    GSequence *seq = g_sequence_new (NULL);
    g_sequence_append (seq, (guint8 *)data);
    return seq;
}
/* Build a table of the physical disk offsets of @path's extents by
 * querying the kernel's FS_IOC_FIEMAP ioctl in batches.  Physically
 * contiguous extents are collapsed into one entry; the result is sorted
 * by logical offset.  Returns NULL when the file cannot be opened;
 * otherwise the caller owns the returned GSequence (elements released
 * through rm_offset_free_func). */
RmOffsetTable rm_offset_create_table(const char *path) {
    int fd = rm_sys_open(path, O_RDONLY);
    if(fd == -1) {
        rm_log_info("Error opening %s in setup_fiemap_extents\n", path);
        return NULL;
    }

    /* struct fiemap does not allocate any extents by default,
     * so we choose ourself how many of them we allocate.
     * */
    const int n_extents = 256;
    struct fiemap *fiemap = g_malloc0(sizeof(struct fiemap) + n_extents * sizeof(struct fiemap_extent));
    struct fiemap_extent *fm_ext = fiemap->fm_extents;

    /* data structure we save our offsets in */
    GSequence *self = g_sequence_new((GFreeFunc)rm_offset_free_func);

    bool last = false;
    while(!last) {
        /* Request the next batch of up to n_extents extents; fm_start is 0
         * on the first pass (g_malloc0) and advanced past the last seen
         * extent at the bottom of the inner loop. */
        fiemap->fm_flags = 0;
        fiemap->fm_extent_count = n_extents;
        fiemap->fm_length = FIEMAP_MAX_OFFSET;

        if(ioctl(fd, FS_IOC_FIEMAP, (unsigned long) fiemap) == -1) {
            break;
        }

        /* This might happen on empty files - those have no
         * extents, but they have an offset on the disk.
         */
        if(fiemap->fm_mapped_extents <= 0) {
            break;
        }

        /* used for detecting contiguous extents, which we ignore */
        unsigned long expected = 0;

        /* Remember all non contiguous extents */
        for(unsigned i = 0; i < fiemap->fm_mapped_extents && !last; i++) {
            /* i == 0 always records: contiguity cannot be checked across
             * the first extent of a batch with expected == 0. */
            if (i == 0 || fm_ext[i].fe_physical != expected) {
                RmOffsetEntry *offset_entry = g_slice_new(RmOffsetEntry);
                offset_entry->logical = fm_ext[i].fe_logical;
                offset_entry->physical = fm_ext[i].fe_physical;
                g_sequence_append(self, offset_entry);
            }

            expected = fm_ext[i].fe_physical + fm_ext[i].fe_length;
            fiemap->fm_start = fm_ext[i].fe_logical + fm_ext[i].fe_length;
            /* FIEMAP_EXTENT_LAST marks the file's final extent and ends
             * both the inner loop and the outer batch loop. */
            last = fm_ext[i].fe_flags & FIEMAP_EXTENT_LAST;
        }
    }

    rm_sys_close(fd);
    g_free(fiemap);

    g_sequence_sort(self, (GCompareDataFunc)rm_offset_sort_logical, NULL);
    return self;
}
/*
 * Used by Sparse Index.
 *
 * Content-defined segmenting: a boundary is declared when an integer read
 * from the chunk fingerprint is divisible by
 * destor.index_segment_algorithm[1], subject to min/max segment sizes.
 * State is kept in a static in-progress segment across calls; a completed
 * segment (or, at end of stream, the partial one) is returned, else NULL.
 */
static struct segment* segment_content_defined(struct chunk *c) {
	static struct segment* tmp;

	if (tmp == NULL)
		tmp = new_segment();

	if (c == NULL)
		/* The end of stream */
		return tmp;

	/* File markers are stored but never counted nor used as boundaries. */
	if (CHECK_CHUNK(c, CHUNK_FILE_START) || CHECK_CHUNK(c, CHUNK_FILE_END)) {
		g_sequence_append(tmp->chunks, c);
		return NULL;
	}

	/* Avoid too small segment. */
	if (tmp->chunk_num < destor.index_segment_min) {
		g_sequence_append(tmp->chunks, c);
		tmp->chunk_num++;
		return NULL;
	}

	/* NOTE(review): reinterprets 4 bytes at offset 16 of the fingerprint
	 * as an int — assumes the fingerprint is at least 20 bytes and the
	 * access is suitably aligned; confirm against the fingerprint type. */
	int *head = (int*)&c->fp[16];
	if ((*head) % destor.index_segment_algorithm[1] == 0) {
		/* Boundary hit: the boundary chunk starts the NEW segment. */
		struct segment* ret = tmp;
		tmp = new_segment();
		g_sequence_append(tmp->chunks, c);
		tmp->chunk_num++;
		return ret;
	}

	g_sequence_append(tmp->chunks, c);
	tmp->chunk_num++;

	/* Hard cap: force a boundary AFTER this chunk (it stays in ret). */
	if (tmp->chunk_num >= destor.index_segment_max){
		struct segment* ret = tmp;
		tmp = new_segment();
		return ret;
	}

	return NULL;
}
/* Insert an empty row (one GValue per column, each initialized to its
 * column's type) into the model's sequence.  index_ < 0 appends, 0
 * prepends, any other value inserts before that position.  Returns a new
 * ClutterModelIter pointing at the inserted row. */
static ClutterModelIter *
clutter_list_model_insert_row (ClutterModel *model,
                               gint          index_)
{
  ClutterListModel *model_default = CLUTTER_LIST_MODEL (model);
  GSequence *sequence = model_default->priv->sequence;
  ClutterListModelIter *iter_obj;
  GSequenceIter *seq_iter;
  GValueArray *array;
  guint n_columns, i, pos;

  /* build the row: one typed, zero-valued GValue per column */
  n_columns = clutter_model_get_n_columns (model);
  array = g_value_array_new (n_columns);
  for (i = 0; i < n_columns; i++)
    {
      GValue *value;

      g_value_array_append (array, NULL);
      value = g_value_array_get_nth (array, i);
      g_value_init (value, clutter_model_get_column_type (model, i));
    }

  if (index_ == 0)
    {
      /* head insertion */
      seq_iter = g_sequence_prepend (sequence, array);
      pos = 0;
    }
  else if (index_ < 0)
    {
      /* negative index means append */
      seq_iter = g_sequence_append (sequence, array);
      pos = g_sequence_get_length (sequence) - 1;
    }
  else
    {
      seq_iter = g_sequence_get_iter_at_pos (sequence, index_);
      seq_iter = g_sequence_insert_before (seq_iter, array);
      pos = index_;
    }

  iter_obj = g_object_new (CLUTTER_TYPE_LIST_MODEL_ITER,
                           "model", model,
                           "row", pos,
                           NULL);
  iter_obj->seq_iter = seq_iter;

  return CLUTTER_MODEL_ITER (iter_obj);
}
/* Insert @text at byte position @pos in @buffer, keeping the format
 * (attribute) sequence and the cursor positions consistent, then emit
 * TEXT_CHANGED and CURSOR_CHANGED. */
void
swfdec_text_buffer_insert_text (SwfdecTextBuffer *buffer,
    gsize pos, const char *text)
{
  gsize len;
  GSequenceIter *iter;
  SwfdecTextBufferFormat *format;

  g_return_if_fail (SWFDEC_IS_TEXT_BUFFER (buffer));
  g_return_if_fail (pos <= buffer->text->len);
  g_return_if_fail (text != NULL);

  len = strlen (text);
  if (len == 0)
    return;

  if (pos == buffer->text->len) {
    /* Appending at the end: the new text gets the buffer's default
     * attributes — add a fresh format record starting at @pos and merge
     * away any now-duplicate neighbors. */
    g_string_insert_len (buffer->text, pos, text, len);
    format = swfdec_text_buffer_format_new ();
    format->start = pos;
    swfdec_text_attributes_copy (&format->attr, &buffer->default_attributes,
        SWFDEC_TEXT_ATTRIBUTES_MASK);
    iter = g_sequence_append (buffer->attributes, format);
    swfdec_text_buffer_remove_duplicates (g_sequence_iter_prev (iter),
        g_sequence_iter_next (iter));
  } else {
    /* Inserting in the middle: the text inherits the attributes in effect
     * at @pos; walk backwards from the end shifting the start offset of
     * every format record located after @pos. */
    g_string_insert_len (buffer->text, pos, text, len);
    iter = g_sequence_get_end_iter (buffer->attributes);
    for (;;) {
      iter = g_sequence_iter_prev (iter);
      format = g_sequence_get (iter);
      if (format->start <= pos)
        break;
      format->start += len;
    }
  }
  CHECK_ATTRIBUTES (buffer);
  /* adapt cursor: anything at or after the insertion point moves right */
  if (buffer->cursor_start >= pos)
    buffer->cursor_start += len;
  if (buffer->cursor_end >= pos)
    buffer->cursor_end += len;
  g_signal_emit (buffer, signals[TEXT_CHANGED], 0);
  g_signal_emit (buffer, signals[CURSOR_CHANGED], 0,
      (gulong) MIN (buffer->cursor_start, buffer->cursor_end),
      (gulong) MAX (buffer->cursor_start, buffer->cursor_end));
}
/**
 * gst_rtsp_mount_points_add_factory:
 * @mounts: a #GstRTSPMountPoints
 * @path: a mount point
 * @factory: (transfer full): a #GstRTSPMediaFactory
 *
 * Attach @factory to the mount point @path in @mounts.
 *
 * @path is of the form (/node)+. Any previous mount point will be freed.
 *
 * Ownership is taken of the reference on @factory so that @factory should not be
 * used after calling this function.
 */
void
gst_rtsp_mount_points_add_factory (GstRTSPMountPoints * mounts,
    const gchar * path, GstRTSPMediaFactory * factory)
{
  GstRTSPMountPointsPrivate *priv;
  DataItem *entry;

  g_return_if_fail (GST_IS_RTSP_MOUNT_POINTS (mounts));
  g_return_if_fail (GST_IS_RTSP_MEDIA_FACTORY (factory));
  g_return_if_fail (path != NULL);

  priv = mounts->priv;

  GST_INFO ("adding media factory %p for path %s", factory, path);

  /* the data item takes ownership of the duplicated path and the factory */
  entry = data_item_new (g_strdup (path), strlen (path), factory);

  g_mutex_lock (&priv->lock);
  g_sequence_append (priv->mounts, entry);
  priv->dirty = TRUE;
  g_mutex_unlock (&priv->lock);
}
/*
 * When a container buffer is full, we push it into container_queue.
 *
 * Filter-phase worker thread: consumes chunks from rewrite_queue one
 * segment at a time (delimited by CHUNK_SEGMENT_START/END markers),
 * decides per chunk whether it must be written/rewritten to a container,
 * writes the backup recipe, and updates the fingerprint index.  Runs
 * until a NULL chunk signals the end of the backup job.
 */
static void* filter_thread(void *arg) {
	int enable_rewrite = 1;
	struct fileRecipeMeta* r = NULL;

	while (1) {
		struct chunk* c = sync_queue_pop(rewrite_queue);

		if (c == NULL)
			/* backup job finish */
			break;

		/* reconstruct a segment */
		struct segment* s = new_segment();

		/* segment head */
		assert(CHECK_CHUNK(c, CHUNK_SEGMENT_START));
		free_chunk(c);

		/* collect chunks until the SEGMENT_END marker */
		c = sync_queue_pop(rewrite_queue);
		while (!(CHECK_CHUNK(c, CHUNK_SEGMENT_END))) {
			g_sequence_append(s->chunks, c);
			if (!CHECK_CHUNK(c, CHUNK_FILE_START)
					&& !CHECK_CHUNK(c, CHUNK_FILE_END))
				s->chunk_num++;
			c = sync_queue_pop(rewrite_queue);
		}
		free_chunk(c);

		/* For self-references in a segment.
		 * If we find an early copy of the chunk in this segment has been rewritten,
		 * the rewrite request for it will be denied to avoid repeat rewriting. */
		GHashTable *recently_rewritten_chunks = g_hash_table_new_full(g_int64_hash,
				g_fingerprint_equal, NULL, free_chunk);
		GHashTable *recently_unique_chunks = g_hash_table_new_full(g_int64_hash,
				g_fingerprint_equal, NULL, free_chunk);

		pthread_mutex_lock(&index_lock.mutex);

		TIMER_DECLARE(1);
		TIMER_BEGIN(1);
		/* This function will check the fragmented chunks
		 * that would be rewritten later.
		 * If we find an early copy of the chunk in earlier segments,
		 * has been rewritten,
		 * the rewrite request for it will be denied. */
		index_check_buffer(s);

		/* Pass 1: decide, per chunk, write vs. deny. */
		GSequenceIter *iter = g_sequence_get_begin_iter(s->chunks);
		GSequenceIter *end = g_sequence_get_end_iter(s->chunks);
		for (; iter != end; iter = g_sequence_iter_next(iter)) {
			c = g_sequence_get(iter);

			if (CHECK_CHUNK(c, CHUNK_FILE_START) || CHECK_CHUNK(c, CHUNK_FILE_END))
				continue;

			VERBOSE("Filter phase: %dth chunk in %s container %lld",
					chunk_num,
					CHECK_CHUNK(c, CHUNK_OUT_OF_ORDER) ? "out-of-order" : "",
					c->id);

			/* Cache-Aware Filter */
			if (destor.rewrite_enable_cache_aware && restore_aware_contains(c->id)) {
				assert(c->id != TEMPORARY_ID);
				VERBOSE("Filter phase: %dth chunk is cached", chunk_num);
				SET_CHUNK(c, CHUNK_IN_CACHE);
			}

			/* A cfl-switch for rewriting out-of-order chunks. */
			if (destor.rewrite_enable_cfl_switch) {
				double cfl = restore_aware_get_cfl();
				if (enable_rewrite && cfl > destor.rewrite_cfl_require) {
					VERBOSE("Filter phase: Turn OFF the (out-of-order) rewrite switch of %.3f",
							cfl);
					enable_rewrite = 0;
				} else if (!enable_rewrite && cfl < destor.rewrite_cfl_require) {
					VERBOSE("Filter phase: Turn ON the (out-of-order) rewrite switch of %.3f",
							cfl);
					enable_rewrite = 1;
				}
			}

			/* Resolve a self-referencing duplicate to the id its earlier
			 * copy was assigned within this segment. */
			if(CHECK_CHUNK(c, CHUNK_DUPLICATE) && c->id == TEMPORARY_ID){
				struct chunk* ruc = g_hash_table_lookup(recently_unique_chunks, &c->fp);
				assert(ruc);
				c->id = ruc->id;
			}
			struct chunk* rwc = g_hash_table_lookup(recently_rewritten_chunks, &c->fp);
			if(rwc){
				c->id = rwc->id;
				SET_CHUNK(c, CHUNK_REWRITE_DENIED);
			}

			/* A fragmented chunk will be denied if it has been rewritten recently */
			if (!CHECK_CHUNK(c, CHUNK_DUPLICATE)
					|| (!CHECK_CHUNK(c, CHUNK_REWRITE_DENIED)
							&& (CHECK_CHUNK(c, CHUNK_SPARSE)
									|| (enable_rewrite
											&& CHECK_CHUNK(c, CHUNK_OUT_OF_ORDER)
											&& !CHECK_CHUNK(c, CHUNK_IN_CACHE))))) {
				/*
				 * If the chunk is unique, or be fragmented and not denied,
				 * we write it to a container.
				 * Fragmented indicates: sparse, or out of order and not in cache,
				 */
				if (storage_buffer.container_buffer == NULL){
					storage_buffer.container_buffer = create_container();
					if(destor.index_category[1] == INDEX_CATEGORY_PHYSICAL_LOCALITY)
						storage_buffer.chunks = g_sequence_new(free_chunk);
				}

				if (container_overflow(storage_buffer.container_buffer, c->size)) {

					if(destor.index_category[1] == INDEX_CATEGORY_PHYSICAL_LOCALITY){
						/*
						 * TO-DO
						 * Update_index for physical locality
						 */
						GHashTable *features = sampling(storage_buffer.chunks,
								g_sequence_get_length(storage_buffer.chunks));
						index_update(features, get_container_id(storage_buffer.container_buffer));
						g_hash_table_destroy(features);
						g_sequence_free(storage_buffer.chunks);
						storage_buffer.chunks = g_sequence_new(free_chunk);
					}
					/* exclude async container I/O from filter timing */
					TIMER_END(1, jcr.filter_time);
					write_container_async(storage_buffer.container_buffer);
					TIMER_BEGIN(1);
					storage_buffer.container_buffer = create_container();
				}

				if(add_chunk_to_container(storage_buffer.container_buffer, c)){

					struct chunk* wc = new_chunk(0);
					memcpy(&wc->fp, &c->fp, sizeof(fingerprint));
					wc->id = c->id;
					if (!CHECK_CHUNK(c, CHUNK_DUPLICATE)) {
						jcr.unique_chunk_num++;
						jcr.unique_data_size += c->size;
						g_hash_table_insert(recently_unique_chunks, &wc->fp, wc);
						VERBOSE("Filter phase: %dth chunk is recently unique, size %d",
								chunk_num,
								g_hash_table_size(recently_unique_chunks));
					} else {
						jcr.rewritten_chunk_num++;
						jcr.rewritten_chunk_size += c->size;
						g_hash_table_insert(recently_rewritten_chunks, &wc->fp, wc);
					}

					if(destor.index_category[1] == INDEX_CATEGORY_PHYSICAL_LOCALITY){
						struct chunk* ck = new_chunk(0);
						memcpy(&ck->fp, &c->fp, sizeof(fingerprint));
						g_sequence_append(storage_buffer.chunks, ck);
					}

					VERBOSE("Filter phase: Write %dth chunk to container %lld",
							chunk_num, c->id);
				}else{
					VERBOSE("Filter phase: container %lld already has this chunk", c->id);
					assert(destor.index_category[0] != INDEX_CATEGORY_EXACT
							|| destor.rewrite_algorithm[0]!=REWRITE_NO);
				}

			}else{
				if(CHECK_CHUNK(c, CHUNK_REWRITE_DENIED)){
					VERBOSE("Filter phase: %lldth fragmented chunk is denied", chunk_num);
				}else if (CHECK_CHUNK(c, CHUNK_OUT_OF_ORDER)) {
					VERBOSE("Filter phase: %lldth chunk in out-of-order container %lld is already cached",
							chunk_num, c->id);
				}
			}

			assert(c->id != TEMPORARY_ID);

			/* Collect historical information. */
			har_monitor_update(c->id, c->size);

			/* Restore-aware */
			restore_aware_update(c->id, c->size);

			chunk_num++;
		}

		int full = index_update_buffer(s);

		/* Write a SEGMENT_BEGIN */
		segmentid sid = append_segment_flag(jcr.bv, CHUNK_SEGMENT_START, s->chunk_num);

		/* Write recipe — pass 2: serialize chunk pointers and file metadata. */
		iter = g_sequence_get_begin_iter(s->chunks);
		end = g_sequence_get_end_iter(s->chunks);
		for (; iter != end; iter = g_sequence_iter_next(iter)) {
			c = g_sequence_get(iter);

			if(r == NULL){
				/* first chunk after a FILE_END must be a FILE_START */
				assert(CHECK_CHUNK(c,CHUNK_FILE_START));
				r = new_file_recipe_meta(c->data);
			}else if(!CHECK_CHUNK(c,CHUNK_FILE_END)){
				struct chunkPointer cp;
				cp.id = c->id;
				assert(cp.id>=0);
				memcpy(&cp.fp, &c->fp, sizeof(fingerprint));
				cp.size = c->size;
				append_n_chunk_pointers(jcr.bv, &cp ,1);
				r->chunknum++;
				r->filesize += c->size;

				jcr.chunk_num++;
				jcr.data_size += c->size;
			}else{
				assert(CHECK_CHUNK(c,CHUNK_FILE_END));
				append_file_recipe_meta(jcr.bv, r);
				free_file_recipe_meta(r);
				r = NULL;

				jcr.file_num++;
			}
		}

		/* Write a SEGMENT_END */
		append_segment_flag(jcr.bv, CHUNK_SEGMENT_END, 0);

		if(destor.index_category[1] == INDEX_CATEGORY_LOGICAL_LOCALITY){
			/*
			 * TO-DO
			 * Update_index for logical locality
			 */
			s->features = sampling(s->chunks, s->chunk_num);

			if(destor.index_category[0] == INDEX_CATEGORY_EXACT){
				/*
				 * For exact deduplication,
				 * unique fingerprints are inserted.
				 */
				VERBOSE("Filter phase: add %d unique fingerprints to %d features",
						g_hash_table_size(recently_unique_chunks),
						g_hash_table_size(s->features));
				GHashTableIter iter;
				gpointer key, value;
				g_hash_table_iter_init(&iter, recently_unique_chunks);
				while(g_hash_table_iter_next(&iter, &key, &value)){
					struct chunk* uc = value;
					fingerprint *ft = malloc(sizeof(fingerprint));
					memcpy(ft, &uc->fp, sizeof(fingerprint));
					g_hash_table_insert(s->features, ft, NULL);
				}

				/*
				 * OPTION:
				 * It is still an open problem whether we need to update
				 * rewritten fingerprints.
				 * It would increase index update overhead, while the benefit
				 * remains unclear.
				 * More experiments are required.
				 */
				VERBOSE("Filter phase: add %d rewritten fingerprints to %d features",
						g_hash_table_size(recently_rewritten_chunks),
						g_hash_table_size(s->features));
				g_hash_table_iter_init(&iter, recently_rewritten_chunks);
				while(g_hash_table_iter_next(&iter, &key, &value)){
					struct chunk* uc = value;
					fingerprint *ft = malloc(sizeof(fingerprint));
					memcpy(ft, &uc->fp, sizeof(fingerprint));
					g_hash_table_insert(s->features, ft, NULL);
				}
			}
			index_update(s->features, sid);
		}

		free_segment(s);

		/* wake any producer blocked on the index buffer threshold */
		if(index_lock.wait_threshold > 0 && full == 0){
			pthread_cond_broadcast(&index_lock.cond);
		}
		TIMER_END(1, jcr.filter_time);
		pthread_mutex_unlock(&index_lock.mutex);

		g_hash_table_destroy(recently_rewritten_chunks);
		g_hash_table_destroy(recently_unique_chunks);
	}

	/* Flush the last, partially filled container (if any). */
	if (storage_buffer.container_buffer
			&& !container_empty(storage_buffer.container_buffer)){
		if(destor.index_category[1] == INDEX_CATEGORY_PHYSICAL_LOCALITY){
			/*
			 * TO-DO
			 * Update_index for physical locality
			 */
			GHashTable *features = sampling(storage_buffer.chunks,
					g_sequence_get_length(storage_buffer.chunks));
			index_update(features, get_container_id(storage_buffer.container_buffer));
			g_hash_table_destroy(features);
			g_sequence_free(storage_buffer.chunks);
		}
		write_container_async(storage_buffer.container_buffer);
	}

	/* All files done */
	jcr.status = JCR_STATUS_DONE;
	return NULL;
}
/* Append @child to @node's list of children (the list takes the pointer
 * as-is; no copy is made). */
void xml_node_add_child(XmlNode *node, XmlNode *child)
{
    g_sequence_append(node->childs, child);
}
/* Insert @sensor into the tree store, creating one store entry per
 * component of its "/"-separated path (emitting row-inserted for each
 * newly created level) and attaching the sensor to the final entry
 * (emitting row-changed).  On success optionally returns an iter to the
 * sensor's row via @iter and returns TRUE; returns FALSE when a sensor
 * with the same path already exists. */
gboolean is_store_add_sensor(IsStore *self, IsSensor *sensor, GtkTreeIter *iter)
{
	IsStorePrivate *priv;
	GSequence *entries;
	IsStoreEntry *entry = NULL;
	GSequenceIter *parent = NULL;
	gchar **names = NULL;
	int i;
	GtkTreePath *path;
	GtkTreeIter _iter;
	gboolean ret = FALSE;

	g_return_val_if_fail(IS_IS_STORE(self), FALSE);
	g_return_val_if_fail(IS_IS_SENSOR(sensor), FALSE);

	priv = self->priv;

	entry = find_entry(self, is_sensor_get_path(sensor));
	if (entry) {
		is_warning("store",
			   "sensor %s already exists in store, not adding duplicate",
			   is_sensor_get_path(sensor));
		goto out;
	}

	entries = priv->entries;
	names = g_strsplit(is_sensor_get_path(sensor), "/", 0);
	/* otherwise iterate through to create the entry */
	for (i = 0; names[i] != NULL; i++) {
		GSequenceIter *seq_iter;
		gchar *name = names[i];

		entry = NULL;

		/* reuse an existing entry at this level with a matching name */
		for (seq_iter = g_sequence_get_begin_iter(entries);
		     !g_sequence_iter_is_end(seq_iter);
		     seq_iter = g_sequence_iter_next(seq_iter)) {
			entry = (IsStoreEntry *)g_sequence_get(seq_iter);
			if (g_strcmp0(entry->name, name) == 0) {
				entries = entry->entries;
				parent = seq_iter;
				break;
			}
			entry = NULL;
		}
		if (!entry) {
			/* create entry for this name component */
			entry = entry_new(name);
			entry->iter = g_sequence_append(entries, entry);
			entry->parent = parent;
			entries = entry->entries;
			/* announce the new row to tree-model listeners */
			_iter.stamp = priv->stamp;
			_iter.user_data = entry->iter;
			path = gtk_tree_model_get_path(GTK_TREE_MODEL(self), &_iter);
			gtk_tree_model_row_inserted(GTK_TREE_MODEL(self), path, &_iter);
			gtk_tree_path_free(path);
			/* parent of the next entry we create will be this
			 * entry */
			parent = entry->iter;
		}
	}
	g_strfreev(names);
	/* the loop always leaves entry pointing at the leaf for the full path */
	g_assert(entry);
	g_assert(find_entry(self, is_sensor_get_path(sensor)) == entry);

	is_debug("store", "inserted sensor %s with label %s",
		 is_sensor_get_path(sensor), is_sensor_get_label(sensor));

	entry->sensor = g_object_ref(sensor);
	_iter.stamp = priv->stamp;
	_iter.user_data = entry->iter;
	path = gtk_tree_model_get_path(GTK_TREE_MODEL(self), &_iter);
	gtk_tree_model_row_changed(GTK_TREE_MODEL(self), path, &_iter);
	gtk_tree_path_free(path);

	/* return a copy of iter */
	if (iter != NULL) {
		iter->stamp = priv->stamp;
		iter->user_data = entry->iter;
	}
	ret = TRUE;

out:
	return ret;
}
/* Test fixture helper: build (or extend, when *identity_inp is non-NULL)
 * a GSignondIdentityInfo populated with fixed values.  Each add_* flag
 * controls whether the corresponding optional section — credentials,
 * methods, realms, ACL, owner — is filled in.  Returns the identity
 * (newly allocated when *identity_inp was NULL). */
static GSignondIdentityInfo *
_get_filled_identity_info_2 (
        GSignondIdentityInfo **identity_inp,
        gboolean add_creds,
        gboolean add_methods,
        gboolean add_realms,
        gboolean add_acl,
        gboolean add_owner)
{
    guint32 type = 456;
    const gchar *username = "******";
    const gchar *secret = "secret1";
    const gchar *caption = "caption1";
    GSignondIdentityInfo *identity = NULL;
    GSignondSecurityContextList *ctx_list = NULL;
    GSignondSecurityContext *ctx1, *ctx2, *ctx3 ;
    GHashTable *methods = NULL;
    GSequence *seq1 = NULL, *seq_realms;

    identity = *identity_inp;
    if (identity == NULL)
        identity = gsignond_identity_info_new ();
    gsignond_identity_info_set_identity_new (identity);
    gsignond_identity_info_set_secret (identity, secret);
    gsignond_identity_info_set_store_secret (identity, TRUE);
    if (add_creds) {
        gsignond_identity_info_set_username (identity, username);
        gsignond_identity_info_set_username_secret (identity, TRUE);
        gsignond_identity_info_set_caption (identity, caption);
    }

    /*realms*/
    if (add_realms) {
        seq_realms = _sequence_new("realms1");
        gsignond_identity_info_set_realms (identity, seq_realms);
        g_sequence_free (seq_realms);
    }

    /*methods*/
    if (add_methods) {
        /* table owns the mechanism sequences: value destructor is
         * g_sequence_free; keys are static literals, hence no key free */
        methods = g_hash_table_new_full ((GHashFunc)g_str_hash,
                (GEqualFunc)g_str_equal,
                (GDestroyNotify)NULL,
                (GDestroyNotify)g_sequence_free);
        seq1 = _sequence_new("mech11");
        g_sequence_append (seq1, "mech12");
        g_hash_table_insert (methods, "method1", seq1);
        g_hash_table_insert (methods, "method2", _sequence_new("mech21"));
        g_hash_table_insert (methods, "method3", _sequence_new("mech31"));
        gsignond_identity_info_set_methods (identity, methods);
        g_hash_table_unref (methods);
    }

    /*acl*/
    ctx1 = gsignond_security_context_new_from_values ("sysctx1", "appctx1");
    ctx2 = gsignond_security_context_new_from_values ("sysctx2", "appctx2");
    ctx3 = gsignond_security_context_new_from_values ("sysctx3", "appctx3");
    ctx_list = g_list_append (ctx_list,ctx1);
    ctx_list = g_list_append (ctx_list,ctx2);
    ctx_list = g_list_append (ctx_list,ctx3);
    if (add_acl) {
        gsignond_identity_info_set_access_control_list (identity, ctx_list);
    }

    /*owners*/
    if (add_owner) {
        gsignond_identity_info_set_owner (identity, ctx1);
    }
    /* the contexts were copied by the setters above, so the local list
     * (and its elements) can be released here */
    gsignond_security_context_list_free (ctx_list);

    gsignond_identity_info_set_validated (identity, FALSE);
    gsignond_identity_info_set_identity_type (identity, type);

    return identity;
}