void page_destroy(Evas_Object *scroller, Evas_Object *page)
{
	Evas_Object *mapbuf;
	Evas_Object *bg;
	Evas_Object *item;
	Evas_Object *box;
	Evas_Object *index;
	Evas_Object *tab;
	Eina_List *pending_list;

	unsigned int i;
	int page_max_app;
	unsigned int count;

	evas_object_event_callback_del(page, EVAS_CALLBACK_CHANGED_SIZE_HINTS, _changed_size_hints_cb);

	page_max_app = (int) (uintptr_t) evas_object_data_get(scroller, "page_max_app");
	for (i = 0; i < (unsigned int) page_max_app; i++) {
		item = page_unpack_item_at(page, i);
		if (!item) continue;

		item_destroy(item);
	}

	pending_list = evas_object_data_get(page, "pending,list");
	eina_list_free(pending_list);

	bg = evas_object_data_get(page, "bg");
	evas_object_del(bg);

	box = evas_object_data_get(scroller, "box");
	ret_if(NULL == box);

	mapbuf = mapbuf_get_mapbuf(page);
	if (mapbuf) {
		elm_box_unpack(box, mapbuf);
		mapbuf_unbind(mapbuf);
	} else {
		elm_box_unpack(box, page);
	}

	index = evas_object_data_get(scroller, "index");
	if (index) {
		tab = evas_object_data_get(scroller, "tab");
		count = page_scroller_count_page(scroller);
		index_update(tab, index, count);
	}

	evas_object_data_del(page, "win");
	evas_object_data_del(page, "layout");
	evas_object_data_del(page, "controlbar");
	evas_object_data_del(page, "tab");
	evas_object_data_del(page, "scroller");
	evas_object_data_del(page, "page_edje");
	evas_object_data_del(page, "page_max_app");
	evas_object_data_del(page, "bg");
	evas_object_data_del(page, "pending,list");
	evas_object_data_del(page, "dirty");
	layout_unload_edj(page);
}
Example #2
/*
 * Decode the Number of Records, Unpadded Size, and Uncompressed Size
 * fields from the Index field. That is, Index Padding and CRC32 are not
 * decoded by this function.
 *
 * This can return XZ_OK (more input needed), XZ_STREAM_END (everything
 * successfully decoded), or XZ_DATA_ERROR (input is corrupt).
 */
static enum xz_ret dec_index(struct xz_dec *s, struct xz_buf *b)
{
	enum xz_ret ret;

	do {
		ret = dec_vli(s, b->in, &b->in_pos, b->in_size);
		if (ret != XZ_STREAM_END) {
			index_update(s, b);
			return ret;
		}

		switch (s->index.sequence) {
		case SEQ_INDEX_COUNT:
			s->index.count = s->vli;

			/*
			 * Validate that the Number of Records field
			 * indicates the same number of Records as
			 * there were Blocks in the Stream.
			 */
			if (s->index.count != s->block.count)
				return XZ_DATA_ERROR;

			s->index.sequence = SEQ_INDEX_UNPADDED;
			break;

		case SEQ_INDEX_UNPADDED:
			s->index.hash.unpadded += s->vli;
			s->index.sequence = SEQ_INDEX_UNCOMPRESSED;
			break;

		case SEQ_INDEX_UNCOMPRESSED:
			s->index.hash.uncompressed += s->vli;

#ifndef GRUB_EMBED_DECOMPRESSOR
			if (s->hash)
				s->hash->write(s->index.hash.hash_context,
					       (const uint8_t *)&s->index.hash, 2 * sizeof(vli_type));
#endif

			--s->index.count;
			s->index.sequence = SEQ_INDEX_UNPADDED;
			break;
		}
	} while (s->index.count > 0);

	return XZ_STREAM_END;
}
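The comment above pins down a three-way return contract: XZ_OK means more input is needed, XZ_STREAM_END means the three Index fields decoded cleanly, and XZ_DATA_ERROR means corrupt input. A minimal caller sketch of that contract follows; refill_input() is a hypothetical placeholder for however the surrounding decoder obtains more bytes, not part of the xz API.

/* Hypothetical driver for the return contract documented above.
 * refill_input() is a placeholder (not part of the xz decoder) that
 * would top up b->in / b->in_size, reset b->in_pos, and return 0
 * when no more input is available. */
static int refill_input(struct xz_buf *b);

static enum xz_ret drive_dec_index(struct xz_dec *s, struct xz_buf *b)
{
	for (;;) {
		enum xz_ret ret = dec_index(s, b);
		if (ret != XZ_OK)
			return ret;       /* XZ_STREAM_END or XZ_DATA_ERROR */
		if (!refill_input(b))     /* XZ_OK: more input needed */
			return XZ_BUF_ERROR;
	}
}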
Example #3
void reconstruct_summary_file(HashTable *Index, char *results_filename)
{
    //'data/indexer' is the directory where the summary index file (with the .dat extension) is located
    char *file_path = calloc(1, strlen("data/indexer/") + strlen(".dat") + strlen(results_filename) + 2);
    snprintf(file_path, (strlen("data/indexer/") + strlen(".dat") + strlen(results_filename) + 2), "data/indexer/%s.dat", results_filename);

    if (IsFile(file_path)) {
        FILE *results_file = fopen(file_path, "r");
        MYASSERT(results_file != NULL);

        int tmp = 0;
        int end = 0;
        int beginning = 0;

        //Keeps grabbing line contents until it reaches the end of the file
        while ((tmp = fgetc(results_file)) != EOF) {
            ungetc(tmp, results_file);

            //while not already at the end of a line or at the end of the file,
            //advance the read position to the end of the current line
            while (((tmp = fgetc(results_file)) != '\n') && (tmp != EOF)) {}
            
            //file offset just past the end of the current line
            end = ftell(results_file);

            //allocating memory for the line_content being read (+1 for the NUL terminator)
            char *line_content = calloc(1, end - beginning + 1);

            //reading the contents of the current line, from beginning to end
            fseek(results_file, beginning, SEEK_SET);
            fread(line_content, sizeof(char), end - beginning, results_file);
            //updating hashtable with words on each line and their doc_ids and frequency
            index_update(Index, line_content);

            beginning = end;
            free(line_content);
        }
        fclose(results_file);
    }
    free(file_path);
}
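A minimal usage sketch for the function above; hashtable_new() and hashtable_free() are hypothetical stand-ins for whatever constructor and destructor this project's HashTable actually provides.

/* Hypothetical usage; hashtable_new()/hashtable_free() are placeholders
 * for this project's real HashTable constructor and destructor. */
int main(void)
{
    HashTable *Index = hashtable_new();
    reconstruct_summary_file(Index, "results");  /* loads data/indexer/results.dat */
    /* ...answer queries against Index... */
    hashtable_free(Index);
    return 0;
}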
Example #4
Evas_Object *page_create(Evas_Object *scroller, int idx, int rotate)
{
	Evas_Object *page;
	Evas_Object *bg;
	Evas_Object *index;
	Evas_Object *tab;
	Evas_Object *mapbuf;
	Evas_Object *box;

	char *page_edje;
	bool enable_bg_image;

	unsigned int count;
	int page_height;
	int page_width;

	_D("Create a new page[%d]", idx);

	page_edje = evas_object_data_get(scroller, "page_edje");
	enable_bg_image = (bool) evas_object_data_get(scroller, "enable_bg_image");

	page = layout_load_edj(scroller, page_edje, PAGE_GROUP_NAME);
	retv_if(!page, NULL);

	evas_object_event_callback_add(page, EVAS_CALLBACK_CHANGED_SIZE_HINTS, _changed_size_hints_cb, NULL);
	edje_object_signal_callback_add(_EDJ(page), "dim,down", "menu", _dim_down_cb, NULL);
	edje_object_signal_callback_add(_EDJ(page), "dim,up", "menu", _dim_up_cb, NULL);

	box = evas_object_data_get(scroller, "box");
	mapbuf = mapbuf_bind(box, page);
	if (!mapbuf) {
		_D("Failed to bind the object with mapbuf");
	}

	bg = evas_object_rectangle_add(menu_screen_get_evas());
	if (!bg) {
		evas_object_del(page);
		return NULL;
	}
	evas_object_color_set(bg, 0, 0, 0, 0);

	page_width = menu_screen_get_root_width();
	page_height = (int) ((double) PROP_PORTRAIT_HEIGHT * ((double) menu_screen_get_root_height()));

	evas_object_size_hint_min_set(bg, page_width, page_height);
	evas_object_size_hint_max_set(bg, page_width, page_height);
	elm_object_part_content_set(page, "bg", bg);

	evas_object_data_set(page, "win", evas_object_data_get(scroller, "win"));
	evas_object_data_set(page, "layout", evas_object_data_get(scroller, "layout"));
	evas_object_data_set(page, "controlbar", evas_object_data_get(scroller, "controlbar"));
	evas_object_data_set(page, "tab", evas_object_data_get(scroller, "tab"));
	evas_object_data_set(page, "scroller", scroller);
	evas_object_data_set(page, "page_edje", page_edje);
	evas_object_data_set(page, "page_max_app", evas_object_data_get(scroller, "page_max_app"));
	evas_object_data_set(page, "bg", bg);
	evas_object_data_set(page, "pending,list", NULL);
	evas_object_data_set(page, "dirty", (void *) 0);

	if (_insert_page_at(scroller, page, idx) != MENU_SCREEN_ERROR_OK) {
		evas_object_del(bg);
		evas_object_del(page);
		return NULL;
	}

	index = evas_object_data_get(scroller, "index");
	if (index) {
		tab = evas_object_data_get(scroller, "tab");
		count = page_scroller_count_page(scroller);
		index_update(tab, index, count);
	}

	return page;
}
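Paired with page_destroy from Example #1, the create/destroy protocol can be exercised as in the sketch below; it assumes a scroller that the surrounding menu-screen code has already initialized (its "page_edje", "box", and related data keys set).

/* Sketch only: assumes a scroller already initialized by the
 * menu-screen code, with its "page_edje", "box", etc. data keys set. */
static void add_and_remove_page(Evas_Object *scroller)
{
	Evas_Object *page = page_create(scroller, 0, 0); /* index 0, no rotation */
	if (!page) return;

	/* ...pack items into the page... */

	page_destroy(scroller, page);
}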
Example #5
void send_fingerchunk(FingerChunk *fchunk, EigenValue* eigenvalue, BOOL update) {
	index_update(&fchunk->fingerprint, fchunk->container_id, eigenvalue,
			update);
	update_cfl(cfl_monitor, fchunk->container_id, fchunk->length);
	sync_queue_push(fingerchunk_queue, fchunk);
}
Example #6
File: ur-cmd.c Project: spolu/ur
int
cmd_status (const char *path, bool recursive)
{
  state_t ur = STATE_INITIALIZER;
  struct index index = INDEX_INITIALIZER; 
  struct stat64 st_buf;
  DIR *dp;
  struct dirent *ep;
  struct list_elem *e;
  char *branchname = NULL;
      
  if (lstat64 (path, &st_buf) != 0) fail("%s does not exist", path);
  ASSERT (st_buf.st_mode & S_IFDIR);

  if (ur_check (path) == 0) 
    {
      
      if (state_init (&ur, path) != 0) fail ("fail reading state of %s", path);
      if (index_read (&ur, &index) != 0) fail ("fail reading index of %s", path);

      index_update (&ur, &index);

      branchname = branch_get_head_name (&ur);
      printf ("*** %s \n    (branch: %s)\n", ur.path, branchname);
      free (branchname); branchname = NULL;

      // reading added dirty files
      for (e = list_begin (&index.entries); e != list_end (&index.entries);
	   e = list_next (e))
	{
	  struct index_entry *en = list_entry (e, struct index_entry, elem);	
	  if ((en->status & S_IPST) &&
	      (en->status & S_IADD) && 
	      (en->status & S_IDRT)) {
	    printf ("#  added     : %s/%s\n", path, en->name);
	  }
	}

      // reading dirty but not added
      for (e = list_begin (&index.entries); e != list_end (&index.entries);
	   e = list_next (e))
	{
	  struct index_entry *en = list_entry (e, struct index_entry, elem);	
	  if ((en->status & S_IPST) &&
	      !(en->status & S_IADD) && 
	      (en->status & S_IDRT) && 
	      (en->status & S_ITRK)) {
	    printf ("#  dirty     : %s/%s\n", path, en->name);
	  }
	}

      // reading untracked files
      for (e = list_begin (&index.entries); e != list_end (&index.entries);
	   e = list_next (e))
	{
	  struct index_entry *en = list_entry (e, struct index_entry, elem);	
	  if ((en->status & S_IPST) &&
	      !(en->status & S_ITRK) && 
	      !(en->status & S_IADD)) {
	    printf ("#  untracked : %s/%s\n", path, en->name);
	  }
	}
      
      index_destroy (&index);
      state_destroy (&ur);    
    }
  else {
Example #7
/*
 * When a container buffer is full, we push it into container_queue.
 */
static void* filter_thread(void *arg) {
    int enable_rewrite = 1;
    struct fileRecipeMeta* r = NULL;

    while (1) {
        struct chunk* c = sync_queue_pop(rewrite_queue);

        if (c == NULL)
            /* backup job finish */
            break;

        /* reconstruct a segment */
        struct segment* s = new_segment();

        /* segment head */
        assert(CHECK_CHUNK(c, CHUNK_SEGMENT_START));
        free_chunk(c);

        c = sync_queue_pop(rewrite_queue);
        while (!(CHECK_CHUNK(c, CHUNK_SEGMENT_END))) {
            g_sequence_append(s->chunks, c);
            if (!CHECK_CHUNK(c, CHUNK_FILE_START)
                    && !CHECK_CHUNK(c, CHUNK_FILE_END))
                s->chunk_num++;

            c = sync_queue_pop(rewrite_queue);
        }
        free_chunk(c);

        /* For self-references within a segment:
         * if an earlier copy of the chunk in this segment has already been
         * rewritten, the rewrite request is denied to avoid repeated rewrites. */
        GHashTable *recently_rewritten_chunks = g_hash_table_new_full(g_int64_hash,
        		g_fingerprint_equal, NULL, free_chunk);
        GHashTable *recently_unique_chunks = g_hash_table_new_full(g_int64_hash,
        			g_fingerprint_equal, NULL, free_chunk);

        pthread_mutex_lock(&index_lock.mutex);

        TIMER_DECLARE(1);
        TIMER_BEGIN(1);
        /* This function checks the fragmented chunks
         * that would be rewritten later.
         * If an early copy of the chunk in an earlier segment
         * has already been rewritten,
         * the rewrite request for it is denied. */
        index_check_buffer(s);

    	GSequenceIter *iter = g_sequence_get_begin_iter(s->chunks);
    	GSequenceIter *end = g_sequence_get_end_iter(s->chunks);
        for (; iter != end; iter = g_sequence_iter_next(iter)) {
            c = g_sequence_get(iter);

    		if (CHECK_CHUNK(c, CHUNK_FILE_START) || CHECK_CHUNK(c, CHUNK_FILE_END))
    			continue;

            VERBOSE("Filter phase: %dth chunk in %s container %lld", chunk_num,
                    CHECK_CHUNK(c, CHUNK_OUT_OF_ORDER) ? "out-of-order" : "", c->id);

            /* Cache-Aware Filter */
            if (destor.rewrite_enable_cache_aware && restore_aware_contains(c->id)) {
                assert(c->id != TEMPORARY_ID);
                VERBOSE("Filter phase: %dth chunk is cached", chunk_num);
                SET_CHUNK(c, CHUNK_IN_CACHE);
            }

            /* A cfl-switch for rewriting out-of-order chunks. */
            if (destor.rewrite_enable_cfl_switch) {
                double cfl = restore_aware_get_cfl();
                if (enable_rewrite && cfl > destor.rewrite_cfl_require) {
                    VERBOSE("Filter phase: Turn OFF the (out-of-order) rewrite switch of %.3f",
                            cfl);
                    enable_rewrite = 0;
                } else if (!enable_rewrite && cfl < destor.rewrite_cfl_require) {
                    VERBOSE("Filter phase: Turn ON the (out-of-order) rewrite switch of %.3f",
                            cfl);
                    enable_rewrite = 1;
                }
            }

            if(CHECK_CHUNK(c, CHUNK_DUPLICATE) && c->id == TEMPORARY_ID){
            	struct chunk* ruc = g_hash_table_lookup(recently_unique_chunks, &c->fp);
            	assert(ruc);
            	c->id = ruc->id;
            }
            struct chunk* rwc = g_hash_table_lookup(recently_rewritten_chunks, &c->fp);
            if(rwc){
            	c->id = rwc->id;
            	SET_CHUNK(c, CHUNK_REWRITE_DENIED);
            }

            /* A fragmented chunk will be denied if it has been rewritten recently */
            if (!CHECK_CHUNK(c, CHUNK_DUPLICATE) || (!CHECK_CHUNK(c, CHUNK_REWRITE_DENIED)
            		&& (CHECK_CHUNK(c, CHUNK_SPARSE)
                    || (enable_rewrite && CHECK_CHUNK(c, CHUNK_OUT_OF_ORDER)
                        && !CHECK_CHUNK(c, CHUNK_IN_CACHE))))) {
                /*
                 * If the chunk is unique, or fragmented and not denied,
                 * we write it to a container.
                 * Fragmented means: sparse, or out-of-order and not in cache.
                 */
                if (storage_buffer.container_buffer == NULL){
                	storage_buffer.container_buffer = create_container();
                	if(destor.index_category[1] == INDEX_CATEGORY_PHYSICAL_LOCALITY)
                		storage_buffer.chunks = g_sequence_new(free_chunk);
                }

                if (container_overflow(storage_buffer.container_buffer, c->size)) {

                    if(destor.index_category[1] == INDEX_CATEGORY_PHYSICAL_LOCALITY){
                        /*
                         * TO-DO
                         * Update_index for physical locality
                         */
                        GHashTable *features = sampling(storage_buffer.chunks,
                        		g_sequence_get_length(storage_buffer.chunks));
                        index_update(features, get_container_id(storage_buffer.container_buffer));
                        g_hash_table_destroy(features);
                        g_sequence_free(storage_buffer.chunks);
                        storage_buffer.chunks = g_sequence_new(free_chunk);
                    }
                    TIMER_END(1, jcr.filter_time);
                    write_container_async(storage_buffer.container_buffer);
                    TIMER_BEGIN(1);
                    storage_buffer.container_buffer = create_container();
                }

                if(add_chunk_to_container(storage_buffer.container_buffer, c)){

                	struct chunk* wc = new_chunk(0);
                	memcpy(&wc->fp, &c->fp, sizeof(fingerprint));
                	wc->id = c->id;
                	if (!CHECK_CHUNK(c, CHUNK_DUPLICATE)) {
                		jcr.unique_chunk_num++;
                		jcr.unique_data_size += c->size;
                		g_hash_table_insert(recently_unique_chunks, &wc->fp, wc);
                    	VERBOSE("Filter phase: %dth chunk is recently unique, size %d", chunk_num,
                    			g_hash_table_size(recently_unique_chunks));
                	} else {
                		jcr.rewritten_chunk_num++;
                		jcr.rewritten_chunk_size += c->size;
                		g_hash_table_insert(recently_rewritten_chunks, &wc->fp, wc);
                	}

                	if(destor.index_category[1] == INDEX_CATEGORY_PHYSICAL_LOCALITY){
                		struct chunk* ck = new_chunk(0);
                		memcpy(&ck->fp, &c->fp, sizeof(fingerprint));
                		g_sequence_append(storage_buffer.chunks, ck);
                	}

                	VERBOSE("Filter phase: Write %dth chunk to container %lld",
                			chunk_num, c->id);
                }else{
                	VERBOSE("Filter phase: container %lld already has this chunk", c->id);
            		assert(destor.index_category[0] != INDEX_CATEGORY_EXACT
            				|| destor.rewrite_algorithm[0]!=REWRITE_NO);
                }

            }else{
                if(CHECK_CHUNK(c, CHUNK_REWRITE_DENIED)){
                    VERBOSE("Filter phase: %dth fragmented chunk is denied", chunk_num);
                }else if (CHECK_CHUNK(c, CHUNK_OUT_OF_ORDER)) {
                    VERBOSE("Filter phase: %dth chunk in out-of-order container %lld is already cached",
                            chunk_num, c->id);
                }
            }

            assert(c->id != TEMPORARY_ID);

            /* Collect historical information. */
            har_monitor_update(c->id, c->size);

            /* Restore-aware */
            restore_aware_update(c->id, c->size);

            chunk_num++;
        }

        int full = index_update_buffer(s);

        /* Write a SEGMENT_START flag */
        segmentid sid = append_segment_flag(jcr.bv, CHUNK_SEGMENT_START, s->chunk_num);

        /* Write recipe */
    	iter = g_sequence_get_begin_iter(s->chunks);
    	end = g_sequence_get_end_iter(s->chunks);
        for (; iter != end; iter = g_sequence_iter_next(iter)) {
            c = g_sequence_get(iter);

        	if(r == NULL){
        		assert(CHECK_CHUNK(c,CHUNK_FILE_START));
        		r = new_file_recipe_meta(c->data);
        	}else if(!CHECK_CHUNK(c,CHUNK_FILE_END)){
        		struct chunkPointer cp;
        		cp.id = c->id;
        		assert(cp.id>=0);
        		memcpy(&cp.fp, &c->fp, sizeof(fingerprint));
        		cp.size = c->size;
        		append_n_chunk_pointers(jcr.bv, &cp ,1);
        		r->chunknum++;
        		r->filesize += c->size;

    	    	jcr.chunk_num++;
	    	    jcr.data_size += c->size;

        	}else{
        		assert(CHECK_CHUNK(c,CHUNK_FILE_END));
        		append_file_recipe_meta(jcr.bv, r);
        		free_file_recipe_meta(r);
        		r = NULL;

	            jcr.file_num++;
        	}
        }

       	/* Write a SEGMENT_END */
       	append_segment_flag(jcr.bv, CHUNK_SEGMENT_END, 0);

        if(destor.index_category[1] == INDEX_CATEGORY_LOGICAL_LOCALITY){
             /*
              * TO-DO
              * Update_index for logical locality
              */
            s->features = sampling(s->chunks, s->chunk_num);
         	if(destor.index_category[0] == INDEX_CATEGORY_EXACT){
         		/*
         		 * For exact deduplication,
         		 * unique fingerprints are inserted.
         		 */
         		VERBOSE("Filter phase: add %d unique fingerprints to %d features",
         				g_hash_table_size(recently_unique_chunks),
         				g_hash_table_size(s->features));
         		GHashTableIter iter;
         		gpointer key, value;
         		g_hash_table_iter_init(&iter, recently_unique_chunks);
         		while(g_hash_table_iter_next(&iter, &key, &value)){
         			struct chunk* uc = value;
         			fingerprint *ft = malloc(sizeof(fingerprint));
         			memcpy(ft, &uc->fp, sizeof(fingerprint));
         			g_hash_table_insert(s->features, ft, NULL);
         		}

         		/*
         		 * OPTION:
         		 * 	It is still an open problem whether we need to update
         		 * 	rewritten fingerprints.
         		 * 	It would increase index update overhead, while the benefit
         		 * 	remains unclear.
         		 * 	More experiments are required.
         		 */
         		VERBOSE("Filter phase: add %d rewritten fingerprints to %d features",
         				g_hash_table_size(recently_rewritten_chunks),
         				g_hash_table_size(s->features));
         		g_hash_table_iter_init(&iter, recently_rewritten_chunks);
         		while(g_hash_table_iter_next(&iter, &key, &value)){
         			struct chunk* uc = value;
         			fingerprint *ft = malloc(sizeof(fingerprint));
         			memcpy(ft, &uc->fp, sizeof(fingerprint));
         			g_hash_table_insert(s->features, ft, NULL);
         		}
         	}
         	index_update(s->features, sid);
         }

        free_segment(s);

        if(index_lock.wait_threshold > 0 && full == 0){
        	pthread_cond_broadcast(&index_lock.cond);
        }
        TIMER_END(1, jcr.filter_time);
        pthread_mutex_unlock(&index_lock.mutex);

        g_hash_table_destroy(recently_rewritten_chunks);
        g_hash_table_destroy(recently_unique_chunks);

    }

    if (storage_buffer.container_buffer
    		&& !container_empty(storage_buffer.container_buffer)){
        if(destor.index_category[1] == INDEX_CATEGORY_PHYSICAL_LOCALITY){
            /*
             * TO-DO
             * Update_index for physical locality
             */
        	GHashTable *features = sampling(storage_buffer.chunks,
        			g_sequence_get_length(storage_buffer.chunks));
        	index_update(features, get_container_id(storage_buffer.container_buffer));
        	g_hash_table_destroy(features);
        	g_sequence_free(storage_buffer.chunks);
        }
        write_container_async(storage_buffer.container_buffer);
    }

    /* All files done */
    jcr.status = JCR_STATUS_DONE;
    return NULL;
}
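The comment at the head of this example describes the flush protocol: when the container buffer cannot fit the next chunk, the buffer is handed to the writer thread and a fresh one is started, and the tail buffer is flushed once the job ends. Below is a self-contained sketch of just that pattern; the fixed-size container and the printf standing in for the container queue are illustrations, not destor's real machinery.

#include <stdio.h>
#include <string.h>

/* Self-contained illustration of the flush-when-full protocol above.
 * The 64-byte container and printf "queue" are stand-ins for destor's
 * real container and container_queue. */
#define CONTAINER_SIZE 64

struct container {
    char   data[CONTAINER_SIZE];
    size_t used;
};

static struct container buffer;

static void flush_container(struct container *c)
{
    printf("pushing container with %zu bytes to the queue\n", c->used);
    c->used = 0;                              /* start a fresh buffer */
}

static void store_chunk(const char *chunk, size_t n)
{
    if (buffer.used + n > CONTAINER_SIZE)     /* cf. container_overflow() */
        flush_container(&buffer);             /* cf. write_container_async() */
    memcpy(buffer.data + buffer.used, chunk, n);
    buffer.used += n;
}

int main(void)
{
    for (int i = 0; i < 10; i++)
        store_chunk("0123456789abcdef", 16);  /* forces periodic flushes */
    flush_container(&buffer);                 /* flush the tail, as at the end above */
    return 0;
}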