コード例 #1
0
ファイル: main.c プロジェクト: carriercomm/cloudfs
/*
 * Entry point: parse long-only command-line options into the config
 * subsystem, then bring up the storage stack.  Returns 1 on a
 * usage/version exit, 0 after a normal startup sequence.
 */
int main(int argc, char **argv) {
  int32_t opt, opt_index, excl_opt_index;
  const char *field_name;
  bool use_default_config, excl_seen;

  use_default_config = true;
  excl_seen = false;

  opt_index = 0;
  for (;;) {
    opt = getopt_long_only(argc, argv, "", opt_field, &opt_index);
    if (opt < 0)
      break;

    switch (opt) {
      case OPT_LOG:
        log_load(optarg);
        break;

      case OPT_CONFIG:
        config_load(optarg);
        use_default_config = false;
        break;

      case OPT_EXCL:
        /* At most one mutually-exclusive option may appear. */
        if (excl_seen)
          error("Cannot use --%s with --%s",
              opt_field[opt_index].name,
              opt_field[excl_opt_index].name);
        excl_seen = true;
        excl_opt_index = opt_index;
        /* fallthrough: exclusive options are also stored as config fields */

      case OPT_NRML:
        field_name = opt_field[opt_index].name;
        config_set(field_name,
            opt_field[opt_index].has_arg ? optarg : "true");
        break;

      case OPT_VERSION:
        version();
        return 1;

      case OPT_HELP:
      default:
        usage();
        return 1;
    }
  }

  if (use_default_config)
    config_default();

  mt_init();

  store_load();
  bucket_load();
  crypt_load();
  volume_load();
  return 0;
}
コード例 #2
0
ファイル: merge.c プロジェクト: alepharchives/ehash
/*
 * Merge every posting held in the temporary in-memory index into the
 * persistent extendible-hash index.  Each posting is consumed and freed
 * as it is merged; tmpidx is cleared on success.
 *
 * Returns 0 on success, -1 on any cache/allocation/insert failure.
 * NOTE(review): on failure, postings already merged have been freed but
 * the remainder of tmpidx->list is left intact — caller owns cleanup.
 */
int merge(struct tmpindex *tmpidx,struct ehash *idx)
{
	struct posting *p;
	unsigned int len;
	char *invertedlist;
	char *merged;
	int mergedlen;
	struct bucket bkt;
	struct page *page;

	p = tmpidx->list;
	while(p)
	{
		invertedlist = ehash_find(idx,p->c,&len);
		if(invertedlist!=NULL && len==USHRT_MAX)    /* invertedlist is bigger than a page! */
		{
			/* Oversized list: the stored record is a (fd, offset) pair
			   pointing at the overflow page chain. */
			page = cache_pagein(idx->cache,*((int*)invertedlist),*((off_t*)(invertedlist+sizeof(int))));
			if(page == NULL)
				return -1;
			bucket_load(&bkt,page);
			if(merge_invertedlist_page(p,&bkt,idx->cache,idx->freemap) != 0)
				return -1;
		}
		else
		{
			merged = merge_invertedlist(p,invertedlist,len,&mergedlen);
			if(merged == NULL)
				return -1;
			if(ehash_insert(idx,p->c,merged,mergedlen) != 0)
			{
				free(merged); /* bug fix: was leaked on insert failure */
				return -1;
			}
			free(merged); /* merge_invertedlist's return value is malloced!!! */
		}

		/* Unlink and release the consumed posting. */
		tmpidx->list = p->next;
		posting_free(p);
		tmpidx->count--;
		p = tmpidx->list;
	}
	tmpindex_clear(tmpidx);
	return 0;
}
コード例 #3
0
ファイル: merge.c プロジェクト: alepharchives/ehash
/*
 * Append posting p's inverted list onto an oversized (page-chained)
 * inverted list whose first bucket is bkt.  The first docid of p is
 * re-encoded as a delta against the chain's current last docid; the
 * remaining pre-compressed bytes of p's vector are copied verbatim,
 * spilling onto freshly allocated pages as needed.
 *
 * Returns 0 on success, -1 on any decode/cache/freemap failure.
 * NOTE(review): on failure after the header at bkt was already updated
 * (num/docnum/lastdoc), the chain is left inconsistent — verify callers
 * treat -1 as fatal for this index.
 */
static int merge_invertedlist_page(struct posting *p,struct bucket *bkt, struct cache *cache,struct freemap *freemap)
{
	unsigned int docnum;
	unsigned int lastdoc;
	unsigned int firstdoc;
	int len_firstdoc;
	int fd;
	off_t offset;
	unsigned int size;
	unsigned short invertedlen;
	unsigned int add_size;
	struct vector *v;
	struct bucket tmp;
	struct page *page;
	int page_data_size;   /* NOTE(review): computed below but never used */
	int available_size;
	char *ptr;

	assert(p);
	v = p->invertedlist;
	/* Decode the first docid of p (vbyte-compressed at the head of v). */
	len_firstdoc = vbyte_decompress(v->vector,v->vector+v->len,&firstdoc);
	if(len_firstdoc <= 0)
		goto exit;
	/* Chain header layout: [docnum][lastdoc] at the start of the record. */
	docnum = *((unsigned int*)bkt->record);
	lastdoc = *((unsigned int*)((char*)bkt->record+sizeof(unsigned int)));
	/* New postings must be strictly newer than everything already stored. */
	assert(firstdoc > lastdoc);
	page_data_size =  PAGE_SIZE - sizeof(struct HEAD);
	invertedlen = bkt->head->num; /* special use for such kind of page,bkt->head->num
				         no longer store the number of records, it stores
				         invertedlen instead. */ 
	/* Bytes to copy verbatim: everything after the re-encoded first docid. */
	add_size = v->len - len_firstdoc;
	bkt->head->num += (v->len-len_firstdoc+vbyte_len(firstdoc-lastdoc));
	*((unsigned int*)bkt->record) += p->count;
	((unsigned int*)bkt->record)[1] = p->lastdoc;

	/* switch to the last page */
	while(tmp.head->next.fd != 0)
	{
		fd = tmp.head->next.fd;
		offset = tmp.head->next.offset;
		page = cache_pagein(cache,fd,offset);
		if(page == NULL)
			goto exit;
		bucket_load(&tmp,page);
	}
	/* add invertedlist in the page */
	/* Re-encode firstdoc as a delta; append in place if it fits. */
	if((char*)tmp.record+tmp.head->ptr + vbyte_len(firstdoc-lastdoc) < (char*)tmp.head + PAGE_SIZE)
		tmp.head->ptr += vbyte_compress((char*)tmp.record+tmp.head->ptr,(char*)tmp.head+PAGE_SIZE,firstdoc-lastdoc);
	else
	{
		/* No room: allocate a new page and write the delta there. */
		size = PAGE_SIZE;
		if(freemap_malloc(freemap,&size,&fd,&offset) != 0)
			goto exit;
		page = cache_newpage(cache,fd,offset);
		if(page == NULL)
			goto exit;
		/* NOTE(review): here bucket_init runs BEFORE next.fd/offset are
		   written, so the NEW page's next points at its own fd/offset;
		   the second allocation site below links the OLD page to the new
		   one before switching.  The orderings differ — confirm which is
		   intended. */
		bucket_init(&tmp,page,0);
		tmp.head->next.fd = fd;
		tmp.head->next.offset = offset;
		tmp.head->ptr = 0;
		tmp.head->ptr += vbyte_compress((char*)tmp.record+tmp.head->ptr,(char*)tmp.head+PAGE_SIZE,firstdoc-lastdoc);
	}	
	/* Copy the remaining pre-compressed bytes, spilling across pages. */
	ptr = v->vector+len_firstdoc;
	while(add_size > 0)
	{
		if((char*)tmp.record + tmp.head->ptr + add_size < (char*)tmp.head + PAGE_SIZE)
		{
			/* Remainder fits in the current page. */
			memcpy((char*)tmp.record+tmp.head->ptr,ptr,add_size);
			tmp.head->ptr += add_size;
			add_size = 0;
		}
		else
		{
			/* Fill the current page, then chain a new one. */
			available_size = PAGE_SIZE-sizeof(struct HEAD)-tmp.head->ptr;
			memcpy((char*)tmp.record+tmp.head->ptr,ptr,available_size);
			ptr += available_size;
			tmp.head->ptr += available_size;
			add_size -= available_size;

			size = PAGE_SIZE;
			if(freemap_malloc(freemap,&size,&fd,&offset) != 0)
				goto exit;
			page = cache_newpage(cache,fd,offset);
			if(page == NULL)
				goto exit;
			/* Link the old page to the new one, then switch to it. */
			tmp.head->next.fd = fd;
			tmp.head->next.offset = offset;
			bucket_init(&tmp,page,0);
			tmp.head->ptr = 0;
		}
	}
	return 0;
exit:
	return -1;
}