Example 1
// Imports the file or directory at `p` into the content-addressable store
// and returns a link (root chunk id + randomization seed) that can later
// be used to retrieve it.  Throws via FC_THROW_MSG when `p` does not
// exist or is neither a regular file nor a directory.
cafs::link cafs::import( const fc::path& p ) {
  if( !fc::exists(p) )  {
    FC_THROW_MSG( "Path does not exist %s", p.string() );
  }
  if( fc::is_regular_file( p ) ) {
    if( fc::file_size(p) > IMBED_THRESHOLD ) {
      // Large file: the contents are chunked by import_file(); only the
      // resulting file_header (list of chunk ids) is serialized into this
      // top-level chunk.
      auto file_head = import_file(p);
      fc::vector<char>      data(MAX_CHUNK_SIZE);
      // Byte 0 is reserved for the type tag; the header is packed after it.
      fc::datastream<char*> ds(data.data()+1, data.size()-1);
      fc::raw::pack( ds, file_head );
      data[0] = cafs::file_header_type;
      
      //fc::datastream<const char*> ds2(data.data()+1, data.size()-1);
      //file_header tmp;
      //fc::raw::unpack( ds2, tmp );
      //slog( "test unpack %s", fc::json::to_string( tmp ).c_str() );
      // NOTE(review): this branch resizes to ds.tellp(), while the
      // directory branch below uses ds.tellp()+1 to account for the type
      // byte at data[0] — one of the two looks off by one; confirm
      // against the matching unpack/export code.
      data.resize( ds.tellp() );

      //slog( "pre randomized... '%s'", fc::to_hex( data.data(), 16 ).c_str() );
      // Seed is derived from the chunk's own sha1; the buffer is then
      // randomized in place before slicing and storing.
      size_t seed = randomize(data, *((uint64_t*)fc::sha1::hash(data.data(),data.size()).data()) );
      auto chunk_head = slice_chunk( data );
      //slog( "slice chunk %s", fc::json::to_string( chunk_head ).c_str() );
      store_chunk( chunk_head, data );
      
      return link( chunk_head.calculate_id(), seed );
    } else { // no header, just raw data from the file stored in the chunk
      // Small file: imbed the raw bytes directly behind a file_data_type tag.
      fc::vector<char> data( fc::file_size(p)+1 );
      data[0] = file_data_type;
      fc::ifstream ifile( p.string(), fc::ifstream::binary );
      ifile.read( data.data()+1, data.size()-1 );
      size_t seed = randomize(data, *((uint64_t*)fc::sha1::hash(data.data(),data.size()).data()) );

      auto chunk_head = slice_chunk( data );

      store_chunk( chunk_head, data );
      return link( chunk_head.calculate_id(), seed );
    }
  }
  else if( fc::is_directory(p) ) {
    // Directory: serialize the listing produced by import_directory()
    // behind a directory_type tag byte, same layout as the file branch.
    auto dir = import_directory(p);

    fc::vector<char> data(MAX_CHUNK_SIZE);
    fc::datastream<char*> ds(data.data()+1, data.size()-1);
    fc::raw::pack( ds, dir );
    data[0] = directory_type;
    data.resize( ds.tellp()+1 );

    size_t seed = randomize(data, *((uint64_t*)fc::sha1::hash(data.data(),data.size()).data()) );
    auto chunk_head = slice_chunk( data );
    link l( chunk_head.calculate_id(), seed );
    store_chunk( chunk_head, data );
    return l;
  }
  FC_THROW_MSG( "Unsupported file type while importing '%s'", p.string() );
  return cafs::link();
}
Example 2
/*
 * Reallocate the tracked block q to memsize*n bytes (plus a PATTERN_SIZE
 * debug area when debug_memory is set) on behalf of the caller named by p.
 * The old chunk record is removed and a new one stored for the resulting
 * pointer.  Exits the whole program if the underlying realloc fails.
 */
void *big_realloc(char *p, void *q, size_t n)
{
	size_t bytes;
	void *block;

	if (debug_memory) {
		mrlog("big_realloc(%s, %p, %ld)", p, q, n);
		bytes = memsize*(n+PATTERN_SIZE);
	} else {
		bytes = memsize*n;
	}

	/* Drop the bookkeeping entry before realloc may move/free q. */
	remove_chunk(q, p);
	block = realloc(q, bytes);

	if (debug > 2) {
		mrlog("Reallocating %ld bytes (%p => %p) on behalf of %s",
			(long)bytes, q, block, p);
	}

	if (block == NULL) {
		mrlog("Allocation '%s' failed, exiting", p);
		mrexit("Out of memory", EXIT_FAILURE);
	}

	/* Track the new block under the caller's name. */
	store_chunk(block, bytes, p);
	return block;
}
Example 3
/**
 * callback_file(cookie, buf, buflen):
 * Handle a chunk ${buf} of length ${buflen} from a file which is being
 * written to the tape associated with the multitape write cookie ${cookie}.
 */
static int
callback_file(void * cookie, uint8_t * buf, size_t buflen)
{
	struct multitape_write_internal * d = cookie;
	struct chunkheader ch;

	/* Account for data being passed out by c_file. */
	d->c_file_out += buflen;

	if (buflen < MINCHUNK) {
		/*
		 * Short data belongs in the trailer stream.  An archive
		 * entry may have at most one trailer.
		 */
		if (d->tlen != 0) {
			warn0("Archive entry has two trailers?");
			return (-1);
		}

		/* Append to the trailer stream and record its length. */
		if (chunkify_write(d->t.c, buf, buflen))
			return (-1);
		d->tlen = buflen;

		/* Invoke the trailer callback if one was registered. */
		if ((d->callback_trailer != NULL) &&
		    (d->callback_trailer)(d->callback_cookie, buf, buflen))
			return (-1);

		return (0);
	}

	/* Full-sized data: write it out via the chunk layer. */
	if (store_chunk(buf, buflen, &ch, d->C))
		return (-1);

	/* Append the chunk's header to the chunk index stream. */
	if (chunkify_write(d->c.c, (uint8_t *)(&ch),
	    sizeof(struct chunkheader)))
		return (-1);

	/* Account for the chunkified data. */
	d->clen += buflen;

	/* Invoke the chunk callback if one was registered. */
	if ((d->callback_chunk != NULL) &&
	    (d->callback_chunk)(d->callback_cookie, &ch))
		return (-1);

	return (0);
}
Example 4
/**
 * handle_chunk(buf, buflen, S, C):
 * Handle a chunk ${buf} of length ${buflen} belonging to the stream ${S}:
 * Write it using the chunk layer cookie ${C}, and append a chunk header to
 * the stream index.
 */
static int
handle_chunk(uint8_t * buf, size_t buflen, struct stream * S, CHUNKS_W * C)
{
	struct chunkheader ch;

	/*
	 * Write the chunk through the chunk layer, then record its header
	 * in the stream's elastic index array; fail if either step fails.
	 */
	if (store_chunk(buf, buflen, &ch, C) ||
	    chunklist_append(S->index, &ch, 1))
		return (-1);

	return (0);
}
Example 5
/*
 * Scan the file argv[1] chunk by chunk and feed each found chunk into the
 * analyzer opened on argv[2].  Returns EXIT_SUCCESS when the last chunk
 * has been processed, EXIT_FAILURE on any setup or scan error.
 */
int main(int argc, char **argv) {
    struct scan_ctx *scan;
    struct scan_chunk_data chunk_data[2];
    struct analyze_ctx *analyze;
    int fd, result;

    /* BUG FIX: argv[1]/argv[2] were dereferenced without checking argc,
     * crashing (UB) when the program is run with fewer than 2 arguments. */
    if (argc < 3) {
	fputs("usage: <file-to-scan> <analysis-output>\n", stderr);
	return EXIT_FAILURE;
    }

    analyze = analyze_open(argv[2], argv[1]);
    if (!analyze) {
	perror("analyze_init");
	return EXIT_FAILURE;
    }

    scan = scan_init();
    if (!scan) {
	perror("scan_init");
	return EXIT_FAILURE;
    }

    fd = open(argv[1], O_RDWR);
    if (fd < 0) {
	perror("open");
	return EXIT_FAILURE;
    }
    scan_set_fd(scan, fd);
    scan_set_aio(scan);

    if (!scan_begin(scan))
	return EXIT_FAILURE;

    /* Read chunks until the scanner reports the last one; every chunk
     * that is found is handed to the analyzer. */
    do {
	result = scan_read_chunk(scan, chunk_data);
	if (result & SCAN_CHUNK_FOUND)
	    store_chunk(analyze, chunk_data);
	else {
	    fputs("Scan error\n", stderr);
	    return EXIT_FAILURE;
	}
    } while (!(result & SCAN_CHUNK_LAST));

    /* NOTE(review): fd is intentionally left to process exit; early error
     * returns above likewise rely on the OS reclaiming fd/scan/analyze. */
    analyze_close(analyze);
    scan_free(scan);

    return EXIT_SUCCESS;
}
Example 6
/**
 *  Imports the file chunks into the database, but does not
 *  import the file_header itself as a chunk, as it may be
 *  imbedded within another chunk.
 *
 *  @pre is_regular_file(p)
 *
 *  @return a file_header listing (chunk id, seed, chunk header) for
 *          every chunk the file was split into.
 */
cafs::file_header cafs::import_file( const fc::path& p ) {
  file_header head;
  head.file_size = fc::file_size(p);

  fc::vector<char> chunk( MAX_CHUNK_SIZE );

  // divide file up into chunks and slices
  fc::ifstream in( p.string(), fc::ifstream::binary );
  // BUG FIX: the running byte count was uint32_t, which overflows for
  // files larger than 4GB; use a 64-bit counter.
  uint64_t r = 0;
  while( r < head.file_size ) {
    size_t some = fc::min( size_t(chunk.size()), size_t(head.file_size-r) );
    in.read( chunk.data(), some );

    // BUG FIX: resize to the bytes actually read BEFORE hashing and
    // randomizing.  Previously the seed (and the randomized contents of
    // the final, short chunk) were computed over the full MAX_CHUNK_SIZE
    // buffer, including stale bytes past `some`, making the import
    // non-deterministic.  This also matches the resize-then-randomize
    // order used by cafs::import for header/directory chunks.
    chunk.resize(some);
    size_t seed = randomize(chunk, *((uint64_t*)fc::sha1::hash(chunk.data(),chunk.size()).data()) );

    auto chunk_head = slice_chunk(chunk);
    auto chunk_id   = store_chunk( chunk_head, chunk );

    head.add_chunk( chunk_id, seed, chunk_head );
    r += some;
  }
  return head;
}