Example #1
bool MaprOutputCodedBlockFile::Open(std::string uri, short replication, uint64 chunk_size) {
  CHECK_GE(replication, 0);
  CHECK_LE(replication, 6);
  CHECK_GE(chunk_size, 0);

  CHECK(!is_open_);
  if (!IsValidUri(uri, "maprfs")) {
    return false;
  }

  std::string scheme, path;
  CHECK(ParseUri(uri, &scheme, &path)) << "Invalid uri format: " << uri;

  path_ = path;

  // Note: a chunk_size of zero tells HDFS to use its own default.  We want
  // MapR-FS's default instead, which is based on the settings of the parent
  // directory (if it exists).
  std::string parent_path = fs::path(path).remove_filename().string();
  if (chunk_size == 0) {
    std::string parent_uri = Uri(scheme, parent_path);
    while (!Exists(parent_uri)) {
      parent_path = fs::path(parent_path).remove_filename().string();
      parent_uri = Uri(scheme, parent_path);
      LOG(INFO) << "parent_uri: " << parent_uri;
    }
    CHECK(ChunkSize(parent_uri, &chunk_size));
  }

  CHECK_EQ(chunk_size % (1 << 16), 0) << "MaprFS requires the chunk size to be a multiple of 2^16";
  CHECK_LE(chunk_size, 1024 * (1 << 20)) << "hdfs.h uses a signed 32-bit int, which artificially limits the chunk size to 1 GB; MapR-FS can do more, but not through the C API.";

  file_ = hdfsOpenFile(fs_, path.c_str(), O_WRONLY, 0, replication, chunk_size);
  if (file_ == NULL) {
    LOG(ERROR) << "Failed to open file: " << path;
    return false;
  }

  copying_output_stream_.reset(new MaprCopyingOutputStream(this));
  output_stream_.reset(new google::protobuf::io::CopyingOutputStreamAdaptor(copying_output_stream_.get()));

  is_open_ = true;
  uri_ = uri;
  return true;
}
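A minimal caller sketch for the Open() method above. Only Open(uri, replication, chunk_size) appears in the source; the default constructor, the URI, and the error handling are assumptions.

  // Hedged usage sketch: everything except Open() itself is an assumption.
  MaprOutputCodedBlockFile out;                    // assumes a default constructor exists
  bool ok = out.Open("maprfs:///data/part-00000",  // hypothetical maprfs URI
                     /*replication=*/3,
                     /*chunk_size=*/0);            // 0 = inherit the parent directory's chunk size
  // On success, Open() has installed a CopyingOutputStreamAdaptor, so the file
  // is ready for protobuf-coded writes.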
Example #2
Bool ChunkCheck(Chunk chunk)
{
  CHECKS(Chunk, chunk);
  CHECKU(Arena, chunk->arena);
  CHECKL(chunk->serial < chunk->arena->chunkSerial);
  /* Can't use CHECKD_NOSIG because TreeEMPTY is NULL. */
  CHECKL(TreeCheck(&chunk->chunkTree));
  CHECKL(ChunkPagesToSize(chunk, 1) == ChunkPageSize(chunk));
  CHECKL(ShiftCheck(ChunkPageShift(chunk)));

  CHECKL(chunk->base != (Addr)0);
  CHECKL(chunk->base < chunk->limit);
  /* check chunk structure is at its own base: see .chunk.at.base. */
  CHECKL(chunk->base == (Addr)chunk);
  CHECKL((Addr)(chunk+1) <= chunk->limit);
  CHECKL(ChunkSizeToPages(chunk, ChunkSize(chunk)) == chunk->pages);
  /* check that the tables fit in the chunk */
  CHECKL(chunk->allocBase <= chunk->pages);
  CHECKL(chunk->allocBase >= chunk->pageTablePages);

  CHECKD_NOSIG(BT, chunk->allocTable);
  /* check that allocTable is in the chunk overhead */
  CHECKL((Addr)chunk->allocTable >= chunk->base);
  CHECKL(AddrAdd((Addr)chunk->allocTable, BTSize(chunk->pages))
         <= PageIndexBase(chunk, chunk->allocBase));

  /* check they don't overlap (knowing the order) */
  CHECKL(AddrAdd((Addr)chunk->allocTable, BTSize(chunk->pages))
         <= (Addr)chunk->pageTable);

  CHECKL(chunk->pageTable != NULL);
  CHECKL((Addr)chunk->pageTable >= chunk->base);
  CHECKL((Addr)&chunk->pageTable[chunk->pageTablePages]
         <= PageIndexBase(chunk, chunk->allocBase));
  CHECKL(NONNEGATIVE(INDEX_OF_ADDR(chunk, (Addr)chunk->pageTable)));
  /* check there's enough space in the page table */
  CHECKL(INDEX_OF_ADDR(chunk, AddrSub(chunk->limit, 1)) < chunk->pages);
  CHECKL(chunk->pageTablePages < chunk->pages);

  /* Could check the consistency of the tables, but not O(1). */
  return TRUE;
}
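The CHECKS/CHECKL/CHECKD_NOSIG calls above are the MPS structure-invariant idiom: verify the signature first, then each documented invariant, and return TRUE so the checker can be invoked through AVERT. A generic re-creation of the pattern with made-up names (not the MPS macros):

/* Hedged sketch of the same check idiom outside MPS; Span and SpanSig are invented. */
#include <assert.h>
#include <stddef.h>

typedef struct Span { unsigned long sig; size_t base, limit; } Span;
#define SpanSig 0x5195BA9EUL

static int SpanCheck(const Span *span)
{
  assert(span != NULL && span->sig == SpanSig);  /* CHECKS analogue: signature first */
  assert(span->base < span->limit);              /* CHECKL analogue: ordering invariant */
  return 1;                                      /* TRUE, so callers can assert(SpanCheck(s)) */
}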
Example #3
Res ChunkInit(Chunk chunk, Arena arena, Addr base, Addr limit, Size reserved,
              BootBlock boot)
{
  Size size;
  Count pages;
  Shift pageShift;
  Size pageTableSize;
  Addr allocBase;
  void *p;
  Res res;

  /* chunk is supposed to be uninitialized, so don't check it. */
  AVERT(Arena, arena);
  AVER(base != NULL);
  AVER(AddrIsAligned(base, ArenaGrainSize(arena)));
  AVER(base < limit);
  AVER(AddrIsAligned(limit, ArenaGrainSize(arena)));
  AVERT(BootBlock, boot);

  chunk->serial = (arena->chunkSerial)++;
  chunk->arena = arena;
  RingInit(&chunk->arenaRing);

  chunk->pageSize = ArenaGrainSize(arena);
  chunk->pageShift = pageShift = SizeLog2(chunk->pageSize);
  chunk->base = base;
  chunk->limit = limit;
  chunk->reserved = reserved;
  size = ChunkSize(chunk);

  /* .overhead.pages: Chunk overhead for the page allocation table. */
  chunk->pages = pages = size >> pageShift;
  res = BootAlloc(&p, boot, (size_t)BTSize(pages), MPS_PF_ALIGN);
  if (res != ResOK)
    goto failAllocTable;
  chunk->allocTable = p;

  pageTableSize = SizeAlignUp(pages * sizeof(PageUnion), chunk->pageSize);
  chunk->pageTablePages = pageTableSize >> pageShift;

  res = Method(Arena, arena, chunkInit)(chunk, boot);
  if (res != ResOK)
    goto failClassInit;

  /* @@@@ Is BootAllocated always right? */
  /* Last thing we BootAlloc'd is pageTable.  We requested pageSize */
  /* alignment, and pageTableSize is itself pageSize aligned, so */
  /* BootAllocated should also be pageSize aligned. */
  AVER(AddrIsAligned(BootAllocated(boot), chunk->pageSize));
  chunk->allocBase = (Index)(BootAllocated(boot) >> pageShift);

  /* Init allocTable after class init, because it might be mapped there. */
  BTResRange(chunk->allocTable, 0, pages);

  /* Check that there is some usable address space remaining in the chunk. */
  allocBase = PageIndexBase(chunk, chunk->allocBase);
  AVER(allocBase < chunk->limit);

  /* Add the chunk's free address space to the arena's freeLand, so that
     we can allocate from it. */
  if (arena->hasFreeLand) {
    res = ArenaFreeLandInsert(arena, allocBase, chunk->limit);
    if (res != ResOK)
      goto failLandInsert;
  }

  TreeInit(&chunk->chunkTree);

  chunk->sig = ChunkSig;
  AVERT(Chunk, chunk);

  ArenaChunkInsert(arena, chunk);

  return ResOK;

failLandInsert:
  Method(Arena, arena, chunkFinish)(chunk);
  /* .no-clean: No clean-ups needed past this point for boot, as we will
     discard the chunk. */
failClassInit:
failAllocTable:
  return res;
}
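ChunkInit() releases partially acquired state through its fail* labels in reverse order of acquisition. A standalone sketch of that goto-unwind error path (hypothetical names, not MPS code):

/* Hedged sketch of the goto-unwind pattern used by ChunkInit(). */
#include <stdlib.h>

int PairInit(void **a, void **b)
{
  *a = malloc(64);
  if (*a == NULL)
    goto failA;
  *b = malloc(64);
  if (*b == NULL)
    goto failB;
  return 0;          /* success: both resources held */

failB:
  free(*a);          /* undo in reverse order of acquisition */
failA:
  return -1;
}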
Example #4
EXPORT	void	set_pointer_queue_opts(
	int		opt,
			...)
{
	va_list		ap;
	int		i, size;
	char		*alloc_type;

	va_start(ap,opt);

	for (i = 0; (opt != 0) && (i < N_PQ_OPTS); opt = va_arg(ap,int), ++i)
	{
	    switch (opt)
	    {
	    case PQ_BLOCK_SIZE:
	    	size = va_arg(ap,int);
	    	n_pqs_in_blk = size;
	    	break;

	    case PQ_ALLOC_TYPE:
	    	alloc_type = va_arg(ap,char *);
	    	if (alloc_type == NULL)
	    	{
		    pq_alloc_type = DEFAULT_ALLOC;
		    break;
		}
		switch (alloc_type[0])
		{
		case 's':
		case 'S':
		    pq_alloc_type = USE_STORE_FOR_ALLOC;
		    break;
		case 'v':
		case 'V':
		    pq_alloc_type = USE_VMALLOC_FOR_ALLOC;
		    break;
		default:
		    pq_alloc_type = DEFAULT_ALLOC;
		    break;
		}
		break;

	    case PQ_ALLOC_SIZE_FOR_POINTERS:
	    	size = va_arg(ap,int);
	    	p2sz = size;
	    	break;

	    case PQ_DEFAULTS:
	    default:
	    	n_pqs_in_blk = DEFAULT_N_PQS_IN_BLK;
	    	pq_alloc_type = DEFAULT_ALLOC;
	    	p2sz = 0;
	    	break;
	    }
	}
	if (pq_alloc_type == USE_STORE_FOR_ALLOC)
	{
	    INTERFACE	*intfc = current_interface();
	    int		max_blk_len;

	    max_blk_len = (int)(ChunkSize(intfc) / max(p2sz,sizeof(PTR_LIST)));

	    if (n_pqs_in_blk > max_blk_len)
	    {
	    	screen("ERROR in set_pointer_queue_opts(), "
	    	       "block size too large for store\n"
	    	       "Maximum block size = %d\n",max_blk_len);
	    	clean_up(ERROR);
	    }
	}

	va_end(ap);
}		/*end set_pointer_queue_opts*/
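A hedged call sketch for set_pointer_queue_opts(). The option constants and the "store" keyword come from the function body above; the numeric values are illustrative, and the trailing 0 terminates the list, as the (opt != 0) loop condition requires.

	/* Hedged usage sketch: numeric values are illustrative only. */
	set_pointer_queue_opts(PQ_BLOCK_SIZE,              256,
	                       PQ_ALLOC_TYPE,              "store",   /* 's' selects USE_STORE_FOR_ALLOC */
	                       PQ_ALLOC_SIZE_FOR_POINTERS, 64,
	                       0);                                    /* terminates the option list */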
Example #5
//-----------------------------------------------------------------------------
// Purpose: Read the entire chunk into a buffer
// Input  : *pOutput - dest buffer
// Output : int bytes read
//-----------------------------------------------------------------------------
int IterateRIFF::ChunkRead( void *pOutput )
{
	return m_riff.ReadData( pOutput, ChunkSize() );
}
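A hedged usage sketch for ChunkRead(). Only ChunkSize() and ChunkRead() appear in the source; the iteration object and the buffer handling are assumptions.

	// Hedged sketch: 'walk' is an IterateRIFF assumed to already be positioned on a chunk.
	int size = walk.ChunkSize();
	std::vector<unsigned char> buffer( size );       // requires <vector>
	int bytesRead = walk.ChunkRead( buffer.data() );
	// bytesRead should equal size unless the underlying ReadData() came up short.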