Example #1
bool MaprCopyingOutputStream::Write(const void * buffer, int size){
  CHECK(output_->is_open_);

  // Starting with MapR V2.1.1, sometimes write fails and the hdfs connection
  // must be reset in order to try again.  Not sure why this transient error
  // happens with MapR V2.1.1 but not earlier versions.
  //TODO(heathkh): find source of this bug and remove need for retry!
  bool success = false;
  for (int i=0; i < 10; ++i){
    int bytes_written = hdfsWrite(output_->fs_, output_->file_, buffer, size);
    if (bytes_written == size){
      success = true;
      break;
    }
    else if (bytes_written > 0){
      // A short write is unexpected; signal an error and give up.
      break;
    }

    // If we failed to write anything, there may be a transient error with
    // maprfs; reset the connection and try again.
    CHECK_EQ(hdfsCloseFile(output_->fs_, output_->file_), 0);
    CHECK_EQ(hdfsDisconnect(output_->fs_), 0);
    std::string host = "default";
    output_->fs_ = hdfsConnect(host.c_str(), 0); // use default config file settings
    CHECK(output_->fs_) << "error connecting to maprfs";
    output_->file_ = hdfsOpenFile(output_->fs_, output_->path_.c_str(), O_WRONLY, 0, 0, 0);
    CHECK(output_->file_ != NULL);
    sleep(2*i);
  }

  return success;
}
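A minimal sketch (not from the original source): hdfsWrite may return a short count, so a helper can loop until the whole buffer is written, leaving reconnect-and-retry decisions like the one above to the caller. Names are illustrative only.

static bool WriteFully(hdfsFS fs, hdfsFile file, const char* buffer, tSize size) {
  tSize total = 0;
  while (total < size) {
    tSize n = hdfsWrite(fs, file, buffer + total, size - total);
    if (n <= 0)
      return false;  // hard error; the caller may reconnect and retry
    total += n;      // partial write: advance and continue
  }
  return true;
}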
Example #2
// Returns a constructed chunk id and a pointer to the chunk's data.
// The chunkId must be registered with the blockManager, which then reports
// the information; put works the same way, so this function could call put
// and use a unified reporting interface.
ChunkInfo BlockManager::loadFromHdfs(string file_name){
	// <blockId, pointer> produced by this function
	ChunkInfo ci;
	string file_name_former,file_name_latter;
	unsigned pos=file_name.rfind("$");
	file_name_former=file_name.substr(0,pos);
	file_name_latter=file_name.substr(pos+1);
	int offset=atoi(file_name_latter.c_str());
	hdfsFS fs=hdfsConnect(Config::hdfs_master_ip.c_str(),Config::hdfs_master_port);
	hdfsFile readFile=hdfsOpenFile(fs,file_name_former.c_str(),O_RDONLY,0,0,0);
	hdfsFileInfo *hdfsfile=hdfsGetPathInfo(fs,file_name_former.c_str());
	if(!readFile||!hdfsfile){
		cout<<"open file error"<<endl;
		ci.hook=0;
		if(hdfsfile) hdfsFreeFileInfo(hdfsfile,1);
		if(readFile) hdfsCloseFile(fs,readFile);
		hdfsDisconnect(fs);
		return ci;
	}
	unsigned length=CHUNK_SIZE*offset;
	if(length<hdfsfile->mSize){
		void *rt=malloc(CHUNK_SIZE);		//newmalloc
		tSize bytes_num=hdfsPread(fs,readFile,length,rt,CHUNK_SIZE);
		ostringstream chunkid;
		chunkid<<file_name.c_str()<<"$"<<offset;
//		ci.chunkId=chunkid.str().c_str();
		ci.hook=rt;
	}else{
		ostringstream chunkid;
		chunkid<<file_name.c_str()<<"$"<<offset;
//		ci.chunkId=chunkid.str().c_str();
		ci.hook=0;
	}
	hdfsFreeFileInfo(hdfsfile,1);
	hdfsCloseFile(fs,readFile);
	hdfsDisconnect(fs);
	return ci;
}
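The "path$offset" chunk-id convention parsed above can also be composed symmetrically; a hedged helper sketch (name illustrative only):

string makeChunkId(const string& path, int offset) {
	ostringstream oss;
	oss << path << "$" << offset;
	return oss.str();
}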
Example #3
int BlockManager::loadFromHdfs(const ChunkID& chunk_id, void* const &desc,const unsigned & length){
	lock.acquire();
	int ret;
	int offset=chunk_id.chunk_off;
	hdfsFS fs=hdfsConnect(Config::hdfs_master_ip.c_str(),Config::hdfs_master_port);
	hdfsFile readFile=hdfsOpenFile(fs,chunk_id.partition_id.getPathAndName().c_str(),O_RDONLY,0,0,0);
	hdfsFileInfo *hdfsfile=hdfsGetPathInfo(fs,chunk_id.partition_id.getPathAndName().c_str());// to be refined after communicating with Zhang Lei
	if(!readFile){
		logging_->elog("Fail to open file [%s].Reason:%s",chunk_id.partition_id.getPathAndName().c_str(),strerror(errno));
		hdfsDisconnect(fs);
		lock.release();
		return -1;
	}
	else{
		logging_->log("file [%s] is opened for offset[%d]\n",chunk_id.partition_id.getPathAndName().c_str(),offset);
	}
	if(hdfsfile==NULL){
		hdfsCloseFile(fs,readFile);
		hdfsDisconnect(fs);
		lock.release();
		return -1;
	}
	long int start_pos=CHUNK_SIZE*offset;
	if(start_pos<hdfsfile->mSize){
		ret=hdfsPread(fs,readFile,start_pos,desc,length);
	}else{
		ret=-1;
	}
	hdfsFreeFileInfo(hdfsfile,1);
	hdfsCloseFile(fs,readFile);
	hdfsDisconnect(fs);
	lock.release();
	return ret;
}
Example #4
tSize HDFSReadWrite::HDFSWrite(std::string fileName, const char* buffer,
		tSize len) {
	fileName = "/" + fileName;
	int flag = O_WRONLY | O_CREAT;
	if (hdfsExists(::HdfsConnectionPool::hdfs(), fileName.c_str()) == 0) {
		flag = O_WRONLY | O_APPEND;
	}
	hdfsFile writeFile = hdfsOpenFile(::HdfsConnectionPool::hdfs(),
			fileName.c_str(), flag, 0, 0, 0);

	if (writeFile != NULL) {
		tSize bytesWritten = hdfsWrite(::HdfsConnectionPool::hdfs(), writeFile,
				buffer, len);

		if (hdfsFlush(::HdfsConnectionPool::hdfs(), writeFile) == -1) {
			hdfsCloseFile(::HdfsConnectionPool::hdfs(), writeFile);
			return -1;
		}

		hdfsCloseFile(::HdfsConnectionPool::hdfs(), writeFile);
		return bytesWritten;
	} else {
		DEBUG("hdfs open file failed!");
	}
	return -1;
}
Example #5
HdfsFile::HdfsFile(const char* fname, const char* mode, unsigned /* opts */) :
	IDBDataFile( fname ),
	m_file(0),
	m_fs(0)
{
	int savedErrno;

	m_flags = modeStrToFlags(mode);
	if( m_flags == -1 )
	{
		ostringstream oss;
		oss << "Error opening file " << fname << " - unsupported mode " << mode;
		throw std::runtime_error(oss.str());
	}

	m_fs = HdfsFsCache::fs();

	// @bug5476, HDFS does not support O_CREAT|O_APPEND as of 2.0,
	// so handle O_APPEND specially
	if ((m_flags & O_APPEND) && (hdfsExists(m_fs, fname) != 0))
		m_flags &= ~O_APPEND;

	m_file = hdfsOpenFile(m_fs, fname, m_flags, 0, 0, 0);
	savedErrno = errno;

	if(!m_file)
	{
		ostringstream oss;
		oss << "Error opening file " << fname << ": " << strerror(savedErrno);
		throw std::runtime_error(oss.str());
	}
}
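A hedged sketch of the append-or-create pattern that Examples #4, #5, and #11 each implement: open for append only when the target already exists, otherwise create it, since HDFS (as of 2.0, per the comment above) rejects O_CREAT|O_APPEND. Here `fs` and `path` stand for an already-connected hdfsFS and an absolute HDFS path.

int flags = O_WRONLY;
if (hdfsExists(fs, path) == 0)
    flags |= O_APPEND;   // file exists: append to it
else
    flags |= O_CREAT;    // file missing: create it
hdfsFile f = hdfsOpenFile(fs, path, flags, 0, 0, 0);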
Example #6
int main(int argc, char **argv) {

	hdfsFS fs = hdfsConnect("default", 0);
	const char* writePath = "/test2/2.txt";
	hdfsFile writeFile = hdfsOpenFile(fs, writePath, O_WRONLY |O_CREAT, 0, 0, 0);
	if(!writeFile) {
		fprintf(stderr, "Failed to open %s for writing!\n", writePath);
		return -1;
	}
	/*
	//try open again, ERR msg: No lease on /test2/2.txt
	hdfsFile writeFile2 = hdfsOpenFile(fs, writePath, O_WRONLY |O_CREAT, 0, 0, 0);
	if(!writeFile2) {
		fprintf(stderr, "Failed to open %s for writing!\n", writePath);
		return -1;
	}
	*/
	const char* buffer = "Hello, World!";
	tSize num_written_bytes = hdfsWrite(fs, writeFile, (void*)buffer, strlen(buffer)+1);
	if (hdfsFlush(fs, writeFile)) {
		fprintf(stderr, "Failed to 'flush' %s\n", writePath);
		return -1;
	}
	hdfsCloseFile(fs, writeFile);
	hdfsDisconnect(fs);
	return 0;
}
Example #7
HDFSChunkReaderIterator::HDFSChunkReaderIterator(const ChunkID& chunk_id, unsigned& chunk_size,const unsigned& block_size)
:ChunkReaderIterator(chunk_id,block_size,chunk_size){
	block_buffer_=new Block(block_size_);
	fs_=hdfsConnect(Config::hdfs_master_ip.c_str(),Config::hdfs_master_port);
	hdfs_fd_=hdfsOpenFile(fs_,chunk_id.partition_id.getName().c_str(),O_RDONLY,0,0,0);
	if(!hdfs_fd_){
		printf("fails to open HDFS file [%s]\n",chunk_id.partition_id.getName().c_str());
		number_of_blocks_=0;
	}

	const unsigned start_pos = CHUNK_SIZE * chunk_id_.chunk_off;
	if(hdfsSeek(fs_,hdfs_fd_,start_pos)==-1){
		printf("fails to set the start offset %d for [%s]\n",start_pos,chunk_id.partition_id.getName().c_str());
		number_of_blocks_=0;
	}
	hdfsFileInfo *file_info=hdfsGetPathInfo(fs_,"/imdb/");// to be refined after communicating with Zhang Lei
	if(start_pos+CHUNK_SIZE<file_info->mSize){
		number_of_blocks_=CHUNK_SIZE/block_size_;
	}
	else{
		number_of_blocks_=(file_info->mSize-start_pos)/block_size_;
	}
	hdfsFreeFileInfo(file_info,1);

}
Example #8
ReadStreamPtr Hdfs3OpenReadStream(
    const std::string& _path, const common::Range& range) {

    std::string path = _path;
    // crop off hdfs://
    die_unless(common::StartsWith(path, "hdfs://"));
    path = path.substr(7);

    // split uri into host/path
    std::vector<std::string> splitted = common::Split(path, '/', 2);
    die_unless(splitted.size() == 2);

    // prepend root /
    splitted[1] = "/" + splitted[1];

    hdfsFS fs = Hdfs3FindConnection(splitted[0]);

    // construct file handler
    hdfsFile file = hdfsOpenFile(
        fs, splitted[1].c_str(), O_RDONLY, /* bufferSize */ 0,
        /* replication */ 0, /* blocksize */ 0);
    if (!file)
        die("Could not open HDFS file \"" << _path << "\": " << hdfsGetLastError());

    return tlx::make_counting<Hdfs3ReadStream>(
        fs, file, /* start_byte */ range.begin, /* byte_count */ range.size());
}
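For reference, the libhdfs declaration behind all of these calls (from hdfs.h): the last three parameters select I/O buffer size, replication, and block size, and passing 0 keeps the configured defaults, as the comments above indicate.

hdfsFile hdfsOpenFile(hdfsFS fs, const char* path, int flags,
                      int bufferSize, short replication, tSize blocksize);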
Example #9
bool MaprInputCodedBlockFile::Open(std::string uri) {
  CHECK(!is_open_) << "File already open.";
  if (!IsValidUri(uri, "maprfs")) {
    LOG(ERROR) << "failed to validate uri: " << uri;
    return false;
  }
  std::string scheme, path;
  CHECK(ParseUri(uri, &scheme, &path)) << "Invalid uri format: " << uri;

  file_ = hdfsOpenFile(fs_, path.c_str(), O_RDONLY, 0, 0, 0);
  if (file_ == NULL) {
    LOG(ERROR) << "Failed to open file: " << path;
    return false;
  }
  is_open_ = true;
  path_ = path;

  // Cache file size
  hdfsFileInfo* info = hdfsGetPathInfo(fs_, path_.c_str());
  CHECK(info);
  size_ = info->mSize;
  hdfsFreeFileInfo(info, 1);

  return true;
}
Example #10
static struct libhdfs_data *libhdfs_data_create(const struct options *opts)
{
    struct libhdfs_data *ldata = NULL;
    struct hdfsBuilder *builder = NULL;
    hdfsFileInfo *pinfo = NULL;

    ldata = calloc(1, sizeof(struct libhdfs_data));
    if (!ldata) {
        fprintf(stderr, "Failed to allocate libhdfs test data.\n");
        goto error;
    }
    builder = hdfsNewBuilder();
    if (!builder) {
        fprintf(stderr, "Failed to create builder.\n");
        goto error;
    }
    hdfsBuilderSetNameNode(builder, opts->rpc_address);
    hdfsBuilderConfSetStr(builder,
        "dfs.client.read.shortcircuit.skip.checksum", "true");
    ldata->fs = hdfsBuilderConnect(builder);
    if (!ldata->fs) {
        fprintf(stderr, "Could not connect to default namenode!\n");
        goto error;
    }
    pinfo = hdfsGetPathInfo(ldata->fs, opts->path);
    if (!pinfo) {
        int err = errno;
        fprintf(stderr, "hdfsGetPathInfo(%s) failed: error %d (%s).  "
                "Attempting to re-create file.\n",
            opts->path, err, strerror(err));
        if (libhdfs_data_create_file(ldata, opts))
            goto error;
    } else if (pinfo->mSize != opts->length) {
        fprintf(stderr, "hdfsGetPathInfo(%s) failed: length was %lld, "
                "but we want length %lld.  Attempting to re-create file.\n",
                opts->path, (long long)pinfo->mSize, (long long)opts->length);
        if (libhdfs_data_create_file(ldata, opts))
            goto error;
    }
    ldata->file = hdfsOpenFile(ldata->fs, opts->path, O_RDONLY, 0, 0, 0);
    if (!ldata->file) {
        int err = errno;
        fprintf(stderr, "hdfsOpenFile(%s) failed: error %d (%s)\n",
            opts->path, err, strerror(err));
        goto error;
    }
    ldata->length = opts->length;
    return ldata;

error:
    if (pinfo)
        hdfsFreeFileInfo(pinfo, 1);
    if (ldata)
        libhdfs_data_free(ldata);
    return NULL;
}
Example #11
bool HdfsConnector::assgin_open_file(open_flag open_flag_){

	vector<vector<string> >::iterator prj_writepath;
	vector<string>::iterator par_writepath;

	for (prj_writepath = writepath.begin(); prj_writepath != writepath.end(); prj_writepath++)
	{
		vector<hdfsFile> prj_writefile;
		prj_writefile.clear();
		for (par_writepath = (*prj_writepath).begin(); par_writepath != (*prj_writepath).end(); par_writepath++)
		{
			switch (open_flag_)
			{
			case CREATEE:
			{
				if (hdfsExists(fs, (*par_writepath).c_str()) == 0)
					cout << "[WARNINIG: Hdfsconnector.cpp->assgin_open_file()]: The file " << *par_writepath << " is already exits! It will be override!\n";
				prj_writefile.push_back(hdfsOpenFile(fs, (*par_writepath).c_str(), O_WRONLY|O_CREAT, 0, 0, 0));
				break;
			}
			case APPENDD:
			{
				if (hdfsExists(fs, (*par_writepath).c_str()) == -1)
				{
					prj_writefile.push_back(hdfsOpenFile(fs, (*par_writepath).c_str(), O_WRONLY|O_CREAT, 0, 0, 0));
					break;
//					cout << "[ERROR: Hdfsconnector.cpp->assgin_open_file()]: The file " << *par_writepath << "is not exits!\n";
//					return false;
				}
				prj_writefile.push_back(hdfsOpenFile(fs, (*par_writepath).c_str(), O_WRONLY|O_APPEND, 0, 0, 0));
				break;
			}
			default:
			{
				cout << "[ERROR: Hdfsconnector.cpp->assgin_open_file()]: Illegal file open flag for data loading!\n";
				return false;
			}
			}
		}
		file_handles_.push_back(prj_writefile);
	}
	return true;
}
Example #12
int main(int argc, char **argv) {

    const char* rfile;
    tSize fileTotalSize, bufferSize, curSize, totalReadSize;
    hdfsFS fs;
    hdfsFile readFile;
    char *buffer = NULL;
    
    if (argc != 4) {
        fprintf(stderr, "Usage: test_libwebhdfs_read"
                " <filename> <filesize> <buffersize>\n");
        exit(1);
    }
    
    fs = hdfsConnect("localhost", 50070);
    if (!fs) {
        fprintf(stderr, "Oops! Failed to connect to hdfs!\n");
        exit(1);
    }
    
    rfile = argv[1];
    fileTotalSize = strtoul(argv[2], NULL, 10);
    bufferSize = strtoul(argv[3], NULL, 10);
    
    readFile = hdfsOpenFile(fs, rfile, O_RDONLY, bufferSize, 0, 0);
    if (!readFile) {
        fprintf(stderr, "Failed to open %s for writing!\n", rfile);
        exit(1);
    }
    
    // buffer to hold data read from the file
    buffer = malloc(sizeof(char) * bufferSize);
    if(buffer == NULL) {
        fprintf(stderr, "Failed to allocate buffer.\n");
        exit(1);
    }
    
    // read from the file
    curSize = bufferSize;
    totalReadSize = 0;
    for (; (curSize = hdfsRead(fs, readFile, buffer, bufferSize)) == bufferSize; ) {
        totalReadSize += curSize;
    }
    totalReadSize += curSize;
    
    fprintf(stderr, "size of the file: %d; reading size: %d\n",
            fileTotalSize, totalReadSize);
    
    free(buffer);
    hdfsCloseFile(fs, readFile);
    hdfsDisconnect(fs);
    
    return 0;
}
Example #13
int main(int argc, char **argv) {

    if (argc != 3) {
        fprintf(stderr, "Usage: hdfs_read <filename> <buffersize>\n");
        exit(-1);
    }
    
    hdfsFS fs = hdfsConnect("default", 0);
    if (!fs) {
        fprintf(stderr, "Oops! Failed to connect to hdfs!\n");
        exit(-2);
    } 
 
    const char* rfile = argv[1];
    tSize bufferSize = strtoul(argv[2], NULL, 10);

   
    hdfsFile readFile = hdfsOpenFile(fs, rfile, O_RDONLY, bufferSize, 0, 0);
    if (!readFile) {
        fprintf(stderr, "Failed to open %s for writing!\n", rfile);
        exit(-3);
    }

	FILE *outf = fopen(rfile, "wb");
	if (outf == NULL) {
		printf("FILEIO error %d\n", errno);
		exit(-4);
	}

    // buffer to hold data read from the file
    char* buffer = malloc(sizeof(char) * bufferSize);
    if(buffer == NULL) {
        return -5;
    }
    
	//printf("buffersize is %d\n", bufferSize);
    // read from the file
    tSize curSize = bufferSize;
    for (; curSize == bufferSize;) {
		//printf("cursize before is %d\n", curSize);
        curSize = hdfsRead(fs, readFile, (void*)buffer, curSize);
		//printf("cursize is %d, errno is %d\n", curSize, errno);
		fwrite((void *)buffer, sizeof(char), curSize, outf);
		//printf("%.*s", bufferSize, buffer);
    }

	fclose(outf);
    free(buffer);
    hdfsCloseFile(fs, readFile);
    hdfsDisconnect(fs);

    return 0;
}
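A hedged variant of the copy loop above (same `fs`, `readFile`, `buffer`, and `outf` as in Example #13): it distinguishes EOF (0) from a read error (-1) instead of relying on a short read to terminate, which also avoids passing -1 to fwrite.

    tSize n;
    while ((n = hdfsRead(fs, readFile, (void*)buffer, bufferSize)) > 0) {
        fwrite(buffer, sizeof(char), (size_t)n, outf);  /* consume n bytes */
    }
    if (n == -1)
        fprintf(stderr, "hdfsRead failed, errno %d\n", errno);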
Example #14
 hdfs_device(const hdfs& hdfs_fs, const std::string& filename,
             const bool write = false) :
   filesystem(hdfs_fs.filesystem) {
   ASSERT_TRUE(filesystem != NULL);
   // open the file
   const int flags = write? O_WRONLY : O_RDONLY;
   const int buffer_size = 0; // use default
   const short replication = 0; // use default
   const tSize block_size = 0; // use default
   file = hdfsOpenFile(filesystem, filename.c_str(), flags, buffer_size,
                       replication, block_size);
 }
Example #15
    ReadBufferFromHDFSImpl(const std::string & hdfs_name_)
        : hdfs_uri(hdfs_name_)
        , builder(createHDFSBuilder(hdfs_uri))
        , fs(createHDFSFS(builder.get()))
    {

        auto & path = hdfs_uri.getPath();
        fin = hdfsOpenFile(fs.get(), path.c_str(), O_RDONLY, 0, 0, 0);

        if (fin == nullptr)
            throw Exception("Unable to open HDFS file: " + path + " error: " + std::string(hdfsGetLastError()),
                ErrorCodes::CANNOT_OPEN_FILE);
    }
Example #16
/**
 * For now implement truncate here and only for size == 0.
 * Weak implementation in that we just delete the file and 
 * then re-create it, but don't set the user, group, and times to the old
 * file's metadata. 
 */
int dfs_truncate(const char *path, off_t size)
{
  struct hdfsConn *conn = NULL;
  hdfsFS fs;
  dfs_context *dfs = (dfs_context*)fuse_get_context()->private_data;

  TRACE1("truncate", path)

  assert(path);
  assert('/' == *path);
  assert(dfs);

  if (size != 0) {
    return 0;
  }

  int ret = dfs_unlink(path);
  if (ret != 0) {
    return ret;
  }

  ret = fuseConnectAsThreadUid(&conn);
  if (ret) {
    fprintf(stderr, "fuseConnectAsThreadUid: failed to open a libhdfs "
            "connection!  error %d.\n", ret);
    ret = -EIO;
    goto cleanup;
  }
  fs = hdfsConnGetFs(conn);

  int flags = O_WRONLY | O_CREAT;

  hdfsFile file;
  if ((file = (hdfsFile)hdfsOpenFile(fs, path, flags,  0, 0, 0)) == NULL) {
    ERROR("Could not connect open file %s", path);
    ret = -EIO;
    goto cleanup;
  }

  if (hdfsCloseFile(fs, file) != 0) {
    ERROR("Could not close file %s", path);
    ret = -EIO;
    goto cleanup;
  }

cleanup:
  if (conn) {
    hdfsConnRelease(conn);
  }
  return ret;
}
Example #17
int HDFSFileSplitter::read_block(const std::string& fn) {
    file_ = hdfsOpenFile(fs_, fn.c_str(), O_RDONLY, 0, 0, 0);
    assert(file_ != NULL);
    hdfsSeek(fs_, file_, offset_);
    size_t start = 0;
    tSize nbytes = 0;
    while (start < hdfs_block_size) {
        // hdfsRead returns at most ~128KB per call, so keep reading the
        // remainder of the block; stop on EOF or error
        nbytes = hdfsRead(fs_, file_, data_ + start, hdfs_block_size - start);
        if (nbytes <= 0)
            break;
        start += nbytes;
    }
    return start;
}
Example #18
static bs_file_t  __hlfs_file_open(struct back_storage *storage,const char*path,int flags){
	//HLOG_DEBUG("hdfs -- enter func %s", __func__);
    char full_path[256];
    build_hdfs_path(full_path,storage->dir,storage->fs_name,path);
    //HLOG_DEBUG("hdfs full path %s",full_path);
    hdfsFile file = NULL;
    file =  hdfsOpenFile((hdfsFS)storage->fs_handler,full_path,flags, 0, 0, 0);
    if(NULL == file){
	   //HLOG_DEBUG("hdfsOpenFile error");
       return NULL;
    }
    //HLOG_DEBUG("hdfs file:%p",file);
	//HLOG_DEBUG("hdfs -- leave func %s", __func__);
    return (bs_file_t)file;
}
Example #19
int HdfsFile::reopen()
{
	// if we are trying to reopen, something has already happened
	// (the case we know of is a stale read handle after a file rewrite),
	// so don't check the return value of close()
	close();

	m_file = hdfsOpenFile(m_fs, m_fname.c_str(), m_flags, 0, 0, 0);
	int savedErrno = errno;

	if( IDBLogger::isEnabled() )
		IDBLogger::logNoArg(m_fname, this, "reopen", m_file != NULL);

	errno = savedErrno;
	return (m_file != NULL ? 0 : -1);
}
Example #20
int main(int argc, char **argv) {

    hdfsFS fs;
    char* rfile;
    int bufferSize;
    hdfsFile readFile;
    char* buffer;
    int curSize;
    
    if (argc != 4) {
        fprintf(stderr, "Usage: hdfs_read <filename> <filesize> <buffersize>\n");
        exit(-1);
    }
    
    fs = hdfsConnect("default", 0);
    if (!fs) {
        fprintf(stderr, "Oops! Failed to connect to hdfs!\n");
        exit(-1);
    } 
 
    rfile = argv[1];
    bufferSize = strtoul(argv[3], NULL, 10);
   
    readFile = hdfsOpenFile(fs, rfile, O_RDONLY, bufferSize, 0, 0);
    if (!readFile) {
        fprintf(stderr, "Failed to open %s for writing!\n", rfile);
        exit(-2);
    }

    /* buffer to hold data read from the file */
    buffer = malloc(sizeof(char) * bufferSize);
    if(buffer == NULL) {
        return -2;
    }
    
    /* read from the file */
    curSize = bufferSize;
    for (; curSize == bufferSize;) {
        curSize = hdfsRead(fs, readFile, (void*)buffer, curSize);
    }
    
    free(buffer);
    hdfsCloseFile(fs, readFile);
    hdfsDisconnect(fs);

    return 0;
}
Example #21
/**
 * For now implement truncate here and only for size == 0.
 * Weak implementation in that we just delete the file and 
 * then re-create it, but don't set the user, group, and times to the old
 * file's metadata. 
 */
int dfs_truncate(const char *path, off_t size)
{
  TRACE1("truncate", path)

  dfs_context *dfs = (dfs_context*)fuse_get_context()->private_data;

  assert(path);
  assert('/' == *path);
  assert(dfs);

  if (size != 0) {
    return -ENOTSUP;
  }

  int ret = dfs_unlink(path);
  if (ret != 0) {
    return ret;
  }

  hdfsFS userFS = doConnectAsUser(dfs->nn_hostname, dfs->nn_port);
  if (userFS == NULL) {
    ERROR("Could not connect");
    ret = -EIO;
    goto cleanup;
  }

  int flags = O_WRONLY | O_CREAT;

  hdfsFile file;
  if ((file = (hdfsFile)hdfsOpenFile(userFS, path, flags,  0, 0, 0)) == NULL) {
    ERROR("Could not connect open file %s", path);
    ret = -EIO;
    goto cleanup;
  }

  if (hdfsCloseFile(userFS, file) != 0) {
    ERROR("Could not close file %s", path);
    ret = -EIO;
    goto cleanup;
  }

cleanup:
  if (doDisconnect(userFS)) {
    ret = -EIO;
  }
  return ret;
}
Example #22
int main(int argc, char **argv) {

    hdfsFS fs = hdfsConnect("default", 0);
    const char* writePath = "/tmp/testfile.txt";
    hdfsFile writeFile = hdfsOpenFile(fs, writePath, O_WRONLY|O_CREAT, 0, 0, 0);
    if(!writeFile) {
          fprintf(stderr, "Failed to open %s for writing!\n", writePath);
          exit(-1);
    }
    char* buffer = "Hello, World!";
    tSize num_written_bytes = hdfsWrite(fs, writeFile, (void*)buffer, strlen(buffer)+1);
    if (hdfsFlush(fs, writeFile)) {
           fprintf(stderr, "Failed to 'flush' %s\n", writePath); 
          exit(-1);
    }
    hdfsCloseFile(fs, writeFile);
    hdfsDisconnect(fs);
    return 0;
}
Example #23
void ClaimsHDFS::claimsRead(){
	hdfsFS fs;
	hdfsFile fd;
	string filename="/home/casa/data/kmeans_data.txt";
	fs=hdfsConnect("10.11.1.174",9000);
	fd=hdfsOpenFile(fs,filename.c_str(),O_RDONLY,0,0,0);
	if(!fd){
		cout<<"failed to open hdfs file!!!"<<endl;
		hdfsDisconnect(fs);
		return;
	}

	char array[73];
	tSize bytes=hdfsRead(fs,fd,array,72);
	array[bytes>0?bytes:0]='\0';	// null-terminate before printing
	cout<<"string is: "<<array<<endl;

	hdfsCloseFile(fs,fd);
	hdfsDisconnect(fs);
}
Example #24
static int libhdfs_data_create_file(struct libhdfs_data *ldata,
                                    const struct options *opts)
{
    int ret;
    double *chunk = NULL;
    long long offset = 0;

    ldata->file = hdfsOpenFile(ldata->fs, opts->path, O_WRONLY, 0, 1, 0);
    if (!ldata->file) {
        ret = errno;
        fprintf(stderr, "libhdfs_data_create_file: hdfsOpenFile(%s, "
            "O_WRONLY) failed: error %d (%s)\n", opts->path, ret,
            strerror(ret));
        goto done;
    }
    ret = test_file_chunk_setup(&chunk);
    if (ret)
        goto done;
    while (offset < opts->length) {
        ret = hdfsWrite(ldata->fs, ldata->file, chunk, VECSUM_CHUNK_SIZE);
        if (ret < 0) {
            ret = errno;
            fprintf(stderr, "libhdfs_data_create_file: got error %d (%s) at "
                    "offset %lld of %s\n", ret, strerror(ret),
                    offset, opts->path);
            goto done;
        } else if (ret < VECSUM_CHUNK_SIZE) {
            fprintf(stderr, "libhdfs_data_create_file: got short write "
                    "of %d at offset %lld of %s\n", ret, offset, opts->path);
            goto done;
        }
        offset += VECSUM_CHUNK_SIZE;
    }
    ret = 0;
done:
    free(chunk);
    if (ldata->file) {
        if (hdfsCloseFile(ldata->fs, ldata->file)) {
            fprintf(stderr, "libhdfs_data_create_file: hdfsCloseFile error.");
            ret = EIO;
        }
        ldata->file = NULL;
    }
    return ret;
}
Example #25
int libhdfsconnector::streamInFile(const char * rfile, int bufferSize)
{
    if (!fs)
    {
        fprintf(stderr, "Could not connect to hdfs on");
        return RETURN_FAILURE;
    }

    unsigned long fileTotalSize = 0;

    hdfsFileInfo *fileInfo = NULL;
    if ((fileInfo = hdfsGetPathInfo(fs, rfile)) != NULL)
    {
        fileTotalSize = fileInfo->mSize;
        hdfsFreeFileInfo(fileInfo, 1);
    }
    else
    {
        fprintf(stderr, "Error: hdfsGetPathInfo for %s - FAILED!\n", rfile);
        return RETURN_FAILURE;
    }

    hdfsFile readFile = hdfsOpenFile(fs, rfile, O_RDONLY, bufferSize, 0, 0);
    if (!readFile)
    {
        fprintf(stderr, "Failed to open %s for writing!\n", rfile);
        return RETURN_FAILURE;
    }

    unsigned char buff[bufferSize + 1];
    buff[bufferSize] = '\0';

    for (unsigned long bytes_read = 0; bytes_read < fileTotalSize;)
    {
        tSize read_length = hdfsRead(fs, readFile, buff, bufferSize);
        if (read_length <= 0)
            break;  // EOF or read error
        bytes_read += read_length;
        for (tSize i = 0; i < read_length; i++)
            fprintf(stdout, "%c", buff[i]);
    }

    hdfsCloseFile(fs, readFile);

    return 0;
}
Example #26
bool MaprOutputCodedBlockFile::Open(std::string uri, short replication, uint64 chunk_size) {
  CHECK_GE(replication, 0);
  CHECK_LE(replication, 6);
  CHECK_GE(chunk_size, 0);

  CHECK(!is_open_);
  if (!IsValidUri(uri, "maprfs")) {
    return false;
  }

  std::string scheme, path;
  CHECK(ParseUri(uri, &scheme, &path)) << "Invalid uri format: " << uri;

  path_ = path;

  // A chunk_size of zero tells hdfs to use its default, but we want maprfs's
  // default, which is based on the settings of the nearest existing parent
  // directory.
  string parent_path = fs::path(path).remove_filename().string();
  if (chunk_size == 0){
    string parent_uri = Uri(scheme, parent_path);
    while (!Exists(parent_uri)){
      parent_path = fs::path(parent_path).remove_filename().string();
      parent_uri = Uri(scheme, parent_path);
      LOG(INFO) << "parent_uri: " << parent_uri;
    }
    CHECK(ChunkSize(parent_uri, &chunk_size));
  }

  CHECK_EQ(chunk_size % (1 << 16), 0) << "MaprFS requires chunk size is a multiple of 2^16";
  CHECK_LE(chunk_size, 1024 * (1<<20)) << "hdfs.h uses a signed 32 int which artificially limits the chunk size to 1GB... maprfs can do more, but not through the c api... ;-(";

  file_ = hdfsOpenFile(fs_, path.c_str(), O_WRONLY, 0, replication, chunk_size);
  if (file_ == NULL){
    LOG(ERROR) << "Failed to open file: " << path;
    return false;
  }

  copying_output_stream_.reset(new MaprCopyingOutputStream(this));
  output_stream_.reset(new google::protobuf::io::CopyingOutputStreamAdaptor(copying_output_stream_.get()));

  is_open_ = true;
  uri_ = uri;
  return true;
}
Example #27
qioerr hdfs_open(void** fd, const char* path, int* flags, mode_t mode, qio_hint_t iohints, void* fs)
{
  qioerr err_out = 0;
  int rc;
  hdfs_file* fl = (hdfs_file*)qio_calloc(sizeof(hdfs_file), 1);

  STARTING_SLOW_SYSCALL;
  DO_RETAIN(((hdfs_fs*)fs));

  // assert that we connected
  CREATE_ERROR((to_hdfs_fs(fs)->hfs == NULL), err_out, ECONNREFUSED,"Unable to open HDFS file", error);

  fl->file =  hdfsOpenFile(to_hdfs_fs(fs)->hfs, path, *flags, 0, 0, 0);

  // Assert that we opened the file
  if (fl->file == NULL) {
    err_out = qio_mkerror_errno();
    goto error;
  }

  DONE_SLOW_SYSCALL;

  fl->pathnm = path;

  rc = *flags | ~O_ACCMODE;
  rc &= O_ACCMODE;
  if( rc == O_RDONLY ) {
    *flags |= QIO_FDFLAG_READABLE;
  } else if( rc == O_WRONLY ) {
    *flags |= QIO_FDFLAG_WRITEABLE;
  } else if( rc == O_RDWR ) {
    *flags |= QIO_FDFLAG_READABLE;
    *flags |= QIO_FDFLAG_WRITEABLE;
  }

  *fd = fl; // Set fd to fl and return
  return err_out;

error:
  qio_free(fl);
  return err_out;
}
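The rc manipulation above reduces to masking with O_ACCMODE, since (x | ~O_ACCMODE) & O_ACCMODE == x & O_ACCMODE. A hedged, more direct restatement of the same flag mapping:

  int accmode = *flags & O_ACCMODE;
  if (accmode == O_RDONLY || accmode == O_RDWR)
    *flags |= QIO_FDFLAG_READABLE;
  if (accmode == O_WRONLY || accmode == O_RDWR)
    *flags |= QIO_FDFLAG_WRITEABLE;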
Example #28
bool FileManagerHdfs::readBlockOrBlob(const block_id block,
                                      void *buffer,
                                      const size_t length) {
  DEBUG_ASSERT(buffer);
  DEBUG_ASSERT(length % kSlotSizeBytes == 0);

  string filename(blockFilename(block));

  hdfsFile file_handle = hdfsOpenFile(hdfs_,
                                      filename.c_str(),
                                      O_RDONLY,
                                      kSlotSizeBytes,
                                      FLAGS_hdfs_num_replications,
                                      kSlotSizeBytes);
  if (file_handle == nullptr) {
    LOG_WARNING("Failed to open file " << filename << " with error: " << strerror(errno));
    return false;
  }

  size_t bytes_total = 0;
  while (bytes_total < length) {
    tSize bytes = hdfsRead(hdfs_, file_handle, static_cast<char*>(buffer) + bytes_total, length - bytes_total);
    if (bytes > 0) {
      bytes_total += bytes;
    } else if (bytes == -1) {
      if (errno != EINTR) {
        LOG_WARNING("Failed to read file " << filename << " with error: " << strerror(errno));
        break;
      }
    } else {
      LOG_WARNING("Failed to read file " << filename << " since EOF was reached unexpectedly");
      break;
    }
  }

  if (hdfsCloseFile(hdfs_, file_handle) != 0) {
    LOG_WARNING("Failed to close file " << filename << " with error: " << strerror(errno));
  }

  return (bytes_total == length);
}
Example #29
static int createZeroCopyTestFile(hdfsFS fs, char *testFileName,
                                  size_t testFileNameLen)
{
    int blockIdx, blockLen;
    hdfsFile file;
    uint8_t *data;

    snprintf(testFileName, testFileNameLen, "/zeroCopyTestFile.%d.%d",
             getpid(), rand());
    file = hdfsOpenFile(fs, testFileName, O_WRONLY, 0, 1,
                        TEST_ZEROCOPY_FULL_BLOCK_SIZE);
    EXPECT_NONNULL(file);
    for (blockIdx = 0; blockIdx < TEST_ZEROCOPY_NUM_BLOCKS; blockIdx++) {
        blockLen = getZeroCopyBlockLen(blockIdx);
        data = getZeroCopyBlockData(blockIdx);
        EXPECT_NONNULL(data);
        EXPECT_INT_EQ(blockLen, hdfsWrite(fs, file, data, blockLen));
    }
    EXPECT_ZERO(hdfsCloseFile(fs, file));
    return 0;
}
Example #30
bool FileManagerHdfs::writeBlockOrBlob(const block_id block,
                                       const void *buffer,
                                       const size_t length) {
  DEBUG_ASSERT(buffer);
  DEBUG_ASSERT(length % kSlotSizeBytes == 0);

  string filename(blockFilename(block));

  hdfsFile file_handle = hdfsOpenFile(hdfs_,
                                      filename.c_str(),
                                      O_WRONLY,
                                      kSlotSizeBytes,
                                      FLAGS_hdfs_num_replications,
                                      kSlotSizeBytes);
  if (file_handle == nullptr) {
    LOG_WARNING("Failed to open file " << filename << " with error: " << strerror(errno));
    return false;
  }

  size_t bytes_total = 0;
  while (bytes_total < length) {
    tSize bytes = hdfsWrite(hdfs_, file_handle, static_cast<const char*>(buffer) + bytes_total, length - bytes_total);
    if (bytes > 0) {
      bytes_total += bytes;
    } else if (bytes == -1) {
      LOG_WARNING("Failed to write file " << filename << " with error: " << strerror(errno));
      break;
    }
  }

  if (hdfsSync(hdfs_, file_handle) != 0) {
    LOG_WARNING("Failed to sync file " << filename << " with error: " << strerror(errno));
  }

  if (hdfsCloseFile(hdfs_, file_handle) != 0) {
    LOG_WARNING("Failed to close file " << filename << " with error: " << strerror(errno));
  }

  return (bytes_total == length);
}