コード例 #1
0
ファイル: BlockManager.cpp プロジェクト: Jackson1992/CLAIMS
int BlockManager::loadFromHdfs(const ChunkID& chunk_id, void* const &desc,const unsigned & length){
	lock.acquire();
	int ret;
	int offset=chunk_id.chunk_off;
	hdfsFS fs=hdfsConnect(Config::hdfs_master_ip.c_str(),Config::hdfs_master_port);
	hdfsFile readFile=hdfsOpenFile(fs,chunk_id.partition_id.getPathAndName().c_str(),O_RDONLY,0,0,0);
	hdfsFileInfo *hdfsfile=hdfsGetPathInfo(fs,chunk_id.partition_id.getPathAndName().c_str());// to be refined after communicating with Zhang Lei
	if(!readFile){
		logging_->elog("Fail to open file [%s].Reason:%s",chunk_id.partition_id.getPathAndName().c_str(),strerror(errno));
		hdfsDisconnect(fs);
		lock.release();
		return -1;
	}
	else{
		logging_->log("file [%s] is opened for offset[%d]\n",chunk_id.partition_id.getPathAndName().c_str(),offset);
	}
	 long int start_pos=CHUNK_SIZE*offset;
	if(start_pos<hdfsfile->mSize){
		ret=hdfsPread(fs,readFile,start_pos,desc,length);
	}else{
		lock.release();
		ret= -1;
	}
	hdfsCloseFile(fs,readFile);
	hdfsDisconnect(fs);
	lock.release();
	return ret;
}
コード例 #2
0
ファイル: hdfs_storage.c プロジェクト: Aruiwen/cloudxy
int hdfs_disconnect(struct back_storage *storage){
	//HLOG_DEBUG("hdfs -- enter func %s", __func__);
	hdfsFS fs = (hdfsFS)storage->fs_handler;
	hdfsDisconnect(fs);
	//HLOG_DEBUG("hdfs -- leave func %s", __func__);
	return 0;
}
コード例 #3
0
// Ensures fs_ is a live HDFS connection to host:port, reconnecting only when
// the cached connection is missing or points at a different host/port.
// Returns TRUE on success, FALSE if the connect failed.
NABoolean HHDFSTableStats::connectHDFS(const NAString &host, Int32 port)
{
  NABoolean result = TRUE;

  // establish connection to HDFS if needed
  if (fs_ == NULL ||
      currHdfsHost_ != host ||
      currHdfsPort_ != port)
    {
      if (fs_)
        {
          hdfsDisconnect(fs_);
          fs_ = NULL;
        }
      fs_ = hdfsConnect(host, port);

      if (fs_ == NULL)
        {
          CMPASSERT(fs_);
          // TBD:DIAGS
          result = FALSE;
        }
      else
        {
          // fix: cache host/port only after a successful connect; the
          // original recorded them even when hdfsConnect failed.
          currHdfsHost_ = host;
          currHdfsPort_ = port;
        }
    }
  return result;
}
コード例 #4
0
ファイル: fs_mapr.cpp プロジェクト: Minione/iwct
// Writes `size` bytes from `buffer` to the open MapR file, retrying up to
// 10 times with a full connection reset between attempts.
// Returns true iff the entire buffer was written in a single hdfsWrite call.
bool MaprCopyingOutputStream::Write(const void * buffer, int size){
  CHECK(output_->is_open_);

  // Starting with MapR V2.1.1, sometimes write fails and the hdfs connection
  // must be reset in order to try again.  Not sure why this transient error
  // happens with MapR V2.1.1 but not earlier versions.
  //TODO(heathkh): find source of this bug and remove need for retry!
  bool success = false;
  for (int i=0; i < 10; ++i){
    int bytes_written = hdfsWrite(output_->fs_, output_->file_, buffer, size);
    if (bytes_written == size){
      success = true;
      break;
    }
    else if (bytes_written > 0){
      // if we wrote less than requested... something weird happened... signal error and give up.
      break;
    }

    // if we failed to write anything, there may be a transient error with maprfs... worth trying again...

    //LOG(INFO) << "seek attempt failed: " << i;
    //LOG(INFO) << "path:" << path_ << "\n position: " << position << "\n length: " << length << "\n size: " << size_;  // success if returns 0
    // Reset the connection: close file + fs, reconnect, reopen for append.
    // NOTE(review): the CHECK_* macros abort the process if any reset step
    // fails, so a permanently broken connection terminates rather than
    // returning false — presumably intentional; confirm with callers.
    CHECK_EQ(hdfsCloseFile(output_->fs_, output_->file_), 0);
    CHECK_EQ(hdfsDisconnect(output_->fs_), 0);
    std::string host = "default";
    output_->fs_ = hdfsConnect(host.c_str(), 0); // use default config file settings
    CHECK(output_->fs_) << "error connecting to maprfs";
    output_->file_ = hdfsOpenFile(output_->fs_, output_->path_.c_str(), O_WRONLY, 0, 0, 0);
    CHECK(output_->file_ != NULL);
    sleep(2*i);  // backoff; intentionally 0 seconds before the second attempt
  }

  return success;
}
コード例 #5
0
ファイル: hdfs3_file.cpp プロジェクト: bingmann/thrill
//! Tears down every cached libhdfs3 connection and empties the cache.
//! Serialized against concurrent map access via s_hdfs_mutex.
void Hdfs3Deinitialize() {
    std::unique_lock<std::mutex> map_guard(s_hdfs_mutex);
    for (auto& entry : s_hdfs_map)
        hdfsDisconnect(entry.second);
    s_hdfs_map.clear();
}
コード例 #6
0
ファイル: BlockManager.cpp プロジェクト: Jackson1992/CLAIMS
// This function returns a constructed chunk id together with the pointer for
// each chunk. The chunkId produced here must be registered with the
// blockManager and then reported; put() works the same way, so this function
// could call put and share a unified reporting interface.
ChunkInfo BlockManager::loadFromHdfs(string file_name){
	// <blockId, pointer> produced by this function; hook stays 0 on failure.
	ChunkInfo ci;
	ci.hook = 0;
	string file_name_former, file_name_latter;
	// file_name is "<path>$<chunk offset>"
	unsigned pos = file_name.rfind("$");
	file_name_former = file_name.substr(0, pos);
	file_name_latter = file_name.substr(pos + 1, file_name.length());
	int offset = atoi(file_name_latter.c_str());
	hdfsFS fs = hdfsConnect(Config::hdfs_master_ip.c_str(), Config::hdfs_master_port);
	hdfsFile readFile = hdfsOpenFile(fs, file_name_former.c_str(), O_RDONLY, 0, 0, 0);
	hdfsFileInfo *hdfsfile = hdfsGetPathInfo(fs, file_name_former.c_str());
	if (!readFile || hdfsfile == NULL) {
		// fix: the original printed the error but then kept using the NULL
		// handle / NULL path info, which is undefined behavior.
		cout<<"open file error"<<endl;
		if (hdfsfile)
			hdfsFreeFileInfo(hdfsfile, 1);
		if (readFile)
			hdfsCloseFile(fs, readFile);
		hdfsDisconnect(fs);
		return ci;
	}
	unsigned length = 0;
	length = length + CHUNK_SIZE * offset;  // byte offset of the chunk start
	if (length < hdfsfile->mSize) {
		void *rt = malloc(CHUNK_SIZE);		//newmalloc
		tSize bytes_num = hdfsPread(fs, readFile, length, rt, CHUNK_SIZE);
		ostringstream chunkid;
		chunkid<<file_name.c_str()<<"$"<<offset;
//		ci.chunkId=chunkid.gestr().c_str();
		ci.hook = rt;
	} else {
		ostringstream chunkid;
		chunkid<<file_name.c_str()<<"$"<<offset;
//		ci.chunkId=chunkid.str().c_str();
		ci.hook = 0;
	}
	hdfsFreeFileInfo(hdfsfile, 1);  // fix: path info was leaked
	hdfsCloseFile(fs, readFile);
	hdfsDisconnect(fs);
	return ci;
}
コード例 #7
0
ファイル: HDFSIOStore.cpp プロジェクト: agtorre/plfs-core
/**
 * HFDS_Probe: safely probe an HDFS to see if we can connect to it.
 *
 * XXX:
 * we probe HDFS in a child process to avoid JVM issues (FUSE forks a
 * daemon after this call, and the daemon's JVM calls all hang because
 * the JVM was inited in the parent process).  the fork avoids this
 * issue.
 *
 * @return PLFS_SUCCESS or PLFS_E*
 */
plfs_error_t HDFSIOStore::HDFS_Probe() {
    pid_t child;
    hdfsFS tmpfs;
    int status;

    child = fork();
    if (child == 0) {
        /* note: don't wrap this connect/disconnect call */
        tmpfs = hdfsConnect(this->hdfs_host, this->hdfs_port);
        if (tmpfs != NULL) {
            hdfsDisconnect(tmpfs);
        }
        /*
         * fix: a fork()ed-but-not-exec()ed child must use _exit(), not
         * exit(): exit() would run the parent's atexit handlers and flush
         * duplicated stdio buffers in the child.
         */
        _exit((tmpfs == NULL) ? 1 : 0);
    }
    /* status stays -1 (-> PLFS_EIO) if the fork itself failed */
    status = -1;
    if (child != -1) 
        (void)waitpid(child, &status, 0);
    if (status != 0) {
        mlog(STO_ERR, "HDFS_Probe(%s,%d): connect failed.",
             this->hdfs_host, this->hdfs_port);
        return PLFS_EIO;
    }

    return PLFS_SUCCESS;
}
コード例 #8
0
ファイル: fs_mapr.cpp プロジェクト: Minione/iwct
// Lists the entries of the directory named by `uri` into `contents` as
// "maprfs"-scheme URIs. Returns true on success.
bool MaprFileSystem::ListDirectory(const std::string& uri, std::vector<std::string>* contents){
  CHECK(contents);
  contents->clear();
  std::string path = GetUriPathOrDie(uri);
  std::string host = "default";
  hdfsFS fs = hdfsConnect(host.c_str(), 0); // use default config file settings
  CHECK(fs) << "Can't connect to filesystem for this uri: " << uri;  // fix: fs was used unchecked
  int num_entries = 0;
  hdfsFileInfo* entries = hdfsListDirectory(fs, path.c_str(), &num_entries);
  // fix: hdfsListDirectory returns NULL on error (and for some empty dirs);
  // the original iterated and called hdfsFreeFileInfo on a NULL pointer.
  if (entries != NULL) {
    hdfsFileInfo* cur_entry = entries;
    for (int i=0; i < num_entries; ++i) {
      // Sometimes the list directory command returns paths with the scheme and sometimes it doesn't
      // Strange.
      // Anyway, we need to consistently output uris with a proper scheme prefix.
      std::string cur_scheme, cur_path, error;
      if (ParseUri(cur_entry->mName, &cur_scheme, &cur_path, &error)){
        CHECK_EQ(cur_scheme, "maprfs"); // if it has a scheme prefix, make sure it is maprfs as expected
      }
      else{
        // this doesn't have a uri scheme prefix, so assume it is just the path portion
        cur_path = cur_entry->mName;
      }

      contents->push_back(Uri("maprfs", cur_path));

      cur_entry++;
    }
    hdfsFreeFileInfo(entries, num_entries);
  }
  CHECK_EQ(hdfsDisconnect(fs), 0);
  return true;
}
コード例 #9
0
ファイル: file_system.c プロジェクト: ssalevan/ruby-hdfs
/**
 * call-seq:
 *    hdfs.disconnect -> nil
 *
 * Disconnects the client connection.
 */
VALUE HDFS_File_System_disconnect(VALUE self) {
  FSData* data = NULL;
  Data_Get_Struct(self, FSData, data);
  /* Nothing to do when already disconnected. */
  if (data->fs == NULL) {
    return Qnil;
  }
  hdfsDisconnect(data->fs);
  data->fs = NULL;  /* mark the handle dead so a second call is a no-op */
  return Qnil;
}
コード例 #10
0
ファイル: fs_mapr.cpp プロジェクト: Minione/iwct
// Creates the path portion of `uri` as a directory on the default MapR
// filesystem. Returns true iff creation succeeded.
bool MaprFileSystem::MakeDirectory(const std::string& uri){
  const std::string dir_path = GetUriPathOrDie(uri);
  const std::string host = "default";
  hdfsFS fs = hdfsConnect(host.c_str(), 0); // use default config file settings
  CHECK(fs);
  const int rc = hdfsCreateDirectory(fs, dir_path.c_str());
  CHECK_EQ(hdfsDisconnect(fs), 0);
  return rc == 0;
}
コード例 #11
0
ファイル: fs_mapr.cpp プロジェクト: Minione/iwct
// Returns true iff the path portion of `uri` exists on the default MapR
// filesystem.
bool MaprFileSystem::Exists(const std::string& uri){
  const std::string target = GetUriPathOrDie(uri);
  const std::string host = "default";
  hdfsFS fs = hdfsConnect(host.c_str(), 0); // use default config file settings
  CHECK(fs) << "Can't connect to filesystem for this uri: " << uri;
  const int rc = hdfsExists(fs, target.c_str());  // 0 means "present"
  CHECK_EQ(hdfsDisconnect(fs), 0);
  return rc == 0;
}
コード例 #12
0
ファイル: vecsum.c プロジェクト: simonzhangsm/CDH5.1.x
/*
 * Frees a libhdfs_data struct: closes the open file (if any), disconnects
 * the filesystem, and releases the read buffer and the struct itself.
 * fix: tolerates a NULL argument like free() does; the original
 * dereferenced ldata unconditionally.
 */
static void libhdfs_data_free(struct libhdfs_data *ldata)
{
    if (!ldata)
        return;
    if (ldata->fs) {
        /* NOTE(review): buf is only freed when fs is set — presumably buf
         * is never allocated without a connection; confirm at call sites. */
        free(ldata->buf);
        if (ldata->file) {
            hdfsCloseFile(ldata->fs, ldata->file);
        }
        hdfsDisconnect(ldata->fs);
    }
    free(ldata);
}
コード例 #13
0
/* Reads an HDFS file via webhdfs in bufferSize chunks and reports how many
 * bytes were read versus the size the caller claimed on the command line. */
int main(int argc, char **argv) {

    const char* rfile;
    tSize fileTotalSize, bufferSize, curSize, totalReadSize;
    hdfsFS fs;
    hdfsFile readFile;
    char *buffer = NULL;
    
    if (argc != 4) {
        fprintf(stderr, "Usage: test_libwebhdfs_read"
                " <filename> <filesize> <buffersize>\n");
        exit(1);
    }
    
    fs = hdfsConnect("localhost", 50070);
    if (!fs) {
        fprintf(stderr, "Oops! Failed to connect to hdfs!\n");
        exit(1);
    }
    
    rfile = argv[1];
    fileTotalSize = strtoul(argv[2], NULL, 10);
    bufferSize = strtoul(argv[3], NULL, 10);
    
    readFile = hdfsOpenFile(fs, rfile, O_RDONLY, bufferSize, 0, 0);
    if (!readFile) {
        /* fix: this is the read path; the message claimed "writing" */
        fprintf(stderr, "Failed to open %s for reading!\n", rfile);
        exit(1);
    }
    
    /* buffer receiving the data read from the file */
    buffer = malloc(sizeof(char) * bufferSize);
    if(buffer == NULL) {
        fprintf(stderr, "Failed to allocate buffer.\n");
        exit(1);
    }
    
    /* read from the file until a short read (EOF) or an error */
    curSize = bufferSize;
    totalReadSize = 0;
    for (; (curSize = hdfsRead(fs, readFile, buffer, bufferSize)) == bufferSize; ) {
        totalReadSize += curSize;
    }
    /* fix: the final read can return -1 on error; the original added it
     * into the total unconditionally. */
    if (curSize > 0) {
        totalReadSize += curSize;
    }
    
    fprintf(stderr, "size of the file: %d; reading size: %d\n",
            fileTotalSize, totalReadSize);
    
    free(buffer);
    hdfsCloseFile(fs, readFile);
    hdfsDisconnect(fs);
    
    return 0;
}
コード例 #14
0
ファイル: test_libhdfs_get.c プロジェクト: jaypatel384/random
/* Copies an HDFS file to a local file of the same (relative) path, reading
 * in bufferSize chunks. */
int main(int argc, char **argv) {

    if (argc != 3) {
        fprintf(stderr, "Usage: hdfs_read <filename> <buffersize>\n");
        exit(-1);
    }
    
    hdfsFS fs = hdfsConnect("default", 0);
    if (!fs) {
        fprintf(stderr, "Oops! Failed to connect to hdfs!\n");
        exit(-2);
    } 
 
    const char* rfile = argv[1];
    tSize bufferSize = strtoul(argv[2], NULL, 10);

   
    hdfsFile readFile = hdfsOpenFile(fs, rfile, O_RDONLY, bufferSize, 0, 0);
    if (!readFile) {
        /* fix: this is the read path; the message claimed "writing" */
        fprintf(stderr, "Failed to open %s for reading!\n", rfile);
        exit(-3);
    }

	/* local destination file (same relative path as the HDFS source) */
	FILE *outf = fopen(rfile, "wb");
	if (outf == NULL) {
		printf("FILEIO error %d\n", errno);
		exit(-4);
	}

    /* transfer buffer */
    char* buffer = malloc(sizeof(char) * bufferSize);
    if(buffer == NULL) {
        return -5;
    }
    
	//printf("buffersize is %d\n", bufferSize);
    /* copy loop: a short read means EOF, a negative one means error */
    tSize curSize = bufferSize;
    for (; curSize == bufferSize;) {
        curSize = hdfsRead(fs, readFile, (void*)buffer, curSize);
        /* fix: hdfsRead can return -1; the original passed the negative
         * value to fwrite, where it becomes a huge size_t. */
        if (curSize > 0) {
            fwrite((void *)buffer, sizeof(char), curSize, outf);
        }
    }

	fclose(outf);
    free(buffer);
    hdfsCloseFile(fs, readFile);
    hdfsDisconnect(fs);

    return 0;
}
コード例 #15
0
ファイル: fs_mapr.cpp プロジェクト: Minione/iwct
// Destructor: flushes and closes the MapR file, then drops the connection.
// The two stream resets MUST happen first and in this order — destroying the
// protobuf stream adaptors triggers the final buffered write, which still
// needs fs_/file_ to be valid.
MaprOutputCodedBlockFile::~MaprOutputCodedBlockFile() {
  //LOG(INFO) << "MaprOutputCodedBlockFile::~MaprOutputCodedBlockFile()";
  // force destructors to be called that cause a write to happen before
  // releasing resources needed for a write
  output_stream_.reset(NULL);
  copying_output_stream_.reset(NULL);
  // NOTE(review): CHECK_* aborts the process on failure — a failed flush or
  // close in a destructor terminates the program rather than throwing.
  CHECK_EQ(hdfsFlush(fs_, file_), 0);
  //LOG(INFO) << "closing file: " << file_;
  CHECK_EQ(hdfsCloseFile(fs_, file_), 0);
  //LOG(INFO) << "disconnecting fs: " << fs_;
  CHECK_EQ(hdfsDisconnect(fs_), 0);
}
コード例 #16
0
/*
 * Runs the per-thread HDFS operation suite: once as the default user, once
 * as user "foo" (expecting EACCES on chown), then reconnects to clean up.
 * Returns 0 on success; the EXPECT_* macros return a nonzero error from
 * this function on the first failing step (so a failure mid-sequence can
 * leak the current fs handle — acceptable in a test).
 */
static int testHdfsOperationsImpl(struct tlhThreadInfo *ti)
{
    hdfsFS fs = NULL;
    struct tlhPaths paths;

    fprintf(stderr, "testHdfsOperations(threadIdx=%d): starting\n",
        ti->threadIdx);
    EXPECT_ZERO(hdfsSingleNameNodeConnect(tlhCluster, &fs, NULL));
    EXPECT_ZERO(setupPaths(ti, &paths));
    // test some operations
    EXPECT_ZERO(doTestHdfsOperations(ti, fs, &paths));
    EXPECT_ZERO(hdfsDisconnect(fs));
    // reconnect as user "foo" and verify that we get permission errors
    EXPECT_ZERO(hdfsSingleNameNodeConnect(tlhCluster, &fs, "foo"));
    EXPECT_NEGATIVE_ONE_WITH_ERRNO(hdfsChown(fs, paths.file1, "ha3", NULL), EACCES);
    EXPECT_ZERO(hdfsDisconnect(fs));
    // reconnect to do the final delete.
    EXPECT_ZERO(hdfsSingleNameNodeConnect(tlhCluster, &fs, NULL));
    EXPECT_ZERO(hdfsDelete(fs, paths.prefix, 1));
    EXPECT_ZERO(hdfsDisconnect(fs));
    return 0;
}
コード例 #17
0
ファイル: fs.c プロジェクト: HsuJv/Note
/* Recursively deletes `path` on the default HDFS. Returns 0 on success;
 * exits the process on connect/delete failure (existing behavior kept). */
int dfsRemove(const char* path){
    hdfsFS fs = hdfsConnect("default", 0);

    /* fix: hdfsConnect can fail; the original passed a NULL fs to hdfsDelete */
    if (fs == NULL){
        perror("Connect error");
        exit(-1);
    }

    if (hdfsDelete(fs, path, 1) < 0){
        perror("Delete error");
        hdfsDisconnect(fs);  /* release the connection before exiting */
        exit(-1);
    }

    /* Remove ends */
    hdfsDisconnect(fs);

    return 0;
}
コード例 #18
0
ファイル: test_libhdfs_read.c プロジェクト: CoREse/snap
/* Drains an HDFS file by reading it in bufferSize chunks (contents are
 * discarded); exercises connect/open/read/close. */
int main(int argc, char **argv) {

    hdfsFS fs;
    char* rfile;
    int bufferSize;
    hdfsFile readFile;
    char* buffer;
    int curSize;
    
    if (argc != 4) {
        fprintf(stderr, "Usage: hdfs_read <filename> <filesize> <buffersize>\n");
        exit(-1);
    }
    
    fs = hdfsConnect("default", 0);
    if (!fs) {
        fprintf(stderr, "Oops! Failed to connect to hdfs!\n");
        exit(-1);
    } 
 
    rfile = argv[1];
    bufferSize = strtoul(argv[3], NULL, 10);
   
    readFile = hdfsOpenFile(fs, rfile, O_RDONLY, bufferSize, 0, 0);
    if (!readFile) {
        /* fix: this is the read path; the message claimed "writing" */
        fprintf(stderr, "Failed to open %s for reading!\n", rfile);
        exit(-2);
    }

    /* transfer buffer */
    buffer = malloc(sizeof(char) * bufferSize);
    if(buffer == NULL) {
        /* fix: release the file handle and connection before bailing out */
        hdfsCloseFile(fs, readFile);
        hdfsDisconnect(fs);
        return -2;
    }
    
    /* read until a short read (EOF) or an error (-1) ends the loop */
    curSize = bufferSize;
    for (; curSize == bufferSize;) {
        curSize = hdfsRead(fs, readFile, (void*)buffer, curSize);
    }
    
    free(buffer);
    hdfsCloseFile(fs, readFile);
    hdfsDisconnect(fs);

    return 0;
}
コード例 #19
0
ファイル: ClaimsHDFS.cpp プロジェクト: doliu/datastructure
void ClaimsHDFS::claimsRead(){
	hdfsFS fs;
	hdfsFile fd;
	string filename="/home/casa/data/kmeans_data.txt";
	fs=hdfsConnect("10.11.1.174",9000);
	fd=hdfsOpenFile(fs,filename.c_str(),O_RDONLY,0,0,0);
	if(!fd){
		cout<<"failed to open hdfs file!!!"<<endl;
	}

	char array[72];
    tSize bytes=hdfsRead(fs,fd,array,72);
	cout<<"string is: "<<array<<endl;

	hdfsCloseFile(fs,fd);
	hdfsDisconnect(fs);
}
コード例 #20
0
ファイル: fs_mapr.cpp プロジェクト: Minione/iwct
bool MaprFileSystem::IsDirectory(const std::string& uri){
  std::string path = GetUriPathOrDie(uri);
  std::string host = "default";
  hdfsFS fs = hdfsConnect(host.c_str(), 0); // use default config file settings
  CHECK(fs);
  hdfsFileInfo* info = hdfsGetPathInfo(fs, path.c_str());
  bool is_directory = false;
  if (info){
    is_directory = (info->mKind == kObjectKindDirectory);
    hdfsFreeFileInfo(info,1);
  }
  else{
    LOG(FATAL) << "uri does not exist: " << uri;
  }
  CHECK_EQ(hdfsDisconnect(fs), 0);
  return is_directory;
}
コード例 #21
0
ファイル: qio_plugin_hdfs.c プロジェクト: CoryMcCartan/chapel
/*
 * Disconnects the wrapped libhdfs handle and frees the hdfs_fs wrapper.
 * Returns a qioerr describing the disconnect failure, or 0 on success.
 * The wrapper is freed unconditionally, even when disconnect fails.
 */
static
qioerr hdfs_disconnect_and_free(void* fs)
{
  qioerr err = 0;
  int ret= 0;

  STARTING_SLOW_SYSCALL;
  /* clear errno first so qio_mkerror_errno reflects only this call */
  errno = 0;
  ret = hdfsDisconnect(((hdfs_fs*)fs)->hfs);

  /* NOTE(review): -1 is the documented libhdfs error return; -2 is
   * presumably a variant/legacy error code — confirm against the hdfs.h
   * in use before removing. */
  if ((ret == -2) || (ret == -1))  {
    err = qio_mkerror_errno();
  } else errno = 0;
  DONE_SLOW_SYSCALL;

  qio_free(fs);
  return err;
}
コード例 #22
0
ファイル: fs.c プロジェクト: HsuJv/Note
/* Lists `path` on the default HDFS in an ls-style table with human-readable
 * sizes. Returns 0 on success; exits on connect/list failure. */
int dfsList(const char* path){
    hdfsFS fs = hdfsConnect("default", 0);
    int i, entries;
    hdfsFileInfo *files, *head;

    /* fix: hdfsConnect can fail; the original used fs unchecked */
    if (!fs){
        perror("Connect to hdfs");
        exit(-1);
    }

    /* Get the list info */
    files = hdfsListDirectory(fs, path, &entries);
    if (!files){
        perror("Get directory info");
        exit(-1);
    }
    head = files;

    /* Print the info */
    fprintf(stdout, "%s %-50s %-9s %s\n",
            "Kind", "Name", "Size", "Replicas");

    for (i = 0; i < entries; i++){
        const char* unit[] = {" B", "KB", "MB", "GB", "TB", "PB"};
        double size = files->mSize;
        unsigned int u = 0;

        /* fix: use >= so exactly 1024 bytes prints as "1.00KB",
         * not "1024.00 B" */
        while (size >= 1024){
            u++;
            size /= 1024;
        }

        assert(u < 6);

        fprintf(stdout, "%4c %-50s %-7.2lf%s %8d\n", 
                files->mKind, files->mName,
                size, unit[u],
                files->mReplication);

        files += 1;
    }

    /* List ends */
    hdfsFreeFileInfo(head, entries);
    hdfsDisconnect(fs);
    
    return 0;
}
コード例 #23
0
/**
 * Test that we can write a file with libhdfs and then read it back
 */
int main(void)
{
    int port;
    /* single-node mini DFS cluster with short-circuit reads enabled */
    struct NativeMiniDfsConf conf = {
        1, /* doFormat */
        0, /* webhdfsEnabled */
        0, /* namenodeHttpPort */
        1, /* configureShortCircuit */
    };
    char testFileName[TEST_FILE_NAME_LENGTH];
    hdfsFS fs;
    struct NativeMiniDfsCluster* cl;
    struct hdfsBuilder *bld;

    cl = nmdCreate(&conf);
    EXPECT_NONNULL(cl);
    EXPECT_ZERO(nmdWaitClusterUp(cl));
    port = nmdGetNameNodePort(cl);
    if (port < 0) {
        fprintf(stderr, "TEST_ERROR: test_zerocopy: "
                "nmdGetNameNodePort returned error %d\n", port);
        return EXIT_FAILURE;
    }
    bld = hdfsNewBuilder();
    EXPECT_NONNULL(bld);
    EXPECT_ZERO(nmdConfigureHdfsBuilder(cl, bld));
    hdfsBuilderSetForceNewInstance(bld);
    /* one test block == one HDFS block, so zero-copy reads line up */
    hdfsBuilderConfSetStr(bld, "dfs.block.size",
                          TO_STR(TEST_ZEROCOPY_FULL_BLOCK_SIZE));
    /* ensure that we'll always get our mmaps */
    hdfsBuilderConfSetStr(bld, "dfs.client.read.shortcircuit.skip.checksum",
                          "true");
    fs = hdfsBuilderConnect(bld);
    EXPECT_NONNULL(fs);
    EXPECT_ZERO(createZeroCopyTestFile(fs, testFileName,
          TEST_FILE_NAME_LENGTH));
    EXPECT_ZERO(doTestZeroCopyReads(fs, testFileName));
    EXPECT_ZERO(hdfsDisconnect(fs));
    EXPECT_ZERO(nmdShutdown(cl));
    nmdFree(cl);
    fprintf(stderr, "TEST_SUCCESS\n"); 
    return EXIT_SUCCESS;
}
コード例 #24
0
/* Connects to the given namenode via the builder API and drains the given
 * file in 1 MiB reads (contents discarded). Returns 0 on success, 1 on any
 * failure. */
int main(int argc, char* argv[]) {
  if (argc < 4) {
    printf("usage: hdfs_get <name node address> <name node port> <input file>\n");
    return 1;
  }
  // Sleep for 100ms.
  usleep(100 * 1000);
  struct hdfsBuilder* hdfs_builder = hdfsNewBuilder();
  if (!hdfs_builder) {
    printf("Could not create HDFS builder");
    return 1;
  }
  hdfsBuilderSetNameNode(hdfs_builder, argv[1]);
  int port = atoi(argv[2]);
  hdfsBuilderSetNameNodePort(hdfs_builder, port);
  hdfsBuilderConfSetStr(hdfs_builder, "dfs.client.read.shortcircuit", "false");
  hdfsFS fs = hdfsBuilderConnect(hdfs_builder);
  hdfsFreeBuilder(hdfs_builder);
  if (!fs) {
    printf("Could not connect to HDFS");
    return 1;
  }

  hdfsFile file_in = hdfsOpenFile(fs, argv[3], O_RDONLY, 0, 0, 0);
  // fix: the original read through file_in without checking the open succeeded
  if (!file_in) {
    printf("Failed to open file: %s", hdfsGetLastError());
    hdfsDisconnect(fs);
    return 1;
  }
  char buffer[1048576];
  int done = 0;
  do {
    done = hdfsRead(fs, file_in, &buffer, 1048576);
  } while (done > 0);
  if (done < 0) {
    printf("Failed to read file: %s", hdfsGetLastError());
    // fix: close the file and drop the connection on the error path too
    hdfsCloseFile(fs, file_in);
    hdfsDisconnect(fs);
    return 1;
  }

  hdfsCloseFile(fs, file_in);
  hdfsDisconnect(fs);
  return 0;
}
コード例 #25
0
ファイル: fs_mapr.cpp プロジェクト: Minione/iwct
// Caller takes ownership of returned object and must delete it when done
google::protobuf::io::CodedInputStream*
MaprInputCodedBlockFile::CreateCodedStream(uint64 position, uint64 length) {
  CHECK(is_open_);

  // Seek to requested position (relative to start of file).
  CHECK_LT(position, size_);
  CHECK_LE(position+length, size_);

  // Starting with MapR V2.1.1, sometimes seek fails and the hdfs connection
  // must be reset in order to try again.  Not sure why this transient error
  // happens with MapR V2.1.1 but not earlier versions.
  bool success = false;
  for (int i=0; i < 10; ++i){
   if (hdfsSeek(fs_, file_, position) == 0){
     success = true;
     break;
   }
    //LOG(INFO) << "seek attempt failed: " << i;
    //LOG(INFO) << "path:" << path_ << "\n position: " << position << "\n length: " << length << "\n size: " << size_;  // success if returns 0
    CHECK_EQ(hdfsCloseFile(fs_, file_), 0);
    CHECK_EQ(hdfsDisconnect(fs_), 0);
    std::string host = "default";
    fs_ = hdfsConnect(host.c_str(), 0); // use default config file settings
    CHECK(fs_) << "error connecting to maprfs";
    file_ = hdfsOpenFile(fs_, path_.c_str(), O_RDONLY, 0, 0, 0);
    CHECK(file_ != NULL);
    sleep(2*i);
  }
  CHECK(success);

  // Create a coded stream (hold it in a scoped ptr to manage deleting).
  limiting_stream_.reset(NULL); // the destructor references the copying_stream_, so must destroy it before destroying it
  copying_stream_.reset(new google::protobuf::io::CopyingInputStreamAdaptor(copying_input_stream_.get()));
  limiting_stream_.reset(new google::protobuf::io::LimitingInputStream(copying_stream_.get(), length));
  return new google::protobuf::io::CodedInputStream(limiting_stream_.get());
}
コード例 #26
0
/*
 * pthread entry point: connects to the single-namenode test cluster, runs
 * the per-thread HDFS operation suite, and records the outcome in
 * ti->success (0 = pass, errno-style code = fail).
 */
static void *testHdfsOperations(void *v)
{
    struct tlhThreadInfo *ti = (struct tlhThreadInfo*)v;
    hdfsFS fs = NULL;
    int err;

    fprintf(stderr, "testHdfsOperations(threadIdx=%d): starting\n",
            ti->threadIdx);
    err = hdfsSingleNameNodeConnect(cluster, &fs);
    if (err) {
        fprintf(stderr, "testHdfsOperations(threadIdx=%d): "
                "hdfsSingleNameNodeConnect failed with error %d.\n",
                ti->threadIdx, err);
        ti->success = EIO;
        return NULL;
    }

    ti->success = doTestHdfsOperations(ti, fs);

    /* A failed disconnect overrides the suite result with its errno. */
    if (hdfsDisconnect(fs)) {
        err = errno;
        fprintf(stderr, "hdfsDisconnect error %d\n", err);
        ti->success = err;
    }
    return NULL;
}
コード例 #27
0
ファイル: hdfs_test.c プロジェクト: LefKok/upright
/*
 * libhdfs smoke test: exercises write/read, generic file-system operations
 * (copy/move/rename/mkdir/chown/chmod/utime/list/hosts), and finally
 * reconnecting as a specific user. Accumulates failures in totalResult and
 * returns -1 if anything failed, 0 otherwise.
 */
int main(int argc, char **argv) {

    hdfsFS fs = hdfsConnect("default", 0);
    if(!fs) {
        fprintf(stderr, "Oops! Failed to connect to hdfs!\n");
        exit(-1);
    } 
 
    /* second handle to the local filesystem (NULL host) for copy/move tests */
    hdfsFS lfs = hdfsConnect(NULL, 0);
    if(!lfs) {
        fprintf(stderr, "Oops! Failed to connect to 'local' hdfs!\n");
        exit(-1);
    } 
 
        const char* writePath = "/tmp/testfile.txt";
    {
        //Write tests
        
        
        hdfsFile writeFile = hdfsOpenFile(fs, writePath, O_WRONLY|O_CREAT, 0, 0, 0);
        if(!writeFile) {
            fprintf(stderr, "Failed to open %s for writing!\n", writePath);
            exit(-1);
        }
        fprintf(stderr, "Opened %s for writing successfully...\n", writePath);

        char* buffer = "Hello, World!";
        /* +1 writes the trailing NUL too */
        tSize num_written_bytes = hdfsWrite(fs, writeFile, (void*)buffer, strlen(buffer)+1);
        fprintf(stderr, "Wrote %d bytes\n", num_written_bytes);

        tOffset currentPos = -1;
        if ((currentPos = hdfsTell(fs, writeFile)) == -1) {
            fprintf(stderr, 
                    "Failed to get current file position correctly! Got %ld!\n",
                    currentPos);
            exit(-1);
        }
        fprintf(stderr, "Current position: %ld\n", currentPos);

        if (hdfsFlush(fs, writeFile)) {
            fprintf(stderr, "Failed to 'flush' %s\n", writePath); 
            exit(-1);
        }
        fprintf(stderr, "Flushed %s successfully!\n", writePath); 

        hdfsCloseFile(fs, writeFile);
    }

    {
        //Read tests
        
        const char* readPath = "/tmp/testfile.txt";
        /* hdfsExists returns 0 when the path exists */
        int exists = hdfsExists(fs, readPath);

        if (exists) {
          fprintf(stderr, "Failed to validate existence of %s\n", readPath);
          exit(-1);
        }

        hdfsFile readFile = hdfsOpenFile(fs, readPath, O_RDONLY, 0, 0, 0);
        if (!readFile) {
            fprintf(stderr, "Failed to open %s for reading!\n", readPath);
            exit(-1);
        }

        fprintf(stderr, "hdfsAvailable: %d\n", hdfsAvailable(fs, readFile));

        tOffset seekPos = 1;
        if(hdfsSeek(fs, readFile, seekPos)) {
            fprintf(stderr, "Failed to seek %s for reading!\n", readPath);
            exit(-1);
        }

        tOffset currentPos = -1;
        if((currentPos = hdfsTell(fs, readFile)) != seekPos) {
            fprintf(stderr, 
                    "Failed to get current file position correctly! Got %ld!\n", 
                    currentPos);
            exit(-1);
        }
        fprintf(stderr, "Current position: %ld\n", currentPos);

        static char buffer[32];
        /* sequential read from the seeked position */
        tSize num_read_bytes = hdfsRead(fs, readFile, (void*)buffer, 
                sizeof(buffer));
        fprintf(stderr, "Read following %d bytes:\n%s\n", 
                num_read_bytes, buffer);

        /* positional read from offset 0; does not move the file pointer */
        num_read_bytes = hdfsPread(fs, readFile, 0, (void*)buffer, 
                sizeof(buffer));
        fprintf(stderr, "Read following %d bytes:\n%s\n", 
                num_read_bytes, buffer);

        hdfsCloseFile(fs, readFile);
    }

    int totalResult = 0;
    int result = 0;
    {
        //Generic file-system operations

        const char* srcPath = "/tmp/testfile.txt";
        const char* dstPath = "/tmp/testfile2.txt";

        fprintf(stderr, "hdfsCopy(remote-local): %s\n", ((result = hdfsCopy(fs, srcPath, lfs, srcPath)) ? "Failed!" : "Success!"));
        totalResult += result;
        fprintf(stderr, "hdfsCopy(remote-remote): %s\n", ((result = hdfsCopy(fs, srcPath, fs, dstPath)) ? "Failed!" : "Success!"));
        totalResult += result;
        fprintf(stderr, "hdfsMove(local-local): %s\n", ((result = hdfsMove(lfs, srcPath, lfs, dstPath)) ? "Failed!" : "Success!"));
        totalResult += result;
        fprintf(stderr, "hdfsMove(remote-local): %s\n", ((result = hdfsMove(fs, srcPath, lfs, srcPath)) ? "Failed!" : "Success!"));
        totalResult += result;

        fprintf(stderr, "hdfsRename: %s\n", ((result = hdfsRename(fs, dstPath, srcPath)) ? "Failed!" : "Success!"));
        totalResult += result;
        fprintf(stderr, "hdfsCopy(remote-remote): %s\n", ((result = hdfsCopy(fs, srcPath, fs, dstPath)) ? "Failed!" : "Success!"));
        totalResult += result;

        const char* slashTmp = "/tmp";
        const char* newDirectory = "/tmp/newdir";
        fprintf(stderr, "hdfsCreateDirectory: %s\n", ((result = hdfsCreateDirectory(fs, newDirectory)) ? "Failed!" : "Success!"));
        totalResult += result;

        fprintf(stderr, "hdfsSetReplication: %s\n", ((result = hdfsSetReplication(fs, srcPath, 2)) ? "Failed!" : "Success!"));
        totalResult += result;

        char buffer[256];
        const char *resp;
        fprintf(stderr, "hdfsGetWorkingDirectory: %s\n", ((resp = hdfsGetWorkingDirectory(fs, buffer, sizeof(buffer))) ? buffer : "Failed!"));
        totalResult += (resp ? 0 : 1);
        fprintf(stderr, "hdfsSetWorkingDirectory: %s\n", ((result = hdfsSetWorkingDirectory(fs, slashTmp)) ? "Failed!" : "Success!"));
        totalResult += result;
        fprintf(stderr, "hdfsGetWorkingDirectory: %s\n", ((resp = hdfsGetWorkingDirectory(fs, buffer, sizeof(buffer))) ? buffer : "Failed!"));
        totalResult += (resp ? 0 : 1);

        fprintf(stderr, "hdfsGetDefaultBlockSize: %ld\n", hdfsGetDefaultBlockSize(fs));
        fprintf(stderr, "hdfsGetCapacity: %ld\n", hdfsGetCapacity(fs));
        fprintf(stderr, "hdfsGetUsed: %ld\n", hdfsGetUsed(fs));

        hdfsFileInfo *fileInfo = NULL;
        if((fileInfo = hdfsGetPathInfo(fs, slashTmp)) != NULL) {
            fprintf(stderr, "hdfsGetPathInfo - SUCCESS!\n");
            fprintf(stderr, "Name: %s, ", fileInfo->mName);
            fprintf(stderr, "Type: %c, ", (char)(fileInfo->mKind));
            fprintf(stderr, "Replication: %d, ", fileInfo->mReplication);
            fprintf(stderr, "BlockSize: %ld, ", fileInfo->mBlockSize);
            fprintf(stderr, "Size: %ld, ", fileInfo->mSize);
            fprintf(stderr, "LastMod: %s", ctime(&fileInfo->mLastMod)); 
            fprintf(stderr, "Owner: %s, ", fileInfo->mOwner);
            fprintf(stderr, "Group: %s, ", fileInfo->mGroup);
            char permissions[10];
            permission_disp(fileInfo->mPermissions, permissions);
            fprintf(stderr, "Permissions: %d (%s)\n", fileInfo->mPermissions, permissions);
            hdfsFreeFileInfo(fileInfo, 1);
        } else {
            totalResult++;
            fprintf(stderr, "waah! hdfsGetPathInfo for %s - FAILED!\n", slashTmp);
        }

        hdfsFileInfo *fileList = 0;
        int numEntries = 0;
        if((fileList = hdfsListDirectory(fs, slashTmp, &numEntries)) != NULL) {
            int i = 0;
            for(i=0; i < numEntries; ++i) {
                fprintf(stderr, "Name: %s, ", fileList[i].mName);
                fprintf(stderr, "Type: %c, ", (char)fileList[i].mKind);
                fprintf(stderr, "Replication: %d, ", fileList[i].mReplication);
                fprintf(stderr, "BlockSize: %ld, ", fileList[i].mBlockSize);
                fprintf(stderr, "Size: %ld, ", fileList[i].mSize);
                fprintf(stderr, "LastMod: %s", ctime(&fileList[i].mLastMod));
                fprintf(stderr, "Owner: %s, ", fileList[i].mOwner);
                fprintf(stderr, "Group: %s, ", fileList[i].mGroup);
                char permissions[10];
                permission_disp(fileList[i].mPermissions, permissions);
                fprintf(stderr, "Permissions: %d (%s)\n", fileList[i].mPermissions, permissions);
            }
            hdfsFreeFileInfo(fileList, numEntries);
        } else {
            /* NULL + errno==0 means an empty directory, not an error */
            if (errno) {
                totalResult++;
                fprintf(stderr, "waah! hdfsListDirectory - FAILED!\n");
            } else {
                fprintf(stderr, "Empty directory!\n");
            }
        }

        char*** hosts = hdfsGetHosts(fs, srcPath, 0, 1);
        if(hosts) {
            fprintf(stderr, "hdfsGetHosts - SUCCESS! ... \n");
            int i=0; 
            while(hosts[i]) {
                int j = 0;
                while(hosts[i][j]) {
                    fprintf(stderr, 
                            "\thosts[%d][%d] - %s\n", i, j, hosts[i][j]);
                    ++j;
                }
                ++i;
            }
        } else {
            totalResult++;
            fprintf(stderr, "waah! hdfsGetHosts - FAILED!\n");
        }
       
        char *newOwner = "root";
        // setting tmp dir to 777 so later when connectAsUser nobody, we can write to it
        // NOTE(review): the comment says 777 but the value is 0666 — /tmp
        // itself is chmod'd to 0777 further below; confirm this is intended.
        short newPerm = 0666;

        // chown write
        fprintf(stderr, "hdfsChown: %s\n", ((result = hdfsChown(fs, writePath, NULL, "users")) ? "Failed!" : "Success!"));
        totalResult += result;
        fprintf(stderr, "hdfsChown: %s\n", ((result = hdfsChown(fs, writePath, newOwner, NULL)) ? "Failed!" : "Success!"));
        totalResult += result;
        // chmod write
        fprintf(stderr, "hdfsChmod: %s\n", ((result = hdfsChmod(fs, writePath, newPerm)) ? "Failed!" : "Success!"));
        totalResult += result;



        /* ensure mtime set below differs from the original timestamp */
        sleep(2);
        tTime newMtime = time(NULL);
        tTime newAtime = time(NULL);

        // utime write
        fprintf(stderr, "hdfsUtime: %s\n", ((result = hdfsUtime(fs, writePath, newMtime, newAtime)) ? "Failed!" : "Success!"));

        totalResult += result;

        // chown/chmod/utime read
        // NOTE(review): finfo is used below without a NULL check; a failed
        // hdfsGetPathInfo here would crash the test.
        hdfsFileInfo *finfo = hdfsGetPathInfo(fs, writePath);

        fprintf(stderr, "hdfsChown read: %s\n", ((result = (strcmp(finfo->mOwner, newOwner) != 0)) ? "Failed!" : "Success!"));
        totalResult += result;

        fprintf(stderr, "hdfsChmod read: %s\n", ((result = (finfo->mPermissions != newPerm)) ? "Failed!" : "Success!"));
        totalResult += result;

        // will later use /tmp/ as a different user so enable it
        fprintf(stderr, "hdfsChmod: %s\n", ((result = hdfsChmod(fs, "/tmp/", 0777)) ? "Failed!" : "Success!"));
        totalResult += result;

        fprintf(stderr,"newMTime=%ld\n",newMtime);
        fprintf(stderr,"curMTime=%ld\n",finfo->mLastMod);


        fprintf(stderr, "hdfsUtime read (mtime): %s\n", ((result = (finfo->mLastMod != newMtime)) ? "Failed!" : "Success!"));
        totalResult += result;

        // No easy way to turn on access times from hdfs_test right now
        //        fprintf(stderr, "hdfsUtime read (atime): %s\n", ((result = (finfo->mLastAccess != newAtime)) ? "Failed!" : "Success!"));
        //        totalResult += result;

        hdfsFreeFileInfo(finfo, 1);

        // Clean up
        fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(fs, newDirectory)) ? "Failed!" : "Success!"));
        totalResult += result;
        fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(fs, srcPath)) ? "Failed!" : "Success!"));
        totalResult += result;
        fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(lfs, srcPath)) ? "Failed!" : "Success!"));
        totalResult += result;
        fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(lfs, dstPath)) ? "Failed!" : "Success!"));
        totalResult += result;
        /* inverted on purpose: the path was just deleted, so a nonzero
         * (not-found) return from hdfsExists is the success case here */
        fprintf(stderr, "hdfsExists: %s\n", ((result = hdfsExists(fs, newDirectory)) ? "Success!" : "Failed!"));
        totalResult += (result ? 0 : 1);
    }


    totalResult += (hdfsDisconnect(fs) != 0);

    {
      //
      // Now test as connecting as a specific user
      // This is only meant to test that we connected as that user, not to test
      // the actual fs user capabilities. Thus just create a file and read
      // the owner is correct.

      // NOTE(review): the user name appears redacted in this copy of the
      // source ("******"); confirm against the upstream hdfs_test.c.
      const char *tuser = "******";
      const char* writePath = "/tmp/usertestfile.txt";
      // NOTE(review): this groups array is never freed (test-scope leak).
      const char **groups =  (const char**)malloc(sizeof(char*)* 2);
      groups[0] = "users";
      groups[1] = "nobody";

      fs = hdfsConnectAsUser("default", 0, tuser, groups, 2);
      if(!fs) {
        fprintf(stderr, "Oops! Failed to connect to hdfs as user %s!\n",tuser);
        exit(-1);
      } 

        hdfsFile writeFile = hdfsOpenFile(fs, writePath, O_WRONLY|O_CREAT, 0, 0, 0);
        if(!writeFile) {
            fprintf(stderr, "Failed to open %s for writing!\n", writePath);
            exit(-1);
        }
        fprintf(stderr, "Opened %s for writing successfully...\n", writePath);

        char* buffer = "Hello, World!";
        tSize num_written_bytes = hdfsWrite(fs, writeFile, (void*)buffer, strlen(buffer)+1);
        fprintf(stderr, "Wrote %d bytes\n", num_written_bytes);

        if (hdfsFlush(fs, writeFile)) {
            fprintf(stderr, "Failed to 'flush' %s\n", writePath); 
            exit(-1);
        }
        fprintf(stderr, "Flushed %s successfully!\n", writePath); 

        hdfsCloseFile(fs, writeFile);

        hdfsFileInfo *finfo = hdfsGetPathInfo(fs, writePath);
        fprintf(stderr, "hdfs new file user is correct: %s\n", ((result = (strcmp(finfo->mOwner, tuser) != 0)) ? "Failed!" : "Success!"));
        totalResult += result;
    }
    
    totalResult += (hdfsDisconnect(fs) != 0);

    if (totalResult != 0) {
        return -1;
    } else {
        return 0;
    }
}
コード例 #28
0
ファイル: libhdfsconnector.hpp プロジェクト: JamesDeFabia/h2h
 // Destructor: tears down the HDFS connection (if one exists) and reports
 // the hdfsDisconnect return code on stderr.
 ~libhdfsconnector()
 {
     if (!fs)
         return;
     const int rc = hdfsDisconnect(fs);
     fprintf(stderr, "\nhdfsDisconnect returned: %d\n", rc);
 };
コード例 #29
0
ファイル: file_system.c プロジェクト: ssalevan/ruby-hdfs
/* Releases the HDFS connection held by `data`, if any; safe to call with
 * NULL or with an already-disconnected handle. */
void free_fs_data(FSData* data) {
  if (data == NULL || data->fs == NULL)
    return;
  hdfsDisconnect(data->fs);
  data->fs = NULL;  /* guard against a second disconnect */
}
コード例 #30
0
void HHDFSTableStats::disconnectHDFS()
{
  if (fs_)
    hdfsDisconnect(fs_);
  fs_ = NULL;
}