Example #1
int HdfsFileSystem::listDirectory(const char* pathname, std::list<std::string>& contents) const
{
	// clear the return list
	contents.clear();

	int numEntries;
	hdfsFileInfo* fileinfo;
	if( !exists( pathname ) ) {
		errno = ENOENT;
		return -1;
	}

	// hdfsListDirectory fails if the path does not exist, hence the exists() check above
	fileinfo = hdfsListDirectory(m_fs,pathname, &numEntries);
	for( int i = 0; i < numEntries && fileinfo; ++i )
	{
		// hdfs returns a fully specified path name but we want to
		// only return paths relative to the directory passed in.
		boost::filesystem::path filepath( fileinfo[i].mName );
		contents.push_back( filepath.filename().c_str() );
	}
	if( fileinfo )
		hdfsFreeFileInfo(fileinfo, numEntries);

	return 0;
}
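
Note: all of the examples here rely on the same libhdfs calling contract, sketched below (fs and path are placeholders). hdfsListDirectory returns a malloc'd array of hdfsFileInfo that must be released with hdfsFreeFileInfo, and a NULL return can mean either a failure or, in some libhdfs versions, an empty directory; errno distinguishes the two cases (Examples #17 and #19 below test exactly this behavior).

#include <errno.h>
#include <stdio.h>
#include "hdfs.h"

static int list_with_error_check(hdfsFS fs, const char *path)
{
	int numEntries = 0;
	errno = 0;  /* clear errno so a NULL return is interpretable */
	hdfsFileInfo *infos = hdfsListDirectory(fs, path, &numEntries);
	if (infos == NULL) {
		if (errno != 0)
			return -1;  /* genuine failure, e.g. ENOENT */
		return 0;       /* empty directory on versions that return NULL here */
	}
	for (int i = 0; i < numEntries; ++i)
		printf("%s\n", infos[i].mName);
	hdfsFreeFileInfo(infos, numEntries);  /* always pair with the returned array */
	return 0;
}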
Example #2
File: fs_mapr.cpp Project: Minione/iwct
bool MaprFileSystem::ListDirectory(const std::string& uri, std::vector<std::string>* contents){
  CHECK(contents);
  contents->clear();
  std::string path = GetUriPathOrDie(uri);
  std::string host = "default";
  hdfsFS fs = hdfsConnect(host.c_str(), 0); // use default config file settings
  int num_entries = 0; // initialize: a failed listing may leave this untouched
  hdfsFileInfo* entries = hdfsListDirectory(fs, path.c_str(), &num_entries);
  hdfsFileInfo* cur_entry = entries;
  for (int i=0; i < num_entries; ++i) {
    // hdfsListDirectory sometimes returns paths with the scheme prefix and
    // sometimes without it, so normalize: we always output URIs with a
    // proper scheme prefix.
    std::string cur_scheme, cur_path, error;
    if (ParseUri(cur_entry->mName, &cur_scheme, &cur_path, &error)){
      CHECK_EQ(cur_scheme, "maprfs"); // if it has a scheme prefix, make sure it is maprfs as expected
    }
    else{
      // this doesn't have a uri scheme prefix, so assume it is just the path portion
      cur_path = cur_entry->mName;
    }

    contents->push_back(Uri("maprfs", cur_path));

    cur_entry++;
  }
  if (entries) {
    hdfsFreeFileInfo(entries, num_entries);
  }
  CHECK_EQ(hdfsDisconnect(fs), 0);
  return true;
}
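
A hedged sketch of the normalization the comments above describe: accept a name with or without the scheme prefix and always emit a scheme-qualified URI (the helper name and buffer handling are made up for illustration; the project's ParseUri/Uri helpers do the real work):

#include <stdio.h>
#include <string.h>

/* write "maprfs://<path>" into out, whether or not name already has the scheme */
static void normalize_uri(const char *name, char *out, size_t outlen)
{
    const char *scheme = "maprfs://";
    if (strncmp(name, scheme, strlen(scheme)) == 0)
        snprintf(out, outlen, "%s", name);           /* already scheme-qualified */
    else
        snprintf(out, outlen, "%s%s", scheme, name); /* bare path: prepend scheme */
}

int main(void)
{
    char out[256];
    normalize_uri("/user/data", out, sizeof(out));
    printf("%s\n", out);  /* maprfs:///user/data */
    return 0;
}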
Example #3
bs_file_info_t *hdfs_list_dir(struct back_storage *storage, \
		const char *dir_path, uint32_t *num_entries){
	HLOG_DEBUG("hdfs -- enter func %s", __func__);
	char full_path[256];
	build_hdfs_path(full_path, storage->dir, storage->fs_name, dir_path);
	int num;
	hdfsFileInfo *hinfos = \
			       hdfsListDirectory((hdfsFS)storage->fs_handler, full_path, &num);
	if (NULL == hinfos) {
		//HLOG_ERROR("hdfsListDirectory error");
		return NULL; 
	}
	hdfsFileInfo *hinfo = hinfos;
	/* note: fixed-size table, assumes at most 8192 directory entries */
	bs_file_info_t *infos =
			(bs_file_info_t *)g_malloc0(sizeof(bs_file_info_t) * 8192);
	if (NULL == infos) {
		//HLOG_ERROR("Allocate Error!");
		hdfsFreeFileInfo(hinfos, num); /* don't leak the listing on failure */
		return NULL;
	}
	bs_file_info_t *info = infos;
	int i;
	for (i = 0; i < num; i++) {
		/* g_path_get_basename() returns a newly allocated string;
		 * copy it into the entry, then free it to avoid a leak */
		gchar *basename = g_path_get_basename(hinfo->mName);
		strcpy((char *)info->name, basename);
		g_free(basename);
		info->is_dir = (hinfo->mKind == kObjectKindDirectory) ? 1 : 0;
		info->size = hinfo->mSize;
		info->lmtime = hinfo->mLastMod;
		info++;
		hinfo++;
	}
	hdfsFreeFileInfo(hinfos, num);
	*num_entries = num;
	HLOG_DEBUG("hdfs -- leave func %s", __func__);
	return infos;
}
Example #4
block_id_counter FileManagerHdfs::getMaxUsedBlockCounter(const block_id_domain block_domain) const {
  int num_files = 0;
  hdfsFileInfo *file_infos = hdfsListDirectory(hdfs_, storage_path_.c_str(), &num_files);
  if (file_infos == nullptr) {
    if (errno != ENOENT) {
      LOG_WARNING("Failed to list file info with error: " << strerror(errno));
    }
    return 0;
  }

  string filename_pattern("/qsblk_");
  filename_pattern.append(ToZeroPaddedString(block_domain, kBlockIdDomainLengthInDigits));
  filename_pattern.append("_%");
  filename_pattern.append(SCNu64);
  filename_pattern.append(".qsb");

  block_id_counter counter_max = 0, counter;
  for (int i = 0; i < num_files; ++i) {
    // NOTE(zuyu): mName looks like
    // "/user/<username>/<storage_path_>/qsblk_<block_domain>_[0-9]*.qsb".
    const char *filename = std::strrchr(file_infos[i].mName, '/');
    if (filename != nullptr
        && sscanf(filename, filename_pattern.c_str(), &counter) == 1
        && counter > counter_max) {
      counter_max = counter;
    }
  }

  hdfsFreeFileInfo(file_infos, num_files);

  return counter_max;
}
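
For illustration, a standalone sketch of the filename matching getMaxUsedBlockCounter performs (the domain value 3 and the 4-digit zero padding below are made up; Quickstep's actual kBlockIdDomainLengthInDigits may differ). The assembled pattern is a scanf format like "/qsblk_0003_%" SCNu64 ".qsb", applied to the basename found with strrchr:

#include <inttypes.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
  const char *pattern = "/qsblk_0003_%" SCNu64 ".qsb";     /* hypothetical domain 3 */
  const char *name = "/user/alice/storage/qsblk_0003_000042.qsb";
  const char *filename = strrchr(name, '/');               /* -> "/qsblk_0003_000042.qsb" */
  uint64_t counter = 0;
  if (filename != NULL && sscanf(filename, pattern, &counter) == 1)
    printf("counter = %" PRIu64 "\n", counter);            /* prints counter = 42 */
  return 0;
}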
Example #5
void Hdfs3Glob(const std::string& _path, const GlobType& gtype,
               FileList& filelist) {

    std::string path = _path;
    // crop off hdfs://
    die_unless(common::StartsWith(path, "hdfs://"));
    path = path.substr(7);

    // split uri into host/path
    std::vector<std::string> splitted = common::Split(path, '/', 2);

    hdfsFS fs = Hdfs3FindConnection(splitted[0]);
    std::string hosturi = "hdfs://" + splitted[0];

    // prepend root /
    splitted[1] = "/" + splitted[1];

    // list directory
    int num_entries = 0;
    hdfsFileInfo* list = hdfsListDirectory(
        fs, splitted[1].c_str(), &num_entries);

    if (!list) return;

    for (int i = 0; i < num_entries; ++i) {
        FileInfo fi;

        fi.path = list[i].mName;
        // collapse duplicate leading slashes into a single '/'
        while (fi.path.size() >= 2 && fi.path[0] == '/' && fi.path[1] == '/')
            fi.path.erase(fi.path.begin(), fi.path.begin() + 1);
        // prepend host uri
        fi.path = hosturi + fi.path;

        if (list[i].mKind == kObjectKindFile) {
            if (gtype == GlobType::All || gtype == GlobType::File) {
                // oddly, a glob for a full file name returns the file with a
                // trailing '/', so strip it.
                while (fi.path.back() == '/')
                    fi.path.resize(fi.path.size() - 1);
                fi.type = Type::File;
                fi.size = list[i].mSize;
                filelist.emplace_back(fi);
            }
        }
        else if (list[i].mKind == kObjectKindDirectory) {
            if (gtype == GlobType::All || gtype == GlobType::Directory) {
                fi.type = Type::Directory;
                fi.size = list[i].mSize;
                filelist.emplace_back(fi);
            }
        }
    }

    hdfsFreeFileInfo(list, num_entries);
}
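
A rough plain-C equivalent of the host/path split that Hdfs3Glob performs with common::Split (the URI below is hypothetical):

#include <stdio.h>
#include <string.h>

int main(void)
{
    const char *uri = "hdfs://host:8020/dir/file.txt";  /* hypothetical input */
    const char *rest = uri + strlen("hdfs://");         /* "host:8020/dir/file.txt" */
    const char *slash = strchr(rest, '/');              /* first '/' ends the host part */
    if (slash == NULL)
        return 1;                                       /* no path component */
    printf("host = %.*s\n", (int)(slash - rest), rest); /* host = host:8020 */
    printf("path = %s\n", slash);                       /* path = /dir/file.txt */
    return 0;
}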
Example #6
  void dir_cpi_impl::sync_list (std::vector <saga::url> & list, 
                                std::string               pattern, 
                                int                       flags)
  {
     instance_data idata(this);
     int size = 0;
     hdfsFileInfo *results;

     results = hdfsListDirectory(fs_, idata->location_.get_path().c_str(), &size);
     if(results == NULL)
     {
        SAGA_ADAPTOR_THROW ("List error", saga::NoSuccess);
     }
     for(int counter = 0; counter < size; counter++)
     {
        std::string string(results[counter].mName);
        list.push_back(saga::url(string));
     }
     hdfsFreeFileInfo(results, size);
  }
Example #7
NABoolean HHDFSListPartitionStats::populate(hdfsFS fs,
                                            const NAString &dir,
                                            Int32 numOfBuckets, 
                                            NABoolean doEstimation,
                                            char recordTerminator,
                                            NABoolean isSequenceFile)
{
  NABoolean result = TRUE;
  int numFiles = 0;

  // remember parameters
  partitionDir_     = dir;
  defaultBucketIdx_ = (numOfBuckets >= 1) ? numOfBuckets : 0;
  doEstimation_     = doEstimation;
  recordTerminator_ = recordTerminator;
  isSequenceFile_   = isSequenceFile;

  // list all the files in this directory, they all belong
  // to this partition and either belong to a specific bucket
  // or to the default bucket
  hdfsFileInfo *fileInfos = hdfsListDirectory(fs,
                                              dir.data(),
                                              &numFiles);

  // populate partition stats
  for (int f=0; f<numFiles && result; f++)
    if (fileInfos[f].mKind == kObjectKindFile)
      {
        // the default (unbucketed) bucket number is
        // defaultBucketIdx_
        Int32 bucketNum = determineBucketNum(fileInfos[f].mName);
        HHDFSBucketStats *bucketStats = NULL;

        if (! bucketStatsList_.used(bucketNum))
          {
            bucketStats = new(heap_) HHDFSBucketStats(heap_);
            bucketStatsList_.insertAt(bucketNum, bucketStats);
          }
        else
          bucketStats = bucketStatsList_[bucketNum];

        if (! bucketStats->addFile(fs, &fileInfos[f], doEstimation, recordTerminator, isSequenceFile))
          result = FALSE;
      }

  hdfsFreeFileInfo(fileInfos, numFiles);

  // aggregate statistics over all buckets
  for (Int32 b=0; b<=defaultBucketIdx_; b++)
    if (bucketStatsList_.used(b))
      add(bucketStatsList_[b]);

  return result;
}
Example #8
 inline std::vector<std::string> list_files(const std::string& path) {
   int num_files = 0;
   hdfsFileInfo* hdfs_file_list_ptr = 
     hdfsListDirectory(filesystem, path.c_str(), &num_files);
   // copy the file list to the string array
   std::vector<std::string> files(num_files);
   for(int i = 0; i < num_files; ++i) 
     files[i] = std::string(hdfs_file_list_ptr[i].mName);
   // free the file list pointer
   hdfsFreeFileInfo(hdfs_file_list_ptr, num_files);
   return files;
 } // end of list_files
Example #9
int dfs_rmdir(const char *path)
{
  struct hdfsConn *conn = NULL;
  hdfsFS fs;
  int ret;
  dfs_context *dfs = (dfs_context*)fuse_get_context()->private_data;
  int numEntries = 0;
  hdfsFileInfo *info = NULL;

  TRACE1("rmdir", path)

  assert(path);
  assert(dfs);
  assert('/' == *path);

  if (is_protected(path)) {
    ERROR("Trying to delete protected directory %s", path);
    ret = -EACCES;
    goto cleanup;
  }

  ret = fuseConnectAsThreadUid(&conn);
  if (ret) {
    fprintf(stderr, "fuseConnectAsThreadUid: failed to open a libhdfs "
            "connection!  error %d.\n", ret);
    ret = -EIO;
    goto cleanup;
  }
  fs = hdfsConnGetFs(conn);
  info = hdfsListDirectory(fs, path, &numEntries);
  if (numEntries) {
    ret = -ENOTEMPTY;
    goto cleanup;
  }

  if (hdfsDeleteWithTrash(fs, path, dfs->usetrash)) {
    ERROR("Error trying to delete directory %s", path);
    ret = -EIO;
    goto cleanup;
  }
  ret = 0;

cleanup:
  if (info) {
    hdfsFreeFileInfo(info, numEntries);
  }
  if (conn) {
    hdfsConnRelease(conn);
  }
  return ret;
}
Example #10
int dfs_getattr(const char *path, struct stat *st)
{
  TRACE1("getattr", path)

  // retrieve dfs specific data
  dfs_context *dfs = (dfs_context*)fuse_get_context()->private_data;

  // check params and the context var
  assert(dfs);
  assert(path);
  assert(st);

  // if not connected, try to connect and fail out if we can't.
  if (NULL == dfs->fs && NULL == (dfs->fs = hdfsConnect(dfs->nn_hostname,dfs->nn_port))) {
    syslog(LOG_ERR, "ERROR: could not connect to %s:%d %s:%d\n", dfs->nn_hostname, dfs->nn_port,__FILE__, __LINE__);
    return -EIO;
  }

  // call the dfs API to get the actual information
  hdfsFileInfo *info = hdfsGetPathInfo(dfs->fs,path);

  if (NULL == info) {
    return -ENOENT;
  }

  fill_stat_structure(&info[0], st);

  // setup hard link info - for a file it is 1 else num entries in a dir + 2 (for . and ..)
  if (info[0].mKind == kObjectKindDirectory) {
    int numEntries = 0;
    hdfsFileInfo *dirList = hdfsListDirectory(dfs->fs, path, &numEntries);

    if (dirList) {
      hdfsFreeFileInfo(dirList, numEntries);
    }
    st->st_nlink = numEntries + 2;
  } else {
    // not a directory
    st->st_nlink = 1;
  }

  // free the info pointer
  hdfsFreeFileInfo(info,1);

  return 0;
}
Example #11
File: fs.c Project: HsuJv/Note
int dfsList(const char* path){
    hdfsFS fs = hdfsConnect("default", 0);
    int i, entries;
    hdfsFileInfo *files, *head;

    /* Get the list info */
    files = hdfsListDirectory(fs, path, &entries);
    if (!files){
        perror("Get directory info");
        exit(-1);
    }
    head = files;

    /* Print the info */
    fprintf(stdout, "%s %-50s %-9s %s\n",
            "Kind", "Name", "Size", "Replicas");

    for (i = 0; i < entries; i++){
        const char* unit[] = {" B", "KB", "MB", "GB", "TB", "PB"};
        double size = files->mSize;
        unsigned int u = 0;

        while (size > 1024){
            u++;
            size /= 1024;
        }

        assert(u < 6);

        fprintf(stdout, "%4c %-50s %-7.2lf%s %8d\n", 
                files->mKind, files->mName,
                size, unit[u],
                files->mReplication);

        files += 1;
    }

    /* List ends */
    hdfsFreeFileInfo(head, entries);
    hdfsDisconnect(fs);
    
    return 0;
}
Example #12
/**
 * call-seq:
 *    hdfs.ls(path) -> file_infos
 *
 * Lists the directory at the supplied path, returning an Array of
 * HDFS::FileInfo objects.  If this fails, raises a DFSException.
 */
VALUE HDFS_File_System_ls(VALUE self, VALUE path) {
  FSData* data = get_FSData(self);
  VALUE file_infos = rb_ary_new();
  int num_files = -1;
  hdfsFileInfo* infos = hdfsListDirectory(data->fs, StringValuePtr(path),
      &num_files);
  if (infos == NULL && num_files == -1) {
    rb_raise(e_dfs_exception, "Failed to list directory %s: %s",
        StringValuePtr(path), get_error(errno));
    return Qnil;
  }
  int i;
  for (i = 0; i < num_files; i++) {
    hdfsFileInfo* cur_info = infos + i;
    rb_ary_push(file_infos, new_HDFS_File_Info(cur_info));
  }
  hdfsFreeFileInfo(infos, num_files);
  return file_infos;
}
Example #13
File: Hdfs.cpp Project: Nanonid/hootenanny
std::vector<FileStatus> Hdfs::listStatus(string path, const bool sortByPath)
{
  if (exists(path) == false)
  {
    throw ios_base::failure("Error retrieving status on non-existent path (" + path + ")");
  }
  std::vector<FileStatus> result;

  int numEntries;
  hdfsFileInfo* fis = hdfsListDirectory(_getFs(), path.data(), &numEntries);

  if (fis == NULL)
  {
    throw ios_base::failure("Error listing directory contents. (" + path + ")");
  }

  result.resize(numEntries);
  for (int i = 0; i < numEntries; i++)
  {
    hdfsFileInfo* fi = fis + i;
    FileStatus& r = result[i];
    r._setAccessTime(fi->mLastAccess);
    r._setModificationTime(fi->mLastMod);
    r._setLen(fi->mSize);
    r._setKind(fi->mKind);
    r._setPath(fi->mName);
  }

  hdfsFreeFileInfo(fis, numEntries);

  if (sortByPath)
  {
    sort(result.begin(), result.end(), _fileStatusPathCompare);
  }

  return result;
}
Example #14
File: hdfs_test.c Project: LefKok/upright
int main(int argc, char **argv) {

    hdfsFS fs = hdfsConnect("default", 0);
    if(!fs) {
        fprintf(stderr, "Oops! Failed to connect to hdfs!\n");
        exit(-1);
    } 
 
    hdfsFS lfs = hdfsConnect(NULL, 0);
    if(!lfs) {
        fprintf(stderr, "Oops! Failed to connect to 'local' hdfs!\n");
        exit(-1);
    } 
 
    const char* writePath = "/tmp/testfile.txt";
    {
        //Write tests
        
        
        hdfsFile writeFile = hdfsOpenFile(fs, writePath, O_WRONLY|O_CREAT, 0, 0, 0);
        if(!writeFile) {
            fprintf(stderr, "Failed to open %s for writing!\n", writePath);
            exit(-1);
        }
        fprintf(stderr, "Opened %s for writing successfully...\n", writePath);

        char* buffer = "Hello, World!";
        tSize num_written_bytes = hdfsWrite(fs, writeFile, (void*)buffer, strlen(buffer)+1);
        fprintf(stderr, "Wrote %d bytes\n", num_written_bytes);

        tOffset currentPos = -1;
        if ((currentPos = hdfsTell(fs, writeFile)) == -1) {
            fprintf(stderr, 
                    "Failed to get current file position correctly! Got %ld!\n",
                    currentPos);
            exit(-1);
        }
        fprintf(stderr, "Current position: %ld\n", currentPos);

        if (hdfsFlush(fs, writeFile)) {
            fprintf(stderr, "Failed to 'flush' %s\n", writePath); 
            exit(-1);
        }
        fprintf(stderr, "Flushed %s successfully!\n", writePath); 

        hdfsCloseFile(fs, writeFile);
    }

    {
        //Read tests
        
        const char* readPath = "/tmp/testfile.txt";
        int exists = hdfsExists(fs, readPath);

        if (exists) {
          fprintf(stderr, "Failed to validate existence of %s\n", readPath);
          exit(-1);
        }

        hdfsFile readFile = hdfsOpenFile(fs, readPath, O_RDONLY, 0, 0, 0);
        if (!readFile) {
            fprintf(stderr, "Failed to open %s for reading!\n", readPath);
            exit(-1);
        }

        fprintf(stderr, "hdfsAvailable: %d\n", hdfsAvailable(fs, readFile));

        tOffset seekPos = 1;
        if(hdfsSeek(fs, readFile, seekPos)) {
            fprintf(stderr, "Failed to seek %s for reading!\n", readPath);
            exit(-1);
        }

        tOffset currentPos = -1;
        if((currentPos = hdfsTell(fs, readFile)) != seekPos) {
            fprintf(stderr, 
                    "Failed to get current file position correctly! Got %ld!\n", 
                    currentPos);
            exit(-1);
        }
        fprintf(stderr, "Current position: %ld\n", currentPos);

        static char buffer[32];
        tSize num_read_bytes = hdfsRead(fs, readFile, (void*)buffer, 
                sizeof(buffer));
        fprintf(stderr, "Read following %d bytes:\n%s\n", 
                num_read_bytes, buffer);

        num_read_bytes = hdfsPread(fs, readFile, 0, (void*)buffer, 
                sizeof(buffer));
        fprintf(stderr, "Read following %d bytes:\n%s\n", 
                num_read_bytes, buffer);

        hdfsCloseFile(fs, readFile);
    }

    int totalResult = 0;
    int result = 0;
    {
        //Generic file-system operations

        const char* srcPath = "/tmp/testfile.txt";
        const char* dstPath = "/tmp/testfile2.txt";

        fprintf(stderr, "hdfsCopy(remote-local): %s\n", ((result = hdfsCopy(fs, srcPath, lfs, srcPath)) ? "Failed!" : "Success!"));
        totalResult += result;
        fprintf(stderr, "hdfsCopy(remote-remote): %s\n", ((result = hdfsCopy(fs, srcPath, fs, dstPath)) ? "Failed!" : "Success!"));
        totalResult += result;
        fprintf(stderr, "hdfsMove(local-local): %s\n", ((result = hdfsMove(lfs, srcPath, lfs, dstPath)) ? "Failed!" : "Success!"));
        totalResult += result;
        fprintf(stderr, "hdfsMove(remote-local): %s\n", ((result = hdfsMove(fs, srcPath, lfs, srcPath)) ? "Failed!" : "Success!"));
        totalResult += result;

        fprintf(stderr, "hdfsRename: %s\n", ((result = hdfsRename(fs, dstPath, srcPath)) ? "Failed!" : "Success!"));
        totalResult += result;
        fprintf(stderr, "hdfsCopy(remote-remote): %s\n", ((result = hdfsCopy(fs, srcPath, fs, dstPath)) ? "Failed!" : "Success!"));
        totalResult += result;

        const char* slashTmp = "/tmp";
        const char* newDirectory = "/tmp/newdir";
        fprintf(stderr, "hdfsCreateDirectory: %s\n", ((result = hdfsCreateDirectory(fs, newDirectory)) ? "Failed!" : "Success!"));
        totalResult += result;

        fprintf(stderr, "hdfsSetReplication: %s\n", ((result = hdfsSetReplication(fs, srcPath, 2)) ? "Failed!" : "Success!"));
        totalResult += result;

        char buffer[256];
        const char *resp;
        fprintf(stderr, "hdfsGetWorkingDirectory: %s\n", ((resp = hdfsGetWorkingDirectory(fs, buffer, sizeof(buffer))) ? buffer : "Failed!"));
        totalResult += (resp ? 0 : 1);
        fprintf(stderr, "hdfsSetWorkingDirectory: %s\n", ((result = hdfsSetWorkingDirectory(fs, slashTmp)) ? "Failed!" : "Success!"));
        totalResult += result;
        fprintf(stderr, "hdfsGetWorkingDirectory: %s\n", ((resp = hdfsGetWorkingDirectory(fs, buffer, sizeof(buffer))) ? buffer : "Failed!"));
        totalResult += (resp ? 0 : 1);

        fprintf(stderr, "hdfsGetDefaultBlockSize: %ld\n", hdfsGetDefaultBlockSize(fs));
        fprintf(stderr, "hdfsGetCapacity: %ld\n", hdfsGetCapacity(fs));
        fprintf(stderr, "hdfsGetUsed: %ld\n", hdfsGetUsed(fs));

        hdfsFileInfo *fileInfo = NULL;
        if((fileInfo = hdfsGetPathInfo(fs, slashTmp)) != NULL) {
            fprintf(stderr, "hdfsGetPathInfo - SUCCESS!\n");
            fprintf(stderr, "Name: %s, ", fileInfo->mName);
            fprintf(stderr, "Type: %c, ", (char)(fileInfo->mKind));
            fprintf(stderr, "Replication: %d, ", fileInfo->mReplication);
            fprintf(stderr, "BlockSize: %ld, ", fileInfo->mBlockSize);
            fprintf(stderr, "Size: %ld, ", fileInfo->mSize);
            fprintf(stderr, "LastMod: %s", ctime(&fileInfo->mLastMod)); 
            fprintf(stderr, "Owner: %s, ", fileInfo->mOwner);
            fprintf(stderr, "Group: %s, ", fileInfo->mGroup);
            char permissions[10];
            permission_disp(fileInfo->mPermissions, permissions);
            fprintf(stderr, "Permissions: %d (%s)\n", fileInfo->mPermissions, permissions);
            hdfsFreeFileInfo(fileInfo, 1);
        } else {
            totalResult++;
            fprintf(stderr, "waah! hdfsGetPathInfo for %s - FAILED!\n", slashTmp);
        }

        hdfsFileInfo *fileList = 0;
        int numEntries = 0;
        if((fileList = hdfsListDirectory(fs, slashTmp, &numEntries)) != NULL) {
            int i = 0;
            for(i=0; i < numEntries; ++i) {
                fprintf(stderr, "Name: %s, ", fileList[i].mName);
                fprintf(stderr, "Type: %c, ", (char)fileList[i].mKind);
                fprintf(stderr, "Replication: %d, ", fileList[i].mReplication);
                fprintf(stderr, "BlockSize: %ld, ", fileList[i].mBlockSize);
                fprintf(stderr, "Size: %ld, ", fileList[i].mSize);
                fprintf(stderr, "LastMod: %s", ctime(&fileList[i].mLastMod));
                fprintf(stderr, "Owner: %s, ", fileList[i].mOwner);
                fprintf(stderr, "Group: %s, ", fileList[i].mGroup);
                char permissions[10];
                permission_disp(fileList[i].mPermissions, permissions);
                fprintf(stderr, "Permissions: %d (%s)\n", fileList[i].mPermissions, permissions);
            }
            hdfsFreeFileInfo(fileList, numEntries);
        } else {
            if (errno) {
                totalResult++;
                fprintf(stderr, "waah! hdfsListDirectory - FAILED!\n");
            } else {
                fprintf(stderr, "Empty directory!\n");
            }
        }

        char*** hosts = hdfsGetHosts(fs, srcPath, 0, 1);
        if(hosts) {
            fprintf(stderr, "hdfsGetHosts - SUCCESS! ... \n");
            int i=0; 
            while(hosts[i]) {
                int j = 0;
                while(hosts[i][j]) {
                    fprintf(stderr, 
                            "\thosts[%d][%d] - %s\n", i, j, hosts[i][j]);
                    ++j;
                }
                ++i;
            }
        } else {
            totalResult++;
            fprintf(stderr, "waah! hdfsGetHosts - FAILED!\n");
        }
       
        char *newOwner = "root";
        // setting tmp dir to 777 so later when connectAsUser nobody, we can write to it
        short newPerm = 0666;

        // chown write
        fprintf(stderr, "hdfsChown: %s\n", ((result = hdfsChown(fs, writePath, NULL, "users")) ? "Failed!" : "Success!"));
        totalResult += result;
        fprintf(stderr, "hdfsChown: %s\n", ((result = hdfsChown(fs, writePath, newOwner, NULL)) ? "Failed!" : "Success!"));
        totalResult += result;
        // chmod write
        fprintf(stderr, "hdfsChmod: %s\n", ((result = hdfsChmod(fs, writePath, newPerm)) ? "Failed!" : "Success!"));
        totalResult += result;



        sleep(2);
        tTime newMtime = time(NULL);
        tTime newAtime = time(NULL);

        // utime write
        fprintf(stderr, "hdfsUtime: %s\n", ((result = hdfsUtime(fs, writePath, newMtime, newAtime)) ? "Failed!" : "Success!"));

        totalResult += result;

        // chown/chmod/utime read
        hdfsFileInfo *finfo = hdfsGetPathInfo(fs, writePath);

        fprintf(stderr, "hdfsChown read: %s\n", ((result = (strcmp(finfo->mOwner, newOwner) != 0)) ? "Failed!" : "Success!"));
        totalResult += result;

        fprintf(stderr, "hdfsChmod read: %s\n", ((result = (finfo->mPermissions != newPerm)) ? "Failed!" : "Success!"));
        totalResult += result;

        // will later use /tmp/ as a different user so enable it
        fprintf(stderr, "hdfsChmod: %s\n", ((result = hdfsChmod(fs, "/tmp/", 0777)) ? "Failed!" : "Success!"));
        totalResult += result;

        fprintf(stderr,"newMTime=%ld\n",newMtime);
        fprintf(stderr,"curMTime=%ld\n",finfo->mLastMod);


        fprintf(stderr, "hdfsUtime read (mtime): %s\n", ((result = (finfo->mLastMod != newMtime)) ? "Failed!" : "Success!"));
        totalResult += result;

        // No easy way to turn on access times from hdfs_test right now
        //        fprintf(stderr, "hdfsUtime read (atime): %s\n", ((result = (finfo->mLastAccess != newAtime)) ? "Failed!" : "Success!"));
        //        totalResult += result;

        hdfsFreeFileInfo(finfo, 1);

        // Clean up
        fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(fs, newDirectory)) ? "Failed!" : "Success!"));
        totalResult += result;
        fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(fs, srcPath)) ? "Failed!" : "Success!"));
        totalResult += result;
        fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(lfs, srcPath)) ? "Failed!" : "Success!"));
        totalResult += result;
        fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(lfs, dstPath)) ? "Failed!" : "Success!"));
        totalResult += result;
        fprintf(stderr, "hdfsExists: %s\n", ((result = hdfsExists(fs, newDirectory)) ? "Success!" : "Failed!"));
        totalResult += (result ? 0 : 1);
    }


    totalResult += (hdfsDisconnect(fs) != 0);

    {
      //
      // Now test as connecting as a specific user
      // This is only meant to test that we connected as that user, not to test
      // the actual fs user capabilities. Thus just create a file and read
      // the owner is correct.

      const char *tuser = "******";
      const char* writePath = "/tmp/usertestfile.txt";
      const char **groups =  (const char**)malloc(sizeof(char*)* 2);
      groups[0] = "users";
      groups[1] = "nobody";

      fs = hdfsConnectAsUser("default", 0, tuser, groups, 2);
      if(!fs) {
        fprintf(stderr, "Oops! Failed to connect to hdfs as user %s!\n",tuser);
        exit(-1);
      } 

        hdfsFile writeFile = hdfsOpenFile(fs, writePath, O_WRONLY|O_CREAT, 0, 0, 0);
        if(!writeFile) {
            fprintf(stderr, "Failed to open %s for writing!\n", writePath);
            exit(-1);
        }
        fprintf(stderr, "Opened %s for writing successfully...\n", writePath);

        char* buffer = "Hello, World!";
        tSize num_written_bytes = hdfsWrite(fs, writeFile, (void*)buffer, strlen(buffer)+1);
        fprintf(stderr, "Wrote %d bytes\n", num_written_bytes);

        if (hdfsFlush(fs, writeFile)) {
            fprintf(stderr, "Failed to 'flush' %s\n", writePath); 
            exit(-1);
        }
        fprintf(stderr, "Flushed %s successfully!\n", writePath); 

        hdfsCloseFile(fs, writeFile);

        hdfsFileInfo *finfo = hdfsGetPathInfo(fs, writePath);
        fprintf(stderr, "hdfs new file user is correct: %s\n", ((result = (strcmp(finfo->mOwner, tuser) != 0)) ? "Failed!" : "Success!"));
        totalResult += result;
    }
    
    totalResult += (hdfsDisconnect(fs) != 0);

    if (totalResult != 0) {
        return -1;
    } else {
        return 0;
    }
}
Example #15
void HHDFSListPartitionStats::populate(hdfsFS fs,
                                       const NAString &dir,
                                       Int32 numOfBuckets,
                                       HHDFSDiags &diags,
                                       NABoolean doEstimation,
                                       char recordTerminator)
{
  int numFiles = 0;

  // remember parameters
  partitionDir_     = dir;
  defaultBucketIdx_ = (numOfBuckets >= 1) ? numOfBuckets : 0;
  doEstimation_     = doEstimation;
  recordTerminator_ = recordTerminator;

  // to avoid a crash, due to lacking permissions, check the directory
  // itself first
  hdfsFileInfo *dirInfo = hdfsGetPathInfo(fs, dir.data());
  
  if (!dirInfo)
    {
      diags.recordError(NAString("Could not access HDFS directory ") + dir,
                        "HHDFSListPartitionStats::populate");
    }
  else
    {
      dirInfo_ = *dirInfo;

      // list all the files in this directory, they all belong
      // to this partition and either belong to a specific bucket
      // or to the default bucket
      hdfsFileInfo *fileInfos = hdfsListDirectory(fs,
                                                  dir.data(),
                                                  &numFiles);

      // populate partition stats
      for (int f=0; f<numFiles && diags.isSuccess(); f++)
        if (fileInfos[f].mKind == kObjectKindFile)
          {
            // the default (unbucketed) bucket number is
            // defaultBucketIdx_
            Int32 bucketNum = determineBucketNum(fileInfos[f].mName);
            HHDFSBucketStats *bucketStats = NULL;

            if (! bucketStatsList_.used(bucketNum))
              {
                bucketStats = new(heap_) HHDFSBucketStats(heap_, getTable());
                bucketStatsList_.insertAt(bucketNum, bucketStats);
              }
            else
              bucketStats = bucketStatsList_[bucketNum];

            bucketStats->addFile(fs, &fileInfos[f], diags, doEstimation, recordTerminator);
          }

      hdfsFreeFileInfo(fileInfos, numFiles);
      hdfsFreeFileInfo(dirInfo,1);

      // aggregate statistics over all buckets
      for (Int32 b=0; b<=defaultBucketIdx_; b++)
        if (bucketStatsList_.used(b))
          add(bucketStatsList_[b]);
    }
}
Example #16
int dfs_readdir(const char *path, void *buf, fuse_fill_dir_t filler,
                       off_t offset, struct fuse_file_info *fi)
{
  TRACE1("readdir",path)

  (void) offset;
  (void) fi;

  // retrieve dfs specific data
  dfs_context *dfs = (dfs_context*)fuse_get_context()->private_data;

  // check params and the context var
  assert(dfs);
  assert(path);
  assert(buf);

  int path_len = strlen(path);

  hdfsFS userFS;
  // if not connected, try to connect and fail out if we can't.
  if ((userFS = doConnectAsUser(dfs->nn_hostname,dfs->nn_port))== NULL) {
    syslog(LOG_ERR, "ERROR: could not connect to dfs %s:%d\n", __FILE__, __LINE__);
    return -EIO;
  }

  // call dfs to read the dir
  int numEntries = 0;
  hdfsFileInfo *info = hdfsListDirectory(userFS,path,&numEntries);
  userFS = NULL;

  // NULL means either the directory doesn't exist or maybe IO error.
  if (NULL == info) {
    return -ENOENT;
  }

  int i ;
  for (i = 0; i < numEntries; i++) {

    // check the info[i] struct
    if (NULL == info[i].mName) {
      syslog(LOG_ERR,"ERROR: for <%s> info[%d].mName==NULL %s:%d", path, i, __FILE__,__LINE__);
      continue;
    }

    struct stat st;
    fill_stat_structure(&info[i], &st);

    // hack city: todo fix the below to something nicer and more maintainable but
    // with good performance
    // strip off the path but be careful if the path is solely '/'
    // NOTE - this API started returning filenames as full dfs uris
    const char *const str = info[i].mName + dfs->dfs_uri_len + path_len + ((path_len == 1 && *path == '/') ? 0 : 1);

    // pack this entry into the fuse buffer
    int res = 0;
    if ((res = filler(buf,str,&st,0)) != 0) {
      syslog(LOG_ERR, "ERROR: readdir filling the buffer %d %s:%d\n",res, __FILE__, __LINE__);
    }
  }

  // insert '.' and '..'
  const char *const dots [] = { ".",".."};
  for (i = 0 ; i < 2 ; i++)
    {
      struct stat st;
      memset(&st, 0, sizeof(struct stat));

      // set to 0 to indicate not supported for directory because we cannot (efficiently) get this info for every subdirectory
      st.st_nlink =  0;

      // setup stat size and acl meta data
      st.st_size    = 512;
      st.st_blksize = 512;
      st.st_blocks  =  1;
      st.st_mode    = (S_IFDIR | 0777);
      st.st_uid     = default_id;
      st.st_gid     = default_id;
      // todo fix below times
      st.st_atime   = 0;
      st.st_mtime   = 0;
      st.st_ctime   = 0;

      const char *const str = dots[i];

      // flatten the info using fuse's function into a buffer
      int res = 0;
      if ((res = filler(buf,str,&st,0)) != 0) {
        syslog(LOG_ERR, "ERROR: readdir filling the buffer %d %s:%d", res, __FILE__, __LINE__);
      }
    }
  // free the info pointers
  hdfsFreeFileInfo(info,numEntries);
  return 0;
}
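
The pointer arithmetic that strips the URI and directory prefix in dfs_readdir above is easier to see with concrete values (the URI and path below are hypothetical):

#include <stdio.h>
#include <string.h>

int main(void)
{
  const char *mName = "hdfs://host:8020/tmp/subdir";  /* full URI from hdfsListDirectory */
  const char *path = "/tmp";                          /* directory being read */
  size_t dfs_uri_len = strlen("hdfs://host:8020");    /* 16 */
  size_t path_len = strlen(path);                     /* 4 */
  /* skip scheme+host, the directory path, and the joining '/'
   * (no joining '/' to skip when path is the root "/") */
  const char *entry = mName + dfs_uri_len + path_len
      + ((path_len == 1 && *path == '/') ? 0 : 1);
  printf("%s\n", entry);                              /* prints "subdir" */
  return 0;
}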
Example #17
static int doTestHdfsOperations(struct tlhThreadInfo *ti, hdfsFS fs,
                                const struct tlhPaths *paths)
{
    char tmp[4096];
    hdfsFile file;
    int ret, expected, numEntries;
    hdfsFileInfo *fileInfo;
    struct hdfsReadStatistics *readStats = NULL;

    if (hdfsExists(fs, paths->prefix) == 0) {
        EXPECT_ZERO(hdfsDelete(fs, paths->prefix, 1));
    }
    EXPECT_ZERO(hdfsCreateDirectory(fs, paths->prefix));

    EXPECT_ZERO(doTestGetDefaultBlockSize(fs, paths->prefix));

    /* There should be no entry in the directory. */
    errno = EACCES; // see if errno is set to 0 on success
    EXPECT_NULL_WITH_ERRNO(hdfsListDirectory(fs, paths->prefix, &numEntries), 0);
    if (numEntries != 0) {
        fprintf(stderr, "hdfsListDirectory set numEntries to "
                "%d on empty directory.", numEntries);
    }

    /* There should not be any file to open for reading. */
    EXPECT_NULL(hdfsOpenFile(fs, paths->file1, O_RDONLY, 0, 0, 0));

    /* hdfsOpenFile should not accept mode = 3 */
    EXPECT_NULL(hdfsOpenFile(fs, paths->file1, 3, 0, 0, 0));

    file = hdfsOpenFile(fs, paths->file1, O_WRONLY, 0, 0, 0);
    EXPECT_NONNULL(file);

    /* TODO: implement writeFully and use it here */
    expected = (int)strlen(paths->prefix);
    ret = hdfsWrite(fs, file, paths->prefix, expected);
    if (ret < 0) {
        ret = errno;
        fprintf(stderr, "hdfsWrite failed and set errno %d\n", ret);
        return ret;
    }
    if (ret != expected) {
        fprintf(stderr, "hdfsWrite was supposed to write %d bytes, but "
                "it wrote %d\n", ret, expected);
        return EIO;
    }
    EXPECT_ZERO(hdfsFlush(fs, file));
    EXPECT_ZERO(hdfsHSync(fs, file));
    EXPECT_ZERO(hdfsCloseFile(fs, file));

    /* There should be 1 entry in the directory. */
    EXPECT_NONNULL(hdfsListDirectory(fs, paths->prefix, &numEntries));
    if (numEntries != 1) {
        fprintf(stderr, "hdfsListDirectory set numEntries to "
                "%d on directory containing 1 file.", numEntries);
    }

    /* Let's re-open the file for reading */
    file = hdfsOpenFile(fs, paths->file1, O_RDONLY, 0, 0, 0);
    EXPECT_NONNULL(file);

    EXPECT_ZERO(hdfsFileGetReadStatistics(file, &readStats));
    errno = 0;
    EXPECT_UINT64_EQ(UINT64_C(0), readStats->totalBytesRead);
    EXPECT_UINT64_EQ(UINT64_C(0), readStats->totalLocalBytesRead);
    EXPECT_UINT64_EQ(UINT64_C(0), readStats->totalShortCircuitBytesRead);
    hdfsFileFreeReadStatistics(readStats);
    /* TODO: implement readFully and use it here */
    ret = hdfsRead(fs, file, tmp, sizeof(tmp));
    if (ret < 0) {
        ret = errno;
        fprintf(stderr, "hdfsRead failed and set errno %d\n", ret);
        return ret;
    }
    if (ret != expected) {
        fprintf(stderr, "hdfsRead was supposed to read %d bytes, but "
                "it read %d\n", ret, expected);
        return EIO;
    }
    EXPECT_ZERO(hdfsFileGetReadStatistics(file, &readStats));
    errno = 0;
    EXPECT_UINT64_EQ((uint64_t)expected, readStats->totalBytesRead);
    hdfsFileFreeReadStatistics(readStats);
    EXPECT_ZERO(hdfsFileClearReadStatistics(file));
    EXPECT_ZERO(hdfsFileGetReadStatistics(file, &readStats));
    EXPECT_UINT64_EQ((uint64_t)0, readStats->totalBytesRead);
    hdfsFileFreeReadStatistics(readStats);
    EXPECT_ZERO(memcmp(paths->prefix, tmp, expected));
    EXPECT_ZERO(hdfsCloseFile(fs, file));

    // TODO: Non-recursive delete should fail?
    //EXPECT_NONZERO(hdfsDelete(fs, prefix, 0));
    EXPECT_ZERO(hdfsCopy(fs, paths->file1, fs, paths->file2));

    EXPECT_ZERO(hdfsChown(fs, paths->file2, NULL, NULL));
    EXPECT_ZERO(hdfsChown(fs, paths->file2, NULL, "doop"));
    fileInfo = hdfsGetPathInfo(fs, paths->file2);
    EXPECT_NONNULL(fileInfo);
    EXPECT_ZERO(strcmp("doop", fileInfo->mGroup));
    EXPECT_ZERO(hdfsFileIsEncrypted(fileInfo));
    hdfsFreeFileInfo(fileInfo, 1);

    EXPECT_ZERO(hdfsChown(fs, paths->file2, "ha", "doop2"));
    fileInfo = hdfsGetPathInfo(fs, paths->file2);
    EXPECT_NONNULL(fileInfo);
    EXPECT_ZERO(strcmp("ha", fileInfo->mOwner));
    EXPECT_ZERO(strcmp("doop2", fileInfo->mGroup));
    hdfsFreeFileInfo(fileInfo, 1);

    EXPECT_ZERO(hdfsChown(fs, paths->file2, "ha2", NULL));
    fileInfo = hdfsGetPathInfo(fs, paths->file2);
    EXPECT_NONNULL(fileInfo);
    EXPECT_ZERO(strcmp("ha2", fileInfo->mOwner));
    EXPECT_ZERO(strcmp("doop2", fileInfo->mGroup));
    hdfsFreeFileInfo(fileInfo, 1);

    snprintf(tmp, sizeof(tmp), "%s/nonexistent-file-name", paths->prefix);
    EXPECT_NEGATIVE_ONE_WITH_ERRNO(hdfsChown(fs, tmp, "ha3", NULL), ENOENT);
    return 0;
}
Example #18
NABoolean HHDFSListPartitionStats::validateAndRefresh(hdfsFS fs, NABoolean refresh)
{
  NABoolean result = TRUE;

  // assume we get the files sorted by file name
  int numFiles = 0;
  Int32 lastBucketNum = -1;
  ARRAY(Int32) fileNumInBucket(getLastValidBucketIndx()+1);
  HHDFSBucketStats *bucketStats = NULL;

  for (CollIndex i=0; i<=getLastValidBucketIndx(); i++)
    fileNumInBucket.insertAt(i, (Int32) -1);

  // list the files in this partition directory
  hdfsFileInfo *fileInfos = hdfsListDirectory(fs,
                                              partitionDir_.data(),
                                              &numFiles);

  // populate partition stats
  for (int f=0; f<numFiles && result; f++)
    if (fileInfos[f].mKind == kObjectKindFile)
      {
        Int32 bucketNum = determineBucketNum(fileInfos[f].mName);

        if (bucketNum != lastBucketNum)
          {
            if (! bucketStatsList_.used(bucketNum))
              {
                // first file for a new bucket got added
                if (!refresh)
                  return FALSE;
                bucketStats = new(heap_) HHDFSBucketStats(heap_);
                bucketStatsList_.insertAt(bucketNum, bucketStats);
              }
            else
              bucketStats = bucketStatsList_[bucketNum];
            lastBucketNum = bucketNum;
          }

        // file stats for an existing file, or NULL
        // for a new file
        HHDFSFileStats *fileStats = NULL;
        // position in bucketStats of the file (existing or new)
        fileNumInBucket[bucketNum] = fileNumInBucket[bucketNum] + 1;

        if (fileNumInBucket[bucketNum] < bucketStats->entries())
          fileStats = (*bucketStats)[fileNumInBucket[bucketNum]];
        // else this is a new file, indicated by fileStats==NULL

        if (fileStats &&
            fileStats->getFileName() == fileInfos[f].mName)
          {
            // file still exists, check modification timestamp
            if (fileStats->getModificationTS() !=
                fileInfos[f].mLastMod ||
                fileStats->getTotalSize() !=
                (Int64) fileInfos[f].mSize)
              {
                if (refresh)
                  {
                    // redo this file, it changed
                    subtract(fileStats);
                    bucketStats->removeAt(fileNumInBucket[bucketNum]);
                    fileStats = NULL;
                  }
                else
                  result = FALSE;
              }
            // else this file is unchanged from last time
          } // file name matches
        else
          {
            if (refresh)
              {
                if (fileStats)
                  {
                    // We are looking at a file in the directory, fileInfos[f]
                    // and at a file stats entry, with names that do not match.
                    // This could be because a new file got inserted or because
                    // the file of our file stats entry got deleted or both.
                    // We can only refresh this object in the first case, if
                    // a file got deleted we will return FALSE and not refresh.

                    // check whether fileStats got deleted,
                    // search for fileStats->getFileName() in the directory
                    int f2;
                    for (f2=f+1; f2<numFiles; f2++)
                      if (fileStats->getFileName() == fileInfos[f2].mName)
                        break;

                    if (f2<numFiles)
                      {
                        // file fileInfos[f] got added, don't consume
                        // a FileStats entry, instead add it below
                        fileStats = NULL;
                      }
                    else
                      {
                        // file fileStats->getFileName() got deleted,
                        // it's gone from the HDFS directory,
                        // give up and redo the whole thing
                        result = FALSE;
                      }
                  }
                // else file was inserted (fileStats is NULL)
              }
            else
              result = FALSE;
          } // file names for HHDFSFileStats and directory don't match

        if (result && !fileStats)
          {
            // add this file
            if (! bucketStats->addFile(fs,
                                       &fileInfos[f],
                                       doEstimation_,
                                       recordTerminator_,
                                       isSequenceFile_,
                                       fileNumInBucket[bucketNum]))
              result = FALSE;
            add((*bucketStats)[fileNumInBucket[bucketNum]]);
          }
      } // loop over actual files in the directory

  hdfsFreeFileInfo(fileInfos, numFiles);

  // check for file stats that we did not visit at the end of each bucket
  for (CollIndex i=0; i<=getLastValidBucketIndx() && result; i++)
    if (bucketStatsList_.used(i) &&
        bucketStatsList_[i]->entries() != fileNumInBucket[i] + 1)
      result = FALSE; // some files got deleted at the end

  return result;
}
Example #19
int main(int argc, char **argv) {
    const char *writePath = "/tmp/testfile.txt";
    const char *fileContents = "Hello, World!";
    const char *readPath = "/tmp/testfile.txt";
    const char *srcPath = "/tmp/testfile.txt";
    const char *dstPath = "/tmp/testfile2.txt";
    const char *slashTmp = "/tmp";
    const char *newDirectory = "/tmp/newdir";
    const char *newOwner = "root";
    const char *tuser = "******";
    const char *appendPath = "/tmp/appends";
    const char *userPath = "/tmp/usertestfile.txt";

    char buffer[32], buffer2[256], rdbuffer[32];
    tSize num_written_bytes, num_read_bytes;
    hdfsFS fs, lfs;
    hdfsFile writeFile, readFile, localFile, appendFile, userFile;
    tOffset currentPos, seekPos;
    int exists, totalResult, result, numEntries, i, j;
    const char *resp;
    hdfsFileInfo *fileInfo, *fileList, *finfo;
    char *buffer3;
    char permissions[10];
    char ***hosts;
    short newPerm = 0666;
    tTime newMtime, newAtime;

    fs = hdfsConnectNewInstance("default", 0);
    if(!fs) {
        fprintf(stderr, "Oops! Failed to connect to hdfs!\n");
        exit(-1);
    } 
 
    lfs = hdfsConnectNewInstance(NULL, 0);
    if(!lfs) {
        fprintf(stderr, "Oops! Failed to connect to 'local' hdfs!\n");
        exit(-1);
    } 

    {
        //Write tests
        
        writeFile = hdfsOpenFile(fs, writePath, O_WRONLY|O_CREAT, 0, 0, 0);
        if(!writeFile) {
            fprintf(stderr, "Failed to open %s for writing!\n", writePath);
            exit(-1);
        }
        fprintf(stderr, "Opened %s for writing successfully...\n", writePath);
        num_written_bytes =
          hdfsWrite(fs, writeFile, (void*)fileContents,
            (tSize)(strlen(fileContents)+1));
        if (num_written_bytes != strlen(fileContents) + 1) {
          fprintf(stderr, "Failed to write correct number of bytes - expected %d, got %d\n",
                  (int)(strlen(fileContents) + 1), (int)num_written_bytes);
            exit(-1);
        }
        fprintf(stderr, "Wrote %d bytes\n", num_written_bytes);

        currentPos = -1;
        if ((currentPos = hdfsTell(fs, writeFile)) == -1) {
            fprintf(stderr, 
                    "Failed to get current file position correctly! Got %" PRId64 "!\n",
                    currentPos);
            exit(-1);
        }
        fprintf(stderr, "Current position: %" PRId64 "\n", currentPos);

        if (hdfsFlush(fs, writeFile)) {
            fprintf(stderr, "Failed to 'flush' %s\n", writePath); 
            exit(-1);
        }
        fprintf(stderr, "Flushed %s successfully!\n", writePath); 

        if (hdfsHFlush(fs, writeFile)) {
            fprintf(stderr, "Failed to 'hflush' %s\n", writePath);
            exit(-1);
        }
        fprintf(stderr, "HFlushed %s successfully!\n", writePath);

        hdfsCloseFile(fs, writeFile);
    }

    {
        //Read tests
        
        exists = hdfsExists(fs, readPath);

        if (exists) {
          fprintf(stderr, "Failed to validate existence of %s\n", readPath);
          exit(-1);
        }

        readFile = hdfsOpenFile(fs, readPath, O_RDONLY, 0, 0, 0);
        if (!readFile) {
            fprintf(stderr, "Failed to open %s for reading!\n", readPath);
            exit(-1);
        }

        if (!hdfsFileIsOpenForRead(readFile)) {
            fprintf(stderr, "hdfsFileIsOpenForRead: we just opened a file "
                    "with O_RDONLY, and it did not show up as 'open for "
                    "read'\n");
            exit(-1);
        }

        fprintf(stderr, "hdfsAvailable: %d\n", hdfsAvailable(fs, readFile));

        seekPos = 1;
        if(hdfsSeek(fs, readFile, seekPos)) {
            fprintf(stderr, "Failed to seek %s for reading!\n", readPath);
            exit(-1);
        }

        currentPos = -1;
        if((currentPos = hdfsTell(fs, readFile)) != seekPos) {
            fprintf(stderr, 
                    "Failed to get current file position correctly! Got %" PRId64 "!\n",
                    currentPos);
            exit(-1);
        }
        fprintf(stderr, "Current position: %" PRId64 "\n", currentPos);

        if (!hdfsFileUsesDirectRead(readFile)) {
          fprintf(stderr, "Direct read support incorrectly not detected "
                  "for HDFS filesystem\n");
          exit(-1);
        }

        fprintf(stderr, "Direct read support detected for HDFS\n");

        // Test the direct read path
        if(hdfsSeek(fs, readFile, 0)) {
            fprintf(stderr, "Failed to seek %s for reading!\n", readPath);
            exit(-1);
        }
        memset(buffer, 0, sizeof(buffer));
        num_read_bytes = hdfsRead(fs, readFile, (void*)buffer,
                sizeof(buffer));
        if (strncmp(fileContents, buffer, strlen(fileContents)) != 0) {
            fprintf(stderr, "Failed to read (direct). Expected %s but got %s (%d bytes)\n",
                    fileContents, buffer, num_read_bytes);
            exit(-1);
        }
        fprintf(stderr, "Read (direct) following %d bytes:\n%s\n",
                num_read_bytes, buffer);
        if (hdfsSeek(fs, readFile, 0L)) {
            fprintf(stderr, "Failed to seek to file start!\n");
            exit(-1);
        }

        // Disable the direct read path so that we really go through the slow
        // read path
        hdfsFileDisableDirectRead(readFile);

        num_read_bytes = hdfsRead(fs, readFile, (void*)buffer, 
                sizeof(buffer));
        fprintf(stderr, "Read following %d bytes:\n%s\n", 
                num_read_bytes, buffer);

        memset(buffer, 0, strlen(fileContents) + 1);

        num_read_bytes = hdfsPread(fs, readFile, 0, (void*)buffer, 
                sizeof(buffer));
        fprintf(stderr, "Read following %d bytes:\n%s\n", 
                num_read_bytes, buffer);

        hdfsCloseFile(fs, readFile);

        // Test correct behaviour for unsupported filesystems
        localFile = hdfsOpenFile(lfs, writePath, O_WRONLY|O_CREAT, 0, 0, 0);
        if(!localFile) {
            fprintf(stderr, "Failed to open %s for writing!\n", writePath);
            exit(-1);
        }

        num_written_bytes = hdfsWrite(lfs, localFile, (void*)fileContents,
                                      (tSize)(strlen(fileContents) + 1));

        hdfsCloseFile(lfs, localFile);
        localFile = hdfsOpenFile(lfs, writePath, O_RDONLY, 0, 0, 0);

        if (hdfsFileUsesDirectRead(localFile)) {
          fprintf(stderr, "Direct read support incorrectly detected for local "
                  "filesystem\n");
          exit(-1);
        }

        hdfsCloseFile(lfs, localFile);
    }

    totalResult = 0;
    result = 0;
    {
        //Generic file-system operations

        fprintf(stderr, "hdfsCopy(remote-local): %s\n", ((result = hdfsCopy(fs, srcPath, lfs, srcPath)) != 0 ? "Failed!" : "Success!"));
        totalResult += result;
        fprintf(stderr, "hdfsCopy(remote-remote): %s\n", ((result = hdfsCopy(fs, srcPath, fs, dstPath)) != 0 ? "Failed!" : "Success!"));
        totalResult += result;
        fprintf(stderr, "hdfsMove(local-local): %s\n", ((result = hdfsMove(lfs, srcPath, lfs, dstPath)) != 0 ? "Failed!" : "Success!"));
        totalResult += result;
        fprintf(stderr, "hdfsMove(remote-local): %s\n", ((result = hdfsMove(fs, srcPath, lfs, srcPath)) != 0 ? "Failed!" : "Success!"));
        totalResult += result;

        fprintf(stderr, "hdfsRename: %s\n", ((result = hdfsRename(fs, dstPath, srcPath)) != 0 ? "Failed!" : "Success!"));
        totalResult += result;
        fprintf(stderr, "hdfsCopy(remote-remote): %s\n", ((result = hdfsCopy(fs, srcPath, fs, dstPath)) != 0 ? "Failed!" : "Success!"));
        totalResult += result;

        fprintf(stderr, "hdfsCreateDirectory: %s\n", ((result = hdfsCreateDirectory(fs, newDirectory)) != 0 ? "Failed!" : "Success!"));
        totalResult += result;

        fprintf(stderr, "hdfsSetReplication: %s\n", ((result = hdfsSetReplication(fs, srcPath, 2)) != 0 ? "Failed!" : "Success!"));
        totalResult += result;

        fprintf(stderr, "hdfsGetWorkingDirectory: %s\n", ((resp = hdfsGetWorkingDirectory(fs, buffer2, sizeof(buffer2))) != 0 ? buffer2 : "Failed!"));
        totalResult += (resp ? 0 : 1);
        fprintf(stderr, "hdfsSetWorkingDirectory: %s\n", ((result = hdfsSetWorkingDirectory(fs, slashTmp)) != 0 ? "Failed!" : "Success!"));
        totalResult += result;
        fprintf(stderr, "hdfsGetWorkingDirectory: %s\n", ((resp = hdfsGetWorkingDirectory(fs, buffer2, sizeof(buffer2))) != 0 ? buffer2 : "Failed!"));
        totalResult += (resp ? 0 : 1);

        fprintf(stderr, "hdfsGetDefaultBlockSize: %" PRId64 "\n", hdfsGetDefaultBlockSize(fs));
        fprintf(stderr, "hdfsGetCapacity: %" PRId64 "\n", hdfsGetCapacity(fs));
        fprintf(stderr, "hdfsGetUsed: %" PRId64 "\n", hdfsGetUsed(fs));

        fileInfo = NULL;
        if((fileInfo = hdfsGetPathInfo(fs, slashTmp)) != NULL) {
            fprintf(stderr, "hdfsGetPathInfo - SUCCESS!\n");
            fprintf(stderr, "Name: %s, ", fileInfo->mName);
            fprintf(stderr, "Type: %c, ", (char)(fileInfo->mKind));
            fprintf(stderr, "Replication: %d, ", fileInfo->mReplication);
            fprintf(stderr, "BlockSize: %" PRId64 ", ", fileInfo->mBlockSize);
            fprintf(stderr, "Size: %" PRId64 ", ", fileInfo->mSize);
            fprintf(stderr, "LastMod: %s", ctime(&fileInfo->mLastMod)); 
            fprintf(stderr, "Owner: %s, ", fileInfo->mOwner);
            fprintf(stderr, "Group: %s, ", fileInfo->mGroup);
            permission_disp(fileInfo->mPermissions, permissions);
            fprintf(stderr, "Permissions: %d (%s)\n", fileInfo->mPermissions, permissions);
            hdfsFreeFileInfo(fileInfo, 1);
        } else {
            totalResult++;
            fprintf(stderr, "waah! hdfsGetPathInfo for %s - FAILED!\n", slashTmp);
        }

        fileList = 0;
        fileList = hdfsListDirectory(fs, newDirectory, &numEntries);
        if (!(fileList == NULL && numEntries == 0 && !errno)) {
            fprintf(stderr, "waah! hdfsListDirectory for empty %s - FAILED!\n", newDirectory);
            totalResult++;
        } else {
            fprintf(stderr, "hdfsListDirectory for empty %s - SUCCESS!\n", newDirectory);
        }

        fileList = 0;
        if((fileList = hdfsListDirectory(fs, slashTmp, &numEntries)) != NULL) {
            for(i=0; i < numEntries; ++i) {
                fprintf(stderr, "Name: %s, ", fileList[i].mName);
                fprintf(stderr, "Type: %c, ", (char)fileList[i].mKind);
                fprintf(stderr, "Replication: %d, ", fileList[i].mReplication);
                fprintf(stderr, "BlockSize: %" PRId64 ", ", fileList[i].mBlockSize);
                fprintf(stderr, "Size: %" PRId64 ", ", fileList[i].mSize);
                fprintf(stderr, "LastMod: %s", ctime(&fileList[i].mLastMod));
                fprintf(stderr, "Owner: %s, ", fileList[i].mOwner);
                fprintf(stderr, "Group: %s, ", fileList[i].mGroup);
                permission_disp(fileList[i].mPermissions, permissions);
                fprintf(stderr, "Permissions: %d (%s)\n", fileList[i].mPermissions, permissions);
            }
            hdfsFreeFileInfo(fileList, numEntries);
        } else {
            if (errno) {
                totalResult++;
                fprintf(stderr, "waah! hdfsListDirectory - FAILED!\n");
            } else {
                fprintf(stderr, "Empty directory!\n");
            }
        }

        hosts = hdfsGetHosts(fs, srcPath, 0, 1);
        if(hosts) {
            fprintf(stderr, "hdfsGetHosts - SUCCESS! ... \n");
            i=0; 
            while(hosts[i]) {
                j = 0;
                while(hosts[i][j]) {
                    fprintf(stderr, 
                            "\thosts[%d][%d] - %s\n", i, j, hosts[i][j]);
                    ++j;
                }
                ++i;
            }
        } else {
            totalResult++;
            fprintf(stderr, "waah! hdfsGetHosts - FAILED!\n");
        }
       
        // setting tmp dir to 777 so later when connectAsUser nobody, we can write to it

        // chown write
        fprintf(stderr, "hdfsChown: %s\n", ((result = hdfsChown(fs, writePath, NULL, "users")) != 0 ? "Failed!" : "Success!"));
        totalResult += result;
        fprintf(stderr, "hdfsChown: %s\n", ((result = hdfsChown(fs, writePath, newOwner, NULL)) != 0 ? "Failed!" : "Success!"));
        totalResult += result;
        // chmod write
        fprintf(stderr, "hdfsChmod: %s\n", ((result = hdfsChmod(fs, writePath, newPerm)) != 0 ? "Failed!" : "Success!"));
        totalResult += result;



        sleep(2);
        newMtime = time(NULL);
        newAtime = time(NULL);

        // utime write
        fprintf(stderr, "hdfsUtime: %s\n", ((result = hdfsUtime(fs, writePath, newMtime, newAtime)) != 0 ? "Failed!" : "Success!"));

        totalResult += result;

        // chown/chmod/utime read
        finfo = hdfsGetPathInfo(fs, writePath);

        fprintf(stderr, "hdfsChown read: %s\n", ((result = (strcmp(finfo->mOwner, newOwner))) != 0 ? "Failed!" : "Success!"));
        totalResult += result;

        fprintf(stderr, "hdfsChmod read: %s\n", ((result = (finfo->mPermissions != newPerm)) != 0 ? "Failed!" : "Success!"));
        totalResult += result;

        // will later use /tmp/ as a different user so enable it
        fprintf(stderr, "hdfsChmod: %s\n", ((result = hdfsChmod(fs, "/tmp/", 0777)) != 0 ? "Failed!" : "Success!"));
        totalResult += result;

        fprintf(stderr,"newMTime=%ld\n",newMtime);
        fprintf(stderr,"curMTime=%ld\n",finfo->mLastMod);


        fprintf(stderr, "hdfsUtime read (mtime): %s\n", ((result = (finfo->mLastMod != newMtime)) != 0 ? "Failed!" : "Success!"));
        totalResult += result;

        // No easy way to turn on access times from hdfs_test right now
        //        fprintf(stderr, "hdfsUtime read (atime): %s\n", ((result = (finfo->mLastAccess != newAtime)) != 0 ? "Failed!" : "Success!"));
        //        totalResult += result;

        hdfsFreeFileInfo(finfo, 1);

        // Clean up
        fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(fs, newDirectory, 1)) != 0 ? "Failed!" : "Success!"));
        totalResult += result;
        fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(fs, srcPath, 1)) != 0 ? "Failed!" : "Success!"));
        totalResult += result;
        fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(lfs, srcPath, 1)) != 0 ? "Failed!" : "Success!"));
        totalResult += result;
        fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(lfs, dstPath, 1)) != 0 ? "Failed!" : "Success!"));
        totalResult += result;
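        // Note the inverted check: hdfsExists should now fail (return
        // non-zero) for the directory deleted above.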
        fprintf(stderr, "hdfsExists: %s\n", ((result = hdfsExists(fs, newDirectory)) != 0 ? "Success!" : "Failed!"));
        totalResult += (result ? 0 : 1);
    }

    {
      // TEST APPENDS

      // CREATE
      appendFile = hdfsOpenFile(fs, appendPath, O_WRONLY, 0, 0, 0);
      if(!appendFile) {
        fprintf(stderr, "Failed to open %s for writing!\n", appendPath);
        exit(-1);
      }
      fprintf(stderr, "Opened %s for writing successfully...\n", appendPath);

      buffer3 = "Hello,";
      num_written_bytes = hdfsWrite(fs, appendFile, (void*)buffer3,
        (tSize)strlen(buffer3));
      fprintf(stderr, "Wrote %d bytes\n", num_written_bytes);

      if (hdfsFlush(fs, appendFile)) {
        fprintf(stderr, "Failed to 'flush' %s\n", appendPath); 
        exit(-1);
      }
      fprintf(stderr, "Flushed %s successfully!\n", appendPath); 

      hdfsCloseFile(fs, appendFile);

      // RE-OPEN
      appendFile = hdfsOpenFile(fs, appendPath, O_WRONLY|O_APPEND, 0, 0, 0);
      if(!appendFile) {
        fprintf(stderr, "Failed to open %s for writing!\n", appendPath);
        exit(-1);
      }
      fprintf(stderr, "Opened %s for writing successfully...\n", appendPath);

      buffer3 = " World";
      num_written_bytes = hdfsWrite(fs, appendFile, (void*)buffer3,
        (tSize)(strlen(buffer3) + 1));
      fprintf(stderr, "Wrote %d bytes\n", num_written_bytes);

      if (hdfsFlush(fs, appendFile)) {
        fprintf(stderr, "Failed to 'flush' %s\n", appendPath); 
        exit(-1);
      }
      fprintf(stderr, "Flushed %s successfully!\n", appendPath); 

      hdfsCloseFile(fs, appendFile);

      // CHECK size
      finfo = hdfsGetPathInfo(fs, appendPath);
      fprintf(stderr, "fileinfo->mSize: == total %s\n", ((result = (finfo->mSize == (tOffset)(strlen("Hello, World") + 1))) == 1 ? "Success!" : "Failed!"));
      totalResult += (result ? 0 : 1);

      // READ and check data
      readFile = hdfsOpenFile(fs, appendPath, O_RDONLY, 0, 0, 0);
      if (!readFile) {
        fprintf(stderr, "Failed to open %s for reading!\n", appendPath);
        exit(-1);
      }

      num_read_bytes = hdfsRead(fs, readFile, (void*)rdbuffer, sizeof(rdbuffer));
      fprintf(stderr, "Read following %d bytes:\n%s\n", 
              num_read_bytes, rdbuffer);

      fprintf(stderr, "read == Hello, World %s\n", ((result = (strcmp(rdbuffer, "Hello, World"))) == 0 ? "Success!" : "Failed!"));

      hdfsCloseFile(fs, readFile);

      // DONE test appends
    }
      
      
    totalResult += (hdfsDisconnect(fs) != 0);

    {
      //
      // Now test connecting as a specific user.
      // This is only meant to verify that we connected as that user, not to
      // test the actual fs user capabilities. Thus we just create a file and
      // check that its owner is correct.

      fs = hdfsConnectAsUserNewInstance("default", 0, tuser);
      if(!fs) {
        fprintf(stderr, "Oops! Failed to connect to hdfs as user %s!\n",tuser);
        exit(-1);
      } 

        userFile = hdfsOpenFile(fs, userPath, O_WRONLY|O_CREAT, 0, 0, 0);
        if(!userFile) {
            fprintf(stderr, "Failed to open %s for writing!\n", userPath);
            exit(-1);
        }
        fprintf(stderr, "Opened %s for writing successfully...\n", userPath);

        num_written_bytes = hdfsWrite(fs, userFile, (void*)fileContents,
          (tSize)(strlen(fileContents)+1));
        fprintf(stderr, "Wrote %d bytes\n", num_written_bytes);

        if (hdfsFlush(fs, userFile)) {
            fprintf(stderr, "Failed to 'flush' %s\n", userPath); 
            exit(-1);
        }
        fprintf(stderr, "Flushed %s successfully!\n", userPath); 

        hdfsCloseFile(fs, userFile);

        finfo = hdfsGetPathInfo(fs, userPath);
        fprintf(stderr, "hdfs new file user is correct: %s\n", ((result = (strcmp(finfo->mOwner, tuser))) != 0 ? "Failed!" : "Success!"));
        totalResult += result;
    }
    
    totalResult += (hdfsDisconnect(fs) != 0);

    if (totalResult != 0) {
        return -1;
    } else {
        return 0;
    }
}
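Both this test and the next one call a permission_disp() helper that is defined elsewhere in their sources. A minimal sketch of such a helper, assuming the 10-byte caller-supplied buffer seen in these tests and standard POSIX mode bits, could look like the following (hypothetical, not the tests' actual implementation):

/* Hypothetical sketch of a permission_disp()-style helper: renders the
 * low nine mode bits as an "rwxrwxrwx" string. The caller must supply a
 * buffer of at least 10 bytes, as the tests here do. */
static void permission_disp(short permissions, char *rtr)
{
    const char letters[] = "rwxrwxrwx";
    int i;
    for (i = 0; i < 9; i++) {
        /* Test bits from 0400 (owner read) down to 0001 (other execute). */
        rtr[i] = (permissions & (1 << (8 - i))) ? letters[i] : '-';
    }
    rtr[9] = '\0';
}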
Example #20
int main(int argc, char **argv)
{
    char buffer[32];
    tSize num_written_bytes;
    const char* slashTmp = "/tmp";
    int nnPort;
    char *rwTemplate, *rwTemplate2, *newDirTemplate,
    *appendTemplate, *userTemplate, *rwPath = NULL;
    const char* fileContents = "Hello, World!";
    const char* nnHost = NULL;
    
    if (argc != 2) {
        fprintf(stderr, "usage: test_libwebhdfs_ops <username>\n");
        exit(1);
    }
    
    struct NativeMiniDfsConf conf = {
        .doFormat = 1, .webhdfsEnabled = 1, .namenodeHttpPort = 50070,
    };
    cluster = nmdCreate(&conf);
    if (!cluster) {
        fprintf(stderr, "Failed to create the NativeMiniDfsCluster.\n");
        exit(1);
    }
    if (nmdWaitClusterUp(cluster)) {
        fprintf(stderr, "Error when waiting for cluster to be ready.\n");
        exit(1);
    }
    if (nmdGetNameNodeHttpAddress(cluster, &nnPort, &nnHost)) {
        fprintf(stderr, "Error when retrieving namenode host address.\n");
        exit(1);
    }
    
    hdfsFS fs = hdfsConnectAsUserNewInstance(nnHost, nnPort, argv[1]);
    if(!fs) {
        fprintf(stderr, "Oops! Failed to connect to hdfs!\n");
        exit(-1);
    }
    
    {
        // Write tests
        rwTemplate = strdup("/tmp/helloWorldXXXXXX");
        if (!rwTemplate) {
            fprintf(stderr, "Failed to create rwTemplate!\n");
            exit(1);
        }
        rwPath = mktemp(rwTemplate);
        // hdfsOpenFile
        hdfsFile writeFile = hdfsOpenFile(fs, rwPath,
                                          O_WRONLY|O_CREAT, 0, 0, 0);

        if(!writeFile) {
            fprintf(stderr, "Failed to open %s for writing!\n", rwPath);
            exit(1);
        }
        fprintf(stderr, "Opened %s for writing successfully...\n", rwPath);
        // hdfsWrite
        num_written_bytes = hdfsWrite(fs, writeFile, (void*)fileContents,
                                      (int) strlen(fileContents) + 1);
        if (num_written_bytes != strlen(fileContents) + 1) {
            fprintf(stderr, "Failed to write correct number of bytes - "
                    "expected %d, got %d\n",
                    (int)(strlen(fileContents) + 1), (int) num_written_bytes);
            exit(1);
        }
        fprintf(stderr, "Wrote %d bytes\n", num_written_bytes);
        
        // hdfsTell
        tOffset currentPos = -1;
        if ((currentPos = hdfsTell(fs, writeFile)) == -1) {
            fprintf(stderr,
                    "Failed to get current file position correctly. Got %"
                    PRId64 "!\n", currentPos);
            exit(1);
        }
        fprintf(stderr, "Current position: %" PRId64 "\n", currentPos);
        
        hdfsCloseFile(fs, writeFile);
        // Done test write
    }
    
    sleep(1);
    
    {
        //Read tests
        int available = 0, exists = 0;
        
        // hdfsExists
        exists = hdfsExists(fs, rwPath);
        if (exists) {
            fprintf(stderr, "Failed to validate existence of %s\n", rwPath);
            exists = hdfsExists(fs, rwPath);
            if (exists) {
                fprintf(stderr,
                        "Still failed to validate existence of %s\n", rwPath);
                exit(1);
            }
        }
        
        hdfsFile readFile = hdfsOpenFile(fs, rwPath, O_RDONLY, 0, 0, 0);
        if (!readFile) {
            fprintf(stderr, "Failed to open %s for reading!\n", rwPath);
            exit(1);
        }
        if (!hdfsFileIsOpenForRead(readFile)) {
            fprintf(stderr, "hdfsFileIsOpenForRead: we just opened a file "
                    "with O_RDONLY, and it did not show up as 'open for "
                    "read'\n");
            exit(1);
        }
        
        available = hdfsAvailable(fs, readFile);
        fprintf(stderr, "hdfsAvailable: %d\n", available);
        
        // hdfsSeek, hdfsTell
        tOffset seekPos = 1;
        if(hdfsSeek(fs, readFile, seekPos)) {
            fprintf(stderr, "Failed to seek %s for reading!\n", rwPath);
            exit(1);
        }
        
        tOffset currentPos = -1;
        if((currentPos = hdfsTell(fs, readFile)) != seekPos) {
            fprintf(stderr,
                    "Failed to get current file position correctly! Got %"
                    PRId64 "!\n", currentPos);

            exit(1);
        }
        fprintf(stderr, "Current position: %" PRId64 "\n", currentPos);
        
        if(hdfsSeek(fs, readFile, 0)) {
            fprintf(stderr, "Failed to seek %s for reading!\n", rwPath);
            exit(1);
        }
        
        // hdfsRead
        memset(buffer, 0, sizeof(buffer));
        tSize num_read_bytes = hdfsRead(fs, readFile, buffer, sizeof(buffer));
        if (strncmp(fileContents, buffer, strlen(fileContents)) != 0) {
            fprintf(stderr, "Failed to read (direct). "
                    "Expected %s but got %s (%d bytes)\n",
                    fileContents, buffer, num_read_bytes);
            exit(1);
        }
        fprintf(stderr, "Read following %d bytes:\n%s\n",
                num_read_bytes, buffer);
        
        if (hdfsSeek(fs, readFile, 0L)) {
            fprintf(stderr, "Failed to seek to file start!\n");
            exit(1);
        }
        
        // hdfsPread
        memset(buffer, 0, sizeof(buffer));
        num_read_bytes = hdfsPread(fs, readFile, 0, buffer, sizeof(buffer));
        fprintf(stderr, "Read following %d bytes:\n%s\n",
                num_read_bytes, buffer);
        
        hdfsCloseFile(fs, readFile);
        // Done test read
    }
    
    int totalResult = 0;
    int result = 0;
    {
        //Generic file-system operations
        char *srcPath = rwPath;
        char buffer[256];
        const char *resp;
        rwTemplate2 = strdup("/tmp/helloWorld2XXXXXX");
        if (!rwTemplate2) {
            fprintf(stderr, "Failed to create rwTemplate2!\n");
            exit(1);
        }
        char *dstPath = mktemp(rwTemplate2);
        newDirTemplate = strdup("/tmp/newdirXXXXXX");
        if (!newDirTemplate) {
            fprintf(stderr, "Failed to create newDirTemplate!\n");
            exit(1);
        }
        char *newDirectory = mktemp(newDirTemplate);
        
        // hdfsRename
        fprintf(stderr, "hdfsRename: %s\n",
                ((result = hdfsRename(fs, rwPath, dstPath)) ?
                 "Failed!" : "Success!"));
        totalResult += result;
        fprintf(stderr, "hdfsRename back: %s\n",
                ((result = hdfsRename(fs, dstPath, srcPath)) ?
                 "Failed!" : "Success!"));
        totalResult += result;
        
        // hdfsCreateDirectory
        fprintf(stderr, "hdfsCreateDirectory: %s\n",
                ((result = hdfsCreateDirectory(fs, newDirectory)) ?
                 "Failed!" : "Success!"));
        totalResult += result;
        
        // hdfsSetReplication
        fprintf(stderr, "hdfsSetReplication: %s\n",
                ((result = hdfsSetReplication(fs, srcPath, 1)) ?
                 "Failed!" : "Success!"));
        totalResult += result;

        // hdfsGetWorkingDirectory, hdfsSetWorkingDirectory
        fprintf(stderr, "hdfsGetWorkingDirectory: %s\n",
                ((resp = hdfsGetWorkingDirectory(fs, buffer, sizeof(buffer))) ?
                 buffer : "Failed!"));
        totalResult += (resp ? 0 : 1);

        const char* path[] = {"/foo", "/foo/bar", "foobar", "//foo/bar//foobar",
                              "foo//bar", "foo/bar///", "/", "////"};
        int i;
        for (i = 0; i < 8; i++) {
            fprintf(stderr, "hdfsSetWorkingDirectory: %s, %s\n",
                    ((result = hdfsSetWorkingDirectory(fs, path[i])) ?
                     "Failed!" : "Success!"),
                    hdfsGetWorkingDirectory(fs, buffer, sizeof(buffer)));
            totalResult += result;
        }

        fprintf(stderr, "hdfsSetWorkingDirectory: %s\n",
                ((result = hdfsSetWorkingDirectory(fs, slashTmp)) ?
                 "Failed!" : "Success!"));
        totalResult += result;
        fprintf(stderr, "hdfsGetWorkingDirectory: %s\n",
                ((resp = hdfsGetWorkingDirectory(fs, buffer, sizeof(buffer))) ?
                 buffer : "Failed!"));
        totalResult += (resp ? 0 : 1);

        // hdfsGetPathInfo
        hdfsFileInfo *fileInfo = NULL;
        if((fileInfo = hdfsGetPathInfo(fs, slashTmp)) != NULL) {
            fprintf(stderr, "hdfsGetPathInfo - SUCCESS!\n");
            fprintf(stderr, "Name: %s, ", fileInfo->mName);
            fprintf(stderr, "Type: %c, ", (char)(fileInfo->mKind));
            fprintf(stderr, "Replication: %d, ", fileInfo->mReplication);
            fprintf(stderr, "BlockSize: %"PRId64", ", fileInfo->mBlockSize);
            fprintf(stderr, "Size: %"PRId64", ", fileInfo->mSize);
            fprintf(stderr, "LastMod: %s", ctime(&fileInfo->mLastMod));
            fprintf(stderr, "Owner: %s, ", fileInfo->mOwner);
            fprintf(stderr, "Group: %s, ", fileInfo->mGroup);
            char permissions[10];
            permission_disp(fileInfo->mPermissions, permissions);
            fprintf(stderr, "Permissions: %d (%s)\n",
                    fileInfo->mPermissions, permissions);
            hdfsFreeFileInfo(fileInfo, 1);
        } else {
            totalResult++;
            fprintf(stderr, "hdfsGetPathInfo for %s - FAILED!\n", slashTmp);
        }
        
        // hdfsListDirectory
        hdfsFileInfo *fileList = 0;
        int numEntries = 0;
        if((fileList = hdfsListDirectory(fs, slashTmp, &numEntries)) != NULL) {
            int i = 0;
            for(i=0; i < numEntries; ++i) {
                fprintf(stderr, "Name: %s, ", fileList[i].mName);
                fprintf(stderr, "Type: %c, ", (char)fileList[i].mKind);
                fprintf(stderr, "Replication: %d, ", fileList[i].mReplication);
                fprintf(stderr, "BlockSize: %"PRId64", ", fileList[i].mBlockSize);
                fprintf(stderr, "Size: %"PRId64", ", fileList[i].mSize);
                fprintf(stderr, "LastMod: %s", ctime(&fileList[i].mLastMod));
                fprintf(stderr, "Owner: %s, ", fileList[i].mOwner);
                fprintf(stderr, "Group: %s, ", fileList[i].mGroup);
                char permissions[10];
                permission_disp(fileList[i].mPermissions, permissions);
                fprintf(stderr, "Permissions: %d (%s)\n",
                        fileList[i].mPermissions, permissions);
            }
            hdfsFreeFileInfo(fileList, numEntries);
        } else {
            if (errno) {
                totalResult++;
                fprintf(stderr, "waah! hdfsListDirectory - FAILED!\n");
            } else {
                fprintf(stderr, "Empty directory!\n");
            }
        }
        
        char *newOwner = "root";
        // newPerm drives the chmod test on rwPath below; /tmp itself is
        // opened up to 777 later so the connect-as-user ("nobody") test
        // can write to it.
        short newPerm = 0666;
        
        // hdfsChown
        fprintf(stderr, "hdfsChown: %s\n",
                ((result = hdfsChown(fs, rwPath, NULL, "users")) ?
                 "Failed!" : "Success!"));
        totalResult += result;
        fprintf(stderr, "hdfsChown: %s\n",
                ((result = hdfsChown(fs, rwPath, newOwner, NULL)) ?
                 "Failed!" : "Success!"));
        totalResult += result;
        // hdfsChmod
        fprintf(stderr, "hdfsChmod: %s\n",
                ((result = hdfsChmod(fs, rwPath, newPerm)) ?
                 "Failed!" : "Success!"));
        totalResult += result;
        
        sleep(2);
        tTime newMtime = time(NULL);
        tTime newAtime = time(NULL);
        
        // utime write
        fprintf(stderr, "hdfsUtime: %s\n",
                ((result = hdfsUtime(fs, rwPath, newMtime, newAtime)) ?
                 "Failed!" : "Success!"));        
        totalResult += result;
        
        // chown/chmod/utime read
        hdfsFileInfo *finfo = hdfsGetPathInfo(fs, rwPath);
        
        fprintf(stderr, "hdfsChown read: %s\n",
                ((result = (strcmp(finfo->mOwner, newOwner) != 0)) ?
                 "Failed!" : "Success!"));
        totalResult += result;
        
        fprintf(stderr, "hdfsChmod read: %s\n",
                ((result = (finfo->mPermissions != newPerm)) ?
                 "Failed!" : "Success!"));
        totalResult += result;
        
        // will later use /tmp/ as a different user so enable it
        fprintf(stderr, "hdfsChmod: %s\n",
                ((result = hdfsChmod(fs, slashTmp, 0777)) ?
                 "Failed!" : "Success!"));
        totalResult += result;
        
        fprintf(stderr,"newMTime=%ld\n",newMtime);
        fprintf(stderr,"curMTime=%ld\n",finfo->mLastMod);
        
        
        fprintf(stderr, "hdfsUtime read (mtime): %s\n",
                ((result = (finfo->mLastMod != newMtime / 1000)) ?
                 "Failed!" : "Success!"));
        totalResult += result;
        
        // Clean up
        hdfsFreeFileInfo(finfo, 1);
        fprintf(stderr, "hdfsDelete: %s\n",
                ((result = hdfsDelete(fs, newDirectory, 1)) ?
                 "Failed!" : "Success!"));
        totalResult += result;
        fprintf(stderr, "hdfsDelete: %s\n",
                ((result = hdfsDelete(fs, srcPath, 1)) ?
                 "Failed!" : "Success!"));
        totalResult += result;
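        // Inverted check: hdfsExists is expected to fail (non-zero) for
        // the directory we just deleted.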
        fprintf(stderr, "hdfsExists: %s\n",
                ((result = hdfsExists(fs, newDirectory)) ?
                 "Success!" : "Failed!"));
        totalResult += (result ? 0 : 1);
        // Done test generic operations
    }
    
    {
        // Test Appends
        appendTemplate = strdup("/tmp/appendsXXXXXX");
        if (!appendTemplate) {
            fprintf(stderr, "Failed to create appendTemplate!\n");
            exit(1);
        }
        char *appendPath = mktemp(appendTemplate);
        const char* helloBuffer = "Hello,";
        hdfsFile writeFile = NULL;
        
        // Create
        writeFile = hdfsOpenFile(fs, appendPath, O_WRONLY, 0, 0, 0);
        if(!writeFile) {
            fprintf(stderr, "Failed to open %s for writing!\n", appendPath);
            exit(1);
        }
        fprintf(stderr, "Opened %s for writing successfully...\n", appendPath);
        
        num_written_bytes = hdfsWrite(fs, writeFile, helloBuffer,
                                      (int) strlen(helloBuffer));
        fprintf(stderr, "Wrote %d bytes\n", num_written_bytes);
        hdfsCloseFile(fs, writeFile);
        
        fprintf(stderr, "hdfsSetReplication: %s\n",
                ((result = hdfsSetReplication(fs, appendPath, 1)) ?
                 "Failed!" : "Success!"));
        totalResult += result;
        
        // Re-Open for Append
        writeFile = hdfsOpenFile(fs, appendPath, O_WRONLY | O_APPEND, 0, 0, 0);
        if(!writeFile) {
            fprintf(stderr, "Failed to open %s for writing!\n", appendPath);
            exit(1);
        }
        fprintf(stderr, "Opened %s for appending successfully...\n",
                appendPath);
        
        helloBuffer = " World";
        num_written_bytes = hdfsWrite(fs, writeFile, helloBuffer,
                                      (int)strlen(helloBuffer) + 1);
        fprintf(stderr, "Wrote %d bytes\n", num_written_bytes);
        
        hdfsCloseFile(fs, writeFile);

        // Check size
        hdfsFileInfo *finfo = hdfsGetPathInfo(fs, appendPath);
        fprintf(stderr, "fileinfo->mSize: == total %s\n",
                ((result = (finfo->mSize == strlen("Hello, World") + 1)) ?
                 "Success!" : "Failed!"));
        totalResult += (result ? 0 : 1);
        hdfsFreeFileInfo(finfo, 1);
        
        // Read and check data
        hdfsFile readFile = hdfsOpenFile(fs, appendPath, O_RDONLY, 0, 0, 0);
        if (!readFile) {
            fprintf(stderr, "Failed to open %s for reading!\n", appendPath);
            exit(1);
        }
        
        tSize num_read_bytes = hdfsRead(fs, readFile, buffer, sizeof(buffer));
        fprintf(stderr, "Read following %d bytes:\n%s\n",
                num_read_bytes, buffer);
        fprintf(stderr, "read == Hello, World %s\n",
                (result = (strcmp(buffer, "Hello, World") == 0)) ?
                "Success!" : "Failed!");
        hdfsCloseFile(fs, readFile);
        
        // Cleanup
        fprintf(stderr, "hdfsDelete: %s\n",
                ((result = hdfsDelete(fs, appendPath, 1)) ?
                 "Failed!" : "Success!"));
        totalResult += result;
        // Done test appends
    }
    
    totalResult += (hdfsDisconnect(fs) != 0);
    
    {
        //
        // Now test connecting as a specific user.
        // This is only meant to verify that we connected as that user, not
        // to test the actual fs user capabilities. Thus we just create a
        // file and check that its owner is correct.
        const char *tuser = "nobody";
        userTemplate = strdup("/tmp/usertestXXXXXX");
        if (!userTemplate) {
            fprintf(stderr, "Failed to create userTemplate!\n");
            exit(1);
        }
        char* userWritePath = mktemp(userTemplate);
        hdfsFile writeFile = NULL;
        
        fs = hdfsConnectAsUserNewInstance("default", 50070, tuser);
        if(!fs) {
            fprintf(stderr,
                    "Oops! Failed to connect to hdfs as user %s!\n",tuser);
            exit(1);
        }
        
        writeFile = hdfsOpenFile(fs, userWritePath, O_WRONLY|O_CREAT, 0, 0, 0);
        if(!writeFile) {
            fprintf(stderr, "Failed to open %s for writing!\n", userWritePath);
            exit(1);
        }
        fprintf(stderr, "Opened %s for writing successfully...\n",
                userWritePath);
        
        num_written_bytes = hdfsWrite(fs, writeFile, fileContents,
                                      (int)strlen(fileContents) + 1);
        fprintf(stderr, "Wrote %d bytes\n", num_written_bytes);
        hdfsCloseFile(fs, writeFile);
        
        hdfsFileInfo *finfo = hdfsGetPathInfo(fs, userWritePath);
        if (finfo) {
            fprintf(stderr, "hdfs new file user is correct: %s\n",
                    ((result = (strcmp(finfo->mOwner, tuser) != 0)) ?
                     "Failed!" : "Success!"));
        } else {
            fprintf(stderr,
                    "hdfsFileInfo returned by hdfsGetPathInfo is NULL\n");
            result = -1;
        }
        totalResult += result;
        
        // Cleanup
        fprintf(stderr, "hdfsDelete: %s\n",
                ((result = hdfsDelete(fs, userWritePath, 1)) ?
                 "Failed!" : "Success!"));
        totalResult += result;
        // Done test specific user
    }

    totalResult += (hdfsDisconnect(fs) != 0);
    
    // Shutdown the native minidfscluster
    nmdShutdown(cluster);
    nmdFree(cluster);
    
    fprintf(stderr, "totalResult == %d\n", totalResult);
    if (totalResult != 0) {
        return -1;
    } else {
        return 0;
    }
}
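A recurring idiom in both tests is result = <call>, a Success!/Failed! line on stderr, and an update to totalResult. If one were writing new tests in this style, a small convenience macro (hypothetical, not part of either test) could capture the pattern for calls where zero means success:

#include <stdio.h>

/* Hypothetical helper macro for the bookkeeping idiom used above:
 * run an hdfs call that returns 0 on success, log the outcome, and
 * count any failure into totalResult. Assumes int result and
 * totalResult are in scope, as in the tests above. */
#define CHECK_OP(label, expr) do {                                  \
    result = (expr);                                                \
    fprintf(stderr, "%s: %s\n", (label),                            \
            (result != 0) ? "Failed!" : "Success!");                \
    totalResult += (result != 0);                                   \
} while (0)

/* Usage sketch, with fs an open hdfsFS handle:
 *   CHECK_OP("hdfsChmod", hdfsChmod(fs, "/tmp", 0777));
 */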