Example 1
int span_table_get(struct span_table *st, struct htrace_span **out,
                   const char *desc, const char *trid)
{
    struct htable *ht = (struct htable *)st;
    struct htrace_span *span;

    span = htable_get(ht, desc);
    EXPECT_NONNULL(span);
    EXPECT_STR_EQ(desc, span->desc);
    EXPECT_UINT64_GE(span->begin_ms, span->end_ms);
    EXPECT_TRUE(0 !=
        htrace_span_id_compare(&INVALID_SPAN_ID, &span->span_id));
    EXPECT_NONNULL(span->trid);
    EXPECT_STR_EQ(trid, span->trid);
    *out = span;
    return EXIT_SUCCESS;
}
Example 2
static int htraced_rcv_test(struct rtest *rt)
{
    char err[512], *conf_str, *json_path;
    size_t err_len = sizeof(err);
    struct mini_htraced_params params;
    struct mini_htraced *ht = NULL;
    struct span_table *st;
    uint64_t start_ms;

    params.name = rt->name;
    params.confstr = "";
    mini_htraced_build(&params, &ht, err, err_len);
    EXPECT_STR_EQ("", err);

    EXPECT_INT_GE(0, asprintf(&json_path, "%s/%s",
                ht->root_dir, "spans.json"));
    EXPECT_INT_GE(0, asprintf(&conf_str, "%s=%s;%s=%s",
                HTRACE_SPAN_RECEIVER_KEY, "htraced",
                HTRACED_ADDRESS_KEY, ht->htraced_hrpc_addr));
    EXPECT_INT_ZERO(rt->run(rt, conf_str));
    start_ms = monotonic_now_ms(NULL);
    //
    // It may take a little while for htraced to commit incoming spans sent
    // via RPC to its data store; in other words, htraced does not offer
    // read-after-write consistency.  This is normally not an issue, since
    // trace collection happens in the background.
    //
    // For this unit test, it means we must retry if we find too few spans
    // the first time we dump the htraced data store contents.  (A distilled
    // sketch of this poll-and-retry pattern follows the function.)
    //
    while (1) {
        int nspans;

        // This uses the bin/htracedTool program to dump the spans to a json file.
        mini_htraced_dump_spans(ht, err, err_len, json_path);
        EXPECT_STR_EQ("", err);
        st = span_table_alloc();
        EXPECT_NONNULL(st);
        nspans = load_trace_span_file(json_path, st);
        EXPECT_INT_GE(0, nspans);
        if (nspans >= rt->spans_created) {
            break;
        }
        span_table_free(st);
        st = NULL;
        // Give up, failing the test, once roughly 30 seconds have elapsed.
        EXPECT_UINT64_GE(monotonic_now_ms(NULL), start_ms + 30000);
        sleep_ms(100);
        fprintf(stderr, "htraced_test_app1: retrying htrace dumpAll...\n");
    }
    EXPECT_INT_ZERO(rt->verify(rt, st));
    free(conf_str);
    free(json_path);
    span_table_free(st);
    mini_htraced_stop(ht);
    mini_htraced_free(ht);

    return EXIT_SUCCESS;
}
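The retry loop above generalizes to a small poll-until-deadline helper. Below is a minimal sketch that reuses the test suite's own monotonic_now_ms() and sleep_ms() helpers; the names poll_until, poll_fn, budget_ms, and interval_ms are illustrative, not part of any real API.

static int poll_until(int (*poll_fn)(void *arg), void *arg,
                      uint64_t budget_ms, uint64_t interval_ms)
{
    uint64_t start = monotonic_now_ms(NULL);

    // Keep polling until the callback reports success...
    while (!poll_fn(arg)) {
        // ...or the time budget is exhausted.
        if (monotonic_now_ms(NULL) > start + budget_ms) {
            return -1; /* timed out */
        }
        sleep_ms(interval_ms);
    }
    return 0;
}

In htraced_rcv_test above, poll_fn would correspond to "dump the spans and check that nspans >= rt->spans_created", with a budget of 30000 ms and an interval of 100 ms.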
Example 3
/**
 * Test that we can write a file with libhdfs and then read it back
 */
int main(void)
{
    int port;
    struct NativeMiniDfsConf conf = {
        1, /* doFormat */
        0, /* webhdfsEnabled */
        0, /* namenodeHttpPort */
        1, /* configureShortCircuit */
    };
    char testFileName[TEST_FILE_NAME_LENGTH];
    hdfsFS fs;
    struct NativeMiniDfsCluster* cl;
    struct hdfsBuilder *bld;

    cl = nmdCreate(&conf);
    EXPECT_NONNULL(cl);
    EXPECT_ZERO(nmdWaitClusterUp(cl));
    port = nmdGetNameNodePort(cl);
    if (port < 0) {
        fprintf(stderr, "TEST_ERROR: test_zerocopy: "
                "nmdGetNameNodePort returned error %d\n", port);
        return EXIT_FAILURE;
    }
    bld = hdfsNewBuilder();
    EXPECT_NONNULL(bld);
    EXPECT_ZERO(nmdConfigureHdfsBuilder(cl, bld));
    hdfsBuilderSetForceNewInstance(bld);
    hdfsBuilderConfSetStr(bld, "dfs.block.size",
                          TO_STR(TEST_ZEROCOPY_FULL_BLOCK_SIZE));
    /* skip checksums so that our reads can always be served from mmaps */
    hdfsBuilderConfSetStr(bld, "dfs.client.read.shortcircuit.skip.checksum",
                          "true");
    fs = hdfsBuilderConnect(bld);
    EXPECT_NONNULL(fs);
    EXPECT_ZERO(createZeroCopyTestFile(fs, testFileName,
          TEST_FILE_NAME_LENGTH));
    EXPECT_ZERO(doTestZeroCopyReads(fs, testFileName));
    EXPECT_ZERO(hdfsDisconnect(fs));
    EXPECT_ZERO(nmdShutdown(cl));
    nmdFree(cl);
    fprintf(stderr, "TEST_SUCCESS\n"); 
    return EXIT_SUCCESS;
}
Example 4
/**
 * Test that we can create a MiniDFSCluster and shut it down.
 */
int main(void) {
    struct NativeMiniDfsCluster* cl;
    /* conf is not declared in this excerpt; a minimal configuration that
     * formats the cluster on startup, following the other examples: */
    struct NativeMiniDfsConf conf = {
        .doFormat = 1,
    };

    cl = nmdCreate(&conf);
    EXPECT_NONNULL(cl);
    EXPECT_ZERO(nmdWaitClusterUp(cl));
    EXPECT_ZERO(nmdShutdown(cl));
    nmdFree(cl);

    return 0;
}
Example 5
static int createZeroCopyTestFile(hdfsFS fs, char *testFileName,
                                  size_t testFileNameLen)
{
    int blockIdx, blockLen;
    hdfsFile file;
    uint8_t *data;

    snprintf(testFileName, testFileNameLen, "/zeroCopyTestFile.%d.%d",
             getpid(), rand());
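    /* hdfsOpenFile's trailing parameters are (bufferSize, replication,
     * blockSize), with 0 selecting the configured default; here replication
     * is pinned to 1 and the block size to TEST_ZEROCOPY_FULL_BLOCK_SIZE. */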
    file = hdfsOpenFile(fs, testFileName, O_WRONLY, 0, 1,
                        TEST_ZEROCOPY_FULL_BLOCK_SIZE);
    EXPECT_NONNULL(file);
    for (blockIdx = 0; blockIdx < TEST_ZEROCOPY_NUM_BLOCKS; blockIdx++) {
        blockLen = getZeroCopyBlockLen(blockIdx);
        data = getZeroCopyBlockData(blockIdx);
        EXPECT_NONNULL(data);
        EXPECT_INT_EQ(blockLen, hdfsWrite(fs, file, data, blockLen));
    }
    EXPECT_ZERO(hdfsCloseFile(fs, file));
    return 0;
}
Example 6
bool TestClear() {
    BEGIN_TEST;

    StringList list;
    list.push_front("bar");

    EXPECT_NONNULL(list.first());
    list.clear();
    EXPECT_NULL(list.next());
    EXPECT_NULL(list.first());
    EXPECT_EQ(list.length(), 0);

    END_TEST;
}
Example 7
/**
 * Test that we can write a file with libhdfs and then read it back
 */
int main(int argc, const char *args[])
{
    int i, tlhNumThreads;
    const char *tlhNumThreadsStr;
    struct tlhThreadInfo ti[TLH_MAX_THREADS];
    
    if (argc != 2) {
        fprintf(stderr, "usage: test_libwebhdfs_threaded <username>\n");
        exit(1);
    }
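    /* Note: user and cluster are not declared in this excerpt; presumably
     * they are file-scope globals shared with the worker threads. */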
    user = args[1];
    
    struct NativeMiniDfsConf conf = {
        .doFormat = 1, .webhdfsEnabled = 1, .namenodeHttpPort = 50070,
    };
    cluster = nmdCreate(&conf);
    EXPECT_NONNULL(cluster);
    EXPECT_ZERO(nmdWaitClusterUp(cluster));
    
    tlhNumThreadsStr = getenv("TLH_NUM_THREADS");
    if (!tlhNumThreadsStr) {
        tlhNumThreadsStr = "3";
    }
    tlhNumThreads = atoi(tlhNumThreadsStr);
    if ((tlhNumThreads <= 0) || (tlhNumThreads > TLH_MAX_THREADS)) {
        fprintf(stderr, "testLibHdfs: must have a number of threads "
                "between 1 and %d inclusive, not %d\n",
                TLH_MAX_THREADS, tlhNumThreads);
        return EXIT_FAILURE;
    }
    memset(&ti[0], 0, sizeof(ti));
    for (i = 0; i < tlhNumThreads; i++) {
        ti[i].threadIdx = i;
    }
    
    for (i = 0; i < tlhNumThreads; i++) {
        EXPECT_ZERO(pthread_create(&ti[i].thread, NULL,
                                   testHdfsOperations, &ti[i]));
    }
    for (i = 0; i < tlhNumThreads; i++) {
        EXPECT_ZERO(pthread_join(ti[i].thread, NULL));
    }
    
    EXPECT_ZERO(nmdShutdown(cluster));
    nmdFree(cluster);
    return checkFailures(ti, tlhNumThreads);
}
Example 8
/**
 * Test that we can write a file with libhdfs and then read it back
 */
int main(void)
{
    int i, tlhNumThreads;
    const char *tlhNumThreadsStr;
    struct tlhThreadInfo ti[TLH_MAX_THREADS];
    struct NativeMiniDfsConf conf = {
        1, /* doFormat */
    };

    tlhNumThreadsStr = getenv("TLH_NUM_THREADS");
    if (!tlhNumThreadsStr) {
        tlhNumThreadsStr = "3";
    }
    tlhNumThreads = atoi(tlhNumThreadsStr);
    if ((tlhNumThreads <= 0) || (tlhNumThreads > TLH_MAX_THREADS)) {
        fprintf(stderr, "testLibHdfs: must have a number of threads "
                "between 1 and %d inclusive, not %d\n",
                TLH_MAX_THREADS, tlhNumThreads);
        return EXIT_FAILURE;
    }
    memset(&ti[0], 0, sizeof(ti));
    for (i = 0; i < tlhNumThreads; i++) {
        ti[i].threadIdx = i;
    }

    tlhCluster = nmdCreate(&conf);
    EXPECT_NONNULL(tlhCluster);
    EXPECT_ZERO(nmdWaitClusterUp(tlhCluster));

    for (i = 0; i < tlhNumThreads; i++) {
        ti[i].theThread.start = testHdfsOperations;
        ti[i].theThread.arg = &ti[i];
        EXPECT_ZERO(threadCreate(&ti[i].theThread));
    }
    for (i = 0; i < tlhNumThreads; i++) {
        EXPECT_ZERO(threadJoin(&ti[i].theThread));
    }

    EXPECT_ZERO(nmdShutdown(tlhCluster));
    nmdFree(tlhCluster);
    return checkFailures(ti, tlhNumThreads);
}
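Example 8 drives the same worker through a small thread wrapper (theThread.start, theThread.arg, threadCreate, threadJoin) instead of calling pthreads directly as Example 7 does; tlhCluster, like cluster in Example 7, is a file-scope global not shown in the excerpt. A minimal sketch of what such a wrapper might look like on POSIX follows; every name beyond those used above is an assumption, not the library's actual definition.

#include <pthread.h>

struct thread {
    pthread_t id;              /* underlying POSIX thread (assumed field) */
    void (*start)(void *arg);  /* entry point, as assigned in Example 8 */
    void *arg;                 /* argument handed to start */
};

/* Adapts the void-returning start routine to pthread's void * signature. */
static void *thread_trampoline(void *v)
{
    struct thread *t = v;
    t->start(t->arg);
    return NULL;
}

static int threadCreate(struct thread *t)
{
    return pthread_create(&t->id, NULL, thread_trampoline, t);
}

static int threadJoin(const struct thread *t)
{
    return pthread_join(t->id, NULL);
}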
Example 9
static int doTestZeroCopyReads(hdfsFS fs, const char *fileName)
{
    hdfsFile file = NULL;
    struct hadoopRzOptions *opts = NULL;
    struct hadoopRzBuffer *buffer = NULL;
    uint8_t *block;

    file = hdfsOpenFile(fs, fileName, O_RDONLY, 0, 0, 0);
    EXPECT_NONNULL(file);
    opts = hadoopRzOptionsAlloc();
    EXPECT_NONNULL(opts);
    EXPECT_ZERO(hadoopRzOptionsSetSkipChecksum(opts, 1));
    /* haven't read anything yet */
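    /* expectFileStats's four expected values are, in order, totalBytesRead,
     * totalLocalBytesRead, totalShortCircuitBytesRead, and
     * totalZeroCopyBytesRead -- names inferred from how the helper is used
     * in this test. */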
    EXPECT_ZERO(expectFileStats(file, 0LL, 0LL, 0LL, 0LL));
    block = getZeroCopyBlockData(0);
    EXPECT_NONNULL(block);
    /* first read is half of a block. */
    buffer = hadoopReadZero(file, opts, TEST_ZEROCOPY_FULL_BLOCK_SIZE / 2);
    EXPECT_NONNULL(buffer);
    EXPECT_INT_EQ(TEST_ZEROCOPY_FULL_BLOCK_SIZE / 2,
          hadoopRzBufferLength(buffer));
    EXPECT_ZERO(memcmp(hadoopRzBufferGet(buffer), block,
          TEST_ZEROCOPY_FULL_BLOCK_SIZE / 2));
    hadoopRzBufferFree(file, buffer);
    /* read the next half of the block */
    buffer = hadoopReadZero(file, opts, TEST_ZEROCOPY_FULL_BLOCK_SIZE / 2);
    EXPECT_NONNULL(buffer);
    EXPECT_INT_EQ(TEST_ZEROCOPY_FULL_BLOCK_SIZE / 2,
          hadoopRzBufferLength(buffer));
    EXPECT_ZERO(memcmp(hadoopRzBufferGet(buffer),
          block + (TEST_ZEROCOPY_FULL_BLOCK_SIZE / 2),
          TEST_ZEROCOPY_FULL_BLOCK_SIZE / 2));
    hadoopRzBufferFree(file, buffer);
    free(block);
    EXPECT_ZERO(expectFileStats(file, TEST_ZEROCOPY_FULL_BLOCK_SIZE, 
              TEST_ZEROCOPY_FULL_BLOCK_SIZE,
              TEST_ZEROCOPY_FULL_BLOCK_SIZE,
              TEST_ZEROCOPY_FULL_BLOCK_SIZE));
    /* Now let's read just a few bytes. */
    buffer = hadoopReadZero(file, opts, SMALL_READ_LEN);
    EXPECT_NONNULL(buffer);
    EXPECT_INT_EQ(SMALL_READ_LEN, hadoopRzBufferLength(buffer));
    block = getZeroCopyBlockData(1);
    EXPECT_NONNULL(block);
    EXPECT_ZERO(memcmp(block, hadoopRzBufferGet(buffer), SMALL_READ_LEN));
    hadoopRzBufferFree(file, buffer);
    EXPECT_INT64_EQ(
          (int64_t)TEST_ZEROCOPY_FULL_BLOCK_SIZE + (int64_t)SMALL_READ_LEN,
          hdfsTell(fs, file));
    EXPECT_ZERO(expectFileStats(file,
          TEST_ZEROCOPY_FULL_BLOCK_SIZE + SMALL_READ_LEN,
          TEST_ZEROCOPY_FULL_BLOCK_SIZE + SMALL_READ_LEN,
          TEST_ZEROCOPY_FULL_BLOCK_SIZE + SMALL_READ_LEN,
          TEST_ZEROCOPY_FULL_BLOCK_SIZE + SMALL_READ_LEN));

    /* Clear 'skip checksums' and verify that zero-copy reads now fail.
     * Since no ByteBufferPool is set, the read should fail with
     * EPROTONOSUPPORT.  (See the call-sequence sketch after this function.)
     */
    EXPECT_ZERO(hadoopRzOptionsSetSkipChecksum(opts, 0));
    EXPECT_NULL(hadoopReadZero(file, opts, TEST_ZEROCOPY_FULL_BLOCK_SIZE));
    EXPECT_INT_EQ(EPROTONOSUPPORT, errno);

    /* Verify that setting a NULL ByteBufferPool class works. */
    EXPECT_ZERO(hadoopRzOptionsSetByteBufferPool(opts, NULL));
    EXPECT_ZERO(hadoopRzOptionsSetSkipChecksum(opts, 0));
    EXPECT_NULL(hadoopReadZero(file, opts, TEST_ZEROCOPY_FULL_BLOCK_SIZE));
    EXPECT_INT_EQ(EPROTONOSUPPORT, errno);

    /* Now set a ByteBufferPool and try again.  It should succeed this time. */
    EXPECT_ZERO(hadoopRzOptionsSetByteBufferPool(opts,
          ELASTIC_BYTE_BUFFER_POOL_CLASS));
    buffer = hadoopReadZero(file, opts, TEST_ZEROCOPY_FULL_BLOCK_SIZE);
    EXPECT_NONNULL(buffer);
    EXPECT_INT_EQ(TEST_ZEROCOPY_FULL_BLOCK_SIZE, hadoopRzBufferLength(buffer));
    EXPECT_ZERO(expectFileStats(file,
          (2 * TEST_ZEROCOPY_FULL_BLOCK_SIZE) + SMALL_READ_LEN,
          (2 * TEST_ZEROCOPY_FULL_BLOCK_SIZE) + SMALL_READ_LEN,
          (2 * TEST_ZEROCOPY_FULL_BLOCK_SIZE) + SMALL_READ_LEN,
          TEST_ZEROCOPY_FULL_BLOCK_SIZE + SMALL_READ_LEN));
    EXPECT_ZERO(memcmp(block + SMALL_READ_LEN, hadoopRzBufferGet(buffer),
        TEST_ZEROCOPY_FULL_BLOCK_SIZE - SMALL_READ_LEN));
    free(block);
    block = getZeroCopyBlockData(2);
    EXPECT_NONNULL(block);
    EXPECT_ZERO(memcmp(block, (uint8_t*)hadoopRzBufferGet(buffer) +
        (TEST_ZEROCOPY_FULL_BLOCK_SIZE - SMALL_READ_LEN), SMALL_READ_LEN));
    hadoopRzBufferFree(file, buffer);

    /* Check the result of a zero-length read. */
    buffer = hadoopReadZero(file, opts, 0);
    EXPECT_NONNULL(buffer);
    EXPECT_NONNULL(hadoopRzBufferGet(buffer));
    EXPECT_INT_EQ(0, hadoopRzBufferLength(buffer));
    hadoopRzBufferFree(file, buffer);

    /* Check the result of reading past EOF */
    EXPECT_INT_EQ(0, hdfsSeek(fs, file, TEST_ZEROCOPY_FILE_LEN));
    buffer = hadoopReadZero(file, opts, 1);
    EXPECT_NONNULL(buffer);
    EXPECT_NULL(hadoopRzBufferGet(buffer));
    hadoopRzBufferFree(file, buffer);

    /* Cleanup */
    free(block);
    hadoopRzOptionsFree(opts);
    EXPECT_ZERO(hdfsCloseFile(fs, file));
    return 0;
}
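Distilled from the test above, the basic zero-copy call sequence is sketched below. Error handling is reduced to early returns, and the behaviors noted in the comments (EPROTONOSUPPORT without a pool, NULL hadoopRzBufferGet() past EOF) are what the test asserts rather than a documented contract.

static int zeroCopyReadSketch(hdfsFile file, int32_t maxLen)
{
    struct hadoopRzOptions *opts;
    struct hadoopRzBuffer *buf;
    const void *data;

    opts = hadoopRzOptionsAlloc();
    if (!opts)
        return -1;
    /* With checksums enabled and no ByteBufferPool, hadoopReadZero fails
     * with EPROTONOSUPPORT; setting a pool lets it fall back to a copy. */
    if (hadoopRzOptionsSetByteBufferPool(opts,
            ELASTIC_BYTE_BUFFER_POOL_CLASS)) {
        hadoopRzOptionsFree(opts);
        return -1;
    }
    buf = hadoopReadZero(file, opts, maxLen);
    if (!buf) {
        hadoopRzOptionsFree(opts);
        return -1;
    }
    data = hadoopRzBufferGet(buf);  /* NULL when positioned at or past EOF */
    if (data) {
        int32_t n = hadoopRzBufferLength(buf); /* 0 for a zero-length read */
        (void)n; /* ... consume n bytes starting at data ... */
    }
    hadoopRzBufferFree(file, buf);
    hadoopRzOptionsFree(opts);
    return 0;
}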
Example 10
static int doTestHdfsOperations(struct tlhThreadInfo *ti, hdfsFS fs,
                                const struct tlhPaths *paths)
{
    char tmp[4096];
    hdfsFile file;
    int ret, expected, numEntries;
    hdfsFileInfo *fileInfo;
    struct hdfsReadStatistics *readStats = NULL;

    if (hdfsExists(fs, paths->prefix) == 0) {
        /* hdfsExists returns 0 when the path exists: clear out leftovers
         * from a previous run. */
        EXPECT_ZERO(hdfsDelete(fs, paths->prefix, 1));
    }
    EXPECT_ZERO(hdfsCreateDirectory(fs, paths->prefix));

    EXPECT_ZERO(doTestGetDefaultBlockSize(fs, paths->prefix));

    /* There should be no entry in the directory. */
    errno = EACCES; // see if errno is set to 0 on success
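    /* EXPECT_NULL_WITH_ERRNO(expr, e) presumably asserts both that expr is
     * NULL and that errno equals e -- here, that listing an empty directory
     * returns NULL and resets errno to 0. */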
    EXPECT_NULL_WITH_ERRNO(hdfsListDirectory(fs, paths->prefix, &numEntries), 0);
    if (numEntries != 0) {
        fprintf(stderr, "hdfsListDirectory set numEntries to "
                "%d on an empty directory.\n", numEntries);
    }

    /* There should not be any file to open for reading. */
    EXPECT_NULL(hdfsOpenFile(fs, paths->file1, O_RDONLY, 0, 0, 0));

    /* hdfsOpenFile should reject mode = 3 (on typical systems,
     * O_WRONLY|O_RDWR, which is not a valid combination of access modes) */
    EXPECT_NULL(hdfsOpenFile(fs, paths->file1, 3, 0, 0, 0));

    file = hdfsOpenFile(fs, paths->file1, O_WRONLY, 0, 0, 0);
    EXPECT_NONNULL(file);

    /* TODO: implement writeFully and use it here */
    expected = (int)strlen(paths->prefix);
    ret = hdfsWrite(fs, file, paths->prefix, expected);
    if (ret < 0) {
        ret = errno;
        fprintf(stderr, "hdfsWrite failed and set errno %d\n", ret);
        return ret;
    }
    if (ret != expected) {
        fprintf(stderr, "hdfsWrite was supposed to write %d bytes, but "
                "it wrote %d\n", expected, ret);
        return EIO;
    }
    EXPECT_ZERO(hdfsFlush(fs, file));
    EXPECT_ZERO(hdfsHSync(fs, file));
    EXPECT_ZERO(hdfsCloseFile(fs, file));

    /* There should be 1 entry in the directory. */
    EXPECT_NONNULL(hdfsListDirectory(fs, paths->prefix, &numEntries));
    if (numEntries != 1) {
        fprintf(stderr, "hdfsListDirectory set numEntries to "
                "%d on a directory containing 1 file.\n", numEntries);
    }

    /* Let's re-open the file for reading */
    file = hdfsOpenFile(fs, paths->file1, O_RDONLY, 0, 0, 0);
    EXPECT_NONNULL(file);

    EXPECT_ZERO(hdfsFileGetReadStatistics(file, &readStats));
    errno = 0;
    EXPECT_UINT64_EQ(UINT64_C(0), readStats->totalBytesRead);
    EXPECT_UINT64_EQ(UINT64_C(0), readStats->totalLocalBytesRead);
    EXPECT_UINT64_EQ(UINT64_C(0), readStats->totalShortCircuitBytesRead);
    hdfsFileFreeReadStatistics(readStats);
    /* TODO: implement readFully and use it here */
    ret = hdfsRead(fs, file, tmp, sizeof(tmp));
    if (ret < 0) {
        ret = errno;
        fprintf(stderr, "hdfsRead failed and set errno %d\n", ret);
        return ret;
    }
    if (ret != expected) {
        fprintf(stderr, "hdfsRead was supposed to read %d bytes, but "
                "it read %d\n", expected, ret);
        return EIO;
    }
    EXPECT_ZERO(hdfsFileGetReadStatistics(file, &readStats));
    errno = 0;
    EXPECT_UINT64_EQ((uint64_t)expected, readStats->totalBytesRead);
    hdfsFileFreeReadStatistics(readStats);
    EXPECT_ZERO(hdfsFileClearReadStatistics(file));
    EXPECT_ZERO(hdfsFileGetReadStatistics(file, &readStats));
    EXPECT_UINT64_EQ((uint64_t)0, readStats->totalBytesRead);
    hdfsFileFreeReadStatistics(readStats);
    EXPECT_ZERO(memcmp(paths->prefix, tmp, expected));
    EXPECT_ZERO(hdfsCloseFile(fs, file));

    // TODO: Non-recursive delete should fail?
    //EXPECT_NONZERO(hdfsDelete(fs, prefix, 0));
    EXPECT_ZERO(hdfsCopy(fs, paths->file1, fs, paths->file2));

    EXPECT_ZERO(hdfsChown(fs, paths->file2, NULL, NULL));
    EXPECT_ZERO(hdfsChown(fs, paths->file2, NULL, "doop"));
    fileInfo = hdfsGetPathInfo(fs, paths->file2);
    EXPECT_NONNULL(fileInfo);
    EXPECT_ZERO(strcmp("doop", fileInfo->mGroup));
    EXPECT_ZERO(hdfsFileIsEncrypted(fileInfo));
    hdfsFreeFileInfo(fileInfo, 1);

    EXPECT_ZERO(hdfsChown(fs, paths->file2, "ha", "doop2"));
    fileInfo = hdfsGetPathInfo(fs, paths->file2);
    EXPECT_NONNULL(fileInfo);
    EXPECT_ZERO(strcmp("ha", fileInfo->mOwner));
    EXPECT_ZERO(strcmp("doop2", fileInfo->mGroup));
    hdfsFreeFileInfo(fileInfo, 1);

    EXPECT_ZERO(hdfsChown(fs, paths->file2, "ha2", NULL));
    fileInfo = hdfsGetPathInfo(fs, paths->file2);
    EXPECT_NONNULL(fileInfo);
    EXPECT_ZERO(strcmp("ha2", fileInfo->mOwner));
    EXPECT_ZERO(strcmp("doop2", fileInfo->mGroup));
    hdfsFreeFileInfo(fileInfo, 1);

    snprintf(tmp, sizeof(tmp), "%s/nonexistent-file-name", paths->prefix);
    EXPECT_NEGATIVE_ONE_WITH_ERRNO(hdfsChown(fs, tmp, "ha3", NULL), ENOENT);
    return 0;
}
Example 11
static int doTestHdfsOperations(struct tlhThreadInfo *ti, hdfsFS fs)
{
    char prefix[256], tmp[256];
    hdfsFile file;
    int ret, expected;
    hdfsFileInfo *fileInfo;
    
    snprintf(prefix, sizeof(prefix), "/tlhData%04d", ti->threadIdx);
    
    if (hdfsExists(fs, prefix) == 0) {
        EXPECT_ZERO(hdfsDelete(fs, prefix, 1));
    }
    EXPECT_ZERO(hdfsCreateDirectory(fs, prefix));
    snprintf(tmp, sizeof(tmp), "%s/file", prefix);
    
    /* Unlike the libhdfs variant in Example 10, this expects a non-NULL
     * handle even though the file does not exist yet; libwebhdfs presumably
     * defers the existence check until the first read. */
    EXPECT_NONNULL(hdfsOpenFile(fs, tmp, O_RDONLY, 0, 0, 0));
    
    file = hdfsOpenFile(fs, tmp, O_WRONLY, 0, 0, 0);
    EXPECT_NONNULL(file);
    
    /* TODO: implement writeFully and use it here */
    expected = (int)strlen(prefix);
    ret = hdfsWrite(fs, file, prefix, expected);
    if (ret < 0) {
        ret = errno;
        fprintf(stderr, "hdfsWrite failed and set errno %d\n", ret);
        return ret;
    }
    if (ret != expected) {
        fprintf(stderr, "hdfsWrite was supposed to write %d bytes, but "
                "it wrote %d\n", expected, ret);
        return EIO;
    }
    EXPECT_ZERO(hdfsFlush(fs, file));
    EXPECT_ZERO(hdfsCloseFile(fs, file));
    
    /* Let's re-open the file for reading */
    file = hdfsOpenFile(fs, tmp, O_RDONLY, 0, 0, 0);
    EXPECT_NONNULL(file);
    
    /* TODO: implement readFully and use it here */
    ret = hdfsRead(fs, file, tmp, sizeof(tmp));
    if (ret < 0) {
        ret = errno;
        fprintf(stderr, "hdfsRead failed and set errno %d\n", ret);
        return ret;
    }
    if (ret != expected) {
        fprintf(stderr, "hdfsRead was supposed to read %d bytes, but "
                "it read %d\n", expected, ret);
        return EIO;
    }
    EXPECT_ZERO(memcmp(prefix, tmp, expected));
    EXPECT_ZERO(hdfsCloseFile(fs, file));
        
    snprintf(tmp, sizeof(tmp), "%s/file", prefix);
    EXPECT_NONZERO(hdfsChown(fs, tmp, NULL, NULL));
    EXPECT_ZERO(hdfsChown(fs, tmp, NULL, "doop"));
    fileInfo = hdfsGetPathInfo(fs, tmp);
    EXPECT_NONNULL(fileInfo);
    EXPECT_ZERO(strcmp("doop", fileInfo->mGroup));
    hdfsFreeFileInfo(fileInfo, 1);
    
    EXPECT_ZERO(hdfsChown(fs, tmp, "ha", "doop2"));
    fileInfo = hdfsGetPathInfo(fs, tmp);
    EXPECT_NONNULL(fileInfo);
    EXPECT_ZERO(strcmp("ha", fileInfo->mOwner));
    EXPECT_ZERO(strcmp("doop2", fileInfo->mGroup));
    hdfsFreeFileInfo(fileInfo, 1);
    
    EXPECT_ZERO(hdfsChown(fs, tmp, "ha2", NULL));
    fileInfo = hdfsGetPathInfo(fs, tmp);
    EXPECT_NONNULL(fileInfo);
    EXPECT_ZERO(strcmp("ha2", fileInfo->mOwner));
    EXPECT_ZERO(strcmp("doop2", fileInfo->mGroup));
    hdfsFreeFileInfo(fileInfo, 1);
    
    EXPECT_ZERO(hdfsDelete(fs, prefix, 1));
    return 0;
}