END_TEST START_TEST(test_append) { bool s; struct hdfs_object *e = NULL, *lb; const char *tf = "/HADOOFUS_TEST_APPEND", *client = "HADOOFUS_CLIENT"; // Create the file first hdfs_create(h, tf, 0644, client, true/*overwrite*/, false/*createparent*/, 1/*replication*/, 64*1024*1024, &e); if (e) ck_abort_msg("exception: %s", hdfs_exception_get_message(e)); s = hdfs_complete(h, tf, client, &e); if (e) ck_abort_msg("exception: %s", hdfs_exception_get_message(e)); ck_assert_msg(s, "complete returned false"); // Open for appending lb = hdfs_append(h, tf, client, &e); if (e) ck_abort_msg("exception: %s", hdfs_exception_get_message(e)); hdfs_object_free(lb); // Cleanup s = hdfs_delete(h, tf, false/*recurse*/, &e); if (e) ck_abort_msg("exception: %s", hdfs_exception_get_message(e)); ck_assert_msg(s, "delete returned false"); }
END_TEST START_TEST(test_setReplication) { bool s; struct hdfs_object *e = NULL; const char *tf = "/HADOOFUS_TEST_SETREPLICATION", *client = "HADOOFUS_CLIENT"; // Create the file first hdfs_create(h, tf, 0644, client, true/*overwrite*/, false/*createparent*/, 1/*replication*/, 64*1024*1024, &e); if (e) ck_abort_msg("exception: %s", hdfs_exception_get_message(e)); s = hdfs_setReplication(h, tf, 2, &e); if (e) ck_abort_msg("exception: %s", hdfs_exception_get_message(e)); ck_assert_msg(s, "setReplication returned false"); // Cleanup s = hdfs_delete(h, tf, false/*recurse*/, &e); if (e) ck_abort_msg("exception: %s", hdfs_exception_get_message(e)); ck_assert_msg(s, "delete returned false"); }
END_TEST START_TEST(test_addBlock) { bool s; struct hdfs_object *e = NULL, *lb; const char *tf = "/HADOOFUS_TEST_ADDBLOCK", *client = "HADOOFUS_CLIENT"; // Create the file first hdfs_create(h, tf, 0644, client, true/*overwrite*/, false/*createparent*/, 1/*replication*/, 64*1024*1024, &e); if (e) ck_abort_msg("exception: %s", hdfs_exception_get_message(e)); mark_point(); lb = hdfs_addBlock(h, tf, client, NULL, &e); if (e) ck_abort_msg("exception: %s", hdfs_exception_get_message(e)); ck_assert(!hdfs_object_is_null(lb)); hdfs_object_free(lb); mark_point(); // Cleanup s = hdfs_delete(h, tf, false/*recurse*/, &e); if (e) ck_abort_msg("exception: %s", hdfs_exception_get_message(e)); ck_assert_msg(s, "delete returned false"); }
END_TEST START_TEST(test_recoverLease) { bool s; struct hdfs_object *e = NULL; const char *tf = "/HADOOFUS_TEST_RECOVERLEASE", *client = "HADOOFUS_CLIENT", *client2 = "HADOOFUS_CLIENT_2"; hdfs_create(h, tf, 0644, client, true/*overwrite*/, false/*createparent*/, 1/*replication*/, 64*1024*1024, &e); if (e) ck_abort_msg("exception: %s", hdfs_exception_get_message(e)); s = hdfs_recoverLease(h, tf, client2, &e); if (e) ck_abort_msg("exception: %s", hdfs_exception_get_message(e)); ck_assert_msg(!s, "recoverLease returned true"); s = hdfs_delete(h, tf, false/*recurse*/, &e); if (e) ck_abort_msg("exception: %s", hdfs_exception_get_message(e)); ck_assert_msg(s, "delete returned false"); }
END_TEST START_TEST(test_complete) { bool s; struct hdfs_object *e = NULL; const char *tf = "/HADOOFUS_TEST_COMPLETE", *client = "HADOOFUS_CLIENT"; // Create the file first hdfs_create(h, tf, 0644, client, true/*overwrite*/, false/*createparent*/, 1/*replication*/, 64*1024*1024, &e); if (e) ck_abort_msg("exception: %s", hdfs_exception_get_message(e)); // XXX this must be updated to cover v2.0+ (last_block/fileid) s = hdfs_complete(h, tf, client, NULL, 0, &e); if (e) ck_abort_msg("exception: %s", hdfs_exception_get_message(e)); ck_assert_msg(s, "complete returned false"); // Cleanup s = hdfs_delete(h, tf, false/*recurse*/, &e); if (e) ck_abort_msg("exception: %s", hdfs_exception_get_message(e)); ck_assert_msg(s, "delete returned false"); }
END_TEST START_TEST(test_abandonBlock) { bool s; struct hdfs_object *e = NULL, *lb, *bl; const char *tf = "/HADOOFUS_TEST_ABANDONBLOCK", *client = "HADOOFUS_CLIENT"; // Create the file first hdfs_create(h, tf, 0644, client, true/*overwrite*/, false/*createparent*/, 1/*replication*/, 64*1024*1024, &e); if (e) ck_abort_msg("exception: %s", hdfs_exception_get_message(e)); mark_point(); // XXX this must be updated to cover v2.0+ (last_block/fileid) lb = hdfs_addBlock(h, tf, client, NULL, NULL, 0, &e); if (e) ck_abort_msg("exception: %s", hdfs_exception_get_message(e)); ck_assert(!hdfs_object_is_null(lb)); mark_point(); bl = hdfs_block_from_located_block(lb); hdfs_object_free(lb); mark_point(); hdfs_abandonBlock(h, bl, tf, client, &e); if (e) ck_abort_msg("exception: %s", hdfs_exception_get_message(e)); hdfs_object_free(bl); mark_point(); // Cleanup s = hdfs_delete(h, tf, false/*recurse*/, &e); if (e) ck_abort_msg("exception: %s", hdfs_exception_get_message(e)); ck_assert_msg(s, "delete returned false"); }
END_TEST START_TEST(test_getBlockLocations2) { struct hdfs_object *e = NULL, *e2 = NULL, *bls; const char *tf = "/HADOOFUS_TEST_GET_BLOCK_LOCATIONS2", *client = "HADOOFUS_CLIENT"; hdfs_create(h, tf, 0644, client, true/*overwrite*/, false/*createparent*/, 1/*replication*/, 64*1024*1024, &e); if (e) ck_abort_msg("exception: %s", hdfs_exception_get_message(e)); bls = hdfs_getBlockLocations(h, tf, 0L, 1000L, &e); hdfs_delete(h, tf, false/*recurse*/, &e2); if (e) ck_abort_msg("exception: %s", hdfs_exception_get_message(e)); if (e2) ck_abort_msg("exception: %s", hdfs_exception_get_message(e2)); ck_assert_msg(bls->ob_type == H_LOCATED_BLOCKS); }
END_TEST

START_TEST(test_setTimes)
{
	bool s;
	struct hdfs_object *e = NULL;
	const char *tf = "/HADOOFUS_TEST_SETTIMES",
	      *client = "HADOOFUS_CLIENT";

	// Need an existing file to set times on
	hdfs_create(h, tf, 0644, client, true/*overwrite*/, false/*createparent*/,
	    1/*replication*/, 64*1024*1024, &e);
	if (e)
		ck_abort_msg("exception: %s", hdfs_exception_get_message(e));

	// -1/-1 leaves mtime/atime unchanged; only checks the RPC round-trips
	hdfs_setTimes(h, tf, -1, -1, &e);
	if (e)
		ck_abort_msg("exception: %s", hdfs_exception_get_message(e));

	// Cleanup
	s = hdfs_delete(h, tf, false/*recurse*/, &e);
	if (e)
		ck_abort_msg("exception: %s", hdfs_exception_get_message(e));
	ck_assert_msg(s, "delete returned false");
}
END_TEST START_TEST(test_getPreferredBlockSize) { bool s; struct hdfs_object *e = NULL; const char *tf = "/HADOOFUS_TEST_GETPREFERREDBLOCKSIZE", *client = "HADOOFUS_CLIENT"; hdfs_create(h, tf, 0644, client, true/*overwrite*/, false/*createparent*/, 1/*replication*/, 64*1024*1024, &e); if (e) ck_abort_msg("exception: %s", hdfs_exception_get_message(e)); /*bs = */hdfs_getPreferredBlockSize(h, tf, &e); if (e) ck_abort_msg("exception: %s", hdfs_exception_get_message(e)); s = hdfs_delete(h, tf, false/*recurse*/, &e); if (e) ck_abort_msg("exception: %s", hdfs_exception_get_message(e)); ck_assert_msg(s, "delete returned false"); }
/**
 * hdfsOpenFile - Open a file. File handles are not thread-safe. Use your own
 * synchronization if you want to share them across threads.
 *
 * @param fs The configured filesystem handle.
 * @param path The full path to the file.
 * @param flags - OR of O_RDONLY, O_WRONLY (meaning create or overwrite i.e.,
 * implies O_TRUNC), O_WRONLY|O_APPEND. Other flags are generally ignored
 * other than (O_RDWR || (O_EXCL & O_CREAT)) which return NULL and set errno
 * equal ENOTSUP.
 * @param bufferSize Size of buffer for read/write - pass 0 if you want to use
 * the default values. (NOTE: currently unused by this implementation.)
 * @param replication Block replication - pass 0 if you want to use the
 * default values.
 * @param blocksize Size of block - pass 0 if you want to use the default
 * values.
 * @return Returns the handle to the open file or NULL on error.
 */
hdfsFile
hdfsOpenFile(hdfsFS fs, const char* path, int flags, int bufferSize,
	short replication, tSize blocksize)
{
	struct hdfs_object *ex = NULL, *lb = NULL;
	struct hdfsFile_internal *res;
	struct hdfsFS_internal *fsclient = fs;
	enum hdfsFile_mode mode;
	char *client = NULL, *path_abs;
	int accmode;
	union {
		int64_t num;
		char bytes[8];
	} client_u;
	const int clientlen = 32;

	accmode = (flags & O_ACCMODE);
	if (accmode == O_RDWR) {
		ERR(ENOTSUP, "Cannot open an hdfs file in O_RDWR mode");
		return NULL;
	}

	if ((flags & O_CREAT) || (flags & O_EXCL))
		WARN("hdfs does not really support O_CREAT and O_EXCL");

	if (accmode == O_WRONLY) {
		if (flags & O_APPEND)
			mode = FILE_APPEND;
		else
			mode = FILE_WRITE;
	} else {
		assert(accmode == O_RDONLY);
		mode = FILE_READ;
	}

	path_abs = _makeabs(fs, path);

	// Defaults
	if (replication == 0)
		replication = DEFAULT_REPLICATION;
	if (blocksize == 0)
		blocksize = DEFAULT_BLOCK_SIZE;

	// Build a unique DFS client name from random bytes.
	_urandbytes(client_u.bytes, nelem(client_u.bytes));
	client = malloc(clientlen);
	assert(client);
	// %jd matches the intmax_t argument (the original %zd expects ssize_t,
	// which is undefined behavior); snprintf NUL-terminates within the
	// full buffer, so pass clientlen rather than clientlen-1.
	snprintf(client, clientlen, "DFSClient_%jd", (intmax_t)client_u.num);
	client[clientlen-1] = '\0';

	if (mode == FILE_WRITE) {
		hdfs_create(fsclient->fs_namenode, path_abs, 0644, client,
		    true/* overwrite */, true/* create parent */, replication,
		    blocksize, &ex);
		if (ex) {
			ERR(EINVAL, "Error opening %s for writing: %s",
			    path_abs, hdfs_exception_get_message(ex));
			hdfs_object_free(ex);
			free(client);
			// Fix: path_abs was leaked on this error path
			if (path_abs != path)
				free(path_abs);
			return NULL;
		}
	} else if (mode == FILE_APPEND) {
		lb = hdfs_append(fsclient->fs_namenode, path_abs, client, &ex);
		if (ex) {
			ERR(EINVAL, "Error opening %s for appending: %s",
			    path_abs, hdfs_exception_get_message(ex));
			hdfs_object_free(ex);
			free(client);
			// Fix: path_abs was leaked on this error path
			if (path_abs != path)
				free(path_abs);
			return NULL;
		}
	}

	res = malloc(sizeof *res);
	assert(res);

	res->fi_offset = 0;
	res->fi_path = strdup(path_abs);
	assert(res->fi_path);
	res->fi_mode = mode;
	res->fi_blocksize = blocksize;
	res->fi_replication = replication;

	// Normalize an H_NULL append response to a plain NULL pointer
	if (lb && lb->ob_type == H_NULL) {
		hdfs_object_free(lb);
		lb = NULL;
	}
	// Ownership of lb transfers to the file handle here. The original
	// code also freed lb just before returning, leaving fi_lastblock
	// dangling (use-after-free); that free has been removed.
	res->fi_lastblock = lb;

	if (mode == FILE_READ) {
		res->fi_wbuf = NULL;
	} else {
		res->fi_wbuf = malloc(blocksize);
		assert(res->fi_wbuf);
	}
	res->fi_wbuf_used = 0;
	res->fi_client = client;

	if (path_abs != path)
		free(path_abs);

	return res;
}
END_TEST START_TEST(test_dn_write_file) { const char *tf = "/HADOOFUS_TEST_WRITE_FILE", *client = "HADOOFUS_CLIENT", *err; bool s; struct hdfs_datanode *dn; struct hdfs_object *e = NULL, *bl, *fs, *bls; uint64_t begin, end; s = hdfs_delete(h, tf, false/*recurse*/, &e); if (e) fail("exception: %s", hdfs_exception_get_message(e)); hdfs_create(h, tf, 0644, client, true/*overwrite*/, false/*createparent*/, 1/*replication*/, blocksz, &e); if (e) fail("exception: %s", hdfs_exception_get_message(e)); begin = _now(); // write first block (full) bl = hdfs_addBlock(h, tf, client, NULL, &e); if (e) fail("exception: %s", hdfs_exception_get_message(e)); dn = hdfs_datanode_new(bl, client, HDFS_DATANODE_AP_1_0, &err); ck_assert_msg((intptr_t)dn, "error connecting to datanode: %s", err); hdfs_object_free(bl); err = hdfs_datanode_write_file(dn, fd, blocksz, 0, _i/*crcs*/); fail_if(err, "error writing block: %s", err); hdfs_datanode_delete(dn); // write second block (partial) bl = hdfs_addBlock(h, tf, client, NULL, &e); if (e) fail("exception: %s", hdfs_exception_get_message(e)); dn = hdfs_datanode_new(bl, client, HDFS_DATANODE_AP_1_0, &err); ck_assert_msg((intptr_t)dn, "error connecting to datanode: %s", err); hdfs_object_free(bl); err = hdfs_datanode_write_file(dn, fd, towrite-blocksz, blocksz, _i/*crcs*/); fail_if(err, "error writing block: %s", err); hdfs_datanode_delete(dn); end = _now(); fprintf(stderr, "Wrote %d MB from file in %ld ms%s, %02g MB/s\n", towrite/1024/1024, end - begin, _i? 
" (with crcs)":"", (double)towrite/(end-begin)/1024*1000/1024); fs = hdfs_getFileInfo(h, tf, &e); if (e) fail("exception: %s", hdfs_exception_get_message(e)); ck_assert(fs->ob_val._file_status._size == towrite); hdfs_object_free(fs); s = hdfs_complete(h, tf, client, &e); if (e) fail("exception: %s", hdfs_exception_get_message(e)); ck_assert_msg(s, "did not complete"); bls = hdfs_getBlockLocations(h, tf, 0, towrite, &e); if (e) fail("exception: %s", hdfs_exception_get_message(e)); begin = _now(); for (int i = 0; i < bls->ob_val._located_blocks._num_blocks; i++) { struct hdfs_object *bl = bls->ob_val._located_blocks._blocks[i]; dn = hdfs_datanode_new(bl, client, HDFS_DATANODE_AP_1_0, &err); ck_assert_msg((intptr_t)dn, "error connecting to datanode: %s", err); err = hdfs_datanode_read_file(dn, 0/*offset-in-block*/, bl->ob_val._located_block._len, ofd, i*blocksz/*fd offset*/, _i/*crcs*/); hdfs_datanode_delete(dn); if (err == HDFS_DATANODE_ERR_NO_CRCS) { fprintf(stderr, "Warning: test server doesn't support " "CRCs, skipping validation.\n"); _i = 0; // reconnect, try again without validating CRCs (for // isi_hdfs_d) dn = hdfs_datanode_new(bl, client, HDFS_DATANODE_AP_1_0, &err); ck_assert_msg((intptr_t)dn, "error connecting to datanode: %s", err); err = hdfs_datanode_read_file(dn, 0/*offset-in-block*/, bl->ob_val._located_block._len, ofd, i*blocksz, false/*crcs*/); hdfs_datanode_delete(dn); } fail_if(err, "error reading block: %s", err); } end = _now(); fprintf(stderr, "Read %d MB to file in %ld ms%s, %02g MB/s\n", towrite/1024/1024, end - begin, _i? " (with crcs)":"", (double)towrite/(end-begin)/1024*1000/1024); hdfs_object_free(bls); fail_if(filecmp(fd, ofd, towrite), "read differed from write"); s = hdfs_delete(h, tf, false/*recurse*/, &e); if (e) fail("exception: %s", hdfs_exception_get_message(e)); ck_assert_msg(s, "delete returned false"); }