END_TEST

START_TEST(test_addBlock)
{
	bool s;
	struct hdfs_object *e = NULL, *lb;
	const char *tf = "/HADOOFUS_TEST_ADDBLOCK",
	      *client = "HADOOFUS_CLIENT";

	// Create the file first
	hdfs_create(h, tf, 0644, client, true/*overwrite*/,
	    false/*createparent*/, 1/*replication*/, 64*1024*1024, &e);
	if (e)
		ck_abort_msg("exception: %s", hdfs_exception_get_message(e));

	mark_point();

	lb = hdfs_addBlock(h, tf, client, NULL, &e);
	if (e)
		ck_abort_msg("exception: %s", hdfs_exception_get_message(e));
	ck_assert(!hdfs_object_is_null(lb));

	hdfs_object_free(lb);
	mark_point();

	// Cleanup
	s = hdfs_delete(h, tf, false/*recurse*/, &e);
	if (e)
		ck_abort_msg("exception: %s", hdfs_exception_get_message(e));
	ck_assert_msg(s, "delete returned false");
}
END_TEST

START_TEST(test_abandonBlock)
{
	bool s;
	struct hdfs_object *e = NULL, *lb, *bl;
	const char *tf = "/HADOOFUS_TEST_ABANDONBLOCK",
	      *client = "HADOOFUS_CLIENT";

	// Create the file first
	hdfs_create(h, tf, 0644, client, true/*overwrite*/,
	    false/*createparent*/, 1/*replication*/, 64*1024*1024, &e);
	if (e)
		ck_abort_msg("exception: %s", hdfs_exception_get_message(e));

	mark_point();

	// XXX this must be updated to cover v2.0+ (last_block/fileid)
	lb = hdfs_addBlock(h, tf, client, NULL, NULL, 0, &e);
	if (e)
		ck_abort_msg("exception: %s", hdfs_exception_get_message(e));
	ck_assert(!hdfs_object_is_null(lb));

	mark_point();

	bl = hdfs_block_from_located_block(lb);
	hdfs_object_free(lb);

	mark_point();

	hdfs_abandonBlock(h, bl, tf, client, &e);
	if (e)
		ck_abort_msg("exception: %s", hdfs_exception_get_message(e));

	hdfs_object_free(bl);
	mark_point();

	// Cleanup
	s = hdfs_delete(h, tf, false/*recurse*/, &e);
	if (e)
		ck_abort_msg("exception: %s", hdfs_exception_get_message(e));
	ck_assert_msg(s, "delete returned false");
}
END_TEST

// Flushes the internal write buffer to disk. If the passed buf is non-NULL,
// flushes that instead. Returns 0 on success, -1 on error.
static int
_flush(struct hdfs_namenode *fs, struct hdfsFile_internal *f, const void *buf, size_t len)
{
	struct hdfs_object *lb = NULL, *ex = NULL, *excl = NULL, *block = NULL;
	struct hdfs_datanode *dn = NULL;
	int res = 0, tries = 3;
	const char *err = NULL, *msg_fmt;

	const char *wbuf = f->fi_wbuf;
	size_t wbuf_len = f->fi_wbuf_used;

	// Skip the extra copy if the user is feeding us full-sized blocks
	if (buf) {
		wbuf = buf;
		wbuf_len = len;
	}

	if (!wbuf_len)
		goto out;

	// For appends, we need to finish the last block
	if (f->fi_lastblock) {
		lb = f->fi_lastblock;
		f->fi_lastblock = NULL;
	}

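	// Try up to 'tries' times: allocate a block, stream the data to a
	// datanode, and on failure abandon the block, exclude that datanode,
	// and retry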
	for (; tries > 0; tries--) {
		if (!lb) {
			lb = hdfs_addBlock(fs, f->fi_path, f->fi_client, excl, &ex);
			if (ex) {
				ERR(EIO, "addBlock failed: %s", hdfs_exception_get_message(ex));
				res = -1;
				goto out;
			}
			if (lb->ob_type == H_NULL) {
				ERR(EIO, "addBlock returned bogus null");
				res = -1;
				goto out;
			}
		}

		msg_fmt = "connect to datanode failed";
		dn = hdfs_datanode_new(lb, f->fi_client, HDFS_DATANODE_AP_1_0, &err);
		if (!dn)
			goto dn_failed;

		msg_fmt = "write failed";
		err = hdfs_datanode_write(dn, wbuf, wbuf_len, true/*crcs*/);
		hdfs_datanode_delete(dn);
		if (!err)
			break;

dn_failed:
		// On failure, either warn and try again, or give up
		if (tries == 1) {
			ERR(ECONNREFUSED, "%s: %s%s", msg_fmt, err, "");
			res = -1;
			goto out;
		}

		WARN("%s: %s%s", msg_fmt, err, ", retrying");

		block = hdfs_block_from_located_block(lb);
		hdfs_abandonBlock(fs, block, f->fi_path, f->fi_client, &ex);
		if (ex) {
			ERR(EIO, "abandonBlock failed: %s", hdfs_exception_get_message(ex));
			res = -1;
			goto out;
		}

		hdfs_object_free(block);
		block = NULL;

		// Add physical location of bad block's datanode to excluded
		// list
		if (lb->ob_val._located_block._num_locs > 0) {
			if (!excl)
				excl = hdfs_array_datanode_info_new();

			hdfs_array_datanode_info_append_datanode_info(
			    excl,
			    hdfs_datanode_info_copy(
				lb->ob_val._located_block._locs[0]
				)
			    );
		}

		hdfs_object_free(lb);
		lb = NULL;
	}

	// If we weren't handed a user buffer directly, the internal buffer has
	// now been flushed:
	if (!buf)
		f->fi_wbuf_used = 0;

out:
	if (ex)
		hdfs_object_free(ex);
	if (lb)
		hdfs_object_free(lb);
	if (block)
		hdfs_object_free(block);
	if (excl)
		hdfs_object_free(excl);
	return res;
}
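
/*
 * Hypothetical usage sketch (not part of the library): it shows how a
 * buffered write path could drive _flush() per its contract above -- pass
 * buf == NULL to drain the file's internal buffer, or pass a user buffer
 * directly to skip the staging copy.  The function name _write_sketch and
 * the capacity field fi_wbuf_size are illustrative assumptions; fi_wbuf is
 * assumed to be a writable char buffer, and memcpy() requires <string.h>.
 */
static int
_write_sketch(struct hdfs_namenode *fs, struct hdfsFile_internal *f,
	const void *ubuf, size_t ulen)
{
	size_t cap = f->fi_wbuf_size;	// assumed buffer-capacity field

	// Drain any previously staged bytes first so data stays in order
	if (f->fi_wbuf_used > 0 &&
	    (ulen >= cap || f->fi_wbuf_used + ulen > cap)) {
		if (_flush(fs, f, NULL, 0) == -1)
			return -1;
	}

	// Block-sized (or larger) writes go straight through _flush's buf path
	if (ulen >= cap)
		return _flush(fs, f, ubuf, ulen);

	// Otherwise stage the bytes; a later call or close flushes them
	memcpy(f->fi_wbuf + f->fi_wbuf_used, ubuf, ulen);
	f->fi_wbuf_used += ulen;
	return 0;
}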

START_TEST(test_dn_write_file)
{
	const char *tf = "/HADOOFUS_TEST_WRITE_FILE",
	      *client = "HADOOFUS_CLIENT", *err;
	bool s;

	struct hdfs_datanode *dn;
	struct hdfs_object *e = NULL, *bl, *fs, *bls;
	uint64_t begin, end;

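	// Remove any file left over from a previous run; the boolean result is
	// not checked here since the file may not exist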
	s = hdfs_delete(h, tf, false/*recurse*/, &e);
	if (e)
		fail("exception: %s", hdfs_exception_get_message(e));

	hdfs_create(h, tf, 0644, client, true/*overwrite*/,
	    false/*createparent*/, 1/*replication*/, blocksz, &e);
	if (e)
		fail("exception: %s", hdfs_exception_get_message(e));

	begin = _now();

	// write first block (full)
	bl = hdfs_addBlock(h, tf, client, NULL, &e);
	if (e)
		fail("exception: %s", hdfs_exception_get_message(e));

	dn = hdfs_datanode_new(bl, client, HDFS_DATANODE_AP_1_0, &err);
	ck_assert_msg((intptr_t)dn, "error connecting to datanode: %s", err);

	hdfs_object_free(bl);

	err = hdfs_datanode_write_file(dn, fd, blocksz, 0, _i/*crcs*/);
	fail_if(err, "error writing block: %s", err);

	hdfs_datanode_delete(dn);

	// write second block (partial)
	bl = hdfs_addBlock(h, tf, client, NULL, &e);
	if (e)
		fail("exception: %s", hdfs_exception_get_message(e));

	dn = hdfs_datanode_new(bl, client, HDFS_DATANODE_AP_1_0, &err);
	ck_assert_msg((intptr_t)dn, "error connecting to datanode: %s", err);

	hdfs_object_free(bl);

	err = hdfs_datanode_write_file(dn, fd, towrite-blocksz, blocksz, _i/*crcs*/);
	fail_if(err, "error writing block: %s", err);

	hdfs_datanode_delete(dn);

	end = _now();
	fprintf(stderr, "Wrote %d MB from file in %ld ms%s, %02g MB/s\n",
	    towrite/1024/1024, end - begin, _i? " (with crcs)":"",
	    (double)towrite/(end-begin)/1024*1000/1024);

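	// Verify the namenode reports the full file size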
	fs = hdfs_getFileInfo(h, tf, &e);
	if (e)
		fail("exception: %s", hdfs_exception_get_message(e));
	ck_assert(fs->ob_val._file_status._size == towrite);
	hdfs_object_free(fs);

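	// Finalize the file with the namenode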
	s = hdfs_complete(h, tf, client, &e);
	if (e)
		fail("exception: %s", hdfs_exception_get_message(e));
	ck_assert_msg(s, "did not complete");

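	// Fetch the block locations and read every block back into ofd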
	bls = hdfs_getBlockLocations(h, tf, 0, towrite, &e);
	if (e)
		fail("exception: %s", hdfs_exception_get_message(e));

	begin = _now();
	for (int i = 0; i < bls->ob_val._located_blocks._num_blocks; i++) {
		struct hdfs_object *bl =
		    bls->ob_val._located_blocks._blocks[i];
		dn = hdfs_datanode_new(bl, client, HDFS_DATANODE_AP_1_0, &err);
		ck_assert_msg((intptr_t)dn, "error connecting to datanode: %s", err);

		err = hdfs_datanode_read_file(dn, 0/*offset-in-block*/,
		    bl->ob_val._located_block._len,
		    ofd,
		    i*blocksz/*fd offset*/,
		    _i/*crcs*/);

		hdfs_datanode_delete(dn);

		if (err == HDFS_DATANODE_ERR_NO_CRCS) {
			fprintf(stderr, "Warning: test server doesn't support "
			    "CRCs, skipping validation.\n");
			_i = 0;

			// reconnect, try again without validating CRCs (for
			// isi_hdfs_d)
			dn = hdfs_datanode_new(bl, client, HDFS_DATANODE_AP_1_0, &err);
			ck_assert_msg((intptr_t)dn, "error connecting to datanode: %s", err);

			err = hdfs_datanode_read_file(dn, 0/*offset-in-block*/,
			    bl->ob_val._located_block._len,
			    ofd,
			    i*blocksz,
			    false/*crcs*/);

			hdfs_datanode_delete(dn);
		}

		fail_if(err, "error reading block: %s", err);
	}
	end = _now();
	fprintf(stderr, "Read %d MB to file in %ld ms%s, %02g MB/s\n",
	    towrite/1024/1024, end - begin, _i? " (with crcs)":"",
	    (double)towrite/(end-begin)/1024*1000/1024);

	hdfs_object_free(bls);
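	// The data read back must match what was written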
	fail_if(filecmp(fd, ofd, towrite), "read differed from write");

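	// Cleanup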
	s = hdfs_delete(h, tf, false/*recurse*/, &e);
	if (e)
		fail("exception: %s", hdfs_exception_get_message(e));
	ck_assert_msg(s, "delete returned false");
}