Example #1
0
END_TEST

START_TEST(test_getBlockLocations)
{
	struct hdfs_object *exc = NULL, *blocks;

	/*
	 * Asking for block locations of the root directory ("/") should not
	 * raise an exception; it should come back as a null object whose
	 * declared type is H_LOCATED_BLOCKS.
	 */
	blocks = hdfs_getBlockLocations(h, "/", 0L, 1000L, &exc);
	if (exc)
		ck_abort_msg("exception: %s", hdfs_exception_get_message(exc));

	ck_assert(hdfs_object_is_null(blocks));
	ck_assert(hdfs_null_type(blocks) == H_LOCATED_BLOCKS);
}
Example #2
0
END_TEST

START_TEST(test_getBlockLocations2)
{
	struct hdfs_object *e = NULL, *e2 = NULL, *bls;
	const char *tf = "/HADOOFUS_TEST_GET_BLOCK_LOCATIONS2",
	      *client = "HADOOFUS_CLIENT";

	// Create an (empty) file so getBlockLocations has a real target.
	hdfs_create(h, tf, 0644, client, true/*overwrite*/,
	    false/*createparent*/, 1/*replication*/, 64*1024*1024, &e);
	if (e)
		ck_abort_msg("exception: %s", hdfs_exception_get_message(e));

	bls = hdfs_getBlockLocations(h, tf, 0L, 1000L, &e);
	// Delete before checking exceptions so the test file is cleaned up
	// even if getBlockLocations failed.
	hdfs_delete(h, tf, false/*recurse*/, &e2);
	if (e)
		ck_abort_msg("exception: %s", hdfs_exception_get_message(e));
	if (e2)
		ck_abort_msg("exception: %s", hdfs_exception_get_message(e2));

	ck_assert_msg(bls->ob_type == H_LOCATED_BLOCKS);

	// Fix: release the located-blocks object (was leaked); matches the
	// cleanup the datanode tests do for the same object type.
	hdfs_object_free(bls);
}
Example #3
0
File: hdfs.c  Project: 13141516/hadoofus
/**
 * hdfsPread - Positional read of data from an open file.
 *
 * @param fs The configured filesystem handle.
 * @param file The file handle.
 * @param position Position from which to read
 * @param buffer The buffer to copy read bytes into.
 * @param length The length of the buffer.
 * @return Returns the number of bytes actually read, possibly less than
 *     length; -1 on error.
 */
tSize
hdfsPread(hdfsFS fs, hdfsFile file, tOffset position, void* buffer, tSize length)
{
	tSize res = -1,
	      nread = 0;	// bytes copied into buffer so far
	struct hdfsFS_internal *client = fs;
	struct hdfsFile_internal *f = file;
	struct hdfs_object *bls = NULL, *ex = NULL;
	struct hdfs_datanode *dn;
	bool verifycrcs = true;

	if (f->fi_mode != FILE_READ) {
		ERR(EINVAL, "can't read from file opened for writing");
		goto out;
	}

	// Zero-length reads trivially succeed.
	if (length == 0) {
		res = 0;
		goto out;
	}

	// Ask the namenode which blocks (and datanodes) cover the range.
	bls = hdfs_getBlockLocations(client->fs_namenode, f->fi_path, position, length, &ex);
	if (ex) {
		ERR(EIO, "getBlockLocations(): %s", hdfs_exception_get_message(ex));
		goto out;
	}
	if (bls->ob_type == H_NULL) {
		ERR(ENOENT, "getBlockLocations(): %s doesn't exist", f->fi_path);
		goto out;
	}

	// We may need to read multiple blocks to satisfy the read
	for (int i = 0; i < bls->ob_val._located_blocks._num_blocks; i++) {
		struct hdfs_object *bl = bls->ob_val._located_blocks._blocks[i];
		const char *err = NULL;

		int64_t blbegin = 0,
			blend = bl->ob_val._located_block._len,
			bloff = bl->ob_val._located_block._offset;

		// Skip blocks earlier than we're looking for:
		if (bloff + blend <= position)
			continue;
		// Skip blocks after the range we want:
		if (bloff >= position + length)
			break;

		dn = hdfs_datanode_new(bl, f->fi_client, HDFS_DATANODE_AP_1_0, &err);
		if (!dn) {
			ERR(EIO, "Error connecting to datanode: %s", err);
			goto out;
		}

		// For each block, read the relevant part into the buffer:
		// clamp [blbegin, blend) to the intersection of this block
		// with the requested [position, position+length) range.
		if (bloff < position)
			blbegin = position - bloff;
		if (bloff + blend > position + length)
			blend = position + length - bloff;

		err = hdfs_datanode_read(dn, blbegin/* offset in block */,
		    blend - blbegin/* len */, buffer, verifycrcs);

		// Disable crc verification if the server doesn't support them
		if (err == HDFS_DATANODE_ERR_NO_CRCS) {
			WARN("Server doesn't support CRCs, cannot verify integrity");
			verifycrcs = false;
			err = NULL;

			// Re-connect to datanode to retry read without CRC
			// verification:
			hdfs_datanode_delete(dn);
			dn = hdfs_datanode_new(bl, f->fi_client, HDFS_DATANODE_AP_1_0, &err);
			if (!dn) {
				ERR(EIO, "Error connecting to datanode: %s", err);
				goto out;
			}

			err = hdfs_datanode_read(dn, blbegin/* offset in block */,
			    blend - blbegin/* len */, buffer, verifycrcs);
		}

		hdfs_datanode_delete(dn);

		if (err) {
			ERR(EIO, "Error during read: %s", err);
			goto out;
		}

		buffer = (char*)buffer + (blend - blbegin);
		nread += (tSize)(blend - blbegin);
	}

	// Fix: report the number of bytes actually copied instead of
	// unconditionally claiming the full requested length; the located
	// blocks may not cover the whole range (e.g. a read past EOF), and
	// the documented contract is "possibly less than length".
	res = nread;

out:
	if (bls)
		hdfs_object_free(bls);
	if (ex)
		hdfs_object_free(ex);
	return res;
}
Example #4
0
END_TEST

START_TEST(test_dn_write_file)
{
	// Round-trips a two-block file through the datanode protocol:
	// writes blocksz + remainder from fd, reads the blocks back into
	// ofd, and compares the two file descriptors byte-for-byte.
	// _i (check loop variable) selects whether CRCs are exercised.
	const char *tf = "/HADOOFUS_TEST_WRITE_FILE",
	      *client = "HADOOFUS_CLIENT", *err;
	bool s;

	struct hdfs_datanode *dn;
	struct hdfs_object *e = NULL, *bl, *fs, *bls;
	uint64_t begin, end;

	// Best-effort cleanup of a leftover file from a previous run; the
	// boolean result is intentionally ignored (only exceptions matter).
	s = hdfs_delete(h, tf, false/*recurse*/, &e);
	if (e)
		fail("exception: %s", hdfs_exception_get_message(e));

	hdfs_create(h, tf, 0644, client, true/*overwrite*/,
	    false/*createparent*/, 1/*replication*/, blocksz, &e);
	if (e)
		fail("exception: %s", hdfs_exception_get_message(e));

	begin = _now();

	// write first block (full)
	bl = hdfs_addBlock(h, tf, client, NULL, &e);
	if (e)
		fail("exception: %s", hdfs_exception_get_message(e));

	dn = hdfs_datanode_new(bl, client, HDFS_DATANODE_AP_1_0, &err);
	ck_assert_msg((intptr_t)dn, "error connecting to datanode: %s", err);

	hdfs_object_free(bl);

	err = hdfs_datanode_write_file(dn, fd, blocksz, 0, _i/*crcs*/);
	fail_if(err, "error writing block: %s", err);

	hdfs_datanode_delete(dn);

	// write second block (partial)
	bl = hdfs_addBlock(h, tf, client, NULL, &e);
	if (e)
		fail("exception: %s", hdfs_exception_get_message(e));

	dn = hdfs_datanode_new(bl, client, HDFS_DATANODE_AP_1_0, &err);
	ck_assert_msg((intptr_t)dn, "error connecting to datanode: %s", err);

	hdfs_object_free(bl);

	err = hdfs_datanode_write_file(dn, fd, towrite-blocksz, blocksz, _i/*crcs*/);
	fail_if(err, "error writing block: %s", err);

	hdfs_datanode_delete(dn);

	end = _now();
	// Fix: 'end - begin' is uint64_t; passing it to %ld is undefined
	// behavior on ILP32/LLP64 targets. Cast explicitly to match the
	// conversion specifiers.
	fprintf(stderr, "Wrote %d MB from file in %lld ms%s, %02g MB/s\n",
	    (int)(towrite/1024/1024), (long long)(end - begin),
	    _i? " (with crcs)":"",
	    (double)towrite/(end-begin)/1024*1000/1024);

	fs = hdfs_getFileInfo(h, tf, &e);
	if (e)
		fail("exception: %s", hdfs_exception_get_message(e));
	ck_assert(fs->ob_val._file_status._size == towrite);
	hdfs_object_free(fs);

	s = hdfs_complete(h, tf, client, &e);
	if (e)
		fail("exception: %s", hdfs_exception_get_message(e));
	ck_assert_msg(s, "did not complete");

	bls = hdfs_getBlockLocations(h, tf, 0, towrite, &e);
	if (e)
		fail("exception: %s", hdfs_exception_get_message(e));

	begin = _now();
	for (int i = 0; i < bls->ob_val._located_blocks._num_blocks; i++) {
		struct hdfs_object *bl =
		    bls->ob_val._located_blocks._blocks[i];
		dn = hdfs_datanode_new(bl, client, HDFS_DATANODE_AP_1_0, &err);
		ck_assert_msg((intptr_t)dn, "error connecting to datanode: %s", err);

		err = hdfs_datanode_read_file(dn, 0/*offset-in-block*/,
		    bl->ob_val._located_block._len,
		    ofd,
		    i*blocksz/*fd offset*/,
		    _i/*crcs*/);

		hdfs_datanode_delete(dn);

		if (err == HDFS_DATANODE_ERR_NO_CRCS) {
			fprintf(stderr, "Warning: test server doesn't support "
			    "CRCs, skipping validation.\n");
			_i = 0;

			// reconnect, try again without validating CRCs (for
			// isi_hdfs_d)
			dn = hdfs_datanode_new(bl, client, HDFS_DATANODE_AP_1_0, &err);
			ck_assert_msg((intptr_t)dn, "error connecting to datanode: %s", err);

			err = hdfs_datanode_read_file(dn, 0/*offset-in-block*/,
			    bl->ob_val._located_block._len,
			    ofd,
			    i*blocksz,
			    false/*crcs*/);

			hdfs_datanode_delete(dn);
		}

		fail_if(err, "error reading block: %s", err);
	}
	end = _now();
	// Same format-specifier fix as the write-timing report above.
	fprintf(stderr, "Read %d MB to file in %lld ms%s, %02g MB/s\n",
	    (int)(towrite/1024/1024), (long long)(end - begin),
	    _i? " (with crcs)":"",
	    (double)towrite/(end-begin)/1024*1000/1024);

	hdfs_object_free(bls);
	fail_if(filecmp(fd, ofd, towrite), "read differed from write");

	s = hdfs_delete(h, tf, false/*recurse*/, &e);
	if (e)
		fail("exception: %s", hdfs_exception_get_message(e));
	ck_assert_msg(s, "delete returned false");
}