Example #1
/**
 * Inserts a data block into the hashtable
 */
static void bufferDataBlock(Hashtable* ht, FieldData* data)
{
	uint16_t hash = getHash(ht, data);
	HashBucket* bucket = ht->bucket[hash];

	if (bucket == 0) {
		/* This slot is still free, place the bucket here */
		DPRINTF("bufferDataBlock: creating bucket\n");
		ht->bucket[hash] = createBucket(ht, data);
		return;
	}

	/* This slot is already used, search spill chain for equal flow */
	while(1) {
		if (equalFlow(ht, bucket->data, data)) {
			DPRINTF("appending to bucket\n");

			aggregateFlow(ht, bucket->data, data);
			bucket->expireTime = time(0) + ht->minBufferTime;

			/* The flow's data block is no longer needed */
			free(data);
			break;
		}

		if (bucket->next == 0) {
			DPRINTF("creating bucket\n");

			bucket->next = createBucket(ht, data);
			break;
		}

		bucket = (HashBucket*)bucket->next;
	}
}
Example #2
void
FileStorModifiedBucketsTest::fileStorRepliesToRecheckBucketCommands()
{
    BucketCheckerInjector bcj(*_node, *this);
    TestFileStorComponents c(*this, "fileStorRepliesToRecheckBucketCommands", bcj);
    setClusterState("storage:1 distributor:1");

    document::BucketId bucket(16, 0);
    createBucket(makeSpiBucket(bucket));
    c.sendPut(bucket, DocumentIndex(0), PutTimestamp(1000));
    c.top.waitForMessages(1, MSG_WAIT_TIME);
    c.top.reset();

    modifyBuckets(0, 1);
    c.top.waitForMessages(1, MSG_WAIT_TIME);
    assertIsNotifyCommandWithActiveBucket(*c.top.getReply(0));

    // If we don't reply to the recheck bucket commands, we won't trigger
    // a new round of getModifiedBuckets and recheck commands.
    c.top.reset();
    createBucket(makeSpiBucket(document::BucketId(16, 1)));
    modifyBuckets(1, 1);
    c.top.waitForMessages(1, MSG_WAIT_TIME);
    assertIsNotifyCommandWithActiveBucket(*c.top.getReply(0));
}
Example #3
void
MergeBlockingTest::testRejectApplyDiffWhenBucketHasBecomeInconsistent()
{
    TestFileStorComponents c(*this, "testRejectApplyDiffWhenBucketHasBecomeInconsistent");
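    // Create a leaf bucket and an overlapping inner bucket so the bucket tree becomes inconsistent.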
    createBucket(leafBucket);
    createBucket(innerBucket);

    std::shared_ptr<api::ApplyBucketDiffCommand> applyDiff(
            createApplyDiff(innerBucket, getNodes()));
    c.top.sendDown(applyDiff);

    expectAbortedReply<api::ApplyBucketDiffReply>(c.top);
}
Example #4
void
FileStorModifiedBucketsTest::modifiedBucketsSendNotifyBucketChange()
{
    BucketCheckerInjector bcj(*_node, *this);
    TestFileStorComponents c(*this, "modifiedBucketsSendNotifyBucketChange", bcj);
    setClusterState("storage:1 distributor:1");

    uint32_t numBuckets = 10;

    for (uint32_t i = 0; i < numBuckets; ++i) {
        document::BucketId bucket(16, i);
        createBucket(makeSpiBucket(bucket));
        c.sendPut(bucket, DocumentIndex(0), PutTimestamp(1000));
    }
    c.top.waitForMessages(numBuckets, MSG_WAIT_TIME);
    c.top.reset();

    modifyBuckets(0, numBuckets);
    c.top.waitForMessages(numBuckets, MSG_WAIT_TIME);

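    // Every modified bucket should have produced a NotifyBucketChange command and be marked active in the bucket database.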
    for (uint32_t i = 0; i < numBuckets; ++i) {
        assertIsNotifyCommandWithActiveBucket(*c.top.getReply(i));

        StorBucketDatabase::WrappedEntry entry(
                _node->getStorageBucketDatabase().get(
                        document::BucketId(16, i), "foo"));

        CPPUNIT_ASSERT(entry->info.isActive());
    }
}
Example #5
File: hashmap.cpp  Project: kayw/mnb
template <typename Key, typename T, typename Hasher, typename EqualKey, typename Alloc>
void HashMap<Key, T, Hasher, EqualKey, Alloc>::rehashImpl(size_type n) {
    assert(n != bucket_count_);
    // Allocate the bucket array for the new size; on success, redistribute the entries.
    if (createBucket(n)) {
        refillBucket();
    }
}
Example #6
void
MergeBlockingTest::testRejectMergeForInconsistentLeafBucket()
{
    TestFileStorComponents c(*this, "testRejectMergeForInconsistentLeafBucket");
    createBucket(innerBucket);
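    // Only the overlapping inner bucket exists, so a merge for the leaf bucket should be aborted.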

    std::shared_ptr<api::MergeBucketCommand> cmd(createMerge(leafBucket));
    c.top.sendDown(cmd);

    expectAbortedReply<api::MergeBucketReply>(c.top);
    CPPUNIT_ASSERT(!bucketExistsInDb(leafBucket));
}
Example #7
void
MergeBlockingTest::testRejectGetBucketDiffWithInconsistentBucket()
{
    TestFileStorComponents c(*this, "testRejectGetBucketDiffWithInconsistentBucket");
    CPPUNIT_ASSERT(innerBucket.contains(leafBucket));
    createBucket(innerBucket);

    std::shared_ptr<api::GetBucketDiffCommand> cmd(createGetDiff(leafBucket, getNodes()));
    c.top.sendDown(cmd);

    expectAbortedReply<api::GetBucketDiffReply>(c.top);
    CPPUNIT_ASSERT(!bucketExistsInDb(leafBucket));
}
Example #8
void
MergeBlockingTest::testRejectGetDiffReplyWhenBucketHasBecomeInconsistent()
{
    TestFileStorComponents c(*this, "testRejectGetDiffReplyWhenBucketHasBecomeInconsistent");
    createBucket(innerBucket);

    std::shared_ptr<api::GetBucketDiffCommand> getDiff(
            createGetDiff(innerBucket, getNodesWithForwarding()));
    c.top.sendDown(getDiff);
    c.top.waitForMessages(1, MSG_WAIT_TIME);

    api::StorageMessage::SP fwdDiff(
            c.top.getAndRemoveMessage(api::MessageType::GETBUCKETDIFF));
    api::GetBucketDiffCommand& diffCmd(
            dynamic_cast<api::GetBucketDiffCommand&>(*fwdDiff));

    api::GetBucketDiffReply::SP diffReply(
            new api::GetBucketDiffReply(diffCmd));
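    // Creating an overlapping bucket before the reply arrives makes the target bucket inconsistent.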
    createBucket(innerBucket2);
    c.top.sendDown(diffReply);

    expectAbortedReply<api::GetBucketDiffReply>(c.top);
}
Example #9
/**
 * Test case for buckets in ticket 6389558, comment #4.
 */
void
MergeBlockingTest::testRejectMergeWhenLowUsedBitCount()
{
    document::BucketId superBucket(1, 0x1);
    document::BucketId subBucket(2, 0x1);
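    // Used-bit counts of 1 and 2 are too low; the merge should be rejected.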

    CPPUNIT_ASSERT(superBucket.contains(subBucket));

    TestFileStorComponents c(*this, "testRejectMergeWhenLowUsedBitCount");
    createBucket(superBucket);

    std::shared_ptr<api::MergeBucketCommand> cmd(createMerge(subBucket));
    c.top.sendDown(cmd);

    expectAbortedReply<api::MergeBucketReply>(c.top);
    CPPUNIT_ASSERT(!bucketExistsInDb(subBucket));
}
Example #10
void
SingleBucketJoinTest::testPersistenceCanHandleSingleBucketJoin()
{
    TestFileStorComponents c(*this, "testPersistenceCanHandleSingleBucketJoin");
    document::BucketId targetBucket(16, 1);
    document::BucketId sourceBucket(17, 1);

    createBucket(sourceBucket);
    // Make sure it's not empty
    c.sendPut(sourceBucket, DocumentIndex(0), PutTimestamp(1000));
    expectOkReply<api::PutReply>(c.top);
    c.top.getRepliesOnce();

    auto cmd = std::make_shared<api::JoinBucketsCommand>(makeDocumentBucket(targetBucket));
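    // A single-bucket join lists the same source bucket twice.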
    cmd->getSourceBuckets().push_back(sourceBucket);
    cmd->getSourceBuckets().push_back(sourceBucket);

    c.top.sendDown(cmd);
    // If single bucket join locking is not working properly, this
    // will hang forever.
    expectOkReply<api::JoinBucketsReply>(c.top);
}
Example #11
void TileParser::parseStyleLayers(util::ptr<StyleLayerGroup> group) {
    if (!group) {
        return;
    }

    for (const util::ptr<StyleLayer> &layer_desc : group->layers) {
        // Stop parsing early if the tile has become obsolete.
        if (obsolete()) {
            return;
        }

        if (layer_desc->isBackground()) {
            // background is a special, fake bucket
            continue;
        } else if (layer_desc->layers) {
            // This is a layer group.
            parseStyleLayers(layer_desc->layers);
        }
        if (layer_desc->bucket) {
            // This is a singular layer. Check if this bucket already exists. If not,
            // parse this bucket.
            auto bucket_it = tile.buckets.find(layer_desc->bucket->name);
            if (bucket_it == tile.buckets.end()) {
                // We need to create this bucket since it doesn't exist yet.
                std::unique_ptr<Bucket> bucket = createBucket(layer_desc->bucket);
                if (bucket) {
                    // Bucket creation might fail because the data tile may not
                    // contain any data that falls into this bucket.
                    tile.buckets[layer_desc->bucket->name] = std::move(bucket);
                }
            }
        } else {
            fprintf(stderr, "[WARNING] layer '%s' does not have child layers or buckets\n", layer_desc->id.c_str());
        }
    }
}
Example #12
int main(int argc, char **argv)
{
    /*
     * Initialize IPRT and create the test.
     */
    RTTEST hTest;
    int rc = RTTestInitAndCreate("tstRTS3", &hTest);
    if (rc)
        return rc;
    RTTestBanner(hTest);

    /*
     * If the access key and secret key are missing, display usage.
     */
    if (argc <= 2)
    {
        RTTestPrintf(hTest, RTTESTLVL_ALWAYS, "Syntax: %s [Access Key] [Secret Key]\n", argv[0]);
        return RTTestSkipAndDestroy(hTest, "Missing required arguments\n");
    }

    RTTestSubF(hTest, "Create S3");
    RTS3 hS3;
    rc = RTS3Create(&hS3, argv[1], argv[2], "object.storage.network.com", "tstS3-agent/1.0");
    if (RT_FAILURE(rc))
    {
        RTTestIFailed("RTS3Create -> %Rrc", rc);
        return RTTestSummaryAndDestroy(hTest);
    }

    RTTestSub(hTest, "Fetch buckets");
    fetchAllBuckets(hS3);
    RTTestSub(hTest, "Fetch keys");
    fetchAllKeys(hS3, "bla");

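    /* The following tests are only compiled in when the corresponding TSTS3_* macros are defined. */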
#ifdef TSTS3_CREATEBUCKET
    RTTestSub(hTest, "Create bucket");
    createBucket(hS3, TSTS3_CREATEBUCKET_BUCKETNAME);
    fetchAllBuckets(hS3);
    deleteBucket(hS3, TSTS3_CREATEBUCKET_BUCKETNAME);
    fetchAllBuckets(hS3);
#endif /* TSTS3_CREATEBUCKET */


#ifdef TSTS3_PUTGETKEY
    RTTestSub(hTest, "Put key");
    createBucket(hS3, TSTS3_PUTGETKEY_BUCKETNAME);
    putKey(hS3, TSTS3_PUTGETKEY_BUCKETNAME, TSTS3_PUTGETKEY_KEYNAME, TSTS3_PUTGETKEY_PUTFILE);
    fetchAllKeys(hS3, TSTS3_PUTGETKEY_BUCKETNAME);
    getKey(hS3, TSTS3_PUTGETKEY_BUCKETNAME, TSTS3_PUTGETKEY_KEYNAME, TSTS3_PUTGETKEY_GETFILE);
    deleteKey(hS3, TSTS3_PUTGETKEY_BUCKETNAME, TSTS3_PUTGETKEY_KEYNAME);
    fetchAllKeys(hS3, TSTS3_PUTGETKEY_BUCKETNAME);
    deleteBucket(hS3, TSTS3_PUTGETKEY_BUCKETNAME);
#endif /* TSTS3_PUTGETKEY */

    RTS3Destroy(hS3);

    /*
     * Summary
     */
    return RTTestSummaryAndDestroy(hTest);
}
Example #13
/**
 * Inserts a data block into the hashtable
 */
void FlowHashtable::bufferDataBlock(boost::shared_array<IpfixRecord::Data> data)
{
	statRecordsReceived++;

	uint32_t nhash = getHash(data.get(), false);
	DPRINTFL(MSG_VDEBUG, "nhash=%u", nhash);
	HashtableBucket* prevbucket;
	HashtableBucket* bucket = lookupBucket(nhash, data.get(), false, &prevbucket);

	if (bucket != NULL) {
		DPRINTFL(MSG_VDEBUG, "aggregating flow");
		aggregateFlow(bucket->data.get(), data.get(), false);
		bucket->expireTime = time(0) + minBufferTime;
		if (bucket->forceExpireTime>bucket->expireTime) {
			exportList.remove(bucket->listNode);
			exportList.push(bucket->listNode);
		}
	} else {
		if (biflowAggregation) {
			// try reverse flow
			uint32_t rhash = getHash(data.get(), true);
			DPRINTFL(MSG_VDEBUG, "rhash=%u", rhash);
			bucket = lookupBucket(rhash, data.get(), true, &prevbucket);
			if (bucket != NULL) {
				DPRINTFL(MSG_VDEBUG, "aggregating reverse flow");
				if (aggregateFlow(bucket->data.get(), data.get(), true)==1) {
					DPRINTFL(MSG_VDEBUG, "reversing whole flow");
					// reverse flow
					reverseFlowBucket(bucket);
					// delete reference from hash table
					if (prevbucket==NULL)
						buckets[rhash] = bucket->next;
					else {
						prevbucket->next = bucket->next;
					}
					if (bucket->next!=NULL)
						bucket->next->prev = prevbucket;
					// insert into hash table again
					nhash = getHash(bucket->data.get(), false);
					DPRINTFL(MSG_VDEBUG, "nhash=%u", nhash);
					bucket->next = buckets[nhash];
					bucket->hash = nhash;
					buckets[nhash] = bucket;
					bucket->prev = 0;
					if (bucket->next != NULL) bucket->next->prev = bucket;
					bucket->expireTime = time(0) + minBufferTime;
					if (bucket->forceExpireTime>bucket->expireTime) {
						exportList.remove(bucket->listNode);
						exportList.push(bucket->listNode);
					}
				}
			}
		}
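		// No existing flow matched in either direction; create a new bucket at the head of the chain.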
		if (bucket == NULL) {
			DPRINTFL(MSG_VDEBUG, "creating new bucket");
			HashtableBucket* n = buckets[nhash];
			buckets[nhash] = createBucket(data, 0, n, 0, nhash); // FIXME: insert observationDomainID!
			if (n != NULL) n->prev = buckets[nhash];
			BucketListElement* node = hbucketIM.getNewInstance();
			node->reset();
			buckets[nhash]->listNode = node;
			node->bucket = buckets[nhash];
			exportList.push(node);
		}
	}

	atomic_release(&aggInProgress);

}
Example #14
int main(int argc, char** argv)
{
    String base = "=== [AWS API Init";
    std::cout << base << "]: Start===\n";
    Aws::SDKOptions options;
    // set the options
    options.loggingOptions.logLevel = Aws::Utils::Logging::LogLevel::Info;
    // end of options
    Aws::InitAPI(options);
    // setup
    String signVer = SIGN_VER, host = HOST, portStr = PORT,
                bucketName = BUCKET;
    if (argc == 5)
    {
        signVer = argv[1];
        host = argv[2];
        portStr = argv[3];
        bucketName = argv[4];
    }

    auto client = init(host, portStr);
    std::cout << base << "]: End ===\n\n";
    // call tests here
    createBucket(client, bucketName);
    std::cout << '\n';

    // put object
    putObject(client, bucketName, "test.simple", SMALL_TEST_FILE);
    putObject(client, bucketName, "test.medium", MED_TEST_FILE);
    putObject(client, bucketName, "test.large", LARGE_TEST_FILE);

    // put object with metadata
    Map metadata;
    metadata[METADATA_KEY] = METADATA_VAL;
    putObject(client, bucketName, "test.simple.meta", SMALL_TEST_FILE, metadata);
    putObject(client, bucketName, "test.medium.meta", MED_TEST_FILE, metadata);
    putObject(client, bucketName, "test.large.meta", LARGE_TEST_FILE, metadata);

    // put object in parts
    putObjectMp(client, bucketName, "test.simple.mp", SMALL_TEST_FILE);
    putObjectMp(client, bucketName, "test.medium.mp", MED_TEST_FILE);
    putObjectMp(client, bucketName, "test.large.mp", LARGE_TEST_FILE);

    // put object in parts with metadata
    putObjectMp(client, bucketName, "test.simple.meta.mp", SMALL_TEST_FILE, metadata);
    putObjectMp(client, bucketName, "test.medium.meta.mp", MED_TEST_FILE, metadata);
    putObjectMp(client, bucketName, "test.large.meta.mp", LARGE_TEST_FILE, metadata);

    // head is already tested
    // get object
    getObject(client, bucketName, "test.simple", SMALL_TEST_FILE);
    getObject(client, bucketName, "test.medium", MED_TEST_FILE);
    getObject(client, bucketName, "test.large", LARGE_TEST_FILE);
    getObject(client, bucketName, "test.simple.mp", SMALL_TEST_FILE);
    getObject(client, bucketName, "test.medium.mp", MED_TEST_FILE);
    getObject(client, bucketName, "test.large.mp", LARGE_TEST_FILE);
    getObject(client, bucketName, "test.simple.meta", SMALL_TEST_FILE, metadata);
    getObject(client, bucketName, "test.medium.meta", MED_TEST_FILE, metadata);
    getObject(client, bucketName, "test.large.meta", LARGE_TEST_FILE, metadata);
    getObject(client, bucketName, "test.simple.meta.mp", SMALL_TEST_FILE, metadata);
    getObject(client, bucketName, "test.medium.meta.mp", MED_TEST_FILE, metadata);
    getObject(client, bucketName, "test.large.meta.mp", LARGE_TEST_FILE, metadata);

    // get fake object
    getFakeObject(client, bucketName, "test.noexist");

    // range get object
    rangeObject(client, bucketName, "test.simple", SMALL_TEST_FILE, 1, 4);
    // rangeObject(client, bucketName, "test.simple.mp", SMALL_TEST_FILE, 1, 4);
    rangeObject(client, bucketName, "test.large", LARGE_TEST_FILE, 1048576, 40485760);
    // rangeObject(client, bucketName, "test.large.mp", LARGE_TEST_FILE, 1048576, 10485760);

    // copy object
    copyObject(client, bucketName, "test.simple", "test.simple.copy");
    getObject(client, bucketName, "test.simple.copy", SMALL_TEST_FILE);

    // list object
    listObjects(client, bucketName, "", -1);

    // delete all objects
    deleteAllObjects(client, bucketName);
    listObjects(client, bucketName, "", 0);

    // put dummy objects
    putObject(client, bucketName, "list/test.small.", SMALL_TEST_FILE, Map(), 35);
    // multi-page list obj
    listObjects(client, bucketName, "list/", 35, 10);

    // multi-delete
    deleteObjects(client, bucketName, "list/test.small.", 10);
    listObjects(client, bucketName, "list/", 25);

    // get-put acl

    // delete bucket
    deleteBucket(client, bucketName);
    // end of tests
    std::cout << "=== AWS API Shutdown: Start===\n";
    Aws::ShutdownAPI(options);
    std::cout << "=== AWS API Shutdown: End ===\n";
    return 0;
}