std::string search_key_in_series(uint32_t seed, TypeValidator *validator, unsigned char *enc_buf)
{
    if (validator == NULL) {
        printf("Validator not found!\n");
        return "";
    }
    // a single series; the seed is never incremented
    char key[0x100];

    printf("Searching key started...\n");
#ifdef DEBUG
    printf("Start seed: %d = %#x\n----------\n", seed, seed);
    print_time_str(seed);
#endif
    srand (seed);
    
    for (size_t key_num = 0; key_num < 10000000; key_num++) {
        make_random_key(key, sizeof(key));

        if (validator->testKey(key)) {
            printf("> KEY FOUND: %s\n", key);
            printf("[SUCCESS]\n");
#ifdef DEBUG
            printf ("KEY: %s\nSEED %x = %d\nkey number in series: %d\n", key, seed, seed, key_num);
#endif
            //log_key(seed, key, key_num, params.filename);
            return key;
        }
    }
    return "";
}
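/*
 * Neither make_random_key() nor print_time_str() is defined in these
 * examples, and each project supplies its own make_random_key() (the
 * memcached tests further below use a three-argument variant that returns
 * the key length).  Minimal sketches only -- the character set, length
 * policy, and both bodies are assumptions, not taken from the originals.
 * The point of a rand()-based generator is that after srand(seed) the
 * series of keys is fully determined by the seed, which is what
 * search_key_in_series() relies on.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

static size_t
make_random_key_sketch(char *key, size_t bufsize)
{
    static const char charset[] =
        "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789";

    /* assumed policy: key lengths in [8, bufsize - 1]; requires bufsize > 8 */
    size_t len = 8 + (size_t)rand() % (bufsize - 8);
    size_t i;

    for (i = 0; i < len; i++)
        key[i] = charset[(size_t)rand() % (sizeof(charset) - 1)];
    key[len] = '\0';
    return len;
}

/* assumed: the seed is a unix timestamp (the smart-search code below hints
 * at a file modification time), so printing it as calendar time helps
 * debugging. */
static void
print_time_str(uint32_t seed)
{
    time_t t = (time_t)seed;
    printf("%s", ctime(&t));
}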
/* Example #2 */
TEST(ch3, weighted_furc_hash_all_one) {
  char key[MAX_KEY_LENGTH + 1];
  int len;
  srand(12345);
  std::array<double, 1000> weights;
  weights.fill(1.0);

  for (uint32_t size = 1; size <= 1000; ++size) {
    make_random_key(key, MAX_KEY_LENGTH);
    len = strlen(key);
    size_t classic = furc_hash(key, len, size);
    EXPECT_LT(classic, size);
    folly::Range<const double*> weightRange(
        weights.cbegin(), weights.cbegin() + size);
    size_t weighted = facebook::mcrouter::weightedFurcHash(
        folly::StringPiece(key, len), weightRange);
    EXPECT_EQ(classic, weighted);
  }
}
/* Example #3 */
static void
benchmark_iteration(void)
{
    int i, j;

    struct flowtable_entry *ftes = calloc(num_flows, sizeof(*ftes));
    struct flowtable *ft = flowtable_create();

    for (i = 0; i < num_flows; i++) {
        struct flowtable_key key, mask;
        make_random_mask(&mask);
        make_random_key(&key, &mask);
        flowtable_entry_init(&ftes[i], &key, &mask, i % 4);
        flowtable_insert(ft, &ftes[i]);
    }

    /* toggle callgrind instrumentation outside the timed region so the
     * measured interval covers only the lookup loop. */
    CALLGRIND_START_INSTRUMENTATION;

    uint64_t start_time = monotonic_ns();

    for (i = 0; i < num_lookups_per_flow; i++) {
        for (j = 0; j < num_flows; j++) {
            (void) flowtable_match(ft, &ftes[j].key);
        }
    }

    uint64_t end_time = monotonic_ns();

    CALLGRIND_STOP_INSTRUMENTATION;

    for (i = 0; i < num_flows; i++) {
        flowtable_remove(ft, &ftes[i]);
    }

    free(ftes);
    flowtable_destroy(ft);

    uint64_t elapsed = end_time - start_time;
    total_elapsed += elapsed;
}
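/*
 * monotonic_ns() is not shown either.  A minimal POSIX sketch, assuming it
 * is a thin wrapper over CLOCK_MONOTONIC (only the name and the nanosecond
 * return value are implied by the benchmark above; the body is an
 * assumption):
 */
#include <stdint.h>
#include <time.h>

static uint64_t
monotonic_ns(void)
{
    struct timespec ts;

    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;
}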
/*
 * allocate all memory with small and large chunks.  link them such that the
 * allocation of a large object will start evicting small chunks but then stop
 * because the large chunk LRU has an older item.  this covers part of case 3
 * and part of case 4 for the small item alloc in flat_storage_lru_evict(..).
 */
static int
mixed_items_release_small_and_large_items_scan_stop_test(int verbose) {
    typedef struct {
        item* it;
        char key[KEY_MAX_LENGTH];
        uint8_t klen;
    } mixed_items_release_one_small_item_t;

    size_t num_small_objects = (fsi.large_free_list_sz / 2) * SMALL_CHUNKS_PER_LARGE_CHUNK;
    /* this is not the same as fsi.large_free_list_sz / 2 due to rounding. */
    size_t num_large_objects = fsi.large_free_list_sz - (fsi.large_free_list_sz / 2);
    mixed_items_release_one_small_item_t* large_items = malloc(sizeof(mixed_items_release_one_small_item_t) *
                                                               num_large_objects);
    mixed_items_release_one_small_item_t* small_items = malloc(sizeof(mixed_items_release_one_small_item_t) *
                                                               num_small_objects);
    item* lru_trigger;
    size_t max_small_key_size = SMALL_TITLE_CHUNK_DATA_SZ;
    size_t min_size_for_large_chunk = ( sizeof( ((small_title_chunk_t*) 0)->data ) ) +
        ( (SMALL_CHUNKS_PER_LARGE_CHUNK - 1) * sizeof( ((small_body_chunk_t*) 0)->data ) ) +
        1;
    size_t i;
    char key[KEY_MAX_LENGTH];
    size_t klen;
    size_t large_free_list_sz = fsi.large_free_list_sz, small_free_list_sz = fsi.small_free_list_sz;

    V_PRINTF(1, "  * %s\n", __FUNCTION__);

    TASSERT(fsi.large_free_list_sz != 0);
    TASSERT(fsi.small_free_list_sz == 0);

    for (i = 0; i < num_small_objects; i ++) {
        V_PRINTF(2, "\r  *  allocating small object %lu", i);
        V_FLUSH(2);
        do {
            small_items[i].klen = make_random_key(small_items[i].key, max_small_key_size, true);
        } while (assoc_find(small_items[i].key, small_items[i].klen));

        small_items[i].it = do_item_alloc(small_items[i].key, small_items[i].klen,
                                          FLAGS, 0,
                                          0, addr);
        TASSERT(small_items[i].it);
        TASSERT(is_item_large_chunk(small_items[i].it) == 0);

        do_item_link(small_items[i].it, small_items[i].key);
    }
    V_PRINTF(2, "\n");

    for (i = 0; i < num_large_objects; i ++) {
        V_PRINTF(2, "\r  *  allocating large object %lu", i);
        V_FLUSH(2);
        do {
            large_items[i].klen = make_random_key(large_items[i].key, KEY_MAX_LENGTH, true);
        } while (assoc_find(large_items[i].key, large_items[i].klen));

        large_items[i].it = do_item_alloc(large_items[i].key, large_items[i].klen,
                                          FLAGS, 0,
                                          min_size_for_large_chunk - large_items[i].klen, addr);
        TASSERT(large_items[i].it);
        TASSERT(is_item_large_chunk(large_items[i].it));

        do_item_link(large_items[i].it, large_items[i].key);
    }
    V_PRINTF(2, "\n");

    TASSERT(fsi.large_free_list_sz == 0 &&
            fsi.small_free_list_sz == 0);

    V_LPRINTF(2, "update items\n");
    /* update the objects we want to clobber *first*.  but since ties go to the
     * large item, we need to bump the time stamp to ensure the small item is
     * released first. */
    current_time += ITEM_UPDATE_INTERVAL + 1; /* initial bump to ensure that
                                               * LRU reordering takes place. */

    do_item_update(small_items[0].it);
    current_time += 1;
    do_item_update(large_items[0].it);

    /* bump the timestamp and add the remaining items. */
    current_time += 1;
    for (i = 1; i < num_small_objects; i ++) {
        do_item_update(small_items[i].it);
    }
    for (i = 1; i < num_large_objects; i ++) {
        do_item_update(large_items[i].it);
    }

    V_LPRINTF(2, "dereferencing objects\n");
    for (i = 0; i < num_small_objects; i ++) {
        do_item_deref(small_items[i].it);
    }
    for (i = 0; i < num_large_objects; i ++) {
        do_item_deref(large_items[i].it);
    }

    V_LPRINTF(2, "alloc after deref\n");
    do {
        klen = make_random_key(key, max_small_key_size, true);
    } while (assoc_find(key, klen));
    lru_trigger = do_item_alloc(key, klen, FLAGS, 0, LARGE_TITLE_CHUNK_DATA_SZ - klen, addr);
    TASSERT(lru_trigger != NULL);
    TASSERT(is_item_large_chunk(lru_trigger));

    V_LPRINTF(2, "search for evicted objects\n");
    TASSERT(assoc_find(small_items[0].key, small_items[0].klen) == NULL);
    TASSERT(assoc_find(large_items[0].key, large_items[0].klen) == NULL);

    V_LPRINTF(2, "ensuring that objects that shouldn't be evicted are still present\n");
    for (i = 1; i < num_small_objects; i ++) {
        TASSERT(assoc_find(small_items[i].key, small_items[i].klen));
    }
    for (i = 1; i < num_large_objects; i ++) {
        TASSERT(assoc_find(large_items[i].key, large_items[i].klen));
    }

    V_LPRINTF(2, "cleanup objects\n");
    for (i = 1; i < num_small_objects; i ++) {
        do_item_unlink(small_items[i].it, UNLINK_NORMAL, small_items[i].key);
    }
    for (i = 1; i < num_large_objects; i ++) {
        do_item_unlink(large_items[i].it, UNLINK_NORMAL, large_items[i].key);
    }
    do_item_deref(lru_trigger);

    TASSERT(fsi.large_free_list_sz == large_free_list_sz &&
            fsi.small_free_list_sz == small_free_list_sz);

    return 0;
}
/*
 * allocate all memory with small and large chunks.  link them such that the
 * small items are the oldest.  allocate one large object that can be covered by
 * the release of one large item.  this covers part of case 4 for the large item
 * alloc in flat_storage_lru_evict(..).
 */
static int
mixed_items_release_one_large_item_test(int verbose) {
    typedef struct {
        item* it;
        char key[KEY_MAX_LENGTH];
        uint8_t klen;
    } mixed_items_release_one_small_item_t;

    size_t num_small_objects = (fsi.large_free_list_sz / 2) * SMALL_CHUNKS_PER_LARGE_CHUNK;
    /* this is not the same as fsi.large_free_list_sz / 2 due to rounding. */
    size_t num_large_objects = fsi.large_free_list_sz - (fsi.large_free_list_sz / 2);
    mixed_items_release_one_small_item_t* large_items = malloc(sizeof(mixed_items_release_one_small_item_t) *
                                                               num_large_objects);
    mixed_items_release_one_small_item_t* small_items = malloc(sizeof(mixed_items_release_one_small_item_t) *
                                                               num_small_objects);
    item* lru_trigger;
    size_t max_small_key_size = SMALL_TITLE_CHUNK_DATA_SZ;
    size_t min_size_for_large_chunk = ( sizeof( ((small_title_chunk_t*) 0)->data ) ) +
        ( (SMALL_CHUNKS_PER_LARGE_CHUNK - 1) * sizeof( ((small_body_chunk_t*) 0)->data ) ) +
        1;
    size_t i;
    char key[KEY_MAX_LENGTH];
    size_t klen;
    size_t large_free_list_sz = fsi.large_free_list_sz, small_free_list_sz = fsi.small_free_list_sz;

    V_PRINTF(1, "  * %s\n", __FUNCTION__);

    TASSERT(fsi.large_free_list_sz != 0);
    TASSERT(fsi.small_free_list_sz == 0);

    for (i = 0; i < num_large_objects; i ++) {
        V_PRINTF(2, "\r  *  allocating large object %lu", i);
        V_FLUSH(2);
        do {
            large_items[i].klen = make_random_key(large_items[i].key, KEY_MAX_LENGTH, true);
        } while (assoc_find(large_items[i].key, large_items[i].klen));

        large_items[i].it = do_item_alloc(large_items[i].key, large_items[i].klen,
                                          FLAGS, 0,
                                          min_size_for_large_chunk - large_items[i].klen,
                                          addr);
        TASSERT(large_items[i].it);
        TASSERT(is_item_large_chunk(large_items[i].it));

        do_item_link(large_items[i].it, large_items[i].key);
    }
    V_PRINTF(2, "\n");

    for (i = 0; i < num_small_objects; i ++) {
        V_PRINTF(2, "\r  *  allocating small object %lu", i);
        V_FLUSH(2);
        do {
            small_items[i].klen = make_random_key(small_items[i].key, max_small_key_size, true);
        } while (assoc_find(small_items[i].key, small_items[i].klen));

        small_items[i].it = do_item_alloc(small_items[i].key, small_items[i].klen,
                                          FLAGS, 0,
                                          0, addr);
        TASSERT(small_items[i].it);
        TASSERT(is_item_large_chunk(small_items[i].it) == 0);

        do_item_link(small_items[i].it, small_items[i].key);
    }
    V_PRINTF(2, "\n");

    TASSERT(fsi.large_free_list_sz == 0 &&
            fsi.small_free_list_sz == 0);

    V_LPRINTF(2, "alloc before deref\n");
    do {
        klen = make_random_key(key, max_small_key_size, true);
    } while (assoc_find(key, klen));

    lru_trigger = do_item_alloc(key, klen, FLAGS, 0, 0, addr);
    TASSERT(lru_trigger == NULL);

    V_LPRINTF(2, "dereferencing objects\n");
    for (i = 0; i < num_small_objects; i ++) {
        do_item_deref(small_items[i].it);
    }
    for (i = 0; i < num_large_objects; i ++) {
        do_item_deref(large_items[i].it);
    }

    V_LPRINTF(2, "alloc after deref\n");
    lru_trigger = do_item_alloc(key, klen, FLAGS, 0, min_size_for_large_chunk - klen, addr);
    TASSERT(lru_trigger != NULL);

    V_LPRINTF(2, "search for evicted object\n");
    TASSERT(assoc_find(large_items[0].key, large_items[0].klen) == NULL);

    V_LPRINTF(2, "ensuring that objects that shouldn't be evicted are still present\n");
    for (i = 0; i < num_small_objects; i ++) {
        TASSERT(assoc_find(small_items[i].key, small_items[i].klen));
    }
    for (i = 1; i < num_large_objects; i ++) {
        TASSERT(assoc_find(large_items[i].key, large_items[i].klen));
    }

    V_LPRINTF(2, "cleanup objects\n");
    for (i = 0; i < num_small_objects; i ++) {
        do_item_unlink(small_items[i].it, UNLINK_NORMAL, small_items[i].key);
    }
    for (i = 1; i < num_large_objects; i ++) {
        do_item_unlink(large_items[i].it, UNLINK_NORMAL, large_items[i].key);
    }
    do_item_deref(lru_trigger);

    TASSERT(fsi.large_free_list_sz == large_free_list_sz &&
            fsi.small_free_list_sz == small_free_list_sz);

    return 0;
}
/*
 * this is a negative test to ensure the proper behavior when we don't have
 * sufficient resources.  in this case, we have sufficient small items on the
 * LRU, and enough of them have refcount == 0, but all the parent broken chunks
 * have refcount > 0.
 */
static int
insufficient_available_large_broken_chunks(int verbose) {
    typedef struct {
        item* it;
        char key[KEY_MAX_LENGTH];
        uint8_t klen;
    } all_small_chunks_key_t;

    size_t num_objects = fsi.large_free_list_sz * SMALL_CHUNKS_PER_LARGE_CHUNK;
    all_small_chunks_key_t* small_items = malloc(sizeof(all_small_chunks_key_t) * num_objects);
    item* lru_trigger;
    size_t max_key_size = SMALL_TITLE_CHUNK_DATA_SZ;
    size_t min_size_for_large_chunk = ( sizeof( ((small_title_chunk_t*) 0)->data ) ) +
        ( (SMALL_CHUNKS_PER_LARGE_CHUNK - 1) * sizeof( ((small_body_chunk_t*) 0)->data ) ) +
        1;
    size_t i;
    char key[KEY_MAX_LENGTH];
    size_t klen;
    size_t large_free_list_sz = fsi.large_free_list_sz, small_free_list_sz = fsi.small_free_list_sz;

    V_PRINTF(1, "  * %s\n", __FUNCTION__);

    TASSERT(fsi.large_free_list_sz != 0);
    TASSERT(fsi.small_free_list_sz == 0);

    for (i = 0; i < num_objects; i ++) {
        V_PRINTF(2, "\r  *  allocating object %lu", i);
        V_FLUSH(2);
        do {
            small_items[i].klen = make_random_key(small_items[i].key, max_key_size, true);
        } while (assoc_find(small_items[i].key, small_items[i].klen));

        small_items[i].it = do_item_alloc(small_items[i].key, small_items[i].klen, FLAGS, 0, 0, addr);
        TASSERT(small_items[i].it);
        TASSERT(is_item_large_chunk(small_items[i].it) == false);

        do_item_link(small_items[i].it, small_items[i].key);
    }
    V_PRINTF(2, "\n");

    TASSERT(fsi.large_free_list_sz == 0 &&
            fsi.small_free_list_sz == 0);

    V_LPRINTF(2, "alloc before deref\n");
    do {
        klen = make_random_key(key, max_key_size, true);
    } while (assoc_find(key, klen));

    lru_trigger = do_item_alloc(key, klen, FLAGS, 0, min_size_for_large_chunk - klen, addr);
    TASSERT(lru_trigger == NULL);

    V_LPRINTF(2, "dereferencing objects\n");
    for (i = 0; i < num_objects; i += 2) {
        do_item_deref(small_items[i].it);
    }

    V_LPRINTF(2, "alloc after deref\n");
    lru_trigger = do_item_alloc(key, klen, FLAGS, 0, min_size_for_large_chunk - klen, addr);
    TASSERT(lru_trigger == NULL);

    V_LPRINTF(2, "ensuring that objects that shouldn't be evicted are still present\n");
    for (i = 0; i < num_objects; i ++) {
        bool should_be_found;
        /* we free everything we encounter that has no refcount until we hit the
         * LRU_SEARCH_DEPTH, at which time we cease searching. */
        if (i % 2 == 0 && i < (LRU_SEARCH_DEPTH * 2)) {
            should_be_found = false;
        } else {
            should_be_found = true;
        }
        TASSERT((assoc_find(small_items[i].key, small_items[i].klen) != NULL) ==
                should_be_found);
    }

    V_LPRINTF(2, "cleanup objects\n");
    for (i = 0; i < num_objects; i ++) {
        /* we dereference all the odd numbered items */
        if ((i % 2) != 0) {
            do_item_deref(small_items[i].it);
        }

        /* we unlink everything that's still in the LRU (the even-numbered
         * items within the LRU search depth were already evicted). */
        if (i % 2 != 0 || i >= (LRU_SEARCH_DEPTH * 2)) {
            do_item_unlink(small_items[i].it, UNLINK_NORMAL, small_items[i].key);
        }
    }

    TASSERT(fsi.large_free_list_sz == large_free_list_sz &&
            fsi.small_free_list_sz == small_free_list_sz);

    return 0;
}
/*
 * allocate nearly all memory with small items (all memory minus
 * SMALL_CHUNKS_PER_LARGE_CHUNK - 1 small chunks).  then set it up such that
 * there is only one item eligible to be freed (i.e., by removing the
 * remaining items from the LRU).  allocate one large object.  this will
 * require the migration of one single chunk item at the LRU head.  this
 * covers part of case 1 for the small item alloc in
 * flat_storage_lru_evict(..).
 */
static int
all_small_items_migrate_small_single_chunk_item_at_lru_head_test(int verbose) {
    typedef struct {
        item* it;
        char key[KEY_MAX_LENGTH];
        uint8_t klen;
    } test_keys_t;

    size_t num_objects = fsi.large_free_list_sz * SMALL_CHUNKS_PER_LARGE_CHUNK;
    test_keys_t* items = malloc(sizeof(test_keys_t) * num_objects);
    item* lru_trigger;
    size_t max_small_key_size = SMALL_TITLE_CHUNK_DATA_SZ;
    size_t min_size_for_large_chunk = ( sizeof( ((small_title_chunk_t*) 0)->data ) ) +
        ( (SMALL_CHUNKS_PER_LARGE_CHUNK - 1) * sizeof( ((small_body_chunk_t*) 0)->data ) ) +
        1;
    size_t i, count;
    char key[KEY_MAX_LENGTH];
    size_t klen;
    size_t large_free_list_sz = fsi.large_free_list_sz, small_free_list_sz = fsi.small_free_list_sz;

    V_PRINTF(1, "  * %s\n", __FUNCTION__);

    TASSERT(fsi.large_free_list_sz != 0);
    TASSERT(fsi.small_free_list_sz == 0);

    for (i = 0, count = 0;
         fsi.large_free_list_sz ||
             fsi.small_free_list_sz > SMALL_CHUNKS_PER_LARGE_CHUNK - 1;
         i ++, count ++) {
        V_PRINTF(2, "\r  *  allocating small object %lu", i);
        V_FLUSH(2);

        assert(i < num_objects);

        do {
            items[i].klen = make_random_key(items[i].key, max_small_key_size, true);
        } while (assoc_find(items[i].key, items[i].klen));

        items[i].it = do_item_alloc(items[i].key, items[i].klen,
                                    FLAGS, 0, 0, addr);
        TASSERT(items[i].it);
        TASSERT(is_item_large_chunk(items[i].it) == 0);

        do_item_link(items[i].it, items[i].key);
    }
    V_PRINTF(2, "\n");

    TASSERT(fsi.large_free_list_sz == 0);
    TASSERT(fsi.small_free_list_sz == SMALL_CHUNKS_PER_LARGE_CHUNK - 1);

    /* remove all but one item from the LRU, and release our reference to the
     * item we don't remove from the LRU. */
    for (i = 0; i < count - 1; i ++) {
        do_item_unlink(items[i].it, UNLINK_NORMAL, items[i].key);
    }
    do_item_deref(items[count - 1].it);

    TASSERT(fsi.lru_head == items[count - 1].it);

    TASSERT(fsi.large_free_list_sz == 0);
    TASSERT(fsi.small_free_list_sz == SMALL_CHUNKS_PER_LARGE_CHUNK - 1);

    V_LPRINTF(2, "alloc\n");
    do {
        klen = make_random_key(key, max_small_key_size, true);
    } while (assoc_find(key, klen));

    lru_trigger = do_item_alloc(key, klen, FLAGS, 0, min_size_for_large_chunk - klen, addr);
    TASSERT(lru_trigger != NULL);

    V_LPRINTF(2, "search for evicted object\n");
    TASSERT(assoc_find(items[count - 1].key, items[count - 1].klen) == NULL);

    V_LPRINTF(2, "cleanup objects\n");
    for (i = 0; i < count - 1; i ++) {
        do_item_deref(items[i].it);
    }
    do_item_deref(lru_trigger);

    TASSERT(fsi.large_free_list_sz == large_free_list_sz &&
            fsi.small_free_list_sz == small_free_list_sz);

    return 0;
}
/*
 * allocate all memory with small items.  allocate one large object that can be
 * covered by the release of small items, but also requires the migration of
 * single chunk items.  this covers part of case 1 for the large item alloc in
 * flat_storage_lru_evict(..).
 */
static int
all_small_items_migrate_small_single_chunk_items_test(int verbose) {
    typedef struct {
        item* it;
        char key[KEY_MAX_LENGTH];
        uint8_t klen;
    } test_keys_t;

    size_t num_objects = fsi.large_free_list_sz * SMALL_CHUNKS_PER_LARGE_CHUNK;
    test_keys_t* items = malloc(sizeof(test_keys_t) * num_objects);
    item* lru_trigger;
    size_t max_small_key_size = SMALL_TITLE_CHUNK_DATA_SZ;
    size_t min_size_for_large_chunk = ( sizeof( ((small_title_chunk_t*) 0)->data ) ) +
        ( (SMALL_CHUNKS_PER_LARGE_CHUNK - 1) * sizeof( ((small_body_chunk_t*) 0)->data ) ) +
        1;
    size_t i;
    char key[KEY_MAX_LENGTH];
    size_t klen;
    size_t large_free_list_sz = fsi.large_free_list_sz, small_free_list_sz = fsi.small_free_list_sz;

    V_PRINTF(1, "  * %s\n", __FUNCTION__);

    TASSERT(fsi.large_free_list_sz != 0);
    TASSERT(fsi.small_free_list_sz == 0);

    for (i = 0; i < num_objects; i ++) {
        V_PRINTF(2, "\r  *  allocating small object %lu", i);
        V_FLUSH(2);
        do {
            items[i].klen = make_random_key(items[i].key, max_small_key_size, true);
        } while (assoc_find(items[i].key, items[i].klen));

        items[i].it = do_item_alloc(items[i].key, items[i].klen,
                                    FLAGS, 0, 0, addr);
        TASSERT(items[i].it);
        TASSERT(is_item_large_chunk(items[i].it) == 0);

        do_item_link(items[i].it, items[i].key);
    }
    V_PRINTF(2, "\n");

    TASSERT(fsi.large_free_list_sz == 0 &&
            fsi.small_free_list_sz == 0);

    /* access items we don't want to move. */
    current_time += ITEM_UPDATE_INTERVAL + 1;
    /* touch every other item.  the odd-numbered ones in
     * [0, SMALL_CHUNKS_PER_LARGE_CHUNK * 2) will be evicted. */
    for (i = 0; i < SMALL_CHUNKS_PER_LARGE_CHUNK * 2; i += 2) {
        do_item_update(items[i].it);
    }
    /* touch remaining items */
    for (i = SMALL_CHUNKS_PER_LARGE_CHUNK * 2; i < num_objects; i ++) {
        do_item_update(items[i].it);
    }

    V_LPRINTF(2, "dereferencing objects\n");
    for (i = 0; i < num_objects; i ++) {
        do_item_deref(items[i].it);
    }

    V_LPRINTF(2, "alloc after deref\n");
    do {
        klen = make_random_key(key, max_small_key_size, true);
    } while (assoc_find(key, klen));

    lru_trigger = do_item_alloc(key, klen, FLAGS, 0, min_size_for_large_chunk - klen, addr);
    TASSERT(lru_trigger != NULL);

    V_LPRINTF(2, "search for evicted object\n");
    for (i = 1; i < SMALL_CHUNKS_PER_LARGE_CHUNK * 2; i += 2) {
        TASSERT(assoc_find(items[i].key, items[i].klen) == NULL);
    }

    V_LPRINTF(2, "ensuring that objects that shouldn't be evicted are still present\n");
    for (i = 0; i < SMALL_CHUNKS_PER_LARGE_CHUNK * 2; i += 2) {
        /* these may have been moved. */
        TASSERT((items[i].it = assoc_find(items[i].key, items[i].klen)));
    }
    for (i = SMALL_CHUNKS_PER_LARGE_CHUNK * 2; i < num_objects; i ++) {
        TASSERT(assoc_find(items[i].key, items[i].klen));
    }

    V_LPRINTF(2, "cleanup objects\n");
    for (i = 0; i < SMALL_CHUNKS_PER_LARGE_CHUNK * 2; i += 2) {
        do_item_unlink(items[i].it, UNLINK_NORMAL, items[i].key);
    }
    for (i = SMALL_CHUNKS_PER_LARGE_CHUNK * 2; i < num_objects; i ++) {
        do_item_unlink(items[i].it, UNLINK_NORMAL, items[i].key);
    }
    do_item_deref(lru_trigger);

    TASSERT(fsi.large_free_list_sz == large_free_list_sz &&
            fsi.small_free_list_sz == small_free_list_sz);

    return 0;
}
/* Example #9 */
/**
 * This verifies that
 *   1) the load is evenly balanced across servers.
 *   2) growing a pool never moves a key between servers that were already
 *      in the pool: a key either stays on its previous server or moves to
 *      one of the newly added servers.  If a key could bounce between
 *      existing servers, stale data might be returned.
 */
TEST(ch3, verify_correctness) {
  uint32_t i, j;
  uint32_t maximum_pool_size = furc_maximum_pool_size();
  char key[MAX_KEY_LENGTH + 1];
  std::vector<uint64_t> pools[NUM_POOLS];
  uint32_t sizes[NUM_POOLS];
  size_t num_pools;
  auto weights = std::make_unique<std::array<double, 1U << 23U>>();
  weights->fill(1.0);

  srand(time(nullptr));

  for (num_pools = 0; /* see end of loop */; ++num_pools) {
    if (num_pools == 0) {
      sizes[num_pools] = 1;
    } else if (num_pools == NUM_POOLS - 1) {
      sizes[num_pools] = maximum_pool_size;
    } else if (num_pools % 2 == 1) { // grow pool size geometrically
      sizes[num_pools] = sizes[num_pools - 1] * drand_in_range(1.5, 2.5);
    } else { // grow pool size arithmetically
      sizes[num_pools] = sizes[num_pools - 1] + rand_in_range(1, 11);
    }

    /* Make sure we don't exceed the maximum pool size. */
    if (sizes[num_pools] > maximum_pool_size) {
      sizes[num_pools] = maximum_pool_size;
    }

    pools[num_pools] = std::vector<uint64_t>(sizes[num_pools]);

    if (sizes[num_pools] == maximum_pool_size)
      break;
  }

  for (i = 0; i < NUM_SAMPLES; ++i) {
    size_t previous_num = -1;
    int len;

    make_random_key(key, MAX_KEY_LENGTH);
    len = strlen(key);

    // hash the same key in each pool, in increasing pool size order
    for (j = 0; j < num_pools; ++j) {
      size_t num = furc_hash(key, len, sizes[j]);
      EXPECT_LT(num, sizes[j]);

      // Verify that the weighted furc yields identical result with weights at 1
      assert(sizes[j] <= weights->size());
      folly::Range<const double*> weightRange(
          weights->cbegin(), weights->cbegin() + sizes[j]);
      size_t weighted = facebook::mcrouter::weightedFurcHash(
          folly::StringPiece(key, len), weightRange);
      EXPECT_EQ(num, weighted);

      ++pools[j][num];

      // make sure that this key either hashes to the same server,
      // or hashes to one of the newly added servers
      if (previous_num != num && j > 0) {
        EXPECT_GE(num, sizes[j - 1]);
      }

      previous_num = num;
    }
  }

  for (i = 0; i < num_pools; ++i) {
    /* Verify that load is evenly distributed. This isn't easy to do
       generally without significantly increasing the runtime by choosing
       a huge NUM_SAMPLES, so just check pools up to 1000 in size. */

    uint32_t pool_size = sizes[i];
    if (pool_size > 1000)
      break;
    double expected_mean = ((double)NUM_SAMPLES) / pool_size;

    double max_diff = 0;
    double sum = 0;
    for (j = 0; j < pool_size; j++) {
      double diff = std::abs(pools[i][j] - expected_mean);
      if (diff > max_diff)
        max_diff = diff;
      sum += pools[i][j];
    }
    double mean = sum / pool_size;
    // expect the sample mean to be within 5% of expected mean
    EXPECT_NEAR(mean, expected_mean, expected_mean * 0.05);

    // expect the maximum deviation from mean to be within 15%
    EXPECT_NEAR(max_diff, 0, mean * 0.15);

    sum = 0;
    for (j = 0; j < pool_size; j++) {
      double diff = pools[i][j] - mean;
      sum += diff * diff;
    }
    double stddev = sqrt(sum / pool_size);
    // expect the standard deviation to be < 5%
    EXPECT_NEAR(stddev, 0, mean * 0.05);
  }
}
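/*
 * rand_in_range() and drand_in_range() are harness helpers that are not
 * shown.  Plausible sketches, assuming half-open [lo, hi) ranges built on
 * rand() (the exact contracts in the original harness are unknown):
 */
#include <stdlib.h>

static uint32_t
rand_in_range(uint32_t lo, uint32_t hi)
{
    /* integer in [lo, hi); assumes hi > lo */
    return lo + (uint32_t)(rand() % (hi - lo));
}

static double
drand_in_range(double lo, double hi)
{
    /* double in [lo, hi) */
    return lo + (hi - lo) * ((double)rand() / ((double)RAND_MAX + 1.0));
}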
std::string search_key(Params &params,
                       TypeValidator *validator,
                       size_t series_min,
                       size_t series_max)
{
    if (validator == NULL) {
        printf("Validator not found!\n");
        return "";
    }

    size_t days = 0;
    int day_start = params.seed;
    int deadline = 2; //2 days max
    //---
    int seed = params.seed;
    char key[0x100];

    printf("Searching key started...\n");
#ifdef DEBUG
    printf("Start seed: %d = %#x\n----------\n", seed, seed);
    print_time_str(seed);
#endif
    
    if (series_min != series_max) {
#ifdef DEBUG
        printf("Smart search mode: ON!\nWarning: it works only if the file have a valid modification timestamp!\n");
        printf("Series min = %d , max = %d\n----------\n", series_min, series_max);
#endif
    } else {
#ifdef DEBUG
        printf("Smart search mode: OFF!\n");
        printf("Series min = %d , max = %d\n----------\n", series_min, series_max);
#endif
    }
    size_t series = series_min;

    while (deadline > 0) {
        srand (seed);
        for (size_t key_num = 0; key_num < series; key_num++) {
            make_random_key(key, sizeof(key));
            if (validator->testKey(key)) {
                if (validator->getAccuracy() >= PIVOT_MIN) {
#ifdef DEBUG
                    printf("Adjusting seed to to found one!\n");
#endif
                    params.seed = seed;
                    params.key_num = key_num;
                }
                printf(">> KEY FOUND: %s\n", key);
                printf("[SUCCESS]\n");
#ifdef DEBUG
                printf ("KEY: %s\nSEED %x = %d\nkey number in series: %d\n", key, seed, seed, key_num);
#endif
                log_key(seed, key, key_num, params.filename);
                return key;
            }
        }
        if (params.incrementalMode) {
            seed++;
        } else {
            seed--;
            if (series < series_max) {
                // max number of encrypted files per millisecond
                series += series_min;
            }
        }
        if (abs(day_start - seed) > DAY_LEN) {
            day_start = seed;
            days++;
            deadline--;
            printf("%d day passed!\n", days);
        }
    }
    return "";
}
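/*
 * log_key() records each hit; the original is not shown.  A minimal sketch
 * that appends one line per found key (the output format and the silent
 * failure on fopen() error are assumptions):
 */
#include <stdio.h>

static void
log_key(int seed, const char *key, size_t key_num, const char *filename)
{
    FILE *f = fopen(filename, "a");

    if (f == NULL)
        return;
    fprintf(f, "seed=%d (%#x) key_num=%zu key=%s\n",
            seed, (unsigned)seed, key_num, key);
    fclose(f);
}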
/* allocate all memory with small chunks.  allocate one more object.  it should
 * free up the oldest object.  release all objects.  this covers case 1 for the
 * small item alloc in flat_storage_lru_evict(..). */
static int
all_small_chunks_test(int verbose) {
    typedef struct {
        item* it;
        char key[KEY_MAX_LENGTH];
        uint8_t klen;
    } all_small_chunks_key_t;

    size_t num_objects = fsi.large_free_list_sz * SMALL_CHUNKS_PER_LARGE_CHUNK;
    all_small_chunks_key_t* small_items = malloc(sizeof(all_small_chunks_key_t) * num_objects);
    item* lru_trigger;
    size_t max_key_size = SMALL_TITLE_CHUNK_DATA_SZ;
    size_t i;
    char key[KEY_MAX_LENGTH];
    size_t klen;
    size_t large_free_list_sz = fsi.large_free_list_sz, small_free_list_sz = fsi.small_free_list_sz;

    V_PRINTF(1, "  * %s\n", __FUNCTION__);

    TASSERT(fsi.large_free_list_sz != 0);
    TASSERT(fsi.small_free_list_sz == 0);

    for (i = 0; i < num_objects; i ++) {
        V_PRINTF(2, "\r  *  allocating object %lu", i);
        V_FLUSH(2);
        do {
            small_items[i].klen = make_random_key(small_items[i].key, max_key_size, true);
        } while (assoc_find(small_items[i].key, small_items[i].klen));

        small_items[i].it = do_item_alloc(small_items[i].key, small_items[i].klen, FLAGS, 0, 0,
                                          addr);
        TASSERT(small_items[i].it);
        TASSERT(is_item_large_chunk(small_items[i].it) == false);

        do_item_link(small_items[i].it, small_items[i].key);
    }
    V_PRINTF(2, "\n");

    TASSERT(fsi.large_free_list_sz == 0 &&
            fsi.small_free_list_sz == 0);

    V_LPRINTF(2, "alloc before deref\n");
    do {
        klen = make_random_key(key, max_key_size, true);
    } while (assoc_find(key, klen));

    lru_trigger = do_item_alloc(key, klen, FLAGS, 0, 0, addr);
    TASSERT(lru_trigger == NULL);

    V_LPRINTF(2, "dereferencing objects\n");
    for (i = 0; i < num_objects; i ++) {
        do_item_deref(small_items[i].it);
    }

    V_LPRINTF(2, "alloc after deref\n");
    lru_trigger = do_item_alloc(key, klen, FLAGS, 0, 0, addr);
    TASSERT(lru_trigger != NULL);

    V_LPRINTF(2, "search for evicted object\n");
    TASSERT(assoc_find(small_items[0].key, small_items[0].klen) == NULL);

    V_LPRINTF(2, "ensuring that objects that shouldn't be evicted are still present\n");
    for (i = 1; i < num_objects; i ++) {
        TASSERT(assoc_find(small_items[i].key, small_items[i].klen));
    }

    V_LPRINTF(2, "cleanup objects\n");
    for (i = 1; i < num_objects; i ++) {
        do_item_unlink(small_items[i].it, UNLINK_NORMAL, small_items[i].key);
    }
    do_item_deref(lru_trigger);

    TASSERT(fsi.large_free_list_sz == large_free_list_sz &&
            fsi.small_free_list_sz == small_free_list_sz);

    return 0;
}