Example #1
0
File: rehash.c Project: cquinlan/OlegDB
/* Doubles the hash table backing a database and re-inserts every bucket.
 * Collision chains are first broken apart: chained buckets are detached
 * onto a stack of "orphans" and rehashed individually afterwards.
 * Returns 0 on success, -1 on allocation failure. */
int _ol_grow_and_rehash_db(ol_database *db) {
    int i;
    ol_bucket *bucket;
    ol_bucket **tmp_hashes = NULL;

    size_t to_alloc = db->cur_ht_size * 2;
    debug("Growing DB to %zu bytes.", to_alloc);
    tmp_hashes = calloc(1, to_alloc);
    check_mem(tmp_hashes);

    /* Sentinel-bottomed stack that holds buckets detached from
     * collision chains until they can be rehashed. */
    struct ol_stack *orphans = NULL;
    orphans = malloc(sizeof(struct ol_stack));
    check_mem(orphans);
    orphans->next = NULL;
    orphans->data = NULL;
    int orphans_found = 0;

    int iterations = ol_ht_bucket_max(db->cur_ht_size);
    for (i = 0; i < iterations; i++) {
        bucket = db->hashes[i];
        if (bucket != NULL) {
            if (bucket->next != NULL) {
                /* Detach every chained bucket and push it for later. */
                ol_bucket *tmp_bucket = bucket;
                do {
                    spush(&orphans, tmp_bucket->next);

                    ol_bucket *next = tmp_bucket->next;
                    tmp_bucket->next = NULL;
                    tmp_bucket = next;

                    orphans_found++;
                } while (tmp_bucket->next != NULL);
            }
            /* Rehash the bucket itself. */
            _ol_rehash_insert_bucket(tmp_hashes, to_alloc, bucket);
        }
    }

    /* Take care of our orphans.
     * BUGFIX: the original used a do/while loop, which popped from an
     * EMPTY stack whenever no collision chains existed (orphans_found
     * == 0) and handed the resulting garbage pointer to
     * _ol_rehash_insert_bucket. Guard the pop with the loop condition. */
    ol_log_msg(LOG_INFO, "Have %i orphans to take care of.", orphans_found);
    while (orphans->next != NULL) {
        ol_bucket *rebucket = spop(&orphans);
        _ol_rehash_insert_bucket(tmp_hashes, to_alloc, rebucket);

        orphans_found--;
    }
    ol_log_msg(LOG_INFO, "We now have %i orphans not accounted for.", orphans_found);

    free(orphans);
    free(db->hashes);
    db->hashes = tmp_hashes;
    db->cur_ht_size = to_alloc;
    debug("Current hash table size is now: %zu bytes.", to_alloc);
    return 0;

error:
    if (tmp_hashes != NULL)
        free(tmp_hashes);
    return -1;
}
Example #2
0
File: oleg.c Project: carriercomm/OlegDB
/* Shared teardown path for closing a database: frees every bucket in the
 * hash table, closes the transaction list (when transactions are enabled)
 * and destroys the splay tree. Returns OL_SUCCESS, or OL_FAILURE when
 * fewer records were freed than the database claims to hold. */
static inline int _ol_close_common(ol_database *db) {
    debug("Closing \"%s\" database.", db->name);

    /* TODO: Commit/abort transactions here. */
    /*
    if (!new_db->is_enabled(OL_F_DISABLE_TX, &new_db->feature_set)) {
    }
    */

    unsigned int iterations = ol_ht_bucket_max(db->cur_ht_size);
    int rcrd_cnt = db->rcrd_cnt;
    int freed = 0;
    debug("Freeing %d records.", rcrd_cnt);
    debug("Hash table iterations: %d.", iterations);

    /* Unsigned index to match `iterations` (the original compared a
     * signed int against an unsigned int). */
    unsigned int i = 0;
    for (; i < iterations; i++) {
        if (db->hashes[i] != NULL) {
            ol_bucket *ptr, *next;
            /* Walk the collision chain, saving `next` before each free. */
            for (ptr = db->hashes[i]; NULL != ptr; ptr = next) {
                next = ptr->next;
                _ol_free_bucket(&ptr);
                freed++;
            }
        }
    }

    /* NOTE(review): the freed-vs-record-count sanity check only runs when
     * transactions are enabled; presumably intentional, but verify. */
    if (!db->is_enabled(OL_F_DISABLE_TX, &db->feature_set)) {
        ols_close(db->cur_transactions);
        free(db->cur_transactions);
        check(freed >= rcrd_cnt, "Error: Couldn't free all records.\nRecords freed: %d", freed);
    }

    if (db->tree != NULL) {
        debug("Destroying tree.");
        ols_close(db->tree);
        free(db->tree);
        db->tree = NULL;
    }

    return OL_SUCCESS;

error:
    return OL_FAILURE;
}
Example #3
0
/* Inserts a new key/value pair inside a transaction, or reallocates the
 * existing bucket when the key is already present. Data is appended to the
 * values file (LZ4-compressed when OL_F_LZ4 is enabled) unless the database
 * is replaying its AOL (OL_S_STARTUP), in which case only sizes/offsets are
 * tracked. Returns OL_SUCCESS or OL_FAILURE. */
int olt_jar(ol_transaction *tx, const char *key, size_t klen, const unsigned char *value, size_t vsize) {
    int ret;
    char _key[KEY_SIZE] = {'\0'};
    size_t _klen = 0;
    ol_database *db = tx->transaction_db;

    ol_bucket *bucket = ol_get_bucket(db, key, klen, &_key, &_klen);
    check(_klen > 0, "Key length of zero not allowed.");

    /* We only want to hit this codepath within the same database, otherwise
     * weird stuff happens. Like fires and stuff.
     */
    if (bucket != NULL) {
        /* Flag the transaction as dirty. */
        tx->dirty = 1;
        return _ol_reallocate_bucket(db, bucket, value, vsize);
    }

    /* Looks like we don't have an old hash */
    ol_bucket *new_bucket = calloc(1, sizeof(ol_bucket));
    if (new_bucket == NULL)
        return OL_FAILURE;

    /* Copy _key into the new bucket.
     * BUGFIX: the original jumped to the shared error label on key
     * allocation failure, leaking new_bucket, and checked strncpy's
     * return value — strncpy always returns its dst argument, so that
     * failure branch was unreachable dead code. */
    new_bucket->key = malloc(_klen + 1);
    if (new_bucket->key == NULL) {
        free(new_bucket);
        return OL_FAILURE;
    }
    memcpy(new_bucket->key, _key, _klen);
    new_bucket->key[_klen] = '\0';

    new_bucket->klen = _klen;
    new_bucket->original_size = vsize;

    /* Compute the new position of the data in the values file: */
    const size_t new_offset = db->val_size;

    if (db->state != OL_S_STARTUP) {
        unsigned char *new_data_ptr = NULL;

        if (db->is_enabled(OL_F_LZ4, &db->feature_set)) {
            /* Compress using LZ4 if enabled */
            int maxoutsize = LZ4_compressBound(vsize);
            _ol_ensure_values_file_size(db, maxoutsize);
            new_data_ptr = db->values + db->val_size;
            memset(new_data_ptr, '\0', maxoutsize);

            /* All these f*****g casts */
            size_t cmsize = (size_t)LZ4_compress((char*)value, (char*)new_data_ptr,
                                                 (int)vsize);
            if (cmsize == 0) {
                /* Compression failed.
                 * BUGFIX: also free the key, which the original leaked. */
                free(new_bucket->key);
                free(new_bucket);
                return OL_FAILURE;
            }

            new_bucket->data_size = cmsize;
        } else {
            new_bucket->data_size = vsize;
            _ol_ensure_values_file_size(db, new_bucket->data_size);
            new_data_ptr = db->values + db->val_size;
            memset(new_data_ptr, '\0', new_bucket->data_size);

            /* memcpy always returns its dst argument, so the original
             * failure check (and the leak in its branch) was dead code. */
            memcpy(new_data_ptr, value, vsize);
        }
    } else {
        /* We still need to set the data size, but not the actual data. */
        if (db->is_enabled(OL_F_LZ4, &db->feature_set)) {
            /* Since LZ4_compressBound only provides the worst case scenario
             * and not what the data actually compressed to (we're replaying
             * the AOL file, remember?) we have to compress it again and grab
             * the amount of bytes processed.
             * TODO: This is dumb. Make a function that just sets the bucket size.
             * This new mythical function should also handle setting the data_offset
             * of the bucket.
             */
            int maxoutsize = LZ4_compressBound(vsize);
            char tmp_data[maxoutsize];
            /* Don't need to memset tmp_data because I don't care about it. */

            size_t cmsize = (size_t)LZ4_compress((char *)value, (char *)tmp_data,
                                                 (int)vsize);
            new_bucket->data_size = cmsize;
        } else {
            new_bucket->data_size = vsize;
        }
    }

    /* Set the offset of the bucket before we increment it offset globally. */
    new_bucket->data_offset = new_offset;
    /* Remember to increment the tracked data size of the DB. */
    db->val_size += new_bucket->data_size;

    unsigned int bucket_max = ol_ht_bucket_max(db->cur_ht_size);
    /* TODO: rehash this shit at 80% */
    if (db->rcrd_cnt > 0 && db->rcrd_cnt == bucket_max) {
        debug("Record count is now %i; growing hash table.", db->rcrd_cnt);
        ret = _ol_grow_and_rehash_db(db);
        if (ret > 0) {
            ol_log_msg(LOG_ERR, "Problem rehashing DB. Error code: %i", ret);
            /* BUGFIX: also free the key, which the original leaked. */
            free(new_bucket->key);
            free(new_bucket);
            return OL_FAILURE;
        }
    }

    uint32_t hash;
    MurmurHash3_x86_32(_key, _klen, DEVILS_SEED, &hash);
    ret = _ol_set_bucket(db, new_bucket, hash);

    if(ret > 0)
        ol_log_msg(LOG_ERR, "Problem inserting item: Error code: %i", ret);

    if(db->is_enabled(OL_F_APPENDONLY, &db->feature_set) &&
            db->state != OL_S_STARTUP) {
        ol_aol_write_cmd(db, "JAR", new_bucket);
    }

    /* Flag the transaction as dirty. */
    tx->dirty = 1;

    return OL_SUCCESS;

error:
    return OL_FAILURE;
}
Example #4
0
File: oleg.c Project: carriercomm/OlegDB
/* Compacts the database's append-only log: rewrites a fresh AOL file
 * containing only the live (non-expired) buckets, then atomically renames
 * it over the old one. Works from the hash table rather than the tree so
 * it functions even when the tree is disabled.
 * Returns OL_SUCCESS or OL_FAILURE. */
int ol_squish(ol_database *db) {
    check(db != NULL, "Cannot squish null database.");
    int fflush_turned_off = 0;
    /* Snapshot of the feature set (non-const so its address matches the
     * int* parameter is_enabled expects; the original passed a const
     * int* there). */
    int flags = db->feature_set;
    if (db->is_enabled(OL_F_APPENDONLY, &flags)) {
        /* Turn off fflush for the time being. We'll do it once at the end. */
        if (db->is_enabled(OL_F_AOL_FFLUSH, &db->feature_set)) {
            db->disable(OL_F_AOL_FFLUSH, &db->feature_set);
            fflush_turned_off = 1;
        }

        /* AOL is enabled. Create a new aol file that we'll be using. */
        fflush(db->aolfd);
        fclose(db->aolfd);

        /* Create a new file which we'll move into the old ones place later */
        db->get_db_file_name(db, "aol.new", db->aol_file);

        /* Get a new file descriptor.
         * BUGFIX: the original never checked fopen's result; a NULL
         * stream would later be passed to fflush/fclose (UB). */
        db->aolfd = fopen(db->aol_file, AOL_FILEMODE);
        check(db->aolfd != NULL, "Could not open new AOL file.");
    }

    /* Iterate through the hash table instead of using the tree just
     * so you can use this in case the tree isn't enabled. */
    const unsigned int iterations = ol_ht_bucket_max(db->cur_ht_size);

    /* Unsigned index to match `iterations` (the original compared a
     * signed int against an unsigned int). */
    unsigned int i = 0;
    for (; i < iterations; i++) {
        if (db->hashes[i] != NULL) {
            /* Found a bucket. */
            ol_bucket *ptr, *next;
            /* Start traversing the linked list of collisions, starting with
             * the bucket we found. */
            for (ptr = db->hashes[i]; NULL != ptr; ptr = next) {
                if (!_has_bucket_expired(ptr)) {
                    /* Bucket hasn't been deleted or expired. */
                    if (db->is_enabled(OL_F_APPENDONLY, &db->feature_set)) {
                        /* AOL is enabled. Write it to the new AOL file. */
                        ol_aol_write_cmd(db, "JAR", ptr);

                        /* See if theres an expiration date we care about: */
                        if (ptr->expiration != NULL) {
                            ol_aol_write_cmd(db, "SPOIL", ptr);
                        }
                    }
                }
                /* Get the next bucket in the collision chain. */
                next = ptr->next;
            }
        }
    }

    if (db->is_enabled(OL_F_APPENDONLY, &db->feature_set)) {
        /* Restore fflush if we turned it off above. */
        if (fflush_turned_off) {
            db->enable(OL_F_AOL_FFLUSH, &db->feature_set);
        }
        /* Make sure all of the new stuff is written */
        fflush(db->aolfd);
        fclose(db->aolfd);

        char new_filename[AOL_FILENAME_ALLOC] = {0};
        /* Set the old filename. */
        db->get_db_file_name(db, "aol.new", new_filename);
        db->get_db_file_name(db, AOL_FILENAME, db->aol_file);
        /* Rename the .aol.new file to just be .aol */
        check(rename(new_filename, db->aol_file) == 0, "Could not rename new AOL to old AOL.");

        /* Get a new file descriptor. BUGFIX: check fopen's result. */
        db->aolfd = fopen(db->aol_file, AOL_FILEMODE);
        check(db->aolfd != NULL, "Could not reopen AOL file.");
    }

    return OL_SUCCESS;

error:
    return OL_FAILURE;
}
Example #5
0
File: utils.c Project: zofuthan/OlegDB
/* Maps a hash to a slot index in a table of the given byte size.
 * Bucket counts are powers of two, so masking with (max - 1) is
 * equivalent to (hash % max) but cheaper. */
inline unsigned int _ol_calc_idx(const size_t ht_size, const uint32_t hash) {
    const unsigned int mask = ol_ht_bucket_max(ht_size) - 1;
    return hash & mask;
}