Example #1
0
int olt_spoil(ol_transaction *tx, const char *key, size_t klen, struct tm *expiration_date) {
    /* Sets (or replaces) the expiration date on a key within a transaction.
     * If the key only exists in the parent DB, its bucket is first copied
     * into the transaction DB so the parent is left untouched until commit.
     * Returns OL_SUCCESS if the key was found and spoiled, OL_FAILURE otherwise. */
    char _key[KEY_SIZE] = {'\0'};
    size_t _klen = 0;

    ol_database *operating_db = tx->transaction_db;

    ol_bucket *bucket = ol_get_bucket(operating_db, key, klen, &_key, &_klen);
    check(_klen > 0, "Key length of zero not allowed.");

    if (bucket == NULL && tx->parent_db != NULL) {
        /* Transaction DB doesn't have this key, but the parent does. */
        operating_db = tx->parent_db;
        bucket = ol_get_bucket(operating_db, key, klen, &_key, &_klen);
        if (bucket != NULL) {
            /* Copy that value into our current transaction db,
             * and then spoil it.
             */
            uint32_t hash;
            MurmurHash3_x86_32(_key, _klen, DEVILS_SEED, &hash);
            ol_bucket *copied = malloc(sizeof(ol_bucket));
            check_mem(copied);

            memcpy(copied, bucket, sizeof(ol_bucket));
            copied->next = NULL;

            copied->key = malloc(_klen + 1);
            if (copied->key == NULL) {
                /* BUGFIX: the old check_mem() here leaked `copied`. */
                free(copied);
                goto error;
            }
            copied->key[_klen] = '\0';
            memcpy(copied->key, bucket->key, _klen);

            _ol_set_bucket_no_incr(tx->transaction_db, copied, hash);

            /* BUGFIX: spoil the copy that now lives in the transaction DB,
             * not the parent's bucket — otherwise the parent DB would be
             * mutated before the transaction commits, contradicting the
             * copy-on-write intent documented above. */
            bucket = copied;
            operating_db = tx->transaction_db;
        }
    }

    if (bucket != NULL) {
        if (bucket->expiration == NULL) {
            bucket->expiration = malloc(sizeof(struct tm));
            /* BUGFIX: previously unchecked; on OOM the memcpy below
             * would have written through a NULL pointer. */
            check_mem(bucket->expiration);
        } else {
            debug("Hmmm, bucket->expiration wasn't null.");
        }
        memcpy(bucket->expiration, expiration_date, sizeof(struct tm));
        debug("New expiration time: %lu", (long)mktime(bucket->expiration));

#ifdef DEBUG
        struct tm utctime;
        time_t current;

        /* So dumb */
        time(&current);
        gmtime_r(&current, &utctime);
        current = timegm(&utctime);
        debug("Current time: %lu", (long)current);
#endif
        if (operating_db->is_enabled(OL_F_APPENDONLY, &operating_db->feature_set) &&
                operating_db->state != OL_S_STARTUP) {
            ol_aol_write_cmd(operating_db, "SPOIL", bucket);
        }

        /* Flag the transaction as dirty. */
        tx->dirty = 1;

        return OL_SUCCESS;
    }

    return OL_FAILURE;

error:
    return OL_FAILURE;
}
Example #2
0
int olt_scoop(ol_transaction *tx, const char *key, size_t klen) {
    /* Deletes a key within a transaction ("scoop some data from the jar").
     * The bucket is only physically unlinked and freed when it lives in the
     * transaction DB; a hit in the parent DB is left intact (the SCOOP is
     * still logged so it can be replayed at commit).
     * Returns OL_SUCCESS if the key was found, OL_FAILURE otherwise. */
    uint32_t hash;
    char _key[KEY_SIZE] = {'\0'};
    _ol_trunc(key, klen, _key);
    size_t _klen = strnlen(_key, KEY_SIZE);
    check(_klen > 0, "Key length cannot be zero.");

    MurmurHash3_x86_32(_key, _klen, DEVILS_SEED, &hash);
    ol_database *operating_db = tx->transaction_db;
    /* First attempt to calculate the index in the transaction_db */
    unsigned int index = _ol_calc_idx(tx->transaction_db->cur_ht_size, hash);
    /* If we couldn't find it in the transaction_db, look for the value in the
     * parent_db (the one we forked from) */
    if (tx->transaction_db->hashes[index] == NULL &&
            tx->parent_db != NULL) {
        index = _ol_calc_idx(tx->parent_db->cur_ht_size, hash);
        operating_db = tx->parent_db;
    }

    if (operating_db->hashes[index] == NULL)
        return OL_FAILURE;

    /* Now that we know what database we're operating on, continue
     * as usual. */
    ol_bucket *to_free = NULL;
    int return_level = OL_FAILURE;

    size_t larger_key = 0;
    ol_bucket *bucket = operating_db->hashes[index];
    larger_key = bucket->klen > _klen ? bucket->klen : _klen;
    if (strncmp(bucket->key, _key, larger_key) == 0) {
        /* We only ACTUALLY want to delete something if we're operating on the transaction_db */
        if (operating_db == tx->transaction_db)
            operating_db->hashes[index] = bucket->next;

        to_free = bucket;
        return_level = OL_SUCCESS;
    } else { /* Keys weren't the same, traverse the bucket LL */
        while (bucket->next != NULL) {
            ol_bucket *last = bucket;
            bucket = bucket->next;
            /* BUGFIX: compare against the truncated key length (_klen), not
             * the caller's raw klen, matching the head-of-chain case above. */
            larger_key = bucket->klen > _klen ? bucket->klen : _klen;
            if (strncmp(bucket->key, _key, larger_key) == 0) {
                /* Unlink from the collision chain, but only in the
                 * transaction_db. (The old if/else here assigned
                 * bucket->next in both arms.) */
                if (operating_db == tx->transaction_db)
                    last->next = bucket->next;

                to_free = bucket;
                return_level = OL_SUCCESS;
                break;
            }
        }
    }

    if (to_free != NULL) {
        /* Only delete the node from the transaction_db. */
        if (operating_db == tx->transaction_db &&
            tx->transaction_db->is_enabled(OL_F_SPLAYTREE, &tx->transaction_db->feature_set)) {
            ols_delete(tx->transaction_db->tree, to_free->node);
            to_free->node = NULL;
        }

        /* Write the SCOOP command to the log, so we can replay it later. */
        if (tx->transaction_db->state != OL_S_STARTUP) {
            /* `to_free` and `bucket` alias here; use the name that states
             * intent. */
            ol_aol_write_cmd(tx->transaction_db, "SCOOP", to_free);
        }

        /* Again, only delete the key from the transaction_db, not the parent. */
        if (operating_db == tx->transaction_db) {
            unsigned char *data_ptr = tx->transaction_db->values + to_free->data_offset;
            const size_t data_size = to_free->data_size;
            if (data_size != 0)
                memset(data_ptr, '\0', data_size);
            _ol_free_bucket(&to_free);
            tx->transaction_db->rcrd_cnt -= 1;
        }
    }

    /* Flag the transaction as dirty. */
    tx->dirty = 1;

    return return_level;
error:
    return OL_FAILURE;
}
Example #3
0
File: oleg.c  Project: carriercomm/OlegDB
int ol_squish(ol_database *db) {
    /* Compacts the database's append-only log (AOL): writes a fresh
     * ".aol.new" file containing one JAR (plus optional SPOIL) command per
     * live bucket, then renames it over the old AOL file.
     * Returns OL_SUCCESS, or OL_FAILURE on error. */
    check(db != NULL, "Cannot squish null database.");
    int fflush_turned_off = 0;
    /* Consistency fix: query the live feature set directly (as every other
     * call in this function does) instead of taking the address of a
     * `const int` snapshot, which mismatched is_enabled's pointer type. */
    if (db->is_enabled(OL_F_APPENDONLY, &db->feature_set)) {
        /* Turn off fflush for the time being. We'll do it once at the end. */
        if (db->is_enabled(OL_F_AOL_FFLUSH, &db->feature_set)) {
            db->disable(OL_F_AOL_FFLUSH, &db->feature_set);
            fflush_turned_off = 1;
        }

        /* AOL is enabled. Create a new aol file that we'll be using. */
        fflush(db->aolfd);
        fclose(db->aolfd);

        /* Create a new file which we'll move into the old ones place later */
        db->get_db_file_name(db, "aol.new", db->aol_file);

        /* Get a new file descriptor */
        db->aolfd = fopen(db->aol_file, AOL_FILEMODE);
        /* BUGFIX: previously unchecked; AOL writes below would have gone
         * through a NULL FILE*. */
        check(db->aolfd != NULL, "Could not open new AOL file.");
    }

    /* Iterate through the hash table instead of using the tree just
     * so you can use this in case the tree isn't enabled. */
    const unsigned int iterations = ol_ht_bucket_max(db->cur_ht_size);

    /* BUGFIX: loop index is unsigned to match `iterations` (was a
     * signed/unsigned comparison). */
    unsigned int i;
    for (i = 0; i < iterations; i++) {
        if (db->hashes[i] != NULL) {
            /* Found a bucket. */
            ol_bucket *ptr, *next;
            /* Start traversing the linked list of collisions, starting with
             * the bucket we found. */
            for (ptr = db->hashes[i]; NULL != ptr; ptr = next) {
                if (!_has_bucket_expired(ptr)) {
                    /* Bucket hasn't been deleted or expired. */
                    if (db->is_enabled(OL_F_APPENDONLY, &db->feature_set)) {
                        /* AOL is enabled. Write it to the new AOL file. */
                        ol_aol_write_cmd(db, "JAR", ptr);

                        /* See if theres an expiration date we care about: */
                        if (ptr->expiration != NULL) {
                            ol_aol_write_cmd(db, "SPOIL", ptr);
                        }
                    }
                }
                /* Get the next bucket in the collision chain. */
                next = ptr->next;
            }
        }
    }

    if (db->is_enabled(OL_F_APPENDONLY, &db->feature_set)) {
        /* Re-enable fflush if we turned it off above. */
        if (fflush_turned_off) {
            db->enable(OL_F_AOL_FFLUSH, &db->feature_set);
        }
        /* Make sure all of the new stuff is written */
        fflush(db->aolfd);
        fclose(db->aolfd);

        char new_filename[AOL_FILENAME_ALLOC] = {0};
        /* Set the old filename. */
        db->get_db_file_name(db, "aol.new", new_filename);
        db->get_db_file_name(db, AOL_FILENAME, db->aol_file);
        /* Rename the .aol.new file to just be .aol */
        check(rename(new_filename, db->aol_file) == 0, "Could not rename new AOL to old AOL.");

        /* Get a new file descriptor */
        db->aolfd = fopen(db->aol_file, AOL_FILEMODE);
        /* BUGFIX: previously unchecked as well. */
        check(db->aolfd != NULL, "Could not reopen AOL file.");
    }

    return OL_SUCCESS;

error:
    return OL_FAILURE;
}
Example #4
0
int olt_jar(ol_transaction *tx, const char *key, size_t klen, const unsigned char *value, size_t vsize) {
    /* Inserts (or overwrites) a key/value pair within a transaction.
     * New values are appended to the transaction DB's values file
     * (LZ4-compressed when OL_F_LZ4 is enabled); an existing key is handed
     * off to _ol_reallocate_bucket().
     * Returns OL_SUCCESS or OL_FAILURE. */
    int ret;
    char _key[KEY_SIZE] = {'\0'};
    size_t _klen = 0;
    ol_database *db = tx->transaction_db;

    ol_bucket *bucket = ol_get_bucket(db, key, klen, &_key, &_klen);
    check(_klen > 0, "Key length of zero not allowed.");

    /* We only want to hit this codepath within the same database, otherwise
     * weird stuff happens. Like fires and stuff.
     */
    if (bucket != NULL) {
        /* Flag the transaction as dirty. */
        tx->dirty = 1;
        return _ol_reallocate_bucket(db, bucket, value, vsize);
    }

    /* Looks like we don't have an old hash */
    ol_bucket *new_bucket = calloc(1, sizeof(ol_bucket));
    if (new_bucket == NULL)
        return OL_FAILURE;

    /* copy _key into new bucket */
    new_bucket->key = malloc(_klen + 1);
    if (new_bucket->key == NULL) {
        /* BUGFIX: the old check_mem() path leaked new_bucket. */
        free(new_bucket);
        return OL_FAILURE;
    }
    /* BUGFIX: strncpy always returns its destination, so the old
     * `strncpy(...) != new_bucket->key` failure check was dead code.
     * Copy with memcpy and NUL-terminate explicitly. */
    memcpy(new_bucket->key, _key, _klen);
    new_bucket->key[_klen] = '\0';

    new_bucket->klen = _klen;
    new_bucket->original_size = vsize;

    /* Compute the new position of the data in the values file: */
    const size_t new_offset = db->val_size;

    if (db->state != OL_S_STARTUP) {
        unsigned char *new_data_ptr = NULL;

        if (db->is_enabled(OL_F_LZ4, &db->feature_set)) {
            /* Compress using LZ4 if enabled */
            int maxoutsize = LZ4_compressBound(vsize);
            _ol_ensure_values_file_size(db, maxoutsize);
            new_data_ptr = db->values + db->val_size;
            memset(new_data_ptr, '\0', maxoutsize);

            /* All these f*****g casts */
            size_t cmsize = (size_t)LZ4_compress((char*)value, (char*)new_data_ptr,
                                                 (int)vsize);
            if (cmsize == 0) {
                /* BUGFIX: also free the key, which the old path leaked. */
                free(new_bucket->key);
                free(new_bucket);
                return OL_FAILURE;
            }

            new_bucket->data_size = cmsize;
        } else {
            new_bucket->data_size = vsize;
            _ol_ensure_values_file_size(db, new_bucket->data_size);
            new_data_ptr = db->values + db->val_size;
            memset(new_data_ptr, '\0', new_bucket->data_size);

            /* memcpy always returns its destination; no failure to check. */
            memcpy(new_data_ptr, value, vsize);
        }
    } else {
        /* We still need to set the data size, but not the actual data. */
        if (db->is_enabled(OL_F_LZ4, &db->feature_set)) {
            /* Since LZ4_compressBound only provides the worst case scenario
             * and not what the data actually compressed to (we're replaying
             * the AOL file, remember?) we have to compress it again and grab
             * the amount of bytes processed.
             * TODO: This is dumb. Make a function that just sets the bucket size.
             * This new mythical function should also handle setting the data_offset
             * of the bucket.
             */
            int maxoutsize = LZ4_compressBound(vsize);
            char tmp_data[maxoutsize];
            /* Don't need to memset tmp_data because I don't care about it. */

            size_t cmsize = (size_t)LZ4_compress((char *)value, (char *)tmp_data,
                                                 (int)vsize);
            new_bucket->data_size = cmsize;
        } else {
            new_bucket->data_size = vsize;
        }
    }

    /* Set the offset of the bucket before we increment it offset globally. */
    new_bucket->data_offset = new_offset;
    /* Remember to increment the tracked data size of the DB. */
    db->val_size += new_bucket->data_size;

    unsigned int bucket_max = ol_ht_bucket_max(db->cur_ht_size);
    /* TODO: rehash this shit at 80% */
    if (db->rcrd_cnt > 0 && db->rcrd_cnt == bucket_max) {
        debug("Record count is now %i; growing hash table.", db->rcrd_cnt);
        ret = _ol_grow_and_rehash_db(db);
        if (ret > 0) {
            ol_log_msg(LOG_ERR, "Problem rehashing DB. Error code: %i", ret);
            /* BUGFIX: also free the key, which the old path leaked. */
            free(new_bucket->key);
            free(new_bucket);
            return OL_FAILURE;
        }
    }


    uint32_t hash;
    MurmurHash3_x86_32(_key, _klen, DEVILS_SEED, &hash);
    ret = _ol_set_bucket(db, new_bucket, hash);

    if(ret > 0)
        ol_log_msg(LOG_ERR, "Problem inserting item: Error code: %i", ret);

    if(db->is_enabled(OL_F_APPENDONLY, &db->feature_set) &&
            db->state != OL_S_STARTUP) {
        ol_aol_write_cmd(db, "JAR", new_bucket);
    }

    /* Flag the transaction as dirty. */
    tx->dirty = 1;

    return OL_SUCCESS;

error:
    return OL_FAILURE;
}
Example #5
0
File: utils.c  Project: zofuthan/OlegDB
int _ol_reallocate_bucket(ol_database *db, ol_bucket *bucket,
                          const unsigned char *value, size_t vsize) {
    /* Replaces the value stored in an existing bucket. The new data is
     * written in place when it fits in the bucket's current slot, otherwise
     * it is appended to the end of the values file. During startup (AOL
     * replay) only sizes/offsets are updated — no data is written.
     * Returns 0 on success, non-zero on failure. */
    debug("Reallocating bucket.");

    unsigned char *old_data_ptr = db->values + bucket->data_offset;
    /* Clear out the old data in the file. */
    if (db->state != OL_S_STARTUP && bucket->data_size > 0)
        memset(old_data_ptr, '\0', bucket->data_size);
    /* Compute the new position of the data in the values file: */
    size_t new_offset = db->val_size;
    unsigned char *new_data_ptr = NULL;

    /* Compress using LZ4 if enabled */
    size_t cmsize = 0;
    int extended_value_area = 0;
    if (db->is_enabled(OL_F_LZ4, &db->feature_set)) {
        const int maxoutsize = LZ4_compressBound(vsize);
        /* Explicit cast avoids a signed/unsigned comparison warning;
         * LZ4_compressBound is non-negative for valid sizes. */
        if ((size_t)maxoutsize <= bucket->data_size) {
            /* We don't need to put this value at the end of the file if the
             * new value is small enough. */
            new_data_ptr = old_data_ptr;
            new_offset = bucket->data_offset;
        } else {
            _ol_ensure_values_file_size(db, maxoutsize);
            extended_value_area = 1;
            new_data_ptr = db->values + db->val_size;
        }

        if (db->state != OL_S_STARTUP) {
            cmsize = (size_t)LZ4_compress((char*)value, (char*)new_data_ptr,
                                          (int)vsize);
        } else {
            /* We're starting up (replaying the AOL), so we don't write to
             * the values file; compress into a scratch buffer just to learn
             * the compressed size. (The old code shadowed `maxoutsize`
             * with a second identical declaration here.) */
            char tmp_data[maxoutsize];
            cmsize = (size_t)LZ4_compress((char *)value, (char *)tmp_data,
                                          (int)vsize);
        }
    } else {
        if (vsize <= bucket->data_size) {
            /* We don't need to put this value at the end of the file if the
             * new value is small enough. */
            new_data_ptr = old_data_ptr;
            new_offset = bucket->data_offset;
        } else {
            _ol_ensure_values_file_size(db, vsize);
            extended_value_area = 1;
            new_data_ptr = db->values + db->val_size;
        }
        if (db->state != OL_S_STARTUP) {
            /* Like above, avoid writing to the values file on startup.
             * BUGFIX: memcpy always returns its first argument, so the old
             * `!= new_data_ptr` failure check (return 4) was dead code. */
            memcpy(new_data_ptr, value, vsize);
        }
    }

    /* A rewritten value no longer carries the old expiration. */
    if (bucket->expiration != NULL) {
        free(bucket->expiration);
        bucket->expiration = NULL;
    }

    /* Set original_size regardless of lz4 compression. This ensures we always
     * have something to write to the AOL. */
    bucket->original_size = vsize;
    if(db->is_enabled(OL_F_LZ4, &db->feature_set)) {
        bucket->data_size = cmsize;
    } else {
        bucket->data_size = vsize;
    }
    bucket->data_offset = new_offset;

    /* Remember to increment the tracked data size of the DB, but only when
     * the data actually went to the end of the file. */
    if (extended_value_area)
        db->val_size += bucket->data_size;

    if(db->is_enabled(OL_F_APPENDONLY, &db->feature_set) && db->state != OL_S_STARTUP) {
        ol_aol_write_cmd(db, "JAR", bucket);
    }

    return 0;
}