Example #1
0
/* Inserts a value under `key` within the given transaction.
 *
 * If the key already exists in the transaction's database the bucket is
 * overwritten in place via _ol_reallocate_bucket(); otherwise a new bucket is
 * allocated, the value is (optionally LZ4-)compressed into the values file,
 * and the bucket is inserted into the hash table.
 *
 * Parameters:
 *   tx    - open transaction; its transaction_db is the target database.
 *   key   - raw key bytes (truncated/sanitized by ol_get_bucket into _key).
 *   klen  - length of `key`.
 *   value - value bytes to store.
 *   vsize - length of `value`.
 *
 * Returns OL_SUCCESS or OL_FAILURE. On success the transaction is flagged
 * dirty. During AOL replay (db->state == OL_S_STARTUP) the values file is not
 * written; only bucket metadata is reconstructed.
 */
int olt_jar(ol_transaction *tx, const char *key, size_t klen, const unsigned char *value, size_t vsize) {
    int ret;
    char _key[KEY_SIZE] = {'\0'};
    size_t _klen = 0;
    ol_database *db = tx->transaction_db;

    ol_bucket *bucket = ol_get_bucket(db, key, klen, &_key, &_klen);
    check(_klen > 0, "Key length of zero not allowed.");

    /* We only want to hit this codepath within the same database, otherwise
     * weird stuff happens. Like fires and stuff.
     */
    if (bucket != NULL) {
        /* Flag the transaction as dirty. */
        tx->dirty = 1;
        return _ol_reallocate_bucket(db, bucket, value, vsize);
    }

    /* Looks like we don't have an old hash */
    ol_bucket *new_bucket = calloc(1, sizeof(ol_bucket));
    if (new_bucket == NULL)
        return OL_FAILURE;

    /* Copy the sanitized key into the new bucket. memcpy replaces the old
     * strncpy: the length is already known, and strncpy's return value is
     * always its first argument, so checking it was dead code. */
    new_bucket->key = malloc(_klen + 1);
    if (new_bucket->key == NULL) {
        free(new_bucket);
        return OL_FAILURE;
    }
    memcpy(new_bucket->key, _key, _klen);
    new_bucket->key[_klen] = '\0';

    new_bucket->klen = _klen;
    new_bucket->original_size = vsize;

    /* Compute the new position of the data in the values file: */
    const size_t new_offset = db->val_size;

    if (db->state != OL_S_STARTUP) {
        unsigned char *new_data_ptr = NULL;

        if (db->is_enabled(OL_F_LZ4, &db->feature_set)) {
            /* Compress using LZ4 if enabled. maxoutsize is the worst-case
             * compressed size, so the values file is grown by that much. */
            int maxoutsize = LZ4_compressBound(vsize);
            _ol_ensure_values_file_size(db, maxoutsize);
            new_data_ptr = db->values + db->val_size;
            memset(new_data_ptr, '\0', maxoutsize);

            size_t cmsize = (size_t)LZ4_compress((char*)value, (char*)new_data_ptr,
                                                 (int)vsize);
            if (cmsize == 0) {
                /* Compression failed; free the bucket AND its key (the key
                 * was previously leaked here). */
                free(new_bucket->key);
                free(new_bucket);
                return OL_FAILURE;
            }

            new_bucket->data_size = cmsize;
        } else {
            new_bucket->data_size = vsize;
            _ol_ensure_values_file_size(db, new_bucket->data_size);
            new_data_ptr = db->values + db->val_size;
            memset(new_data_ptr, '\0', new_bucket->data_size);

            /* memcpy cannot fail and always returns its destination; the old
             * return-value check here was dead code. */
            memcpy(new_data_ptr, value, vsize);
        }
    } else {
        /* We still need to set the data size, but not the actual data. */
        if (db->is_enabled(OL_F_LZ4, &db->feature_set)) {
            /* Since LZ4_compressBound only provides the worst case scenario
             * and not what the data actually compressed to (we're replaying
             * the AOL file, remember?) we have to compress it again and grab
             * the amount of bytes processed.
             * TODO: This is dumb. Make a function that just sets the bucket size.
             * This new mythical function should also handle setting the data_offset
             * of the bucket.
             */
            int maxoutsize = LZ4_compressBound(vsize);
            char tmp_data[maxoutsize];
            /* Don't need to memset tmp_data because the contents are thrown
             * away; only the compressed size matters here. */

            size_t cmsize = (size_t)LZ4_compress((char *)value, (char *)tmp_data,
                                                 (int)vsize);
            new_bucket->data_size = cmsize;
        } else {
            new_bucket->data_size = vsize;
        }
    }

    /* Set the offset of the bucket before we increment it offset globally. */
    new_bucket->data_offset = new_offset;
    /* Remember to increment the tracked data size of the DB. */
    db->val_size += new_bucket->data_size;

    unsigned int bucket_max = ol_ht_bucket_max(db->cur_ht_size);
    /* TODO: rehash this shit at 80% */
    if (db->rcrd_cnt > 0 && db->rcrd_cnt == bucket_max) {
        debug("Record count is now %i; growing hash table.", db->rcrd_cnt);
        ret = _ol_grow_and_rehash_db(db);
        if (ret > 0) {
            ol_log_msg(LOG_ERR, "Problem rehashing DB. Error code: %i", ret);
            /* Free the key too (previously leaked on this path). */
            free(new_bucket->key);
            free(new_bucket);
            return OL_FAILURE;
        }
    }

    uint32_t hash;
    MurmurHash3_x86_32(_key, _klen, DEVILS_SEED, &hash);
    ret = _ol_set_bucket(db, new_bucket, hash);

    /* NOTE(review): a failed insert is only logged here and the function
     * still returns OL_SUCCESS (and may write the AOL entry below) — confirm
     * whether callers rely on this, as it looks like it should fail. */
    if(ret > 0)
        ol_log_msg(LOG_ERR, "Problem inserting item: Error code: %i", ret);

    if(db->is_enabled(OL_F_APPENDONLY, &db->feature_set) &&
            db->state != OL_S_STARTUP) {
        ol_aol_write_cmd(db, "JAR", new_bucket);
    }

    /* Flag the transaction as dirty. */
    tx->dirty = 1;

    return OL_SUCCESS;

error:
    return OL_FAILURE;
}
Example #2
0
/* Overwrites the value stored in an existing bucket.
 *
 * If the new (possibly LZ4-compressed) value is guaranteed to fit in the
 * bucket's current slot in the values file, it is written in place; otherwise
 * the values file is extended and the value is appended at the end. During
 * AOL replay (db->state == OL_S_STARTUP) the values file is not written, but
 * the bucket metadata (sizes, offset) is still updated.
 *
 * Returns 0 on success, non-zero on failure.
 */
int _ol_reallocate_bucket(ol_database *db, ol_bucket *bucket,
                          const unsigned char *value, size_t vsize) {
    debug("Reallocating bucket.");

    unsigned char *old_data_ptr = db->values + bucket->data_offset;
    /* Clear out the old data in the file (skipped during startup replay,
     * where the values file must not be touched). */
    if (db->state != OL_S_STARTUP && bucket->data_size > 0)
        memset(old_data_ptr, '\0', bucket->data_size);
    /* Default: append at the current end of the values file. The in-place
     * branches below override this. */
    size_t new_offset = db->val_size;
    unsigned char *new_data_ptr = NULL;

    /* Compress using LZ4 if enabled */
    size_t cmsize = 0;
    int extended_value_area = 0;
    if (db->is_enabled(OL_F_LZ4, &db->feature_set)) {
        /* Worst-case compressed size for vsize bytes of input. */
        const int maxoutsize = LZ4_compressBound(vsize);
        /* Cast avoids a signed/unsigned comparison; maxoutsize is >= 0. */
        if ((size_t)maxoutsize <= bucket->data_size) {
            /* We don't need to put this value at the end of the file if the
             * new value is small enough. */
            new_data_ptr = old_data_ptr;
            new_offset = bucket->data_offset;
        } else {
            _ol_ensure_values_file_size(db, maxoutsize);
            extended_value_area = 1;
            new_data_ptr = db->values + db->val_size;
        }

        if (db->state != OL_S_STARTUP) {
            cmsize = (size_t)LZ4_compress((char*)value, (char*)new_data_ptr,
                                          (int)vsize);
        } else {
            /* We're starting up, so we don't want to actually write to the
             * values file. Compress into a throwaway buffer just to learn the
             * compressed size. (Reuses maxoutsize computed above instead of
             * recomputing and shadowing it.) */
            char tmp_data[maxoutsize];
            cmsize = (size_t)LZ4_compress((char *)value, (char *)tmp_data,
                                          (int)vsize);
        }

        if (cmsize == 0) {
            /* Compression failed (olt_jar checks this; previously this
             * function silently recorded data_size = 0). The old data has
             * already been cleared, but the bucket metadata is untouched. */
            return 4;
        }
    } else {
        if (vsize <= bucket->data_size) {
            /* We don't need to put this value at the end of the file if the
             * new value is small enough. */
            new_data_ptr = old_data_ptr;
            new_offset = bucket->data_offset;
        } else {
            _ol_ensure_values_file_size(db, vsize);
            extended_value_area = 1;
            new_data_ptr = db->values + db->val_size;
        }
        if (db->state != OL_S_STARTUP) {
            /* Like above, avoid writing to the values file on startup.
             * memcpy always returns its destination, so the old
             * `memcpy(...) != new_data_ptr` failure check was unreachable. */
            memcpy(new_data_ptr, value, vsize);
        }
    }

    if (bucket->expiration != NULL) {
        free(bucket->expiration);
        bucket->expiration = NULL;
    }

    /* Set original_size regardless of lz4 compression. This ensures we always
     * have something to write to the AOL. */
    bucket->original_size = vsize;
    if(db->is_enabled(OL_F_LZ4, &db->feature_set)) {
        bucket->data_size = cmsize;
    } else {
        bucket->data_size = vsize;
    }
    bucket->data_offset = new_offset;

    /* Remember to increment the tracked data size of the DB, but only when
     * the value was appended (in-place rewrites reuse existing space). */
    if (extended_value_area)
        db->val_size += bucket->data_size;

    if(db->is_enabled(OL_F_APPENDONLY, &db->feature_set) && db->state != OL_S_STARTUP) {
        ol_aol_write_cmd(db, "JAR", bucket);
    }

    return 0;
}