/* Transaction-aware fetch. Looks the key up in the transaction's scratch
 * database first and falls back to the parent database on a miss. If the
 * bucket has expired it is deleted through the transaction (keeping the
 * delete ACID) and the call reports a miss. Passing data == NULL turns the
 * call into a pure existence check. Returns OL_SUCCESS or OL_FAILURE. */
int olt_unjar(ol_transaction *tx, const char *key, size_t klen, unsigned char **data, size_t *dsize) {
    char canonical_key[KEY_SIZE] = {'\0'};
    size_t canonical_klen = 0;
    ol_database *source_db = NULL;

    ol_bucket *bucket = ol_get_bucket(tx->transaction_db, key, klen, &canonical_key, &canonical_klen);
    check(canonical_klen > 0, "Key length of zero not allowed.");

    if (bucket != NULL) {
        source_db = tx->transaction_db;
    } else {
        /* Fall through to the parent db: */
        bucket = ol_get_bucket(tx->parent_db, key, klen, &canonical_key, &canonical_klen);
        if (bucket != NULL)
            source_db = tx->parent_db;
    }

    if (bucket == NULL) {
        /* TODO: Set error code here (could not find key) */
        return OL_FAILURE;
    }

    if (_has_bucket_expired(bucket)) {
        /* It's dead, get rid of it. We go through the transaction here
         * because it's a transaction — the delete has to stay ACID. */
        check(olt_scoop(tx, key, klen) == 0, "Scoop failed");
        /* TODO: Set error code here (could not find key) */
        return OL_FAILURE;
    }

    /* Caller only wanted to know the key exists. */
    if (data == NULL)
        return OL_SUCCESS;

    const int rc = _ol_get_value_from_bucket(source_db, bucket, data, dsize);
    check(rc == 0, "Could not retrieve value from bucket.");

    /* Key found, tell somebody. */
    return OL_SUCCESS;

error:
    /* TODO: Set error code here (generic error? There are a couple failure modes here.) */
    return OL_FAILURE;
}
/* Returns the expiration time of `key`, or NULL when the key does not
 * exist, has no expiration set, or has already expired (in which case the
 * bucket is deleted via ol_scoop). The returned pointer aliases the
 * bucket's own expiration struct — callers must not free it and must not
 * use it after the bucket is deleted. */
struct tm *ol_sniff(ol_database *db, const char *key, size_t klen) {
    char _key[KEY_SIZE] = {'\0'};
    size_t _klen = 0;
    ol_bucket *bucket = ol_get_bucket(db, key, klen, &_key, &_klen);
    check_warn(_klen > 0, "Key length of zero not allowed.");

    if (bucket != NULL && bucket->expiration != NULL) {
        if (!_has_bucket_expired(bucket)) {
            return bucket->expiration;
        } else {
            /* It's dead, get rid of it.
             * Fix: the check() call was missing its statement terminator —
             * every other call site in this file terminates it. */
            check(ol_scoop(db, key, klen) == 0, "Could not delete a bucket!");
        }
    }

error:
    return NULL;
}
/* Compare-and-swap: replaces the value stored under `key` with `value`
 * only if the currently stored value equals `ovalue`. When LZ4 is enabled
 * the stored value is decompressed before comparison. Returns the result
 * of ol_jar on a successful swap, OL_FAILURE otherwise (missing key, size
 * mismatch, value mismatch, or decompression error). */
int ol_cas(ol_database *db, const char *key, const size_t klen,
           unsigned char *value, size_t vsize,
           const unsigned char *ovalue, const size_t ovsize) {
    char _key[KEY_SIZE] = {'\0'};
    size_t _klen = 0;

    ol_bucket *bucket = ol_get_bucket(db, key, klen, &_key, &_klen);
    check(_klen > 0, "Key length of zero not allowed.");

    if (bucket == NULL)
        return OL_FAILURE;

    /* Quick fail if the stored size and the expected old size differ. */
    if (bucket->original_size != ovsize)
        return OL_FAILURE;

    /* ATOMIC, GOOOO! */
    const unsigned char *stored = db->values + bucket->data_offset;

    int old_value_matches = 0;
    if (db->is_enabled(OL_F_LZ4, &db->feature_set)) {
        /* Stored bytes are compressed; inflate before comparing. */
        char inflated[bucket->original_size];
        memset(inflated, '\0', bucket->original_size);

        int consumed = 0;
        consumed = LZ4_decompress_fast((char *)stored, (char *)inflated, bucket->original_size);
        check(consumed == bucket->data_size, "Could not decompress data.");

        old_value_matches = (memcmp(inflated, ovalue, ovsize) == 0);
    } else {
        old_value_matches = (memcmp(stored, ovalue, ovsize) == 0);
    }

    if (old_value_matches)
        return ol_jar(db, key, klen, value, vsize);

    return OL_FAILURE;

error:
    return OL_FAILURE;
}
/* Sets (or replaces) the expiration date of `key` within a transaction.
 * If the key exists only in the parent database, the bucket is first
 * copied into the transaction db and the COPY is what gets spoiled, so
 * the parent stays untouched until commit.
 * Returns OL_SUCCESS when the expiration was recorded, OL_FAILURE on a
 * missing key or allocation failure.
 *
 * Fixes in this revision:
 *  - `time(¤t)` / `gmtime_r(¤t, ...)` were mojibake for
 *    `&current` (HTML-entity corruption) and did not compile under DEBUG.
 *  - The shallow memcpy of the parent bucket aliased the parent's
 *    expiration pointer (double-free hazard); the copy now detaches it.
 *  - The expiration is applied to the transaction-local copy (as the
 *    original comment intended), not the parent's bucket.
 *  - Allocation failures are now checked instead of dereferencing NULL. */
int olt_spoil(ol_transaction *tx, const char *key, size_t klen, struct tm *expiration_date) {
    char _key[KEY_SIZE] = {'\0'};
    size_t _klen = 0;
    ol_database *operating_db = tx->transaction_db;

    ol_bucket *bucket = ol_get_bucket(operating_db, key, klen, &_key, &_klen);
    check(_klen > 0, "Key length of zero not allowed.");

    if (bucket == NULL && tx->parent_db != NULL) {
        /* Transaction DB doesn't have this key, but the parent might. */
        operating_db = tx->parent_db;
        bucket = ol_get_bucket(operating_db, key, klen, &_key, &_klen);
        if (bucket != NULL) {
            /* Copy that value into our current transaction db, and then
             * spoil the copy. */
            uint32_t hash;
            MurmurHash3_x86_32(_key, _klen, DEVILS_SEED, &hash);

            ol_bucket *copied = malloc(sizeof(ol_bucket));
            check_mem(copied);
            memcpy(copied, bucket, sizeof(ol_bucket));
            copied->next = NULL;
            /* The shallow copy aliased the parent's expiration struct;
             * detach it so the two dbs never free the same pointer. A
             * fresh struct is allocated below. */
            copied->expiration = NULL;

            copied->key = malloc(_klen + 1);
            if (copied->key == NULL) {
                free(copied);
                goto error;
            }
            copied->key[_klen] = '\0';
            memcpy(copied->key, bucket->key, _klen);

            _ol_set_bucket_no_incr(tx->transaction_db, copied, hash);

            /* Spoil the transaction-local copy, not the parent's bucket. */
            bucket = copied;
            operating_db = tx->transaction_db;
        }
    }

    if (bucket != NULL) {
        if (bucket->expiration == NULL) {
            bucket->expiration = malloc(sizeof(struct tm));
            check_mem(bucket->expiration);
        } else {
            debug("Hmmm, bucket->expiration wasn't null.");
        }

        memcpy(bucket->expiration, expiration_date, sizeof(struct tm));
        debug("New expiration time: %ld", (long)mktime(bucket->expiration));

#ifdef DEBUG
        struct tm utctime;
        time_t current;
        /* So dumb */
        time(&current);
        gmtime_r(&current, &utctime);
        current = timegm(&utctime);
        debug("Current time: %ld", (long)current);
#endif

        if (operating_db->is_enabled(OL_F_APPENDONLY, &operating_db->feature_set)
                && operating_db->state != OL_S_STARTUP) {
            ol_aol_write_cmd(operating_db, "SPOIL", bucket);
        }

        /* Flag the transaction as dirty. */
        tx->dirty = 1;
        return OL_SUCCESS;
    }

    return OL_FAILURE;

error:
    return OL_FAILURE;
}
/* Transaction-aware insert/update: stores `value` under `key` inside the
 * transaction's scratch database. An existing transaction-local bucket is
 * updated in place; otherwise a fresh bucket is created, its value written
 * (LZ4-compressed when enabled) into the values file, and the bucket
 * inserted into the hash table (growing it when full). During AOL replay
 * (OL_S_STARTUP) only the bucket metadata is set, not the data itself.
 * Returns OL_SUCCESS or OL_FAILURE.
 *
 * Fixes in this revision:
 *  - new_bucket->key was leaked on the compression-failure, rehash-failure
 *    and key-allocation-failure paths.
 *  - Removed dead comparisons on the return values of strncpy/memcpy
 *    (both always return their destination argument). */
int olt_jar(ol_transaction *tx, const char *key, size_t klen, const unsigned char *value, size_t vsize) {
    int ret;
    char _key[KEY_SIZE] = {'\0'};
    size_t _klen = 0;
    ol_database *db = tx->transaction_db;

    ol_bucket *bucket = ol_get_bucket(db, key, klen, &_key, &_klen);
    check(_klen > 0, "Key length of zero not allowed.");

    /* We only want to hit this codepath within the same database, otherwise
     * weird stuff happens. Like fires and stuff. */
    if (bucket != NULL) {
        /* Flag the transaction as dirty. */
        tx->dirty = 1;
        return _ol_reallocate_bucket(db, bucket, value, vsize);
    }

    /* Looks like we don't have an old hash */
    ol_bucket *new_bucket = calloc(1, sizeof(ol_bucket));
    if (new_bucket == NULL)
        return OL_FAILURE;

    /* copy _key into new bucket */
    new_bucket->key = malloc(_klen + 1);
    if (new_bucket->key == NULL) {
        free(new_bucket);
        return OL_FAILURE;
    }
    memcpy(new_bucket->key, _key, _klen);
    new_bucket->key[_klen] = '\0';

    new_bucket->klen = _klen;
    new_bucket->original_size = vsize;

    /* Compute the new position of the data in the values file: */
    const size_t new_offset = db->val_size;

    if (db->state != OL_S_STARTUP) {
        unsigned char *new_data_ptr = NULL;

        if (db->is_enabled(OL_F_LZ4, &db->feature_set)) {
            /* Compress using LZ4 if enabled */
            int maxoutsize = LZ4_compressBound(vsize);
            _ol_ensure_values_file_size(db, maxoutsize);
            new_data_ptr = db->values + db->val_size;
            memset(new_data_ptr, '\0', maxoutsize);

            /* All these f*****g casts */
            size_t cmsize = (size_t)LZ4_compress((char *)value, (char *)new_data_ptr, (int)vsize);
            if (cmsize == 0) {
                /* Compression failed; free everything we allocated. */
                free(new_bucket->key);
                free(new_bucket);
                return OL_FAILURE;
            }
            new_bucket->data_size = cmsize;
        } else {
            new_bucket->data_size = vsize;
            _ol_ensure_values_file_size(db, new_bucket->data_size);
            new_data_ptr = db->values + db->val_size;
            memset(new_data_ptr, '\0', new_bucket->data_size);
            memcpy(new_data_ptr, value, vsize);
        }
    } else {
        /* We still need to set the data size, but not the actual data. */
        if (db->is_enabled(OL_F_LZ4, &db->feature_set)) {
            /* Since LZ4_compressBound only provides the worst case scenario
             * and not what the data actually compressed to (we're replaying
             * the AOL file, remember?) we have to compress it again and grab
             * the amount of bytes processed.
             * TODO: This is dumb. Make a function that just sets the bucket size.
             * This new mythical function should also handle setting the data_offset
             * of the bucket. */
            int maxoutsize = LZ4_compressBound(vsize);
            char tmp_data[maxoutsize];
            /* Don't need to memset tmp_data because I don't care about it. */
            size_t cmsize = (size_t)LZ4_compress((char *)value, (char *)tmp_data, (int)vsize);
            new_bucket->data_size = cmsize;
        } else {
            new_bucket->data_size = vsize;
        }
    }

    /* Set the offset of the bucket before we increment it offset globally. */
    new_bucket->data_offset = new_offset;
    /* Remember to increment the tracked data size of the DB. */
    db->val_size += new_bucket->data_size;

    unsigned int bucket_max = ol_ht_bucket_max(db->cur_ht_size);
    /* TODO: rehash this shit at 80% */
    if (db->rcrd_cnt > 0 && db->rcrd_cnt == bucket_max) {
        debug("Record count is now %i; growing hash table.", db->rcrd_cnt);
        ret = _ol_grow_and_rehash_db(db);
        if (ret > 0) {
            ol_log_msg(LOG_ERR, "Problem rehashing DB. Error code: %i", ret);
            free(new_bucket->key);
            free(new_bucket);
            return OL_FAILURE;
        }
    }

    uint32_t hash;
    MurmurHash3_x86_32(_key, _klen, DEVILS_SEED, &hash);
    ret = _ol_set_bucket(db, new_bucket, hash);
    /* NOTE(review): insertion failure is only logged and the call still
     * returns OL_SUCCESS — preserved from the original; confirm intent. */
    if (ret > 0)
        ol_log_msg(LOG_ERR, "Problem inserting item: Error code: %i", ret);

    if (db->is_enabled(OL_F_APPENDONLY, &db->feature_set) && db->state != OL_S_STARTUP) {
        ol_aol_write_cmd(db, "JAR", new_bucket);
    }

    /* Flag the transaction as dirty. */
    tx->dirty = 1;
    return OL_SUCCESS;

error:
    return OL_FAILURE;
}