Example #1
/**
 * @brief      Read multiple records from the cluster in a single batch request.
 *
 * @param[in]  argc  Number of Ruby arguments passed in
 * @param      argv  Ruby arguments: an array of keys and an optional options hash
 * @param[in]  self  The client object the method is called on
 *
 * @return     Ruby Hash keyed by the requested keys; values are filled in by the
 *             batch callback and remain false for keys that were not found.
 */
static VALUE batch_read(int argc, VALUE * argv, VALUE self) {
  rb_aero_TIMED(tm);

  as_error err;
  aerospike * as = rb_aero_CLIENT;

  VALUE keys;
  VALUE options;

  rb_scan_args(argc, argv, "11", &keys, &options);

  long keys_len = rb_ary_len_long(keys);

  as_batch batch;
  as_batch_inita(&batch, keys_len);

  VALUE return_data = rb_hash_new();

  for (long i = 0; i < keys_len; ++i) {
    VALUE element = rb_ary_entry(keys, i);

    // set into hash for return values
    rb_hash_aset(return_data, element, Qfalse);

    VALUE tmp = rb_funcall(element, rb_intern("namespace"), 0);
    char * c_namespace = StringValueCStr( tmp );

    tmp = rb_funcall(element, rb_intern("set"), 0);
    char * c_set = StringValueCStr( tmp );

    tmp = rb_funcall(element, rb_intern("key"), 0);

    if ( TYPE(tmp) != T_FIXNUM ) {
      char * c_key = StringValueCStr( tmp );
      as_key_init(as_batch_keyat(&batch,i), c_namespace, c_set, c_key);
    }
    else {
      as_key_init_int64(as_batch_keyat(&batch,i), c_namespace, c_set, FIX2LONG(tmp));
    }
  }

  if ( aerospike_batch_get(as, &err, NULL, &batch, batch_read_callback, return_data) != AEROSPIKE_OK ) {
    as_batch_destroy(&batch);
    raise_as_error(err);
  }

  as_batch_destroy(&batch);

  rb_aero_logger(AS_LOG_LEVEL_DEBUG, &tm, 1, rb_str_new2("[Client][batch_read] success"));

  return return_data;
}
Example #2
/**
 *******************************************************************************************************
 * This function will get a batch of records from the Aerospike DB.
 *
 * @param err                   as_error object
 * @param self                  AerospikeClient object
 * @param py_keys               The list of keys
 * @param batch_policy_p        as_policy_batch object
 * @param filter_bins           Array of bin names to select
 * @param bins_size             Number of bin names in filter_bins
 *
 * Returns a list of batch results on success, otherwise NULL.
 *******************************************************************************************************
 */
static PyObject * batch_select_aerospike_batch_get(as_error *err, AerospikeClient * self, PyObject *py_keys, as_policy_batch * batch_policy_p, char **filter_bins, Py_ssize_t bins_size)
{
	PyObject * py_recs = NULL;

	as_batch batch;
	bool batch_initialised = false;

	LocalData data;
	data.client = self;
	// Convert python keys list to as_key ** and add it to as_batch.keys
	// keys can be specified in PyList or PyTuple
	if ( py_keys != NULL && PyList_Check(py_keys) ) {
		Py_ssize_t size = PyList_Size(py_keys);

		py_recs = PyList_New(size);
		data.py_recs = py_recs;
		as_batch_init(&batch, size);

		// Batch object initialised
		batch_initialised = true;

		for ( int i = 0; i < size; i++ ) {

			PyObject * py_key = PyList_GetItem(py_keys, i);

			if ( !PyTuple_Check(py_key) ){
				as_error_update(err, AEROSPIKE_ERR_PARAM, "Key should be a tuple.");
				goto CLEANUP;
			}

			pyobject_to_key(err, py_key, as_batch_keyat(&batch, i));

			if ( err->code != AEROSPIKE_OK ) {
				goto CLEANUP;
			}
		}
	}
	else if ( py_keys != NULL && PyTuple_Check(py_keys) ) {
		Py_ssize_t size = PyTuple_Size(py_keys);

		py_recs = PyList_New(size);
		data.py_recs = py_recs;
		as_batch_init(&batch, size);
		// Batch object initialised
		batch_initialised = true;

		for ( int i = 0; i < size; i++ ) {
			PyObject * py_key = PyTuple_GetItem(py_keys, i);

			if ( !PyTuple_Check(py_key) ){
				as_error_update(err, AEROSPIKE_ERR_PARAM, "Key should be a tuple.");
				goto CLEANUP;
			}

			pyobject_to_key(err, py_key, as_batch_keyat(&batch, i));

			if ( err->code != AEROSPIKE_OK ) {
				goto CLEANUP;
			}
		}
	}
	else {
		as_error_update(err, AEROSPIKE_ERR_PARAM, "Keys should be specified as a list or tuple.");
		goto CLEANUP;
	}

	// Invoke C-client API
	Py_BEGIN_ALLOW_THREADS
	aerospike_batch_get_bins(self->as, err, batch_policy_p,
		&batch, (const char **) filter_bins, bins_size,
		(aerospike_batch_read_callback) batch_select_cb,
		&data);
	Py_END_ALLOW_THREADS

CLEANUP:
	if (batch_initialised == true){
		// We should destroy the batch object since we used 'as_batch_init' for initialisation.
		// Also, pyobject_to_key does strdup() in the Unicode case, so destroying the keys
		// is necessary.
		as_batch_destroy(&batch);
	}

	return py_recs;
}
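Example #2 releases the GIL around aerospike_batch_get_bins, so the callback it registers (batch_select_cb, not shown above) runs without the GIL held and must reacquire it before touching any Python objects. The sketch below is a hypothetical, simplified stand-in: it assumes LocalData carries only the two fields assigned above (client and py_recs), and it merely marks which result slots were found instead of converting each record.

// Hypothetical, simplified stand-in for batch_select_cb. The real callback
// converts each as_record into a Python object; this sketch only records which
// slots were found, to keep the GIL handling and in-order indexing visible.
typedef struct {
	AerospikeClient * client;
	PyObject * py_recs;     // PyList sized to the number of keys
} LocalData;

static bool batch_select_cb_sketch(const as_batch_read * results, uint32_t n, void * udata)
{
	LocalData * data = (LocalData *) udata;

	// The caller released the GIL (Py_BEGIN_ALLOW_THREADS), so it must be
	// reacquired before any Python objects are created or modified.
	PyGILState_STATE gstate = PyGILState_Ensure();

	for (uint32_t i = 0; i < n; i++) {
		PyObject * item = (results[i].result == AEROSPIKE_OK) ? Py_True : Py_None;
		Py_INCREF(item);
		// PyList_SetItem steals the reference to item.
		PyList_SetItem(data->py_recs, (Py_ssize_t) i, item);
	}

	PyGILState_Release(gstate);
	return true;
}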
Example #3
static as_status batch_read(
		aerospike * as, as_error * err, const as_policy_batch * policy,
		const as_batch * batch,
		aerospike_batch_read_callback callback, void * udata,
		bool get_bin_data
		)
{
	as_error_reset(err);

	// Lazily initialize batch machinery:
	cl_cluster_batch_init(as->cluster);

	uint32_t n = batch->keys.size;
	as_batch_read* results = (as_batch_read*)alloca(sizeof(as_batch_read) * n);

	if (! results) {
		return as_error_update(err, AEROSPIKE_ERR_CLIENT,
				"failed results array allocation");
	}

	cf_digest* digests = (cf_digest*)alloca(sizeof(cf_digest) * n);

	if (! digests) {
		return as_error_update(err, AEROSPIKE_ERR_CLIENT,
				"failed digests array allocation");
	}

	// Because we're wrapping the old functionality, we only support a batch
	// with all keys in the same namespace.
	char* ns = batch->keys.entries[0].ns;

	for (uint32_t i = 0; i < n; i++) {
		if (strcmp(ns, batch->keys.entries[i].ns) != 0) {
			// Don't need to destroy results' records since they won't have any
			// associated allocations yet.
			return as_error_update(err, AEROSPIKE_ERR_PARAM,
					"batch keys must all be in the same namespace");
		}

		as_batch_read * p_r = &results[i];

		p_r->result = -1; // TODO - make an 'undefined' error
		as_record_init(&p_r->record, 0);
		p_r->key = (const as_key*)as_batch_keyat(batch, i);

		memcpy(&digests[i], as_key_digest((as_key*)p_r->key)->value,
				AS_DIGEST_VALUE_SIZE);
	}

	batch_bridge bridge;
	bridge.as = as;
	bridge.results = results;
	bridge.n = n;

	cl_rv rc = citrusleaf_batch_read(as->cluster, ns, digests, n, NULL, 0,
			get_bin_data, cl_batch_cb, &bridge);

	callback(results, n, udata);

	for (uint32_t i = 0; i < n; i++) {
		as_record_destroy(&results[i].record);
	}

	return as_error_fromrc(err, rc);
}
Example #4
/**
 *********************************************************************
 * This function will invoke aerospike_batch_get_bins to get filtered
 * bins from all the records in a batch.
 *
 * @param self                    AerospikeClient object
 * @param py_keys                 List of keys passed on by user
 * @param py_bins                 List of filter bins passed on by user
 * @param py_policy               User specified Policy dictionary
 *
 * Returns a dictionary of batch results on success, otherwise NULL.
 *********************************************************************
 **/
static
PyObject * AerospikeClient_Select_Many_Invoke(
		AerospikeClient * self,
		PyObject * py_keys, PyObject * py_bins, PyObject * py_policy)
{
	// Python Return Value
	PyObject * py_recs = PyDict_New();

	// Aerospike Client Arguments
	as_error err;
	as_batch batch;
	as_policy_batch policy;
	as_policy_batch * batch_policy_p = NULL;
	Py_ssize_t bins_size = 0;
	char **filter_bins = NULL;

	// Pool of Unicode objects
	UnicodePyObjects u_objs;
	u_objs.size = 0;
	int i = 0;

	// Initialisation flags
	bool batch_initialised = false;

	// Initialize error
	as_error_init(&err);

	if (!self || !self->as) {
		as_error_update(&err, AEROSPIKE_ERR_PARAM, "Invalid aerospike object");
		goto CLEANUP;
	}

	if (!self->is_conn_16) {
		as_error_update(&err, AEROSPIKE_ERR_CLUSTER, "No connection to aerospike cluster");
		goto CLEANUP;
	}

	// Convert python keys list to as_key ** and add it to as_batch.keys
	// keys can be specified in PyList or PyTuple
	if ( py_keys != NULL && PyList_Check(py_keys) ) {
		Py_ssize_t size = PyList_Size(py_keys);

		as_batch_init(&batch, size);
		// Batch object initialised
		batch_initialised = true;

		for ( i = 0; i < size; i++ ) {

			PyObject * py_key = PyList_GetItem(py_keys, i);

			if ( !PyTuple_Check(py_key) ){
				as_error_update(&err, AEROSPIKE_ERR_PARAM, "Key should be a tuple.");
				goto CLEANUP;
			}

			pyobject_to_key(&err, py_key, as_batch_keyat(&batch, i));

			if ( err.code != AEROSPIKE_OK ) {
				goto CLEANUP;
			}
		}
	}
	else if ( py_keys != NULL && PyTuple_Check(py_keys) ) {
		Py_ssize_t size = PyTuple_Size(py_keys);

		as_batch_init(&batch, size);
		// Batch object initialised.
		batch_initialised = true;

		for ( i = 0; i < size; i++ ) {
			PyObject * py_key = PyTuple_GetItem(py_keys, i);

			if ( !PyTuple_Check(py_key) ){
				as_error_update(&err, AEROSPIKE_ERR_PARAM, "Key should be a tuple.");
				goto CLEANUP;
			}

			pyobject_to_key(&err, py_key, as_batch_keyat(&batch, i));

			if ( err.code != AEROSPIKE_OK ) {
				goto CLEANUP;
			}
		}
	}
	else {
		as_error_update(&err, AEROSPIKE_ERR_PARAM, "Keys should be specified as a list or tuple.");
		goto CLEANUP;
	}

	// Check the type of bins and get its size,
	// i.e. the number of bins provided
	if (py_bins != NULL && PyList_Check(py_bins)){
		bins_size    = PyList_Size(py_bins);
	}
	else if (py_bins != NULL && PyTuple_Check(py_bins)){
		bins_size    = PyTuple_Size(py_bins);
	}
	else {
		as_error_update(&err, AEROSPIKE_ERR_PARAM, "Filter bins should be specified as a list or tuple.");
		goto CLEANUP;
	}

	filter_bins = (char **)malloc(sizeof(char *) * bins_size);

	for (i = 0; i < bins_size; i++){
		PyObject *py_bin = NULL;
		if(PyList_Check(py_bins)){
			py_bin = PyList_GetItem(py_bins, i);
		}
		if(PyTuple_Check(py_bins)){
			py_bin = PyTuple_GetItem(py_bins, i);
		}
		if (PyUnicode_Check(py_bin)){
			// Store the unicode object into a pool.
			// It is DECREFed at a later stage, so there is no need to DECREF it here.
			filter_bins[i] = PyString_AsString(
					store_unicode_bins(&u_objs, PyUnicode_AsUTF8String(py_bin)));
		}
		else if (PyString_Check(py_bin)){
			filter_bins[i]    = PyString_AsString(py_bin);
		}
		else{
			as_error_update(&err, AEROSPIKE_ERR_PARAM, "Bin name should be a string or unicode string.");
			goto CLEANUP;
		}
	}

	// Convert python policy object to as_policy_batch
	pyobject_to_policy_batch(&err, py_policy, &policy, &batch_policy_p,
			&self->as->config.policies.batch);
	if ( err.code != AEROSPIKE_OK ) {
		goto CLEANUP;
	}

	// Invoke C-client API
	aerospike_batch_get_bins(self->as, &err, batch_policy_p,
		&batch, (const char **) filter_bins, bins_size,
		(aerospike_batch_read_callback) batch_select_cb,
		py_recs);

CLEANUP:

	if (filter_bins != NULL){
		free(filter_bins);
	}

	// DECREF all the Unicode objects stored in the pool
	for ( i = 0; i< u_objs.size; i++){
		Py_DECREF(u_objs.ob[i]);
	}

	if (batch_initialised == true){
		// We should destroy the batch object since we used 'as_batch_init' for initialisation.
		// Also, pyobject_to_key does strdup() in the Unicode case, so destroying the keys
		// is necessary.
		as_batch_destroy(&batch);
	}

	if ( err.code != AEROSPIKE_OK ) {
		PyObject * py_err = NULL;
		error_to_pyobject(&err, &py_err);
		PyObject *exception_type = raise_exception(&err);
		if(PyObject_HasAttrString(exception_type, "key")) {
			PyObject_SetAttrString(exception_type, "key", py_keys);
		} 
		if(PyObject_HasAttrString(exception_type, "bin")) {
			PyObject_SetAttrString(exception_type, "bin", Py_None);
		}
		PyErr_SetObject(exception_type, py_err);
		Py_DECREF(py_err);
		// The partially built result dictionary is not returned on error, so release it.
		Py_XDECREF(py_recs);
		return NULL;
	}
	return py_recs;
}
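Example #4 keeps the byte strings produced by PyUnicode_AsUTF8String alive in a small pool (u_objs) so that the char pointers handed to aerospike_batch_get_bins remain valid until CLEANUP, where the pool is DECREFed. The helper store_unicode_bins is not shown; the sketch below is one plausible shape consistent with that usage, where MAX_UNICODE_OBJECTS and the struct definition are assumptions (the real definitions live elsewhere in the module).

// Assumed shape of the Unicode pool; MAX_UNICODE_OBJECTS is a placeholder capacity.
#define MAX_UNICODE_OBJECTS 512

typedef struct {
	int size;
	PyObject * ob[MAX_UNICODE_OBJECTS];
} UnicodePyObjects;

// Sketch of store_unicode_bins: take ownership of the object returned by
// PyUnicode_AsUTF8String so the buffer returned by PyString_AsString stays
// valid until the pool is DECREFed in CLEANUP. Returns the object unchanged
// so the call can be nested as in the loop above.
static PyObject * store_unicode_bins(UnicodePyObjects * u, PyObject * py_uobj)
{
	if (py_uobj != NULL && u->size < MAX_UNICODE_OBJECTS) {
		u->ob[u->size++] = py_uobj;
	}
	return py_uobj;
}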
Example #5
int
main(int argc, char* argv[])
{
	// Parse command line arguments.
	if (! example_get_opts(argc, argv, EXAMPLE_MULTI_KEY_OPTS)) {
		exit(-1);
	}

	// Connect to the aerospike database cluster.
	aerospike as;
	example_connect_to_aerospike(&as);

	// Start clean.
	example_remove_test_records(&as);

	if (! insert_records(&as)) {
		cleanup(&as);
		exit(-1);
	}

	as_error err;

	// Make a batch of all the keys we inserted.
	as_batch batch;
	as_batch_inita(&batch, g_n_keys);

	uint32_t i;
	for (i = 0; i < g_n_keys; i++) {
		as_key_init_int64(as_batch_keyat(&batch, i), g_namespace, g_set,
				(int64_t)i);
	}

	// Check existence of these keys - they should all be there.
	if (aerospike_batch_exists(&as, &err, NULL, &batch, batch_read_cb, NULL) !=
			AEROSPIKE_OK) {
		LOG("aerospike_batch_exists() returned %d - %s", err.code, err.message);
		cleanup(&as);
		exit(-1);
	}

	LOG("batch exists call completed");

	// Get all of these keys - they should all be there.
	if (aerospike_batch_get(&as, &err, NULL, &batch, batch_read_cb, NULL) !=
			AEROSPIKE_OK) {
		LOG("aerospike_batch_get() returned %d - %s", err.code, err.message);
		cleanup(&as);
		exit(-1);
	}

	LOG("batch get call completed");

	// Delete some records in the middle.
	uint32_t n_to_delete = g_n_keys / 5;

	if (n_to_delete == 0) {
		n_to_delete = 1;
	}

	uint32_t n_start = g_n_keys / 2;

	if (n_start + n_to_delete > g_n_keys) {
		n_start = 0;
	}

	uint32_t n_end = n_start + n_to_delete;

	for (i = n_start; i < n_end; i++) {
		// No need to destroy a stack as_key object, if we only use
		// as_key_init_int64().
		as_key key;
		as_key_init_int64(&key, g_namespace, g_set, (int64_t)i);

		if (aerospike_key_remove(&as, &err, NULL, &key) != AEROSPIKE_OK) {
			LOG("aerospike_key_remove() returned %d - %s", err.code,
					err.message);
			cleanup(&as);
			exit(-1);
		}
	}

	LOG("deleted records %u ... %u", n_start, n_end - 1);

	// Check existence of these keys - some should not be found.
	if (aerospike_batch_exists(&as, &err, NULL, &batch, batch_read_cb, NULL) !=
			AEROSPIKE_OK) {
		LOG("aerospike_batch_exists() returned %d - %s", err.code, err.message);
		cleanup(&as);
		exit(-1);
	}

	LOG("second batch exists call completed");

	// Get all of these keys - some should not be found.
	if (aerospike_batch_get(&as, &err, NULL, &batch, batch_read_cb, NULL) !=
			AEROSPIKE_OK) {
		LOG("aerospike_batch_get() returned %d - %s", err.code, err.message);
		cleanup(&as);
		exit(-1);
	}

	LOG("second batch get call completed");

	// Cleanup and disconnect from the database cluster.
	cleanup(&as);

	LOG("batch get example successfully completed");

	return 0;
}
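Example #5 passes batch_read_cb to both aerospike_batch_exists and aerospike_batch_get, but the callback itself is not shown. Below is a minimal sketch assuming the standard aerospike_batch_read_callback signature; the real callback in the examples repository may print per-record detail. LOG is the logging macro already used above.

// Minimal sketch of the batch callback used above. It receives the whole
// results array in a single invocation, in the same order as the batch keys.
// udata is unused here.
static bool batch_read_cb(const as_batch_read* results, uint32_t n, void* udata)
{
	uint32_t n_found = 0;

	for (uint32_t i = 0; i < n; i++) {
		if (results[i].result == AEROSPIKE_OK) {
			n_found++;
		}
		else if (results[i].result != AEROSPIKE_ERR_RECORD_NOT_FOUND) {
			LOG("unexpected batch result %d for key index %u", results[i].result, i);
		}
	}

	LOG("found %u of %u batch records", n_found, n);
	return true;
}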