/*
 * Initialize new Timer
 *
 * @since 1.2.0
 *
 * Timers can be used to trigger recurring events or to implement timeouts.
 * The library will call the given block after the time interval passes.
 *
 * @param bucket [Bucket] the connection object
 * @param interval [Fixnum] the interval in microseconds
 * @param options [Hash]
 * @option options [Boolean] :periodic (false) set it to +true+ if the timer
 *   should fire repeatedly until it is canceled.
 *
 * @yieldparam [Timer] timer the current timer
 *
 * @example Create a regular timer for 0.5 seconds
 *   c.run do
 *     Couchbase::Timer.new(c, 500000) do
 *       puts "ding-dong"
 *     end
 *   end
 *
 * @example Create a periodic timer
 *   n = 10
 *   c.run do
 *     Couchbase::Timer.new(c, 500000, :periodic => true) do |tm|
 *       puts "#{n}"
 *       n -= 1
 *       tm.cancel if n.zero?
 *     end
 *   end
 *
 * @return [Couchbase::Timer]
 */
VALUE
cb_timer_init(int argc, VALUE *argv, VALUE self)
{
    struct timer_st *tm = DATA_PTR(self);
    VALUE bucket, opts, timeout, exc, cb;
    lcb_error_t err;

    rb_need_block();
    rb_scan_args(argc, argv, "21&", &bucket, &timeout, &opts, &cb);

    if (CLASS_OF(bucket) != cBucket) {
        rb_raise(rb_eTypeError, "wrong argument type (expected Couchbase::Bucket)");
    }
    tm->self = self;
    tm->callback = cb;
    tm->usec = NUM2ULONG(timeout);
    tm->bucket = DATA_PTR(bucket);
    if (opts != Qnil) {
        Check_Type(opts, T_HASH);
        tm->periodic = RTEST(rb_hash_aref(opts, sym_periodic));
    }
    tm->timer = lcb_timer_create(tm->bucket->handle, tm, tm->usec, tm->periodic,
                                 timer_callback, &err);
    exc = cb_check_error(err, "failed to attach the timer", Qnil);
    if (exc != Qnil) {
        rb_exc_raise(exc);
    }

    return self;
}
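/* Hedged sketch only: timer_callback, registered via lcb_timer_create() above,
 * is defined elsewhere in the extension.  Given the struct timer_st fields
 * used above and the libcouchbase 2.x lcb_timer_callback signature, it is
 * assumed to look roughly like this; the cancellation step for one-shot
 * timers is an assumption, not taken from this section. */
static void
timer_callback(lcb_timer_t timer, lcb_t instance, const void *cookie)
{
    struct timer_st *tm = (struct timer_st *)cookie;

    /* hand the Timer object to the user-supplied block */
    cb_proc_call(tm->callback, 1, tm->self);

    if (!tm->periodic) {
        /* one-shot timers are assumed to be destroyed after the first run */
        lcb_timer_destroy(instance, timer);
        tm->timer = NULL;
    }
}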
void
observe_callback(lcb_t handle, const void *cookie, lcb_error_t error,
                 const lcb_observe_resp_t *resp)
{
    struct context_st *ctx = (struct context_st *)cookie;
    struct bucket_st *bucket = ctx->bucket;
    VALUE key, res, *rv = ctx->rv;

    if (resp->v.v0.key) {
        key = STR_NEW((const char*)resp->v.v0.key, resp->v.v0.nkey);
        ctx->exception = cb_check_error(error, "failed to execute observe request", key);
        if (ctx->exception) {
            cb_gc_protect(bucket, ctx->exception);
        }
        res = rb_class_new_instance(0, NULL, cResult);
        rb_ivar_set(res, id_iv_completed, Qfalse);
        rb_ivar_set(res, id_iv_error, ctx->exception);
        rb_ivar_set(res, id_iv_operation, sym_observe);
        rb_ivar_set(res, id_iv_key, key);
        rb_ivar_set(res, id_iv_cas, ULL2NUM(resp->v.v0.cas));
        rb_ivar_set(res, id_iv_from_master, resp->v.v0.from_master ? Qtrue : Qfalse);
        rb_ivar_set(res, id_iv_time_to_persist, ULONG2NUM(resp->v.v0.ttp));
        rb_ivar_set(res, id_iv_time_to_replicate, ULONG2NUM(resp->v.v0.ttr));
        switch (resp->v.v0.status) {
        case LCB_OBSERVE_FOUND:
            rb_ivar_set(res, id_iv_status, sym_found);
            break;
        case LCB_OBSERVE_PERSISTED:
            rb_ivar_set(res, id_iv_status, sym_persisted);
            break;
        case LCB_OBSERVE_NOT_FOUND:
            rb_ivar_set(res, id_iv_status, sym_not_found);
            break;
        default:
            rb_ivar_set(res, id_iv_status, Qnil);
        }
        if (bucket->async) {    /* asynchronous */
            if (ctx->proc != Qnil) {
                cb_proc_call(ctx->proc, 1, res);
            }
        } else {                /* synchronous */
            if (NIL_P(ctx->exception)) {
                VALUE stats = rb_hash_aref(*rv, key);
                if (NIL_P(stats)) {
                    stats = rb_ary_new();
                    rb_hash_aset(*rv, key, stats);
                }
                rb_ary_push(stats, res);
            }
        }
    } else {
        if (bucket->async && ctx->proc != Qnil) {
            res = rb_class_new_instance(0, NULL, cResult);
            rb_ivar_set(res, id_iv_completed, Qtrue);
            cb_proc_call(ctx->proc, 1, res);
        }
        ctx->nqueries--;
        cb_gc_unprotect(bucket, ctx->proc);
    }
    (void)handle;
}
VALUE
cb_bucket_observe(int argc, VALUE *argv, VALUE self)
{
    struct cb_bucket_st *bucket = DATA_PTR(self);
    struct cb_context_st *ctx;
    VALUE rv, proc, exc;
    lcb_error_t err;
    struct cb_params_st params;

    if (!cb_bucket_connected_bang(bucket, cb_sym_observe)) {
        return Qnil;
    }

    memset(&params, 0, sizeof(struct cb_params_st));
    rb_scan_args(argc, argv, "0*&", &params.args, &proc);
    if (!bucket->async && proc != Qnil) {
        rb_raise(rb_eArgError, "synchronous mode doesn't support callbacks");
    }
    params.type = cb_cmd_observe;
    params.bucket = bucket;
    cb_params_build(&params);
    ctx = cb_context_alloc_common(bucket, proc, params.cmd.observe.num);
    err = lcb_observe(bucket->handle, (const void *)ctx,
                      params.cmd.observe.num, params.cmd.observe.ptr);
    cb_params_destroy(&params);
    exc = cb_check_error(err, "failed to schedule observe request", Qnil);
    if (exc != Qnil) {
        cb_context_free(ctx);
        rb_exc_raise(exc);
    }
    bucket->nbytes += params.npayload;
    if (bucket->async) {
        cb_maybe_do_loop(bucket);
        return Qnil;
    } else {
        if (ctx->nqueries > 0) {
            /* we have some operations pending */
            lcb_wait(bucket->handle);
        }
        exc = ctx->exception;
        rv = ctx->rv;
        cb_context_free(ctx);
        if (exc != Qnil) {
            rb_exc_raise(exc);
        }
        exc = bucket->exception;
        if (exc != Qnil) {
            bucket->exception = Qnil;
            rb_exc_raise(exc);
        }
        if (params.cmd.observe.num > 1 || params.cmd.observe.array) {
            return rv;  /* return as a hash {key => {}, ...} */
        } else {
            VALUE vv = Qnil;
            rb_hash_foreach(rv, cb_first_value_i, (VALUE)&vv);
            return vv;  /* return first value */
        }
    }
}
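/* Hedged sketch: cb_first_value_i(), used by the single-key return paths in
 * this file, is defined elsewhere.  Given how it is invoked through
 * rb_hash_foreach() with a VALUE* argument, it is assumed to capture the
 * first value and stop iterating, roughly like this. */
static int
cb_first_value_i(VALUE key, VALUE value, VALUE arg)
{
    VALUE *val = (VALUE *)arg;

    *val = value;       /* remember the first value ...    */
    (void)key;
    return ST_STOP;     /* ... and abort further iteration */
}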
void
arithmetic_callback(lcb_t handle, const void *cookie, lcb_error_t error,
                    const lcb_arithmetic_resp_t *resp)
{
    struct context_st *ctx = (struct context_st *)cookie;
    struct bucket_st *bucket = ctx->bucket;
    VALUE cas, key, val, *rv = ctx->rv, exc, res;
    ID o;

    ctx->nqueries--;
    key = STR_NEW((const char*)resp->v.v0.key, resp->v.v0.nkey);
    strip_key_prefix(bucket, key);

    cas = resp->v.v0.cas > 0 ? ULL2NUM(resp->v.v0.cas) : Qnil;
    o = ctx->arith > 0 ? sym_increment : sym_decrement;
    exc = cb_check_error(error, "failed to perform arithmetic operation", key);
    if (exc != Qnil) {
        rb_ivar_set(exc, id_iv_cas, cas);
        rb_ivar_set(exc, id_iv_operation, o);
        if (bucket->async) {
            if (bucket->on_error_proc != Qnil) {
                cb_proc_call(bucket->on_error_proc, 3, o, key, exc);
            } else {
                if (NIL_P(bucket->exception)) {
                    bucket->exception = exc;
                }
            }
        }
        if (NIL_P(ctx->exception)) {
            ctx->exception = cb_gc_protect(bucket, exc);
        }
    }
    val = ULL2NUM(resp->v.v0.value);
    if (bucket->async) {    /* asynchronous */
        if (ctx->proc != Qnil) {
            res = rb_class_new_instance(0, NULL, cResult);
            rb_ivar_set(res, id_iv_error, exc);
            rb_ivar_set(res, id_iv_operation, o);
            rb_ivar_set(res, id_iv_key, key);
            rb_ivar_set(res, id_iv_value, val);
            rb_ivar_set(res, id_iv_cas, cas);
            cb_proc_call(ctx->proc, 1, res);
        }
    } else {                /* synchronous */
        if (NIL_P(exc)) {
            if (ctx->extended) {
                rb_hash_aset(*rv, key, rb_ary_new3(2, val, cas));
            } else {
                rb_hash_aset(*rv, key, val);
            }
        }
    }
    if (ctx->nqueries == 0) {
        cb_gc_unprotect(bucket, ctx->proc);
    }
    (void)handle;
}
/*
 * Returns versions of the server for each node in the cluster
 *
 * @since 1.1.0
 *
 * @overload version
 *   @yieldparam [Result] ret the object with +error+, +node+, +operation+
 *     and +value+ attributes.
 *
 *   @return [Hash] node-version pairs
 *
 *   @raise [Couchbase::Error::Connect] if connection closed (see {Bucket#reconnect})
 *   @raise [ArgumentError] when passing the block in synchronous mode
 *
 *   @example Synchronous version request
 *     c.version            #=> will render version
 *
 *   @example Asynchronous version request
 *     c.run do
 *       c.version do |ret|
 *         ret.operation    #=> :version
 *         ret.success?     #=> true
 *         ret.node         #=> "localhost:11211"
 *         ret.value        #=> will render version
 *       end
 *     end
 */
VALUE
cb_bucket_version(int argc, VALUE *argv, VALUE self)
{
    struct cb_bucket_st *bucket = DATA_PTR(self);
    struct cb_context_st *ctx;
    VALUE rv, exc, proc;
    lcb_error_t err;
    struct cb_params_st params;

    if (!cb_bucket_connected_bang(bucket, cb_sym_version)) {
        return Qnil;
    }

    memset(&params, 0, sizeof(struct cb_params_st));
    rb_scan_args(argc, argv, "0*&", &params.args, &proc);
    if (!bucket->async && proc != Qnil) {
        rb_raise(rb_eArgError, "synchronous mode doesn't support callbacks");
    }
    params.type = cb_cmd_version;
    params.bucket = bucket;
    cb_params_build(&params);
    ctx = cb_context_alloc_common(bucket, proc, params.cmd.version.num);
    err = lcb_server_versions(bucket->handle, (const void *)ctx,
                              params.cmd.version.num, params.cmd.version.ptr);
    exc = cb_check_error(err, "failed to schedule version request", Qnil);
    cb_params_destroy(&params);
    if (exc != Qnil) {
        cb_context_free(ctx);
        rb_exc_raise(exc);
    }
    bucket->nbytes += params.npayload;
    if (bucket->async) {
        cb_maybe_do_loop(bucket);
        return Qnil;
    } else {
        if (ctx->nqueries > 0) {
            /* we have some operations pending */
            lcb_wait(bucket->handle);
        }
        exc = ctx->exception;
        rv = ctx->rv;
        cb_context_free(ctx);
        if (exc != Qnil) {
            rb_exc_raise(exc);
        }
        exc = bucket->exception;
        if (exc != Qnil) {
            bucket->exception = Qnil;
            rb_exc_raise(exc);
        }
        return rv;
    }
}
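/* Hedged sketch: the request scheduled above is completed by
 * cb_version_callback() further down.  The wiring between the scheduling
 * functions and their callbacks is assumed to happen once, at connection
 * setup, through the libcouchbase 2.x registration API; the helper name
 * below is hypothetical and only illustrates that step. */
static void
setup_lcb_callbacks(lcb_t handle)
{
    (void)lcb_set_version_callback(handle, cb_version_callback);
    (void)lcb_set_get_callback(handle, cb_get_callback);
    (void)lcb_set_store_callback(handle, cb_storage_callback);
    (void)lcb_set_unlock_callback(handle, cb_unlock_callback);
}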
void
cb_storage_callback(lcb_t handle, const void *cookie, lcb_storage_t operation,
                    lcb_error_t error, const lcb_store_resp_t *resp)
{
    struct cb_context_st *ctx = (struct cb_context_st *)cookie;
    struct cb_bucket_st *bucket = ctx->bucket;
    VALUE key, cas, exc, res;

    key = STR_NEW((const char*)resp->v.v0.key, resp->v.v0.nkey);
    cb_strip_key_prefix(bucket, key);

    cas = resp->v.v0.cas > 0 ? ULL2NUM(resp->v.v0.cas) : Qnil;
    ctx->operation = storage_opcode_to_sym(operation);
    exc = cb_check_error(error, "failed to store value", key);
    if (exc != Qnil) {
        rb_ivar_set(exc, cb_id_iv_cas, cas);
        rb_ivar_set(exc, cb_id_iv_operation, ctx->operation);
        ctx->exception = exc;
    }

    if (bucket->async) {    /* asynchronous */
        if (RTEST(ctx->observe_options)) {
            VALUE args[2]; /* it's ok to pass pointer to stack struct here */
            args[0] = rb_hash_new();
            rb_hash_aset(args[0], key, cas);
            args[1] = ctx->observe_options;
            rb_block_call(bucket->self, cb_id_observe_and_wait, 2, args,
                          storage_observe_callback, (VALUE)ctx);
            ctx->observe_options = Qnil;
        } else if (ctx->proc != Qnil) {
            res = rb_class_new_instance(0, NULL, cb_cResult);
            rb_ivar_set(res, cb_id_iv_error, exc);
            rb_ivar_set(res, cb_id_iv_key, key);
            rb_ivar_set(res, cb_id_iv_operation, ctx->operation);
            rb_ivar_set(res, cb_id_iv_cas, cas);
            cb_proc_call(bucket, ctx->proc, 1, res);
        }
    } else {                /* synchronous */
        rb_hash_aset(ctx->rv, key, cas);
    }

    if (!RTEST(ctx->observe_options)) {
        ctx->nqueries--;
        if (ctx->nqueries == 0) {
            ctx->proc = Qnil;
            if (bucket->async) {
                cb_context_free(ctx);
            }
        }
    }
    (void)handle;
}
/*
 * Execute {Bucket::CouchRequest}
 *
 * @since 1.2.0
 */
VALUE
cb_http_request_perform(VALUE self)
{
    struct http_request_st *req = DATA_PTR(self);
    struct context_st *ctx;
    VALUE rv, exc;
    lcb_error_t err;
    struct bucket_st *bucket;

    ctx = xcalloc(1, sizeof(struct context_st));
    if (ctx == NULL) {
        rb_raise(eClientNoMemoryError, "failed to allocate memory");
    }
    rv = Qnil;
    ctx->rv = &rv;
    ctx->bucket = bucket = req->bucket;
    ctx->proc = rb_block_given_p() ? rb_block_proc() : req->on_body_callback;
    ctx->extended = req->extended;
    ctx->request = req;
    ctx->headers_val = cb_gc_protect(bucket, rb_hash_new());

    err = lcb_make_http_request(bucket->handle, (const void *)ctx,
                                req->type, &req->cmd, &req->request);
    exc = cb_check_error(err, "failed to schedule document request",
                         STR_NEW(req->cmd.v.v0.path, req->cmd.v.v0.npath));
    if (exc != Qnil) {
        xfree(ctx);
        rb_exc_raise(exc);
    }
    req->running = 1;
    req->ctx = ctx;
    if (bucket->async) {
        return Qnil;
    } else {
        lcb_wait(bucket->handle);
        if (req->completed) {
            exc = ctx->exception;
            xfree(ctx);
            if (exc != Qnil) {
                cb_gc_unprotect(bucket, exc);
                rb_exc_raise(exc);
            }
            return rv;
        } else {
            return Qnil;
        }
    }
    return Qnil;
}
/*
 * Execute {Bucket::CouchRequest}
 *
 * @since 1.2.0
 */
VALUE
cb_http_request_perform(VALUE self)
{
    struct cb_http_request_st *req = DATA_PTR(self);
    struct cb_context_st *ctx;
    VALUE rv, exc;
    lcb_error_t err;
    struct cb_bucket_st *bucket = req->bucket;

    if (bucket->handle == NULL) {
        rb_raise(cb_eConnectError, "closed connection");
    }
    ctx = cb_context_alloc(bucket);
    ctx->rv = Qnil;
    ctx->proc = rb_block_given_p() ? rb_block_proc() : req->on_body_callback;
    ctx->extended = req->extended;
    ctx->request = req;
    ctx->headers_val = rb_hash_new();

    err = lcb_make_http_request(bucket->handle, (const void *)ctx,
                                req->type, &req->cmd, &req->request);
    exc = cb_check_error(err, "failed to schedule document request",
                         STR_NEW(req->cmd.v.v0.path, req->cmd.v.v0.npath));
    if (exc != Qnil) {
        lcb_cancel_http_request(bucket->handle, req->request);
        rb_exc_raise(exc);
    }
    req->running = 1;
    req->ctx = ctx;
    if (bucket->async) {
        return Qnil;
    } else {
        lcb_wait(bucket->handle);
        if (req->completed) {
            rv = ctx->rv;
            exc = ctx->exception;
            cb_context_free(ctx);
            if (exc != Qnil) {
                rb_exc_raise(exc);
            }
            return rv;
        } else {
            return Qnil;
        }
    }
    return Qnil;
}
void
cb_version_callback(lcb_t handle, const void *cookie, lcb_error_t error,
                    const lcb_server_version_resp_t *resp)
{
    struct cb_context_st *ctx = (struct cb_context_st *)cookie;
    struct cb_bucket_st *bucket = ctx->bucket;
    VALUE node, val, exc, res;

    node = resp->v.v0.server_endpoint ? STR_NEW_CSTR(resp->v.v0.server_endpoint) : Qnil;
    exc = cb_check_error(error, "failed to get version", node);
    if (exc != Qnil) {
        rb_ivar_set(exc, cb_id_iv_operation, cb_sym_version);
        ctx->exception = exc;
    }

    if (node != Qnil) {
        val = STR_NEW((const char*)resp->v.v0.vstring, resp->v.v0.nvstring);
        if (bucket->async) {    /* asynchronous */
            if (ctx->proc != Qnil) {
                res = rb_class_new_instance(0, NULL, cb_cResult);
                rb_ivar_set(res, cb_id_iv_error, exc);
                rb_ivar_set(res, cb_id_iv_operation, cb_sym_version);
                rb_ivar_set(res, cb_id_iv_node, node);
                rb_ivar_set(res, cb_id_iv_value, val);
                cb_proc_call(bucket, ctx->proc, 1, res);
            }
        } else {                /* synchronous */
            if (NIL_P(exc)) {
                rb_hash_aset(ctx->rv, node, val);
            }
        }
    } else {
        ctx->nqueries--;
        ctx->proc = Qnil;
        if (bucket->async) {
            cb_context_free(ctx);
        }
    }
    (void)handle;
}
void
cb_unlock_callback(lcb_t handle, const void *cookie, lcb_error_t error,
                   const lcb_unlock_resp_t *resp)
{
    struct cb_context_st *ctx = (struct cb_context_st *)cookie;
    struct cb_bucket_st *bucket = ctx->bucket;
    VALUE key, exc = Qnil, res;

    ctx->nqueries--;
    key = STR_NEW((const char*)resp->v.v0.key, resp->v.v0.nkey);
    cb_strip_key_prefix(bucket, key);

    if (error != LCB_KEY_ENOENT || !ctx->quiet) {
        exc = cb_check_error(error, "failed to unlock value", key);
        if (exc != Qnil) {
            rb_ivar_set(exc, cb_id_iv_operation, cb_sym_unlock);
            ctx->exception = exc;
        }
    }

    if (bucket->async) {    /* asynchronous */
        if (ctx->proc != Qnil) {
            res = rb_class_new_instance(0, NULL, cb_cResult);
            rb_ivar_set(res, cb_id_iv_error, exc);
            rb_ivar_set(res, cb_id_iv_operation, cb_sym_unlock);
            rb_ivar_set(res, cb_id_iv_key, key);
            cb_proc_call(bucket, ctx->proc, 1, res);
        }
    } else {                /* synchronous */
        rb_hash_aset(ctx->rv, key, (error == LCB_SUCCESS) ? Qtrue : Qfalse);
    }

    if (ctx->nqueries == 0) {
        ctx->proc = Qnil;
        if (bucket->async) {
            cb_context_free(ctx);
        }
    }
    (void)handle;
}
/*
 * Obtain an object stored in Couchbase by given key.
 *
 * @since 1.0.0
 *
 * @see http://couchbase.com/docs/couchbase-manual-2.0/couchbase-architecture-apis-memcached-protocol-additions.html#couchbase-architecture-apis-memcached-protocol-additions-getl
 *
 * @overload get(*keys, options = {})
 *   @param keys [String, Symbol, Array] One or several keys to fetch
 *   @param options [Hash] Options for operation.
 *   @option options [true, false] :extended (false) If set to +true+, the
 *     operation will return a tuple +[value, flags, cas]+, otherwise (by
 *     default) it returns just the value.
 *   @option options [Fixnum] :ttl (self.default_ttl) Expiry time for key.
 *     Values larger than 30*24*60*60 seconds (30 days) are interpreted as
 *     absolute times (from the epoch).
 *   @option options [true, false] :quiet (self.quiet) If set to +true+, the
 *     operation won't raise an error for a missing key; it will return +nil+
 *     instead. Otherwise it raises an error in synchronous mode. In
 *     asynchronous mode this option is ignored.
 *   @option options [Symbol] :format (nil) Explicitly choose the decoder
 *     for this key (+:plain+, +:document+, +:marshal+). See
 *     {Bucket#default_format}.
 *   @option options [Fixnum, Boolean] :lock Lock the keys for a time span.
 *     If this parameter is +true+ the key(s) will be locked for the default
 *     timeout. You can also pass a number to set your own timeout in
 *     seconds. If it is lower than zero or exceeds the maximum, the server
 *     will use the default value. You can determine the actual default and
 *     maximum values by calling {Bucket#stats} without arguments and
 *     inspecting the keys "ep_getl_default_timeout" and
 *     "ep_getl_max_timeout" respectively. See the overloaded hash syntax
 *     to specify a custom timeout per key.
 *   @option options [true, false] :assemble_hash (false) Assemble a Hash for
 *     the results. The Hash is assembled automatically if the +:extended+
 *     option is true or in the case of "get and touch" with multiple keys.
 *   @option options [true, false, :all, :first, Fixnum] :replica
 *     (false) Read the key from a replica node. Options +:ttl+ and +:lock+
 *     are not compatible with +:replica+. Value +true+ is a synonym for
 *     +:first+, which means sequentially iterate over all replicas
 *     and return the first successful response, skipping all failures.
 *     It is also possible to query all replicas in parallel using
 *     the +:all+ option, or pass a replica index, starting from zero.
 *
 *   @yieldparam ret [Result] the result of operation in asynchronous mode
 *     (valid attributes: +error+, +operation+, +key+, +value+, +flags+,
 *     +cas+).
 *
 *   @return [Object, Array, Hash] the value(s) (or tuples in extended mode)
 *     associated with the key.
 *
 *   @raise [Couchbase::Error::NotFound] if the key is missing in the
 *     bucket.
 *
 *   @raise [Couchbase::Error::Connect] if connection closed (see {Bucket#reconnect})
 *
 *   @raise [ArgumentError] when passing the block in synchronous mode
 *
 *   @example Get single value in quiet mode (the default)
 *     c.get("foo")     #=> the associated value or nil
 *
 *   @example Use alternative hash-like syntax
 *     c["foo"]         #=> the associated value or nil
 *
 *   @example Get single value in verbose mode
 *     c.get("missing-foo", :quiet => false)  #=> raises Couchbase::NotFound
 *     c.get("missing-foo", :quiet => true)   #=> returns nil
 *
 *   @example Get and touch single value. The key won't be accessible after 10 seconds
 *     c.get("foo", :ttl => 10)
 *
 *   @example Extended get
 *     val, flags, cas = c.get("foo", :extended => true)
 *
 *   @example Get multiple keys
 *     c.get("foo", "bar", "baz")   #=> [val1, val2, val3]
 *
 *   @example Get multiple keys, assembling the result into a Hash
 *     c.get("foo", "bar", "baz", :assemble_hash => true)
 *     #=> {"foo" => val1, "bar" => val2, "baz" => val3}
 *
 *   @example Extended get multiple keys
 *     c.get("foo", "bar", :extended => true)
 *     #=> {"foo" => [val1, flags1, cas1], "bar" => [val2, flags2, cas2]}
 *
 *   @example Asynchronous get
 *     c.run do
 *       c.get("foo", "bar", "baz") do |ret|
 *         ret.operation   #=> :get
 *         ret.success?    #=> true
 *         ret.key         #=> "foo", "bar" or "baz" in separate calls
 *         ret.value
 *         ret.flags
 *         ret.cas
 *       end
 *     end
 *
 *   @example Get and lock key using default timeout
 *     c.get("foo", :lock => true)
 *
 *   @example Determine lock timeout parameters
 *     c.stats.values_at("ep_getl_default_timeout", "ep_getl_max_timeout")
 *     #=> [{"127.0.0.1:11210"=>"15"}, {"127.0.0.1:11210"=>"30"}]
 *
 *   @example Get and lock key using custom timeout
 *     c.get("foo", :lock => 3)
 *
 *   @example Get and lock multiple keys using custom timeout
 *     c.get("foo", "bar", :lock => 3)
 *
 * @overload get(keys, options = {})
 *   When the method receives a hash map, it behaves as if it received the
 *   list of keys (+keys.keys+), but also touches each key, setting its
 *   expiry time to the corresponding value. Unlike the usual get, this form
 *   always returns a hash map +{key => value}+ or
 *   +{key => [value, flags, cas]}+.
 *
 *   @param keys [Hash] Map of key-ttl pairs
 *   @param options [Hash] Options for operation. (see options definition
 *     above)
 *
 *   @return [Hash] the values (or tuples in extended mode) associated with
 *     the keys.
 *
 *   @example Get and touch multiple keys
 *     c.get("foo" => 10, "bar" => 20)   #=> {"foo" => val1, "bar" => val2}
 *
 *   @example Extended get and touch multiple keys
 *     c.get({"foo" => 10, "bar" => 20}, :extended => true)
 *     #=> {"foo" => [val1, flags1, cas1], "bar" => [val2, flags2, cas2]}
 *
 *   @example Get and lock multiple keys for chosen period in seconds
 *     c.get("foo" => 10, "bar" => 20, :lock => true)
 *     #=> {"foo" => val1, "bar" => val2}
 */
VALUE
cb_bucket_get(int argc, VALUE *argv, VALUE self)
{
    struct cb_bucket_st *bucket = DATA_PTR(self);
    struct cb_context_st *ctx;
    VALUE rv, proc, exc;
    size_t ii;
    lcb_error_t err = LCB_SUCCESS;
    struct cb_params_st params;

    if (!cb_bucket_connected_bang(bucket, cb_sym_get)) {
        return Qnil;
    }

    memset(&params, 0, sizeof(struct cb_params_st));
    rb_scan_args(argc, argv, "0*&", &params.args, &proc);
    if (!bucket->async && proc != Qnil) {
        rb_raise(rb_eArgError, "synchronous mode doesn't support callbacks");
    }
    params.type = cb_cmd_get;
    params.bucket = bucket;
    params.cmd.get.keys_ary = rb_ary_new();
    cb_params_build(&params);
    ctx = cb_context_alloc_common(bucket, proc, params.cmd.get.num);
    ctx->extended = params.cmd.get.extended;
    ctx->quiet = params.cmd.get.quiet;
    ctx->transcoder = params.cmd.get.transcoder;
    ctx->transcoder_opts = params.cmd.get.transcoder_opts;
    if (RTEST(params.cmd.get.replica)) {
        if (params.cmd.get.replica == cb_sym_all) {
            ctx->nqueries = lcb_get_num_replicas(bucket->handle);
            ctx->all_replicas = 1;
        }
        err = lcb_get_replica(bucket->handle, (const void *)ctx,
                              params.cmd.get.num, params.cmd.get.ptr_gr);
    } else {
        err = lcb_get(bucket->handle, (const void *)ctx,
                      params.cmd.get.num, params.cmd.get.ptr);
    }
    cb_params_destroy(&params);
    exc = cb_check_error(err, "failed to schedule get request", Qnil);
    if (exc != Qnil) {
        cb_context_free(ctx);
        rb_exc_raise(exc);
    }
    bucket->nbytes += params.npayload;
    if (bucket->async) {
        cb_maybe_do_loop(bucket);
        return Qnil;
    } else {
        if (ctx->nqueries > 0) {
            /* we have some operations pending */
            lcb_wait(bucket->handle);
        }
        exc = ctx->exception;
        rv = ctx->rv;
        cb_context_free(ctx);
        if (exc != Qnil) {
            rb_exc_raise(exc);
        }
        exc = bucket->exception;
        if (exc != Qnil) {
            bucket->exception = Qnil;
            rb_exc_raise(exc);
        }
        if (params.cmd.get.gat || params.cmd.get.assemble_hash ||
                (params.cmd.get.extended && (params.cmd.get.num > 1 || params.cmd.get.array))) {
            return rv;  /* return as a hash {key => [value, flags, cas], ...} */
        }
        if (params.cmd.get.num > 1 || params.cmd.get.array) {
            VALUE keys, ret;
            ret = rb_ary_new();
            /* make sure ret is guarded so not invisible in a register
             * when stack scanning */
            RB_GC_GUARD(ret);
            keys = params.cmd.get.keys_ary;
            for (ii = 0; ii < params.cmd.get.num; ++ii) {
                rb_ary_push(ret, rb_hash_aref(rv, rb_ary_entry(keys, ii)));
            }
            return ret;  /* return as an array [value1, value2, ...] */
        } else {
            VALUE vv = Qnil;
            rb_hash_foreach(rv, cb_first_value_i, (VALUE)&vv);
            return vv;
        }
    }
}
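/* Hedged sketch: cb_bucket_connected_bang() guards every scheduling function
 * in this file but is defined elsewhere.  Based on the @raise tags above, it
 * is assumed to verify the connection handle and raise
 * Couchbase::Error::Connect ("closed connection") when the connection has
 * been closed, returning nonzero when the operation may proceed; the deferred
 * handling of the error in asynchronous mode is an assumption. */
static int
cb_bucket_connected_bang(struct cb_bucket_st *bucket, VALUE operation)
{
    if (bucket->handle == NULL) {
        VALUE exc = rb_exc_new2(cb_eConnectError, "closed connection");
        rb_ivar_set(exc, cb_id_iv_operation, operation);
        if (bucket->async) {
            /* assumption: in asynchronous mode the error is stored and
             * surfaced later instead of being raised immediately */
            bucket->exception = exc;
            return 0;
        }
        rb_exc_raise(exc);
    }
    return 1;
}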
void
cb_get_callback(lcb_t handle, const void *cookie, lcb_error_t error,
                const lcb_get_resp_t *resp)
{
    struct cb_context_st *ctx = (struct cb_context_st *)cookie;
    struct cb_bucket_st *bucket = ctx->bucket;
    VALUE key, val, flags, cas, exc = Qnil, res, raw;

    ctx->nqueries--;
    key = STR_NEW((const char*)resp->v.v0.key, resp->v.v0.nkey);
    cb_strip_key_prefix(bucket, key);

    if (error != LCB_KEY_ENOENT || !ctx->quiet) {
        exc = cb_check_error(error, "failed to get value", key);
        if (exc != Qnil) {
            rb_ivar_set(exc, cb_id_iv_operation, cb_sym_get);
            ctx->exception = exc;
        }
    }

    flags = ULONG2NUM(resp->v.v0.flags);
    cas = ULL2NUM(resp->v.v0.cas);
    raw = STR_NEW((const char*)resp->v.v0.bytes, resp->v.v0.nbytes);
    val = cb_decode_value(ctx->transcoder, raw, resp->v.v0.flags, ctx->transcoder_opts);
    if (rb_obj_is_kind_of(val, rb_eStandardError)) {
        VALUE exc_str = rb_funcall(val, cb_id_to_s, 0);
        VALUE msg = rb_funcall(rb_mKernel, cb_id_sprintf, 3,
                               rb_str_new2("unable to convert value for key \"%s\": %s"),
                               key, exc_str);
        ctx->exception = rb_exc_new3(cb_eValueFormatError, msg);
        rb_ivar_set(ctx->exception, cb_id_iv_operation, cb_sym_get);
        rb_ivar_set(ctx->exception, cb_id_iv_key, key);
        rb_ivar_set(ctx->exception, cb_id_iv_inner_exception, val);
        val = Qnil;
    }

    if (bucket->async) {    /* asynchronous */
        if (ctx->proc != Qnil) {
            res = rb_class_new_instance(0, NULL, cb_cResult);
            rb_ivar_set(res, cb_id_iv_error, exc);
            rb_ivar_set(res, cb_id_iv_operation, cb_sym_get);
            rb_ivar_set(res, cb_id_iv_key, key);
            rb_ivar_set(res, cb_id_iv_value, val);
            rb_ivar_set(res, cb_id_iv_flags, flags);
            rb_ivar_set(res, cb_id_iv_cas, cas);
            cb_proc_call(bucket, ctx->proc, 1, res);
        }
    } else {                /* synchronous */
        if (NIL_P(exc) && error != LCB_KEY_ENOENT) {
            if (ctx->extended) {
                val = rb_ary_new3(3, val, flags, cas);
            }
            if (ctx->all_replicas) {
                VALUE ary = rb_hash_aref(ctx->rv, key);
                if (NIL_P(ary)) {
                    ary = rb_ary_new();
                    rb_hash_aset(ctx->rv, key, ary);
                }
                rb_ary_push(ary, val);
            } else {
                rb_hash_aset(ctx->rv, key, val);
            }
        }
    }

    if (ctx->nqueries == 0) {
        ctx->proc = Qnil;
        if (bucket->async) {
            cb_context_free(ctx);
        }
    }
    (void)handle;
}
static inline VALUE
cb_bucket_store(lcb_storage_t cmd, int argc, VALUE *argv, VALUE self)
{
    struct cb_bucket_st *bucket = DATA_PTR(self);
    struct cb_context_st *ctx;
    VALUE rv, proc, exc, obs = Qnil;
    lcb_error_t err;
    struct cb_params_st params;

    if (!cb_bucket_connected_bang(bucket, storage_opcode_to_sym(cmd))) {
        return Qnil;
    }

    memset(&params, 0, sizeof(struct cb_params_st));
    rb_scan_args(argc, argv, "0*&", &params.args, &proc);
    if (!bucket->async && proc != Qnil) {
        rb_raise(rb_eArgError, "synchronous mode doesn't support callbacks");
    }
    params.type = cb_cmd_store;
    params.bucket = bucket;
    params.cmd.store.operation = cmd;
    cb_params_build(&params);
    obs = params.cmd.store.observe;
    ctx = cb_context_alloc(bucket);
    if (!bucket->async) {
        ctx->rv = rb_hash_new();
        ctx->observe_options = obs;
    }
    ctx->proc = proc;
    ctx->nqueries = params.cmd.store.num;
    err = lcb_store(bucket->handle, (const void *)ctx,
                    params.cmd.store.num, params.cmd.store.ptr);
    cb_params_destroy(&params);
    exc = cb_check_error(err, "failed to schedule set request", Qnil);
    if (exc != Qnil) {
        cb_context_free(ctx);
        rb_exc_raise(exc);
    }
    bucket->nbytes += params.npayload;
    if (bucket->async) {
        cb_maybe_do_loop(bucket);
        return Qnil;
    } else {
        if (ctx->nqueries > 0) {
            /* we have some operations pending */
            lcb_wait(bucket->handle);
        }
        exc = ctx->exception;
        rv = ctx->rv;
        cb_context_free(ctx);
        if (exc != Qnil) {
            rb_exc_raise(exc);
        }
        exc = bucket->exception;
        if (exc != Qnil) {
            bucket->exception = Qnil;
            rb_exc_raise(exc);
        }
        if (RTEST(obs)) {
            rv = rb_funcall(bucket->self, cb_id_observe_and_wait, 2, rv, obs);
        }
        if (params.cmd.store.num > 1) {
            return rv;  /* return as a hash {key => cas, ...} */
        } else {
            VALUE vv = Qnil;
            rb_hash_foreach(rv, cb_first_value_i, (VALUE)&vv);
            return vv;
        }
    }
}
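/* Hedged sketch: cb_bucket_store() is parameterized by the lcb_storage_t
 * opcode, so the Ruby-visible store methods are assumed to be thin wrappers
 * of roughly this shape (the wrapper names mirror Bucket#set, #add and
 * #replace and are illustrative, not taken from this section). */
VALUE
cb_bucket_set(int argc, VALUE *argv, VALUE self)
{
    return cb_bucket_store(LCB_SET, argc, argv, self);
}

VALUE
cb_bucket_add(int argc, VALUE *argv, VALUE self)
{
    return cb_bucket_store(LCB_ADD, argc, argv, self);
}

VALUE
cb_bucket_replace(int argc, VALUE *argv, VALUE self)
{
    return cb_bucket_store(LCB_REPLACE, argc, argv, self);
}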
static inline VALUE
cb_bucket_arithmetic(int sign, int argc, VALUE *argv, VALUE self)
{
    struct bucket_st *bucket = DATA_PTR(self);
    struct context_st *ctx;
    VALUE args, rv, proc, exc;
    lcb_error_t err;
    struct params_st params;

    if (bucket->handle == NULL) {
        rb_raise(eConnectError, "closed connection");
    }
    rb_scan_args(argc, argv, "0*&", &args, &proc);
    if (!bucket->async && proc != Qnil) {
        rb_raise(rb_eArgError, "synchronous mode doesn't support callbacks");
    }
    memset(&params, 0, sizeof(struct params_st));
    params.type = cmd_arith;
    params.bucket = bucket;
    params.cmd.arith.sign = sign;
    cb_params_build(&params, RARRAY_LEN(args), args);
    ctx = xcalloc(1, sizeof(struct context_st));
    if (ctx == NULL) {
        rb_raise(eClientNoMemoryError, "failed to allocate memory for context");
    }
    rv = rb_hash_new();
    ctx->rv = &rv;
    ctx->bucket = bucket;
    ctx->proc = cb_gc_protect(bucket, proc);
    ctx->exception = Qnil;
    ctx->arith = sign;
    ctx->nqueries = params.cmd.arith.num;
    err = lcb_arithmetic(bucket->handle, (const void *)ctx,
                         params.cmd.arith.num, params.cmd.arith.ptr);
    cb_params_destroy(&params);
    exc = cb_check_error(err, "failed to schedule arithmetic request", Qnil);
    if (exc != Qnil) {
        xfree(ctx);
        rb_exc_raise(exc);
    }
    bucket->nbytes += params.npayload;
    if (bucket->async) {
        maybe_do_loop(bucket);
        return Qnil;
    } else {
        if (ctx->nqueries > 0) {
            /* we have some operations pending */
            lcb_wait(bucket->handle);
        }
        exc = ctx->exception;
        xfree(ctx);
        if (exc != Qnil) {
            cb_gc_unprotect(bucket, exc);
            rb_exc_raise(exc);
        }
        if (params.cmd.arith.num > 1) {
            return rv;  /* return as a hash {key => value, ...} */
        } else {
            VALUE vv = Qnil;
            rb_hash_foreach(rv, cb_first_value_i, (VALUE)&vv);
            return vv;
        }
    }
}
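/* Hedged sketch: the sign parameter above, and the sym_increment/sym_decrement
 * choice in arithmetic_callback(), suggest that increment and decrement are
 * exposed through two small wrappers along these lines (the wrapper names
 * mirror Bucket#incr and Bucket#decr and are illustrative). */
VALUE
cb_bucket_incr(int argc, VALUE *argv, VALUE self)
{
    return cb_bucket_arithmetic(+1, argc, argv, self);
}

VALUE
cb_bucket_decr(int argc, VALUE *argv, VALUE self)
{
    return cb_bucket_arithmetic(-1, argc, argv, self);
}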