void cb_http_complete_callback(lcb_http_request_t request, lcb_t handle, const void *cookie, lcb_error_t error, const lcb_http_resp_t *resp) { struct cb_context_st *ctx = (struct cb_context_st *)cookie; struct cb_bucket_st *bucket = ctx->bucket; VALUE key, val, res, exc; lcb_http_status_t status; ctx->request->completed = 1; if (bucket->destroying) { cb_context_free(ctx); return; } key = STR_NEW((const char*)resp->v.v0.path, resp->v.v0.npath); val = resp->v.v0.nbytes ? STR_NEW((const char*)resp->v.v0.bytes, resp->v.v0.nbytes) : Qnil; exc = ctx->exception; if (!RTEST(exc)) { exc = cb_check_error_with_status(error, "failed to execute HTTP request", key, resp->v.v0.status); if (exc != Qnil && val != Qnil) { rb_ivar_set(exc, cb_id_iv_body, val); } } if (RTEST(exc)) { if (rb_obj_is_kind_of(exc, cb_eHTTPError)) { rb_funcall(exc, cb_id_parse_body_bang, 0); } ctx->exception = exc; } status = resp->v.v0.status; if (resp->v.v0.headers) { cb_build_headers(ctx, resp->v.v0.headers); ctx->headers_val = Qnil; } if (ctx->extended) { res = rb_class_new_instance(0, NULL, cb_cResult); rb_ivar_set(res, cb_id_iv_error, ctx->exception); rb_ivar_set(res, cb_id_iv_status, status ? INT2FIX(status) : Qnil); rb_ivar_set(res, cb_id_iv_operation, cb_sym_http_request); rb_ivar_set(res, cb_id_iv_key, key); rb_ivar_set(res, cb_id_iv_value, val); rb_ivar_set(res, cb_id_iv_completed, Qtrue); rb_ivar_set(res, cb_id_iv_headers, ctx->headers_val); } else { res = val; } if (ctx->proc != Qnil) { cb_proc_call(bucket, ctx->proc, 1, res); ctx->proc = Qnil; } if (!bucket->async && ctx->exception == Qnil) { ctx->rv = res; } if (bucket->async) { cb_context_free(ctx); } (void)handle; (void)request; }
/*
 * Schedule (and, in synchronous mode, execute) an OBSERVE request for
 * the keys parsed from argv.
 *
 * Returns Qnil in asynchronous mode. In synchronous mode returns either
 * a hash {key => [results]} (multi-key or array form) or the first value
 * alone (single key). Raises ArgumentError when a block is passed in
 * synchronous mode and re-raises any exception produced by scheduling or
 * by the operation callbacks.
 */
VALUE
cb_bucket_observe(int argc, VALUE *argv, VALUE self)
{
    struct cb_bucket_st *bucket = DATA_PTR(self);
    struct cb_context_st *ctx;
    VALUE rv, proc, exc;
    lcb_error_t err;
    struct cb_params_st params;

    if (!cb_bucket_connected_bang(bucket, cb_sym_observe)) {
        return Qnil;
    }

    /* was garbled as "¶ms" (mojibake for "&params") */
    memset(&params, 0, sizeof(struct cb_params_st));
    rb_scan_args(argc, argv, "0*&", &params.args, &proc);
    if (!bucket->async && proc != Qnil) {
        rb_raise(rb_eArgError, "synchronous mode doesn't support callbacks");
    }
    params.type = cb_cmd_observe;
    params.bucket = bucket;
    cb_params_build(&params);
    ctx = cb_context_alloc_common(bucket, proc, params.cmd.observe.num);
    err = lcb_observe(bucket->handle, (const void *)ctx, params.cmd.observe.num, params.cmd.observe.ptr);
    cb_params_destroy(&params);
    exc = cb_check_error(err, "failed to schedule observe request", Qnil);
    if (exc != Qnil) {
        cb_context_free(ctx);
        rb_exc_raise(exc);
    }
    bucket->nbytes += params.npayload;
    if (bucket->async) {
        cb_maybe_do_loop(bucket);
        return Qnil;
    } else {
        if (ctx->nqueries > 0) {
            /* we have some operations pending */
            lcb_wait(bucket->handle);
        }
        exc = ctx->exception;
        rv = ctx->rv;
        cb_context_free(ctx);
        if (exc != Qnil) {
            rb_exc_raise(exc);
        }
        exc = bucket->exception;
        if (exc != Qnil) {
            bucket->exception = Qnil;
            rb_exc_raise(exc);
        }
        if (params.cmd.observe.num > 1 || params.cmd.observe.array) {
            return rv; /* return as a hash {key => {}, ...} */
        } else {
            VALUE vv = Qnil;
            rb_hash_foreach(rv, cb_first_value_i, (VALUE)&vv);
            return vv; /* return first value */
        }
    }
}
/*
 * Returns versions of the server for each node in the cluster
 *
 * @since 1.1.0
 *
 * @overload version
 *   @yieldparam [Result] ret the object with +error+, +node+, +operation+
 *     and +value+ attributes.
 *
 *   @return [Hash] node-version pairs
 *
 *   @raise [Couchbase::Error::Connect] if connection closed (see {Bucket#reconnect})
 *   @raise [ArgumentError] when passing the block in synchronous mode
 *
 *   @example Synchronous version request
 *     c.version #=> will render version
 *
 *   @example Asynchronous version request
 *     c.run do
 *       c.version do |ret|
 *         ret.operation #=> :version
 *         ret.success?  #=> true
 *         ret.node      #=> "localhost:11211"
 *         ret.value     #=> will render version
 *       end
 *     end
 */
VALUE
cb_bucket_version(int argc, VALUE *argv, VALUE self)
{
    struct cb_bucket_st *bucket = DATA_PTR(self);
    struct cb_context_st *ctx;
    VALUE rv, exc, proc;
    lcb_error_t err;
    struct cb_params_st params;

    if (!cb_bucket_connected_bang(bucket, cb_sym_version)) {
        return Qnil;
    }

    /* was garbled as "¶ms" (mojibake for "&params") */
    memset(&params, 0, sizeof(struct cb_params_st));
    rb_scan_args(argc, argv, "0*&", &params.args, &proc);
    if (!bucket->async && proc != Qnil) {
        rb_raise(rb_eArgError, "synchronous mode doesn't support callbacks");
    }
    params.type = cb_cmd_version;
    params.bucket = bucket;
    cb_params_build(&params);
    ctx = cb_context_alloc_common(bucket, proc, params.cmd.version.num);
    err = lcb_server_versions(bucket->handle, (const void *)ctx, params.cmd.version.num, params.cmd.version.ptr);
    exc = cb_check_error(err, "failed to schedule version request", Qnil);
    cb_params_destroy(&params);
    if (exc != Qnil) {
        cb_context_free(ctx);
        rb_exc_raise(exc);
    }
    bucket->nbytes += params.npayload;
    if (bucket->async) {
        cb_maybe_do_loop(bucket);
        return Qnil;
    } else {
        if (ctx->nqueries > 0) {
            /* we have some operations pending */
            lcb_wait(bucket->handle);
        }
        exc = ctx->exception;
        rv = ctx->rv;
        cb_context_free(ctx);
        if (exc != Qnil) {
            rb_exc_raise(exc);
        }
        exc = bucket->exception;
        if (exc != Qnil) {
            bucket->exception = Qnil;
            rb_exc_raise(exc);
        }
        return rv;
    }
}
/*
 * libcouchbase callback invoked once per key of a scheduled store
 * (set/add/replace/append/prepend) operation.
 *
 * In asynchronous mode it either chains into an observe-and-wait round
 * (when durability options were requested) or yields a Result object to
 * the user callback. In synchronous mode it records {key => cas} into
 * ctx->rv. The context is released when the last pending query has
 * answered, unless an observe round is still outstanding — in that case
 * storage_observe_callback performs the final bookkeeping.
 */
void cb_storage_callback(lcb_t handle, const void *cookie, lcb_storage_t operation, lcb_error_t error, const lcb_store_resp_t *resp)
{
    struct cb_context_st *ctx = (struct cb_context_st *)cookie;
    struct cb_bucket_st *bucket = ctx->bucket;
    VALUE key, cas, exc, res;

    key = STR_NEW((const char*)resp->v.v0.key, resp->v.v0.nkey);
    cb_strip_key_prefix(bucket, key);

    /* a CAS of zero is reported to Ruby as nil */
    cas = resp->v.v0.cas > 0 ? ULL2NUM(resp->v.v0.cas) : Qnil;
    ctx->operation = storage_opcode_to_sym(operation);
    exc = cb_check_error(error, "failed to store value", key);
    if (exc != Qnil) {
        rb_ivar_set(exc, cb_id_iv_cas, cas);
        rb_ivar_set(exc, cb_id_iv_operation, ctx->operation);
        ctx->exception = exc;
    }
    if (bucket->async) { /* asynchronous */
        if (RTEST(ctx->observe_options)) {
            VALUE args[2]; /* it's ok to pass pointer to stack struct here */
            args[0] = rb_hash_new();
            rb_hash_aset(args[0], key, cas);
            args[1] = ctx->observe_options;
            rb_block_call(bucket->self, cb_id_observe_and_wait, 2, args, storage_observe_callback, (VALUE)ctx);
            ctx->observe_options = Qnil;
        } else if (ctx->proc != Qnil) {
            res = rb_class_new_instance(0, NULL, cb_cResult);
            rb_ivar_set(res, cb_id_iv_error, exc);
            rb_ivar_set(res, cb_id_iv_key, key);
            rb_ivar_set(res, cb_id_iv_operation, ctx->operation);
            rb_ivar_set(res, cb_id_iv_cas, cas);
            cb_proc_call(bucket, ctx->proc, 1, res);
        }
    } else { /* synchronous */
        rb_hash_aset(ctx->rv, key, cas);
    }
    /* when an observe round was kicked off above, the bookkeeping below
     * is deferred to storage_observe_callback */
    if (!RTEST(ctx->observe_options)) {
        ctx->nqueries--;
        if (ctx->nqueries == 0) {
            ctx->proc = Qnil;
            if (bucket->async) {
                cb_context_free(ctx);
            }
        }
    }
    (void)handle;
}
/*
 * Execute {Bucket::CouchRequest}
 *
 * Schedules the HTTP request held by the receiver; in synchronous mode
 * waits for completion and returns the response body (or the Result's
 * value in extended mode), re-raising any exception recorded by the
 * completion callback. In asynchronous mode returns Qnil immediately.
 *
 * @since 1.2.0
 */
VALUE
cb_http_request_perform(VALUE self)
{
    struct cb_http_request_st *req = DATA_PTR(self);
    struct cb_context_st *ctx;
    VALUE rv, exc;
    lcb_error_t err;
    struct cb_bucket_st *bucket = req->bucket;

    if (bucket->handle == NULL) {
        rb_raise(cb_eConnectError, "closed connection");
    }
    ctx = cb_context_alloc(bucket);
    ctx->rv = Qnil;
    ctx->proc = rb_block_given_p() ? rb_block_proc() : req->on_body_callback;
    ctx->extended = req->extended;
    ctx->request = req;
    ctx->headers_val = rb_hash_new();

    err = lcb_make_http_request(bucket->handle, (const void *)ctx, req->type, &req->cmd, &req->request);
    exc = cb_check_error(err, "failed to schedule document request", STR_NEW(req->cmd.v.v0.path, req->cmd.v.v0.npath));
    if (exc != Qnil) {
        lcb_cancel_http_request(bucket->handle, req->request);
        /* BUG FIX: release the context before raising; every other
         * scheduler in this file does so, and without it the context
         * leaked on scheduling failure */
        cb_context_free(ctx);
        rb_exc_raise(exc);
    }
    req->running = 1;
    req->ctx = ctx;
    if (bucket->async) {
        return Qnil;
    } else {
        lcb_wait(bucket->handle);
        if (req->completed) {
            rv = ctx->rv;
            exc = ctx->exception;
            cb_context_free(ctx);
            if (exc != Qnil) {
                rb_exc_raise(exc);
            }
            return rv;
        } else {
            return Qnil;
        }
    }
    /* unreachable trailing "return Qnil;" removed: both branches above return */
}
void cb_arithmetic_callback(lcb_t handle, const void *cookie, lcb_error_t error, const lcb_arithmetic_resp_t *resp) { struct cb_context_st *ctx = (struct cb_context_st *)cookie; struct cb_bucket_st *bucket = ctx->bucket; VALUE cas, key, val, exc, res; ID o; ctx->nqueries--; key = STR_NEW((const char*)resp->v.v0.key, resp->v.v0.nkey); cb_strip_key_prefix(bucket, key); cas = resp->v.v0.cas > 0 ? ULL2NUM(resp->v.v0.cas) : Qnil; o = ctx->arith > 0 ? cb_sym_increment : cb_sym_decrement; exc = cb_check_error(error, "failed to perform arithmetic operation", key); if (exc != Qnil) { rb_ivar_set(exc, cb_id_iv_cas, cas); rb_ivar_set(exc, cb_id_iv_operation, o); ctx->exception = exc; } val = ULL2NUM(resp->v.v0.value); if (bucket->async) { /* asynchronous */ if (ctx->proc != Qnil) { res = rb_class_new_instance(0, NULL, cb_cResult); rb_ivar_set(res, cb_id_iv_error, exc); rb_ivar_set(res, cb_id_iv_operation, o); rb_ivar_set(res, cb_id_iv_key, key); rb_ivar_set(res, cb_id_iv_value, val); rb_ivar_set(res, cb_id_iv_cas, cas); cb_proc_call(bucket, ctx->proc, 1, res); } } else { /* synchronous */ if (NIL_P(exc)) { if (ctx->extended) { rb_hash_aset(ctx->rv, key, rb_ary_new3(2, val, cas)); } else { rb_hash_aset(ctx->rv, key, val); } } } if (ctx->nqueries == 0) { ctx->proc = Qnil; if (bucket->async) { cb_context_free(ctx); } } (void)handle; }
/*
 * libcouchbase callback for lcb_server_versions() responses.
 *
 * Each node answers with its endpoint and version string; the final
 * packet of the sequence carries a NULL endpoint, and only then is the
 * query counter decremented and (in asynchronous mode) the context
 * released.
 */
void cb_version_callback(lcb_t handle, const void *cookie, lcb_error_t error, const lcb_server_version_resp_t *resp)
{
    struct cb_context_st *ctx = (struct cb_context_st *)cookie;
    struct cb_bucket_st *bucket = ctx->bucket;
    VALUE node, val, exc, res;

    node = resp->v.v0.server_endpoint ? STR_NEW_CSTR(resp->v.v0.server_endpoint) : Qnil;
    exc = cb_check_error(error, "failed to get version", node);
    if (exc != Qnil) {
        rb_ivar_set(exc, cb_id_iv_operation, cb_sym_version);
        ctx->exception = exc;
    }
    if (node != Qnil) {
        val = STR_NEW((const char*)resp->v.v0.vstring, resp->v.v0.nvstring);
        if (bucket->async) { /* asynchronous */
            if (ctx->proc != Qnil) {
                res = rb_class_new_instance(0, NULL, cb_cResult);
                rb_ivar_set(res, cb_id_iv_error, exc);
                rb_ivar_set(res, cb_id_iv_operation, cb_sym_version);
                rb_ivar_set(res, cb_id_iv_node, node);
                rb_ivar_set(res, cb_id_iv_value, val);
                cb_proc_call(bucket, ctx->proc, 1, res);
            }
        } else { /* synchronous */
            if (NIL_P(exc)) {
                rb_hash_aset(ctx->rv, node, val);
            }
        }
    } else {
        /* NULL endpoint == terminator packet: finish the whole operation */
        ctx->nqueries--;
        ctx->proc = Qnil;
        if (bucket->async) {
            cb_context_free(ctx);
        }
    }
    (void)handle;
}
/*
 * rb_block_call handler invoked for each result of the observe-and-wait
 * sequence that an asynchronous store with durability options triggers.
 * Forwards the (operation-tagged) result to the user callback and, once
 * the observe round is over, performs the bookkeeping that
 * cb_storage_callback deferred.
 */
static VALUE
storage_observe_callback(VALUE args, VALUE cookie)
{
    struct cb_context_st *context = (struct cb_context_st *)cookie;
    struct cb_bucket_st *bucket = context->bucket;
    VALUE result = rb_ary_shift(args);

    if (context->proc != Qnil) {
        rb_ivar_set(result, cb_id_iv_operation, context->operation);
        cb_proc_call(bucket, context->proc, 1, result);
    }
    if (RTEST(context->observe_options)) {
        /* observe round still in flight: bookkeeping happens later */
        return Qnil;
    }
    context->nqueries--;
    if (context->nqueries == 0) {
        context->proc = Qnil;
        if (bucket->async) {
            cb_context_free(context);
        }
    }
    return Qnil;
}
/*
 * Resume a paused HTTP request: if it is already running, wait for more
 * data and, when complete, return the accumulated result (re-raising any
 * recorded exception). If it has not been started yet, kick it off via
 * cb_http_request_perform. Returns Qnil when nothing completed.
 */
VALUE
cb_http_request_continue(VALUE self)
{
    VALUE exc, rv;
    struct cb_http_request_st *req = DATA_PTR(self);

    if (!req->running) {
        /* not started yet: start the request instead of waiting */
        cb_http_request_perform(self);
        return Qnil;
    }
    lcb_wait(req->bucket->handle);
    if (!req->completed) {
        return Qnil;
    }
    exc = req->ctx->exception;
    rv = req->ctx->rv;
    cb_context_free(req->ctx);
    if (exc != Qnil) {
        rb_exc_raise(exc);
    }
    return rv;
}
void cb_unlock_callback(lcb_t handle, const void *cookie, lcb_error_t error, const lcb_unlock_resp_t *resp) { struct cb_context_st *ctx = (struct cb_context_st *)cookie; struct cb_bucket_st *bucket = ctx->bucket; VALUE key, exc = Qnil, res; ctx->nqueries--; key = STR_NEW((const char*)resp->v.v0.key, resp->v.v0.nkey); cb_strip_key_prefix(bucket, key); if (error != LCB_KEY_ENOENT || !ctx->quiet) { exc = cb_check_error(error, "failed to unlock value", key); if (exc != Qnil) { rb_ivar_set(exc, cb_id_iv_operation, cb_sym_unlock); ctx->exception = exc; } } if (bucket->async) { /* asynchronous */ if (ctx->proc != Qnil) { res = rb_class_new_instance(0, NULL, cb_cResult); rb_ivar_set(res, cb_id_iv_error, exc); rb_ivar_set(res, cb_id_iv_operation, cb_sym_unlock); rb_ivar_set(res, cb_id_iv_key, key); cb_proc_call(bucket, ctx->proc, 1, res); } } else { /* synchronous */ rb_hash_aset(ctx->rv, key, (error == LCB_SUCCESS) ? Qtrue : Qfalse); } if (ctx->nqueries == 0) { ctx->proc = Qnil; if (bucket->async) { cb_context_free(ctx); } } (void)handle; }
/* ARGSUSED */
/*
 * inomap_build - construct the in-core inode map describing which inodes
 * will be included in this dump.
 *
 * Runs in three phases: (1) build the initial dump list via bulkstat or
 * subtree walk, (2) prune unchanged directories that have no children
 * needing a dump, (3) compute per-stream starting points. Progress is
 * exported through the inomap_stat* pointers. Returns BOOL_TRUE on
 * success, BOOL_FALSE on any iterator failure or preemption; the
 * callback context and bulkstat buffer are released on every exit path.
 *
 * NOTE(review): statcnt is unused (covered by ARGSUSED). The memalign()
 * result is only checked by ASSERT, so an allocation failure would go
 * unnoticed in non-debug builds — presumably acceptable here, but worth
 * confirming.
 */
bool_t
inomap_build( jdm_fshandle_t *fshandlep,
	intgen_t fsfd,
	xfs_bstat_t *rootstatp,
	bool_t last,
	time32_t lasttime,
	bool_t resume,
	time32_t resumetime,
	size_t resumerangecnt,
	drange_t *resumerangep,
	char *subtreebuf[],
	ix_t subtreecnt,
	startpt_t *startptp,
	size_t startptcnt,
	ix_t *statphasep,
	ix_t *statpassp,
	size64_t statcnt,
	size64_t *statdonep )
{
	xfs_bstat_t *bstatbufp;
	size_t bstatbuflen;
	bool_t pruneneeded = BOOL_FALSE;
	intgen_t igrpcnt = 0;
	intgen_t stat;
	intgen_t rval;

	/* do a sync so that bulkstat will pick up inode changes
	 * that are currently in the inode cache. this is necessary
	 * for incremental dumps in order to have the dump time
	 * accurately reflect what inodes were included in this dump.
	 * (PV 881455)
	 */
	sync();

	/* copy stat ptrs */
	inomap_statphasep = statphasep;
	inomap_statpassp = statpassp;
	inomap_statdonep = statdonep;

	/* allocate a bulkstat buf */
	bstatbuflen = BSTATBUFLEN;
	bstatbufp = ( xfs_bstat_t * )memalign( pgsz,
					       bstatbuflen * sizeof( xfs_bstat_t ));
	ASSERT( bstatbufp );

	/* count the number of inode groups, which will serve as a
	 * starting point for the size of the inomap.
	 */
	rval = inogrp_iter( fsfd, cb_count_inogrp, (void *)&igrpcnt, &stat );
	if ( rval || stat ) {
		free( ( void * )bstatbufp );
		return BOOL_FALSE;
	}

	/* initialize the callback context */
	rval = cb_context( last,
			   lasttime,
			   resume,
			   resumetime,
			   resumerangecnt,
			   resumerangep,
			   startptp,
			   startptcnt,
			   igrpcnt,
			   &pruneneeded );
	if ( rval ) {
		free( ( void * )bstatbufp );
		return BOOL_FALSE;
	}

	/* the inode map requires that inodes are added in increasing
	 * ino order. in the case of a subtree dump, inodes would be
	 * added in whatever order they were discovered when walking the
	 * subtrees. so pre-populate the inomap with all the inode groups
	 * in this filesystem. each inode will be marked unused until its
	 * correct state is set in cb_add.
	 */
	rval = inogrp_iter( fsfd, cb_add_inogrp, NULL, &stat );
	if ( rval || stat ) {
		cb_context_free();
		free( ( void * )bstatbufp );
		return BOOL_FALSE;
	}

	/* construct the ino map, based on the last dump time, resumed
	 * dump info, and subtree list. place all unchanged directories
	 * in the "needed for children" state (MAP_DIR_SUPPRT). these will be
	 * dumped even though they have not changed. a later pass will move
	 * some of these to "not dumped", such that only those necessary
	 * to represent the minimal tree containing only changes will remain.
	 * for subtree dumps, recurse over the specified subtrees calling
	 * the inomap constructor (cb_add). otherwise, if dumping the entire
	 * filesystem, use the bigstat iterator to add inos to the inomap.
	 * set a flag if any ino not put in a dump state. This will be used
	 * to decide if any pruning can be done.
	 */
	mlog( MLOG_VERBOSE | MLOG_INOMAP, _(
	      "ino map phase 1: "
	      "constructing initial dump list\n") );

	*inomap_statdonep = 0;
	*inomap_statphasep = 1;
	stat = 0;
	cb_accuminit_sz( );

	if ( subtreecnt ) {
		rval = subtreelist_parse( fshandlep,
					  fsfd,
					  rootstatp,
					  subtreebuf,
					  subtreecnt );
	} else {
		rval = bigstat_iter( fshandlep,
				     fsfd,
				     BIGSTAT_ITER_ALL,
				     ( xfs_ino_t )0,
				     cb_add,
				     NULL,
				     NULL,
				     NULL,
				     &stat,
				     preemptchk,
				     bstatbufp,
				     bstatbuflen );
	}
	*inomap_statphasep = 0;
	if ( rval || preemptchk( PREEMPT_FULL )) {
		cb_context_free();
		free( ( void * )bstatbufp );
		return BOOL_FALSE;
	}

	if ( inomap_exclude_filesize > 0 ) {
		mlog( MLOG_NOTE | MLOG_VERBOSE, _(
		      "pruned %llu files: maximum size exceeded\n"),
		      inomap_exclude_filesize );
	}
	if ( inomap_exclude_skipattr > 0 ) {
		mlog( MLOG_NOTE | MLOG_VERBOSE, _(
		      "pruned %llu files: skip attribute set\n"),
		      inomap_exclude_skipattr );
	}

	/* prune directories unchanged since the last dump and containing
	 * no children needing dumping.
	 */
	if ( pruneneeded ) {
		bool_t rootdump = BOOL_FALSE;

		mlog( MLOG_VERBOSE | MLOG_INOMAP, _(
		      "ino map phase 2: "
		      "pruning unneeded subtrees\n") );
		*inomap_statdonep = 0;
		*inomap_statpassp = 0;
		*inomap_statphasep = 2;
		(void) supprt_prune( &rootdump,
				     fshandlep,
				     fsfd,
				     rootstatp,
				     NULL );
		*inomap_statphasep = 0;

		if ( preemptchk( PREEMPT_FULL )) {
			cb_context_free();
			free( ( void * )bstatbufp );
			return BOOL_FALSE;
		}

	} else {
		mlog( MLOG_VERBOSE | MLOG_INOMAP, _(
		      "ino map phase 2: "
		      "skipping (no pruning necessary)\n") );
	}

	/* initialize the callback context for startpoint calculation */
	cb_spinit( );

	/* identify dump stream startpoints */
	if ( startptcnt > 1 ) {
		mlog( MLOG_VERBOSE | MLOG_INOMAP, _(
		      "ino map phase 3: "
		      "identifying stream starting points\n") );
	} else {
		mlog( MLOG_VERBOSE | MLOG_INOMAP, _(
		      "ino map phase 3: "
		      "skipping (only one dump stream)\n") );
	}
	stat = 0;
	*inomap_statdonep = 0;
	*inomap_statphasep = 3;
	rval = bigstat_iter( fshandlep,
			     fsfd,
			     BIGSTAT_ITER_NONDIR,
			     ( xfs_ino_t )0,
			     cb_startpt,
			     NULL,
			     inomap_next_nondir,
			     inomap_alloc_context(),
			     &stat,
			     preemptchk,
			     bstatbufp,
			     bstatbuflen );
	*inomap_statphasep = 0;

	if ( rval ) {
		cb_context_free();
		free( ( void * )bstatbufp );
		return BOOL_FALSE;
	}

	if ( startptcnt > 1 ) {
		ix_t startptix;

		/* log the [start, end) interval assigned to each stream;
		 * the last stream's interval is open-ended */
		for ( startptix = 0 ; startptix < startptcnt ; startptix++ ) {
			startpt_t *p;
			startpt_t *ep;

			p = &startptp[ startptix ];
			if ( startptix == startptcnt - 1 ) {
				ep = 0;
			} else {
				ep = &startptp[ startptix + 1 ];
			}
			ASSERT( ! p->sp_flags );

			mlog( MLOG_VERBOSE | MLOG_INOMAP,
			      _("stream %u: ino %llu offset %lld to "),
			      startptix,
			      p->sp_ino,
			      p->sp_offset );
			if ( ! ep ) {
				mlog( MLOG_VERBOSE | MLOG_BARE | MLOG_INOMAP,
				      _("end\n") );
			} else {
				mlog( MLOG_VERBOSE | MLOG_BARE | MLOG_INOMAP,
				      _("ino %llu offset %lld\n"),
				      ep->sp_ino,
				      ep->sp_offset );
			}
		}
	}

	cb_context_free();
	free( ( void * )bstatbufp );
	mlog( MLOG_VERBOSE | MLOG_INOMAP, _(
	      "ino map construction complete\n") );
	return BOOL_TRUE;
}
/*
 * Obtain an object stored in Couchbase by given key.
 *
 * @since 1.0.0
 *
 * @see http://couchbase.com/docs/couchbase-manual-2.0/couchbase-architecture-apis-memcached-protocol-additions.html#couchbase-architecture-apis-memcached-protocol-additions-getl
 *
 * @overload get(*keys, options = {})
 *   @param keys [String, Symbol, Array] One or several keys to fetch
 *   @param options [Hash] Options for operation.
 *   @option options [true, false] :extended (false) If set to +true+, the
 *     operation will return a tuple +[value, flags, cas]+, otherwise (by
 *     default) it returns just the value.
 *   @option options [Fixnum] :ttl (self.default_ttl) Expiry time for key.
 *     Values larger than 30*24*60*60 seconds (30 days) are interpreted as
 *     absolute times (from the epoch).
 *   @option options [true, false] :quiet (self.quiet) If set to +true+, the
 *     operation won't raise an error for a missing key, it will return +nil+.
 *     Otherwise it will raise an error in synchronous mode. In asynchronous
 *     mode this option is ignored.
 *   @option options [Symbol] :format (nil) Explicitly choose the decoder
 *     for this key (+:plain+, +:document+, +:marshal+). See
 *     {Bucket#default_format}.
 *   @option options [Fixnum, Boolean] :lock Lock the keys for a time span.
 *     If this parameter is +true+ the key(s) will be locked for the default
 *     timeout. You can also use a number to set up your own timeout in
 *     seconds. If it is lower than zero or exceeds the maximum, the
 *     server will use the default value. You can determine the actual default
 *     and maximum values by calling {Bucket#stats} without arguments and
 *     inspecting the keys "ep_getl_default_timeout" and "ep_getl_max_timeout"
 *     correspondingly. See the overloaded hash syntax to specify a custom
 *     timeout per each key.
 *   @option options [true, false] :assemble_hash (false) Assemble a Hash for
 *     results. The hash is assembled automatically if the +:extended+ option
 *     is true or in case of "get and touch" with multiple keys.
* @option options [true, false, :all, :first, Fixnum] :replica * (false) Read key from replica node. Options +:ttl+ and +:lock+ * are not compatible with +:replica+. Value +true+ is a synonym to * +:first+, which means sequentially iterate over all replicas * and return first successful response, skipping all failures. * It is also possible to query all replicas in parallel using * the +:all+ option, or pass a replica index, starting from zero. * * @yieldparam ret [Result] the result of operation in asynchronous mode * (valid attributes: +error+, +operation+, +key+, +value+, +flags+, * +cas+). * * @return [Object, Array, Hash] the value(s) (or tuples in extended mode) * associated with the key. * * @raise [Couchbase::Error::NotFound] if the key is missing in the * bucket. * * @raise [Couchbase::Error::Connect] if connection closed (see {Bucket#reconnect}) * * @raise [ArgumentError] when passing the block in synchronous mode * * @example Get single value in quiet mode (the default) * c.get("foo") #=> the associated value or nil * * @example Use alternative hash-like syntax * c["foo"] #=> the associated value or nil * * @example Get single value in verbose mode * c.get("missing-foo", :quiet => false) #=> raises Couchbase::NotFound * c.get("missing-foo", :quiet => true) #=> returns nil * * @example Get and touch single value. 
The key won't be accessible after 10 seconds
 *     c.get("foo", :ttl => 10)
 *
 *   @example Extended get
 *     val, flags, cas = c.get("foo", :extended => true)
 *
 *   @example Get multiple keys
 *     c.get("foo", "bar", "baz") #=> [val1, val2, val3]
 *
 *   @example Get multiple keys with assembling result into the Hash
 *     c.get("foo", "bar", "baz", :assemble_hash => true)
 *     #=> {"foo" => val1, "bar" => val2, "baz" => val3}
 *
 *   @example Extended get multiple keys
 *     c.get("foo", "bar", :extended => true)
 *     #=> {"foo" => [val1, flags1, cas1], "bar" => [val2, flags2, cas2]}
 *
 *   @example Asynchronous get
 *     c.run do
 *       c.get("foo", "bar", "baz") do |ret|
 *         ret.operation #=> :get
 *         ret.success?  #=> true
 *         ret.key       #=> "foo", "bar" or "baz" in separate calls
 *         ret.value
 *         ret.flags
 *         ret.cas
 *       end
 *     end
 *
 *   @example Get and lock key using default timeout
 *     c.get("foo", :lock => true)
 *
 *   @example Determine lock timeout parameters
 *     c.stats.values_at("ep_getl_default_timeout", "ep_getl_max_timeout")
 *     #=> [{"127.0.0.1:11210"=>"15"}, {"127.0.0.1:11210"=>"30"}]
 *
 *   @example Get and lock key using custom timeout
 *     c.get("foo", :lock => 3)
 *
 *   @example Get and lock multiple keys using custom timeout
 *     c.get("foo", "bar", :lock => 3)
 *
 * @overload get(keys, options = {})
 *   When the method receives a hash map, it behaves as if it received a list
 *   of keys (+keys.keys+), but it also touches each key, setting the expiry
 *   time to the corresponding value. Unlike the usual get, this command
 *   always returns a hash map +{key => value}+ or
 *   +{key => [value, flags, cas]}+.
 *
 *   @param keys [Hash] Map key-ttl
 *   @param options [Hash] Options for operation. (see options definition
 *     above)
 *
 *   @return [Hash] the values (or tuples in extended mode) associated with
 *     the keys.
* * @example Get and touch multiple keys * c.get("foo" => 10, "bar" => 20) #=> {"foo" => val1, "bar" => val2} * * @example Extended get and touch multiple keys * c.get({"foo" => 10, "bar" => 20}, :extended => true) * #=> {"foo" => [val1, flags1, cas1], "bar" => [val2, flags2, cas2]} * * @example Get and lock multiple keys for chosen period in seconds * c.get("foo" => 10, "bar" => 20, :lock => true) * #=> {"foo" => val1, "bar" => val2} */ VALUE cb_bucket_get(int argc, VALUE *argv, VALUE self) { struct cb_bucket_st *bucket = DATA_PTR(self); struct cb_context_st *ctx; VALUE rv, proc, exc; size_t ii; lcb_error_t err = LCB_SUCCESS; struct cb_params_st params; if (!cb_bucket_connected_bang(bucket, cb_sym_get)) { return Qnil; } memset(¶ms, 0, sizeof(struct cb_params_st)); rb_scan_args(argc, argv, "0*&", ¶ms.args, &proc); if (!bucket->async && proc != Qnil) { rb_raise(rb_eArgError, "synchronous mode doesn't support callbacks"); } params.type = cb_cmd_get; params.bucket = bucket; params.cmd.get.keys_ary = rb_ary_new(); cb_params_build(¶ms); ctx = cb_context_alloc_common(bucket, proc, params.cmd.get.num); ctx->extended = params.cmd.get.extended; ctx->quiet = params.cmd.get.quiet; ctx->transcoder = params.cmd.get.transcoder; ctx->transcoder_opts = params.cmd.get.transcoder_opts; if (RTEST(params.cmd.get.replica)) { if (params.cmd.get.replica == cb_sym_all) { ctx->nqueries = lcb_get_num_replicas(bucket->handle); ctx->all_replicas = 1; } err = lcb_get_replica(bucket->handle, (const void *)ctx, params.cmd.get.num, params.cmd.get.ptr_gr); } else { err = lcb_get(bucket->handle, (const void *)ctx, params.cmd.get.num, params.cmd.get.ptr); } cb_params_destroy(¶ms); exc = cb_check_error(err, "failed to schedule get request", Qnil); if (exc != Qnil) { cb_context_free(ctx); rb_exc_raise(exc); } bucket->nbytes += params.npayload; if (bucket->async) { cb_maybe_do_loop(bucket); return Qnil; } else { if (ctx->nqueries > 0) { /* we have some operations pending */ lcb_wait(bucket->handle); } 
exc = ctx->exception; rv = ctx->rv; cb_context_free(ctx); if (exc != Qnil) { rb_exc_raise(exc); } exc = bucket->exception; if (exc != Qnil) { bucket->exception = Qnil; rb_exc_raise(exc); } if (params.cmd.get.gat || params.cmd.get.assemble_hash || (params.cmd.get.extended && (params.cmd.get.num > 1 || params.cmd.get.array))) { return rv; /* return as a hash {key => [value, flags, cas], ...} */ } if (params.cmd.get.num > 1 || params.cmd.get.array) { VALUE keys, ret; ret = rb_ary_new(); /* make sure ret is guarded so not invisible in a register * when stack scanning */ RB_GC_GUARD(ret); keys = params.cmd.get.keys_ary; for (ii = 0; ii < params.cmd.get.num; ++ii) { rb_ary_push(ret, rb_hash_aref(rv, rb_ary_entry(keys, ii))); } return ret; /* return as an array [value1, value2, ...] */ } else { VALUE vv = Qnil; rb_hash_foreach(rv, cb_first_value_i, (VALUE)&vv); return vv; } } }
/*
 * libcouchbase callback invoked once per key of a scheduled get (or
 * get-replica) operation.
 *
 * Decodes the raw bytes according to the stored format flags, converts a
 * library error into a Ruby exception (missing keys are tolerated in
 * quiet mode), then either yields a Result to the user callback
 * (asynchronous mode) or accumulates the value into ctx->rv
 * (synchronous mode). The context is freed after the last pending query
 * in asynchronous mode.
 */
void cb_get_callback(lcb_t handle, const void *cookie, lcb_error_t error, const lcb_get_resp_t *resp)
{
    struct cb_context_st *ctx = (struct cb_context_st *)cookie;
    struct cb_bucket_st *bucket = ctx->bucket;
    VALUE key, val, flags, cas, exc = Qnil, res, raw;

    ctx->nqueries--;
    key = STR_NEW((const char*)resp->v.v0.key, resp->v.v0.nkey);
    cb_strip_key_prefix(bucket, key);

    /* quiet mode treats a missing key as "no result", not an error */
    if (error != LCB_KEY_ENOENT || !ctx->quiet) {
        exc = cb_check_error(error, "failed to get value", key);
        if (exc != Qnil) {
            rb_ivar_set(exc, cb_id_iv_operation, cb_sym_get);
            ctx->exception = exc;
        }
    }

    flags = ULONG2NUM(resp->v.v0.flags);
    cas = ULL2NUM(resp->v.v0.cas);
    raw = STR_NEW((const char*)resp->v.v0.bytes, resp->v.v0.nbytes);
    /* the transcoder signals decoding failure by returning the exception
     * object itself instead of the decoded value */
    val = cb_decode_value(ctx->transcoder, raw, resp->v.v0.flags, ctx->transcoder_opts);
    if (rb_obj_is_kind_of(val, rb_eStandardError)) {
        VALUE exc_str = rb_funcall(val, cb_id_to_s, 0);
        VALUE msg = rb_funcall(rb_mKernel, cb_id_sprintf, 3, rb_str_new2("unable to convert value for key \"%s\": %s"), key, exc_str);
        ctx->exception = rb_exc_new3(cb_eValueFormatError, msg);
        rb_ivar_set(ctx->exception, cb_id_iv_operation, cb_sym_get);
        rb_ivar_set(ctx->exception, cb_id_iv_key, key);
        rb_ivar_set(ctx->exception, cb_id_iv_inner_exception, val);
        val = Qnil;
    }
    if (bucket->async) { /* asynchronous */
        if (ctx->proc != Qnil) {
            res = rb_class_new_instance(0, NULL, cb_cResult);
            rb_ivar_set(res, cb_id_iv_error, exc);
            rb_ivar_set(res, cb_id_iv_operation, cb_sym_get);
            rb_ivar_set(res, cb_id_iv_key, key);
            rb_ivar_set(res, cb_id_iv_value, val);
            rb_ivar_set(res, cb_id_iv_flags, flags);
            rb_ivar_set(res, cb_id_iv_cas, cas);
            cb_proc_call(bucket, ctx->proc, 1, res);
        }
    } else { /* synchronous */
        if (NIL_P(exc) && error != LCB_KEY_ENOENT) {
            if (ctx->extended) {
                val = rb_ary_new3(3, val, flags, cas);
            }
            if (ctx->all_replicas) {
                /* :replica => :all collects one value per replica */
                VALUE ary = rb_hash_aref(ctx->rv, key);
                if (NIL_P(ary)) {
                    ary = rb_ary_new();
                    rb_hash_aset(ctx->rv, key, ary);
                }
                rb_ary_push(ary, val);
            } else {
                rb_hash_aset(ctx->rv, key, val);
            }
        }
    }
    if (ctx->nqueries == 0) {
        ctx->proc = Qnil;
        if (bucket->async) {
            cb_context_free(ctx);
        }
    }
    (void)handle;
}
/*
 * Common implementation for all store operations
 * (set/add/replace/append/prepend): parses arguments, schedules
 * lcb_store() and, in synchronous mode, waits for the result and
 * optionally runs the observe-and-wait durability check afterwards.
 *
 * Returns Qnil in asynchronous mode; otherwise a hash {key => cas} for
 * multi-key calls or the bare CAS for a single key. Raises ArgumentError
 * when a block is passed in synchronous mode and re-raises any exception
 * produced by scheduling or by the operation callbacks.
 */
static inline VALUE
cb_bucket_store(lcb_storage_t cmd, int argc, VALUE *argv, VALUE self)
{
    struct cb_bucket_st *bucket = DATA_PTR(self);
    struct cb_context_st *ctx;
    VALUE rv, proc, exc, obs = Qnil;
    lcb_error_t err;
    struct cb_params_st params;

    if (!cb_bucket_connected_bang(bucket, storage_opcode_to_sym(cmd))) {
        return Qnil;
    }

    /* was garbled as "¶ms" (mojibake for "&params") */
    memset(&params, 0, sizeof(struct cb_params_st));
    rb_scan_args(argc, argv, "0*&", &params.args, &proc);
    if (!bucket->async && proc != Qnil) {
        rb_raise(rb_eArgError, "synchronous mode doesn't support callbacks");
    }
    params.type = cb_cmd_store;
    params.bucket = bucket;
    params.cmd.store.operation = cmd;
    cb_params_build(&params);
    obs = params.cmd.store.observe;
    ctx = cb_context_alloc(bucket);
    if (!bucket->async) {
        ctx->rv = rb_hash_new();
        ctx->observe_options = obs;
    }
    ctx->proc = proc;
    ctx->nqueries = params.cmd.store.num;
    err = lcb_store(bucket->handle, (const void *)ctx, params.cmd.store.num, params.cmd.store.ptr);
    cb_params_destroy(&params);
    exc = cb_check_error(err, "failed to schedule set request", Qnil);
    if (exc != Qnil) {
        cb_context_free(ctx);
        rb_exc_raise(exc);
    }
    bucket->nbytes += params.npayload;
    if (bucket->async) {
        cb_maybe_do_loop(bucket);
        return Qnil;
    } else {
        if (ctx->nqueries > 0) {
            /* we have some operations pending */
            lcb_wait(bucket->handle);
        }
        exc = ctx->exception;
        rv = ctx->rv;
        cb_context_free(ctx);
        if (exc != Qnil) {
            rb_exc_raise(exc);
        }
        exc = bucket->exception;
        if (exc != Qnil) {
            bucket->exception = Qnil;
            rb_exc_raise(exc);
        }
        if (RTEST(obs)) {
            /* durability options present: hand the collected CAS values
             * over to Bucket#observe_and_wait */
            rv = rb_funcall(bucket->self, cb_id_observe_and_wait, 2, rv, obs);
        }
        if (params.cmd.store.num > 1) {
            return rv; /* return as a hash {key => cas, ...} */
        } else {
            VALUE vv = Qnil;
            rb_hash_foreach(rv, cb_first_value_i, (VALUE)&vv);
            return vv;
        }
    }
}
/*
 * libcouchbase callback invoked for each OBSERVE response packet.
 *
 * Each packet with a key becomes a Result carrying CAS, master/replica
 * origin, persistence/replication timing hints and a status symbol; the
 * final packet of the sequence carries a NULL key and (in asynchronous
 * mode) is reported as a completed Result before the context is freed.
 */
void cb_observe_callback(lcb_t handle, const void *cookie, lcb_error_t error, const lcb_observe_resp_t *resp)
{
    struct cb_context_st *ctx = (struct cb_context_st *)cookie;
    struct cb_bucket_st *bucket = ctx->bucket;
    VALUE key, res, exc;

    if (resp->v.v0.key) {
        key = STR_NEW((const char*)resp->v.v0.key, resp->v.v0.nkey);
        exc = cb_check_error(error, "failed to execute observe request", key);
        if (exc != Qnil) {
            ctx->exception = exc;
        }
        res = rb_class_new_instance(0, NULL, cb_cResult);
        rb_ivar_set(res, cb_id_iv_completed, Qfalse);
        rb_ivar_set(res, cb_id_iv_error, ctx->exception);
        rb_ivar_set(res, cb_id_iv_operation, cb_sym_observe);
        rb_ivar_set(res, cb_id_iv_key, key);
        rb_ivar_set(res, cb_id_iv_cas, ULL2NUM(resp->v.v0.cas));
        rb_ivar_set(res, cb_id_iv_from_master, resp->v.v0.from_master ? Qtrue : Qfalse);
        rb_ivar_set(res, cb_id_iv_time_to_persist, ULONG2NUM(resp->v.v0.ttp));
        rb_ivar_set(res, cb_id_iv_time_to_replicate, ULONG2NUM(resp->v.v0.ttr));
        /* map libcouchbase observe status to a Ruby symbol */
        switch (resp->v.v0.status) {
        case LCB_OBSERVE_FOUND:
            rb_ivar_set(res, cb_id_iv_status, cb_sym_found);
            break;
        case LCB_OBSERVE_PERSISTED:
            rb_ivar_set(res, cb_id_iv_status, cb_sym_persisted);
            break;
        case LCB_OBSERVE_NOT_FOUND:
            rb_ivar_set(res, cb_id_iv_status, cb_sym_not_found);
            break;
        default:
            rb_ivar_set(res, cb_id_iv_status, Qnil);
        }
        if (bucket->async) { /* asynchronous */
            if (ctx->proc != Qnil) {
                cb_proc_call(bucket, ctx->proc, 1, res);
            }
        } else { /* synchronous */
            if (NIL_P(ctx->exception)) {
                /* accumulate per-key arrays of Result objects */
                VALUE stats = rb_hash_aref(ctx->rv, key);
                if (NIL_P(stats)) {
                    stats = rb_ary_new();
                    rb_hash_aset(ctx->rv, key, stats);
                }
                rb_ary_push(stats, res);
            }
        }
    } else {
        /* NULL key == terminator packet: the whole sequence is done */
        if (bucket->async && ctx->proc != Qnil) {
            res = rb_class_new_instance(0, NULL, cb_cResult);
            rb_ivar_set(res, cb_id_iv_completed, Qtrue);
            cb_proc_call(bucket, ctx->proc, 1, res);
        }
        ctx->nqueries--;
        ctx->proc = Qnil;
        if (bucket->async) {
            cb_context_free(ctx);
        }
    }
    (void)handle;
}