/**
 * Finalize an OBSERVE multi-command context: flush the per-pipeline request
 * buffers accumulated in @c ctx->requests as one aggregated OBSERVE packet
 * per pipeline, then arm the context's response bookkeeping.
 *
 * @param mctx   the multi-command context (embedded inside an OBSERVECTX)
 * @param cookie user cookie propagated to the eventual callback
 * @return LCB_EINVAL if nothing was scheduled (context is freed here),
 *         LCB_SUCCESS otherwise.
 *
 * NOTE(review): ctx->requests appears to be indexed in lockstep with
 * cq->pipelines (one buffer per pipeline) — presumably nrequests never
 * exceeds the pipeline count; confirm against the ctxadd path.
 */
static lcb_error_t obs_ctxdone(lcb_MULTICMD_CTX *mctx, const void *cookie) {
    unsigned ii;
    OBSERVECTX *ctx = CTX_FROM_MULTI(mctx);
    mc_CMDQUEUE *cq = &ctx->instance->cmdq;

    for (ii = 0; ii < ctx->nrequests; ii++) {
        protocol_binary_request_header hdr;
        mc_PACKET *pkt;
        mc_PIPELINE *pipeline;
        lcb_string *rr = ctx->requests + ii;
        pipeline = cq->pipelines[ii];

        /* No keys were accumulated for this pipeline; skip it */
        if (!rr->nused) {
            continue;
        }

        pkt = mcreq_allocate_packet(pipeline);
        lcb_assert(pkt);
        mcreq_reserve_header(pipeline, pkt, MCREQ_PKT_BASESIZE);
        mcreq_reserve_value2(pipeline, pkt, rr->nused);

        /* OBSERVE carries all keys in the body: keylen/extlen stay 0 and
         * bodylen is the size of the accumulated key blob */
        hdr.request.magic = PROTOCOL_BINARY_REQ;
        hdr.request.opcode = PROTOCOL_BINARY_CMD_OBSERVE;
        hdr.request.datatype = PROTOCOL_BINARY_RAW_BYTES;
        hdr.request.keylen = 0;
        hdr.request.cas = 0;
        hdr.request.vbucket = 0;
        hdr.request.extlen = 0;
        hdr.request.opaque = pkt->opaque;
        hdr.request.bodylen = htonl((lcb_uint32_t)rr->nused);

        /* Copy the header and the accumulated key blob into the packet */
        memcpy(SPAN_BUFFER(&pkt->kh_span), hdr.bytes, sizeof(hdr.bytes));
        memcpy(SPAN_BUFFER(&pkt->u_value.single), rr->base, rr->nused);

        /* Route responses through the extended request data (the context
         * itself); every scheduled packet aliases ctx via exdata */
        pkt->flags |= MCREQ_F_REQEXT;
        pkt->u_rdata.exdata = (mc_REQDATAEX *)ctx;
        mcreq_sched_add(pipeline, pkt);
        TRACE_OBSERVE_BEGIN(&hdr, SPAN_BUFFER(&pkt->u_value.single));
    }

    /* The staging buffers were copied into packets above; release them */
    destroy_requests(ctx);
    ctx->base.start = gethrtime();
    ctx->base.cookie = cookie;
    ctx->base.procs = &obs_procs;

    if (ctx->nrequests == 0 || ctx->remaining == 0) {
        /* Nothing to wait for — no keys were ever added. Safe to free the
         * context here since no packet references it in this case.
         * NOTE(review): presumably remaining==0 implies no packet was
         * scheduled above — confirm remaining is incremented at ctxadd. */
        free(ctx);
        return LCB_EINVAL;
    } else {
        MAYBE_SCHEDLEAVE(ctx->instance);
        return LCB_SUCCESS;
    }
}
LIBCOUCHBASE_API
lcb_error_t lcb_rget3(lcb_t instance, const void *cookie, const lcb_CMDGETREPLICA *cmd)
{
    /**
     * Schedule a replica-read. Because we need to direct these commands to
     * specific servers, we can't just use the 'basic_packet()' function;
     * one packet is built and scheduled per target replica.
     *
     * Strategy semantics (from the branches below):
     *   LCB_REPLICA_SELECT - exactly the replica at cmd->index
     *   LCB_REPLICA_ALL    - replicas [0, nreplicas)
     *   otherwise ("first") - replica 0 only
     *
     * @return LCB_EMPTY_KEY, LCB_NO_MATCHING_SERVER, LCB_CLIENT_ENOMEM,
     *         or LCB_SUCCESS once all packets are scheduled.
     */
    mc_CMDQUEUE *cq = &instance->cmdq;
    const void *hk;
    lcb_size_t nhk;
    int vbid;
    protocol_binary_request_header req;
    unsigned r0, r1;
    rget_cookie *rck = NULL;

    if (LCB_KEYBUF_IS_EMPTY(&cmd->key)) {
        return LCB_EMPTY_KEY;
    }

    mcreq_extract_hashkey(&cmd->key, &cmd->hashkey, MCREQ_PKT_BASESIZE, &hk, &nhk);
    vbid = lcbvb_k2vb(cq->config, hk, nhk);

    /* Compute the inclusive replica index range [r0, r1] */
    if (cmd->strategy == LCB_REPLICA_SELECT) {
        r0 = r1 = cmd->index;
    } else if (cmd->strategy == LCB_REPLICA_ALL) {
        r0 = 0;
        r1 = instance->nreplicas;
    } else {
        /* first */
        r0 = r1 = 0;
    }
    if (r1 < r0 || r1 >= cq->npipelines) {
        return LCB_NO_MATCHING_SERVER;
    }

    /* Initialize the cookie, shared by every packet scheduled below */
    rck = calloc(1, sizeof(*rck));
    if (rck == NULL) {
        /* FIX: the calloc() result was previously dereferenced unchecked */
        return LCB_CLIENT_ENOMEM;
    }
    rck->base.cookie = cookie;
    rck->base.start = gethrtime();
    rck->base.callback = rget_callback;
    rck->strategy = cmd->strategy;
    rck->r_cur = r0;
    rck->r_max = instance->nreplicas;
    rck->instance = instance;
    rck->vbucket = vbid;

    /* Initialize the packet header template (opaque is set per-packet) */
    req.request.magic = PROTOCOL_BINARY_REQ;
    req.request.opcode = PROTOCOL_BINARY_CMD_GET_REPLICA;
    req.request.datatype = PROTOCOL_BINARY_RAW_BYTES;
    req.request.vbucket = htons((lcb_uint16_t)vbid);
    req.request.cas = 0;
    req.request.extlen = 0;
    req.request.keylen = htons((lcb_uint16_t)cmd->key.contig.nbytes);
    req.request.bodylen = htonl((lcb_uint32_t)cmd->key.contig.nbytes);

    do {
        int curix;
        mc_PIPELINE *pl;
        mc_PACKET *pkt;

        curix = lcbvb_vbreplica(cq->config, vbid, r0);
        if (curix == -1) {
            /* FIX: don't leak the cookie when no packet references it yet.
             * Once remaining > 0 a scheduled packet aliases &rck->base and
             * the cookie must NOT be freed here. */
            if (rck->remaining == 0) {
                free(rck);
            }
            return LCB_NO_MATCHING_SERVER;
        }
        pl = cq->pipelines[curix];
        pkt = mcreq_allocate_packet(pl);
        if (!pkt) {
            /* FIX: same leak-avoidance rule as above */
            if (rck->remaining == 0) {
                free(rck);
            }
            return LCB_CLIENT_ENOMEM;
        }

        /* Route the response through the shared extended request data */
        pkt->u_rdata.exdata = &rck->base;
        pkt->flags |= MCREQ_F_REQEXT;

        mcreq_reserve_key(pl, pkt, sizeof(req.bytes), &cmd->key);
        req.request.opaque = pkt->opaque;
        rck->remaining++;
        mcreq_write_hdr(pkt, &req);
        mcreq_sched_add(pl, pkt);
    } while (++r0 < r1);
    return LCB_SUCCESS;
}
/**
 * Parse one raw memcached request out of the caller's IOV set (@c info) and
 * schedule it on the appropriate pipeline, either copying the bytes into a
 * fresh packet or borrowing the caller's buffers (zero-copy).
 *
 * @param cq      command queue (supplies the vbucket config and pipelines)
 * @param info    cursor over the caller's IOVs; wanted/consumed/total updated
 * @param pkt_p   [out] the scheduled packet
 * @param pl_p    [in/out] with MC_FWD_OPT_NOMAP this is the input pipeline;
 *                always receives the chosen pipeline on success
 * @param options MC_FWD_OPT_NOMAP (skip key->server mapping) and/or
 *                MC_FWD_OPT_COPY (copy instead of borrowing IOVs)
 * @return LCB_INCOMPLETE_PACKET (info->wanted says how many bytes are
 *         needed), LCB_NO_MATCHING_SERVER, LCB_EINVAL, LCB_CLIENT_ENOMEM,
 *         or LCB_SUCCESS.
 */
lcb_STATUS mc_forward_packet(mc_CMDQUEUE *cq, mc_IOVINFO *info, mc_PACKET **pkt_p, mc_PIPELINE **pl_p, int options)
{
    /* stack based header with our modifications. this is copied into the
     * packet's actual header */
    protocol_binary_request_header hdr;
    int vbid, srvix;
    mc_IOVCURSOR *mincur = &info->c;
    unsigned n_packet;     /* total packet size */
    unsigned n_header;     /* extras + key + memcached header */
    unsigned n_body_total; /* size of everything following the memcached header */
    unsigned n_body_key;   /* length of the key */
    unsigned n_body_value; /* packetsize - hdrsize */
    unsigned offset;

    /* stack buffer and key pointer. stack buffer is used if the key is not
     * contiguous */
    char kbuf_s[256];
    const char *kptr;

    /* pipeline and packet for command */
    mc_PIPELINE *pl;
    mc_PACKET *pkt;

    info->wanted = 0;

    /* not enough bytes for even the fixed 24-byte memcached header */
    if (info->total < 24) {
        info->wanted = 24;
        return LCB_INCOMPLETE_PACKET;
    }
    /* peek (don't consume) the header so we can size the rest */
    iovcursor_peek(mincur, (char *)hdr.bytes, sizeof hdr.bytes, 0);

    /* Initialize our size variables */
    n_body_total = ntohl(hdr.request.bodylen);
    n_body_key = ntohs(hdr.request.keylen);
    n_header = sizeof hdr.bytes + n_body_key + hdr.request.extlen;
    n_packet = n_body_total + sizeof hdr.bytes;
    n_body_value = n_packet - n_header;

    if (n_packet > info->total) {
        info->wanted = n_packet;
        return LCB_INCOMPLETE_PACKET;
    }

    info->total -= n_packet;

    /* seek ahead to read the item's key into the header */
    offset = sizeof hdr.bytes + hdr.request.extlen;
    iovcursor_peek_ex(mincur, kbuf_s, &kptr, n_body_key, offset);
    if (kptr == NULL) {
        /* key is not contiguous? that's ok. use the static buffer */
        kptr = kbuf_s;
    }

    if ((options & MC_FWD_OPT_NOMAP) == 0) {
        /* map the key to its vbucket/server and patch the header's vbucket */
        lcbvb_map_key(cq->config, kptr, n_body_key, &vbid, &srvix);
        if (srvix < 0 || (unsigned)srvix >= cq->npipelines) {
            return LCB_NO_MATCHING_SERVER;
        }
        pl = cq->pipelines[srvix];
        hdr.request.vbucket = htons(vbid);
    } else {
        /* caller supplied the pipeline; trust its vbucket field as-is */
        pl = *pl_p;
        if (!pl) {
            return LCB_EINVAL;
        }
        srvix = pl->index;
    }

    pkt = mcreq_allocate_packet(pl);
    if (pkt == NULL) {
        return LCB_CLIENT_ENOMEM;
    }

    hdr.request.opaque = pkt->opaque;
    pkt->extlen = hdr.request.extlen;
    info->consumed = n_packet;

    if (options & MC_FWD_OPT_COPY) {
        /* reserve bytes for the entire packet and copy everything in */
        mcreq_reserve_header(pl, pkt, n_header);
        iovcursor_adv_copy(mincur, SPAN_BUFFER(&pkt->kh_span), n_header);
        if (n_body_value) {
            mcreq_reserve_value2(pl, pkt, n_body_value);
            iovcursor_adv_copy(mincur, SPAN_BUFFER(&pkt->u_value.single), n_body_value);
            pkt->flags |= MCREQ_F_HASVALUE;
        }
    } else {
        /* zero-copy mode: borrow the caller's buffers where contiguous */
        if (IOVCURSOR_HAS_CONTIG(mincur, n_header)) {
            span_from_first(mincur, n_header, &pkt->kh_span);
            pkt->flags |= MCREQ_F_KEY_NOCOPY;
        } else {
            /* header is fragmented into multiple IOVs; must copy it */
            mcreq_reserve_header(pl, pkt, n_header);
            iovcursor_adv_copy(mincur, SPAN_BUFFER(&pkt->kh_span), n_header);
        }

        /* do we have a value payload still? */
        if (n_body_value) {
            pkt->flags |= MCREQ_F_HASVALUE | MCREQ_F_VALUE_NOCOPY;
            if (IOVCURSOR_HAS_CONTIG(mincur, n_body_value)) {
                span_from_first(mincur, n_body_value, &pkt->u_value.single);
            } else {
                /* body is fragmented; reference it as an IOV array */
                iovcursor_adv_iovalloc(mincur, n_body_value, (nb_IOV **)&pkt->u_value.multi.iov, &pkt->u_value.multi.niov);
                pkt->u_value.multi.total_length = n_body_value;
                pkt->flags |= MCREQ_F_VALUE_IOV;
            }
        }
    }

    /* Copy the first 24 bytes (with patched vbucket/opaque) into the header
     * span. NOTE(review): in the KEY_NOCOPY path this writes into the
     * caller-owned buffer — presumably intentional for forwarding; confirm. */
    memcpy(SPAN_BUFFER(&pkt->kh_span), hdr.bytes, sizeof hdr.bytes);

    *pkt_p = pkt;
    *pl_p = pl;

    /* Set the UFWD flag. This causes the rest of the system to invoke the
     * handler for the raw response, rather than the "Contiguous" structures */
    pkt->flags |= MCREQ_F_UFWD;
    mcreq_sched_add(pl, pkt);
    return LCB_SUCCESS;
}
LIBCOUCHBASE_API
lcb_error_t lcb_rget3(lcb_t instance, const void *cookie, const lcb_CMDGETREPLICA *cmd)
{
    /**
     * Schedule a replica-read. Because we need to direct these commands to
     * specific servers, we can't just use the 'basic_packet()' function;
     * one packet is built and scheduled per target replica.
     *
     * Strategy semantics (from the branches below):
     *   LCB_REPLICA_SELECT - exactly the replica at cmd->index
     *   LCB_REPLICA_ALL    - every replica (all must be online)
     *   otherwise ("first") - the first replica that is online
     *
     * @return LCB_EMPTY_KEY, LCB_CLIENT_ETMPFAIL (no config yet),
     *         LCB_NO_MATCHING_SERVER, LCB_CLIENT_ENOMEM, or LCB_SUCCESS.
     */
    mc_CMDQUEUE *cq = &instance->cmdq;
    int vbid, ixtmp;
    protocol_binary_request_header req;
    unsigned r0, r1 = 0;
    rget_cookie *rck = NULL;

    if (LCB_KEYBUF_IS_EMPTY(&cmd->key)) {
        return LCB_EMPTY_KEY;
    }
    if (!cq->config) {
        return LCB_CLIENT_ETMPFAIL;
    }
    if (!LCBT_NREPLICAS(instance)) {
        return LCB_NO_MATCHING_SERVER;
    }

    mcreq_map_key(cq, &cmd->key, &cmd->_hashkey, MCREQ_PKT_BASESIZE, &vbid, &ixtmp);

    /* The following blocks will also validate that the entire index range is
     * valid. This is in order to ensure that we don't allocate the cookie
     * if there aren't enough replicas online to satisfy the requirements */
    if (cmd->strategy == LCB_REPLICA_SELECT) {
        r0 = r1 = cmd->index;
        if ((ixtmp = lcbvb_vbreplica(cq->config, vbid, r0)) < 0) {
            return LCB_NO_MATCHING_SERVER;
        }
    } else if (cmd->strategy == LCB_REPLICA_ALL) {
        unsigned ii;
        r0 = 0;
        r1 = LCBT_NREPLICAS(instance);
        /* Make sure they're all online */
        for (ii = 0; ii < LCBT_NREPLICAS(instance); ii++) {
            if ((ixtmp = lcbvb_vbreplica(cq->config, vbid, ii)) < 0) {
                return LCB_NO_MATCHING_SERVER;
            }
        }
    } else {
        /* "first": seek to the first replica that is actually online */
        for (r0 = 0; r0 < LCBT_NREPLICAS(instance); r0++) {
            if ((ixtmp = lcbvb_vbreplica(cq->config, vbid, r0)) > -1) {
                r1 = r0;
                break;
            }
        }
        if (r0 == LCBT_NREPLICAS(instance)) {
            return LCB_NO_MATCHING_SERVER;
        }
    }

    if (r1 < r0 || r1 >= cq->npipelines) {
        return LCB_NO_MATCHING_SERVER;
    }

    /* Initialize the cookie, shared by every packet scheduled below */
    rck = calloc(1, sizeof(*rck));
    if (rck == NULL) {
        /* FIX: the calloc() result was previously dereferenced unchecked */
        return LCB_CLIENT_ENOMEM;
    }
    rck->base.cookie = cookie;
    rck->base.start = gethrtime();
    rck->base.procs = &rget_procs;
    rck->strategy = cmd->strategy;
    rck->r_cur = r0;
    rck->r_max = LCBT_NREPLICAS(instance);
    rck->instance = instance;
    rck->vbucket = vbid;

    /* Initialize the packet header template (opaque is set per-packet) */
    req.request.magic = PROTOCOL_BINARY_REQ;
    req.request.opcode = PROTOCOL_BINARY_CMD_GET_REPLICA;
    req.request.datatype = PROTOCOL_BINARY_RAW_BYTES;
    req.request.vbucket = htons((lcb_uint16_t)vbid);
    req.request.cas = 0;
    req.request.extlen = 0;
    req.request.keylen = htons((lcb_uint16_t)cmd->key.contig.nbytes);
    req.request.bodylen = htonl((lcb_uint32_t)cmd->key.contig.nbytes);

    do {
        int curix;
        mc_PIPELINE *pl;
        mc_PACKET *pkt;

        curix = lcbvb_vbreplica(cq->config, vbid, r0);
        /* XXX: this is always expected to be in range. For the FIRST mode
         * it will seek to the first valid index (checked above), and for the
         * ALL mode, it will fail if not all replicas are already online
         * (also checked above) */
        pl = cq->pipelines[curix];
        pkt = mcreq_allocate_packet(pl);
        if (!pkt) {
            /* FIX: don't leak the cookie when no packet references it yet.
             * Once remaining > 0 a scheduled packet aliases &rck->base and
             * the cookie must NOT be freed here. */
            if (rck->remaining == 0) {
                free(rck);
            }
            return LCB_CLIENT_ENOMEM;
        }

        /* Route the response through the shared extended request data */
        pkt->u_rdata.exdata = &rck->base;
        pkt->flags |= MCREQ_F_REQEXT;

        mcreq_reserve_key(pl, pkt, sizeof(req.bytes), &cmd->key);
        req.request.opaque = pkt->opaque;
        rck->remaining++;
        mcreq_write_hdr(pkt, &req);
        mcreq_sched_add(pl, pkt);
    } while (++r0 < r1);
    return LCB_SUCCESS;
}