// A read transaction timed out - notify the originator with AS_ERR_TIMEOUT,
// unless the dup-res callback already claimed this rw_request.
void
read_timeout_cb(rw_request* rw)
{
	// A null 'from.any' means the dup-res callback won the race - nothing to do.
	if (rw->from.any == NULL) {
		return;
	}

	as_namespace* ns = rw->rsv.ns;

	switch (rw->origin) {
	case FROM_CLIENT:
		as_msg_send_reply(rw->from.proto_fd_h, AS_ERR_TIMEOUT, 0, 0, NULL,
				NULL, 0, ns, rw_request_trid(rw));
		// Timeouts aren't included in histograms.
		client_read_update_stats(ns, AS_ERR_TIMEOUT);
		break;
	case FROM_PROXY:
		if (! rw_request_is_batch_sub(rw)) {
			from_proxy_read_update_stats(ns, AS_ERR_TIMEOUT);
		}
		else {
			from_proxy_batch_sub_read_update_stats(ns, AS_ERR_TIMEOUT);
		}
		break;
	case FROM_BATCH:
		as_batch_add_error(rw->from.batch_shared, rw->from_data.batch_index,
				AS_ERR_TIMEOUT);
		// Timeouts aren't included in histograms.
		batch_sub_read_update_stats(ns, AS_ERR_TIMEOUT);
		break;
	default:
		cf_crash(AS_RW, "unexpected transaction origin %u", rw->origin);
		break;
	}

	// Tell the other callback it lost the race.
	rw->from.any = NULL;
}
// Process one queue's batch requests. void* batch_process_queue(void* q_to_wait_on) { cf_queue* worker_queue = (cf_queue*)q_to_wait_on; batch_transaction btr; uint64_t start; while (1) { if (cf_queue_pop(worker_queue, &btr, CF_QUEUE_FOREVER) != 0) { cf_crash(AS_BATCH, "Failed to pop from batch worker queue."); } // Check for timeouts. if (btr.end_time != 0 && cf_getns() > btr.end_time) { cf_atomic_int_incr(&g_config.batch_timeout); if (btr.fd_h) { as_msg_send_reply(btr.fd_h, AS_PROTO_RESULT_FAIL_TIMEOUT, 0, 0, 0, 0, 0, 0, 0, btr.trid, NULL); btr.fd_h = 0; } batch_transaction_done(&btr); continue; } // Process batch request. start = cf_getns(); batch_process_request(&btr); histogram_insert_data_point(g_config.batch_q_process_hist, start); } return 0; }
// Report an error for this transaction back to its originator - direct client
// (or batch parent), proxy, or internal UDF - releasing the relevant handle.
void
as_transaction_error(as_transaction* tr, uint32_t error_code)
{
	if (tr->proto_fd_h) {
		if (tr->batch_shared) {
			as_batch_add_error(tr->batch_shared, tr->batch_index, error_code);
			// Clear this transaction's msgp so calling code does not free it.
			tr->msgp = 0;
			return;
		}

		as_msg_send_reply(tr->proto_fd_h, error_code, 0, 0, NULL, NULL, 0,
				NULL, NULL, as_transaction_trid(tr), NULL);
		tr->proto_fd_h = 0;

		MICROBENCHMARK_HIST_INSERT_P(error_hist);
		cf_atomic_int_incr(&g_config.err_tsvc_requests);

		if (error_code == AS_PROTO_RESULT_FAIL_TIMEOUT) {
			cf_atomic_int_incr(&g_config.err_tsvc_requests_timeout);
		}

		return;
	}

	if (tr->proxy_msg) {
		as_proxy_send_response(tr->proxy_node, tr->proxy_msg, error_code, 0, 0,
				NULL, NULL, 0, NULL, as_transaction_trid(tr), NULL);
		tr->proxy_msg = NULL;
		return;
	}

	if (tr->udata.req_udata && udf_rw_needcomplete(tr)) {
		udf_rw_complete(tr, error_code, __FILE__, __LINE__);
	}
}
// Send a successful read's response to the transaction's originator, either
// as pre-packed ops (db) or as bins, then update per-origin stats.
void
send_read_response(as_transaction* tr, as_msg_op** ops, as_bin** response_bins,
		uint16_t n_bins, cf_dyn_buf* db)
{
	// Paranoia - shouldn't get here on losing race with timeout.
	if (tr->from.any == NULL) {
		cf_warning(AS_RW, "transaction origin %u has null 'from'", tr->origin);
		return;
	}

	// Note - if tr was setup from rw, rw->from.any has been set null and
	// informs timeout it lost the race.

	switch (tr->origin) {
	case FROM_CLIENT:
		BENCHMARK_NEXT_DATA_POINT(tr, read, local);
		if (db != NULL && db->used_sz != 0) {
			as_msg_send_ops_reply(tr->from.proto_fd_h, db);
		}
		else {
			as_msg_send_reply(tr->from.proto_fd_h, tr->result_code,
					tr->generation, tr->void_time, ops, response_bins, n_bins,
					tr->rsv.ns, as_transaction_trid(tr));
		}
		BENCHMARK_NEXT_DATA_POINT(tr, read, response);
		HIST_TRACK_ACTIVATE_INSERT_DATA_POINT(tr, read_hist);
		client_read_update_stats(tr->rsv.ns, tr->result_code);
		break;
	case FROM_PROXY:
		if (db != NULL && db->used_sz != 0) {
			as_proxy_send_ops_response(tr->from.proxy_node,
					tr->from_data.proxy_tid, db);
		}
		else {
			as_proxy_send_response(tr->from.proxy_node, tr->from_data.proxy_tid,
					tr->result_code, tr->generation, tr->void_time, ops,
					response_bins, n_bins, tr->rsv.ns, as_transaction_trid(tr));
		}
		if (! as_transaction_is_batch_sub(tr)) {
			from_proxy_read_update_stats(tr->rsv.ns, tr->result_code);
		}
		else {
			from_proxy_batch_sub_read_update_stats(tr->rsv.ns,
					tr->result_code);
		}
		break;
	case FROM_BATCH:
		BENCHMARK_NEXT_DATA_POINT(tr, batch_sub, read_local);
		as_batch_add_result(tr, n_bins, response_bins, ops);
		BENCHMARK_NEXT_DATA_POINT(tr, batch_sub, response);
		batch_sub_read_update_stats(tr->rsv.ns, tr->result_code);
		break;
	default:
		cf_crash(AS_RW, "unexpected transaction origin %u", tr->origin);
		break;
	}

	tr->from.any = NULL; // pattern, not needed
}
// Reply with an error during demarshal, then release the socket handle and
// free the message - this transaction never enters the service queue.
void
as_transaction_demarshal_error(as_transaction* tr, uint32_t error_code)
{
	as_msg_send_reply(tr->proto_fd_h, error_code, 0, 0, NULL, NULL, 0, NULL,
			NULL, 0, NULL);
	tr->proto_fd_h = NULL;

	cf_free(tr->msgp);
	tr->msgp = NULL;
}
// TODO - deprecate this when swap is moved out into thr_demarshal.c! void as_transaction_error_unswapped(as_transaction* tr, uint32_t error_code) { if (tr->batch_shared) { as_batch_add_error(tr->batch_shared, tr->batch_index, error_code); // Clear this transaction's msgp so calling code does not free it. tr->msgp = 0; } else { as_msg_send_reply(tr->proto_fd_h, error_code, 0, 0, NULL, NULL, 0, NULL, NULL, 0, NULL); tr->proto_fd_h = 0; MICROBENCHMARK_HIST_INSERT_P(error_hist); cf_atomic_int_incr(&g_config.err_tsvc_requests); if (error_code == AS_PROTO_RESULT_FAIL_TIMEOUT) { cf_atomic_int_incr(&g_config.err_tsvc_requests_timeout); } } }
// Process one queue's batch requests. static void batch_worker(void* udata) { batch_transaction* btr = (batch_transaction*)udata; // Check for timeouts. if (btr->end_time != 0 && cf_getns() > btr->end_time) { cf_atomic_int_incr(&g_config.batch_timeout); if (btr->fd_h) { as_msg_send_reply(btr->fd_h, AS_PROTO_RESULT_FAIL_TIMEOUT, 0, 0, 0, 0, 0, 0, 0, btr->trid, NULL); btr->fd_h = 0; } batch_transaction_done(btr, false); return; } // Process batch request. uint64_t start = cf_getns(); batch_process_request(btr); histogram_insert_data_point(g_config.batch_q_process_hist, start); }
// Convenience wrapper - send a bare error reply (no generation, no ops, no
// bins) on the given file handle. Returns as_msg_send_reply()'s result.
int
as_msg_send_error(as_file_handle *fd_h, uint32_t result_code)
{
	return as_msg_send_reply(fd_h, result_code, 0, 0, NULL, NULL, 0, NULL,
			NULL, 0, NULL);
}