/**
 * Copy @size bytes out of the receive ring into @data and advance the
 * consumer index.  The caller must already have verified that at least
 * @size bytes are available; no bounds check is done here.
 *
 * Returns @size on success, or -1 if notifying the peer failed.
 */
static int do_recv(struct libvchan *ctrl, void *data, size_t size)
{
	/* Ring size is a power of two, so masking yields the byte offset. */
	int real_idx = rd_cons(ctrl) & (rd_ring_size(ctrl) - 1);
	/* Bytes available before the copy would wrap past the ring's end. */
	int avail_contig = rd_ring_size(ctrl) - real_idx;
	if (avail_contig > size)
		avail_contig = size;
	barrier(); // data read must happen after rd_cons read
	memcpy(data, rd_ring(ctrl) + real_idx, avail_contig);
	if (avail_contig < size) {
		// we rolled across the end of the ring
		memcpy(data + avail_contig, rd_ring(ctrl), size - avail_contig);
	}
	/*
	 * Publish the consumption.  NOTE(review): the sibling do_recv in this
	 * file issues its full barrier *before* this update so the data reads
	 * cannot be reordered past it; here the barrier comes after — confirm
	 * that barrier() placement is sufficient on the targeted platforms.
	 */
	rd_cons(ctrl) += size;
	if (VCHAN_DEBUG) {
		/*
		 * Debug tracing trick: writev() on fd -1 always fails (EBADF)
		 * and writes nothing, but the iovec contents still show up in
		 * an strace of the process — a zero-cost trace channel.
		 */
		char metainfo[32];
		struct iovec iov[2];
		iov[0].iov_base = metainfo;
		iov[0].iov_len = snprintf(metainfo, 32, "vchan rd %d/%d", ctrl->other_domain_id, ctrl->device_number);
		iov[1].iov_base = data;
		iov[1].iov_len = size;
		writev(-1, iov, 2);
	}
	barrier(); // consumption must happen prior to notify of newly freed space
	if (do_notify(ctrl) < 0)
		return -1;
	return size;
}
/**
 * Get the amount of data available to read and enable notifications if
 * the request cannot yet be satisfied.
 *
 * @param ctrl     the vchan connection
 * @param request  number of bytes the caller intends to consume
 * @return bytes currently readable (may still be below @request)
 *
 * NOTE(review): @ready is a signed int compared against the unsigned
 * size_t @request; the comparison promotes @ready to unsigned, which is
 * harmless only while prod - cons stays non-negative and small — confirm.
 */
static inline int fast_get_data_ready(struct libxenvchan *ctrl, size_t request)
{
	int ready = rd_prod(ctrl) - rd_cons(ctrl);
	if (ready >= request)
		return ready;
	/* We plan to consume all data; please tell us if you send more */
	request_notify(ctrl, VCHAN_NOTIFY_WRITE);
	/*
	 * If the writer moved rd_prod after our read but before request, we
	 * will not get notified even though the actual amount of data ready is
	 * above request. Reread rd_prod to cover this case.
	 */
	return rd_prod(ctrl) - rd_cons(ctrl);
}
/**
 * Public query: how many bytes are ready to read on @ctrl.
 *
 * Unlike the internal raw query, this arms a VCHAN_NOTIFY_WRITE
 * notification first, so the caller (who may be sitting in an event
 * loop) is woken when the amount changes.
 */
int libxenvchan_data_ready(struct libxenvchan *ctrl)
{
	/* Since this value is being used outside libxenvchan, request notification
	 * when it changes */
	request_notify(ctrl, VCHAN_NOTIFY_WRITE);
	return rd_prod(ctrl) - rd_cons(ctrl);
}
/*
 * Report how much data is ready to read, without touching the
 * notification state in any way.
 */
static inline int raw_get_data_ready(struct libxenvchan *ctrl)
{
	uint32_t avail = rd_prod(ctrl) - rd_cons(ctrl);

	/*
	 * A peer that advances the producer index beyond the ring capacity
	 * has corrupted the ring.  There is no error channel here, and
	 * wedging the ring (by reporting nothing readable) is safer than
	 * any alternative.
	 */
	return (avail < rd_ring_size(ctrl)) ? (int)avail : 0;
}
/*
 * Client-side recovery stub for lock_component_alloc(): invokes the lock
 * component to allocate a server-side lock, then records the mapping from
 * a freshly allocated client-side lock id to the server id so the lock can
 * be re-created after a server fault.  On fault, updates the fault state
 * and retries the invocation (goto redo).
 *
 * Returns the client-side lock id handed back to the caller.
 */
CSTUB_FN(unsigned long, lock_component_alloc) (struct usr_inv_cap *uc,
					       spdid_t spdid)
{
	long fault;
	unsigned long ret;
	struct rec_data_lk *rd = NULL;
	/* NOTE(review): ser_lkid is declared but never used here — confirm
	 * it can be dropped. */
	unsigned long ser_lkid, cli_lkid;

	/* One-time initialization of the client-side lock id map. */
	if (first == 0) {
		cos_map_init_static(&uniq_lkids);
		first = 1;
	}
#ifdef BENCHMARK_MEAS_CREATION_TIME
	rdtscll(meas_start);
#endif
redo:
#ifdef BENCHMARK_MEAS_ALLOC
	/* Measure the cost of a retry after a fault (flag set below). */
	rdtscll(meas_end);
	if (test_flag) {
		test_flag = 0;
		printc("recovery a lock cost: %llu\n", meas_end - meas_start);
	}
#endif
	/* Invoke the server; 1 word argument (spdid). */
	CSTUB_INVOKE(ret, fault, uc, 1, spdid);
	if (unlikely (fault)){
#ifdef BENCHMARK_MEAS_ALLOC
		test_flag = 1;
		rdtscll(meas_start);
#endif
		CSTUB_FAULT_UPDATE();
		goto redo;
	}
	assert(ret > 0);

	/* Allocate a client-side id and record the client->server mapping.
	 * NOTE(review): rd_cons here is the record constructor for
	 * rec_data_lk, not the ring-consumer macro used elsewhere. */
	cli_lkid = rdlk_alloc();
	assert(cli_lkid >= 1);
	rd = rdlk_lookup(cli_lkid);
	assert(rd);
	rd_cons(rd, cos_spd_id(), cli_lkid, ret, LOCK_ALLOC);
	/* Hand the client-side id back, hiding the server id. */
	ret = cli_lkid;

#ifdef BENCHMARK_MEAS_CREATION_TIME
	rdtscll(meas_end);
	printc("creating a lock costs %llu\n", meas_end - meas_start);
#endif
	return ret;
}
/**
 * Copy @size bytes out of the receive ring into @data, advance rd_cons,
 * and notify the writer of the newly freed space.  The caller must have
 * already confirmed that @size bytes are available.
 *
 * Returns @size on success, or -1 if the notification could not be sent.
 */
static int do_recv(struct libxenvchan *ctrl, void *data, size_t size)
{
	/* Ring size is a power of two; mask to get the byte offset. */
	int real_idx = rd_cons(ctrl) & (rd_ring_size(ctrl) - 1);
	/* Contiguous bytes before the copy would wrap around the ring. */
	int avail_contig = rd_ring_size(ctrl) - real_idx;
	if (avail_contig > size)
		avail_contig = size;
	xen_rmb(); /* data read must happen /after/ rd_cons read */
	memcpy(data, rd_ring(ctrl) + real_idx, avail_contig);
	if (avail_contig < size) {
		// we rolled across the end of the ring
		memcpy(data + avail_contig, rd_ring(ctrl), size - avail_contig);
	}
	/* Full barrier: the data reads above must complete before the
	 * writer can observe the consumer index moving forward. */
	xen_mb(); /* consume /then/ notify */
	rd_cons(ctrl) += size;
	if (send_notify(ctrl, VCHAN_NOTIFY_READ))
		return -1;
	return size;
}
/*
 * Client-side recovery stub for tsplit(): marshals the split request
 * (parent tid, flags, event id and the path/param string) into a cbuf,
 * invokes the torrent server, and on success records a client-side tid ->
 * server tid mapping (plus a saved copy of the param) so the torrent can
 * be re-split after a server fault.  On fault, refreshes fault state and
 * retries from scratch (goto redo), re-resolving the parent tid in case
 * it was itself remapped by recovery.
 *
 * Returns the new client-side tid (>= 2), or -1 if the marshaling buffer
 * could not be allocated.
 */
CSTUB_FN(td_t, tsplit)(struct usr_inv_cap *uc,
		       spdid_t spdid, td_t parent_tid, char * param,
		       int len, tor_flags_t tflags, long evtid)
{
	long fault = 0;
	td_t ret;
	struct __sg_tsplit_data *d = NULL;
	cbuf_t cb = 0;
	struct rec_data_tor *rd = NULL;
	int sz = len + sizeof(struct __sg_tsplit_data);
	td_t curr_ptid = 0;
	td_t cli_tid = 0;
	td_t ser_tid = 0;

	/* Caller contract: valid parent, NUL-terminated param of length len. */
	assert(parent_tid >= 1);
	assert(param && len >= 0);
	assert(param[len] == '\0');

	/* One-time initialization of the client-side tid map. */
	if (first == 0) {
		cos_map_init_static(&uniq_tids);
		first = 1;
	}
redo:
	/* Resolve the parent through the recovery map: after a fault the
	 * server-side tid backing a client tid may have changed. */
	rd = rd_update(parent_tid, STATE_TSPLIT_PARENT);
	if (rd) {
		curr_ptid = rd->s_tid;
	} else {
		curr_ptid = parent_tid;
	}

	d = cbuf_alloc(sz, &cb);
	/* Fix: was "assert(d); if (!d) return -1;" — the assert aborted in
	 * debug builds before the graceful failure path could ever run. */
	if (unlikely(!d)) return -1;
	d->parent_tid = curr_ptid;
	d->tflags     = tflags;
	d->evtid      = evtid;
	d->len[0]     = 0;
	d->len[1]     = len;
	/* NOTE(review): len + 1 bytes are copied (including the NUL) but sz
	 * reserves only len bytes past the header unless the struct already
	 * accounts for the terminator — confirm against __sg_tsplit_data. */
	memcpy(&d->data[0], param, len + 1);

	/* Invoke the server; 3 word arguments (spdid, cbuf id, size). */
	CSTUB_INVOKE(ret, fault, uc, 3, spdid, cb, sz);
	if (unlikely(fault)) {
		CSTUB_FAULT_UPDATE();
		/* Scrub and release the marshaling buffer before retrying. */
		memset(&d->data[0], 0, len);
		cbuf_free(cb);
		printc("tsplit found a fault and ready to go to redo\n");
		goto redo;
	}
	cbuf_free(cb);

	ser_tid = ret;
	assert(ser_tid >= 1);

	/* Save the param so the split can be replayed during recovery. */
	char *l_param = "";
	if (len > 0) {
		l_param = param_save(param, len);
		assert(l_param);
	}

	/* Allocate the client-side tid and record the mapping.
	 * NOTE(review): rd_cons here is the record constructor for
	 * rec_data_tor, not the ring-consumer macro used elsewhere. */
	cli_tid = map_rd_create();
	assert(cli_tid >= 2);
	rd = map_rd_lookup(cli_tid);
	assert(rd);
	rd_cons(rd, curr_ptid, cli_tid, ser_tid, l_param, len, tflags, evtid);

	return cli_tid;
}
/**
 * Public query: how many bytes are ready to read on @ctrl.
 *
 * Producer index minus consumer index = unread bytes.  Unlike
 * libxenvchan_data_ready() in this file, no notification is armed; the
 * caller will not be woken when the amount changes.
 */
int libvchan_data_ready(struct libvchan *ctrl)
{
	return rd_prod(ctrl) - rd_cons(ctrl);
}