/*
 * OCaml stub: read up to [ml_len] bytes of pending xenstore requests from
 * the shared ring into [ml_buffer].  Returns the number of bytes copied
 * (0 if no data is pending), raises Xb.Reconnect if the peer is not in the
 * CONNECTED state, and fails with "bad connection" on a corrupt index pair.
 *
 * This variant can use the entire ring: a read that crosses the end of the
 * ring is split into two memcpys.
 */
CAMLprim value ml_interface_read(value ml_interface, value ml_buffer, value ml_len)
{
    CAMLparam3(ml_interface, ml_buffer, ml_len);
    CAMLlocal1(ml_result);
    struct mmap_interface *interface = GET_C_STRUCT(ml_interface);
    /* NOTE(review): the buffer is written below, so with OCaml safe strings
     * this would want Bytes_val rather than String_val — confirm against the
     * OCaml version this builds with. */
    char *buffer = String_val(ml_buffer);
    int len = Int_val(ml_len);
    int result;
    struct xenstore_domain_interface *intf = interface->addr;
    XENSTORE_RING_IDX cons, prod; /* offsets only */
    int total_data, data;
    uint32_t connection;

    /* Snapshot the shared indexes once, through volatile loads, so the
     * untrusted peer cannot change them under us mid-computation. */
    cons = *(volatile uint32_t*)&intf->req_cons;
    prod = *(volatile uint32_t*)&intf->req_prod;
    connection = *(volatile uint32_t*)&intf->connection;

    if (connection != XENSTORE_CONNECTED)
        caml_raise_constant(*caml_named_value("Xb.Reconnect"));

    xen_mb();  /* index reads complete before the ring contents are read */

    /* Validate the snapshot: a consumer more than a ring behind the
     * producer can only mean a corrupt (or malicious) peer. */
    if ((prod - cons) > XENSTORE_RING_SIZE)
        caml_failwith("bad connection");

    /* Check for any pending data at all. */
    total_data = prod - cons;
    if (total_data == 0) {
        /* No pending data at all. */
        result = 0;
        goto exit;
    } else if (total_data < len)
        /* Some data - make a partial read. */
        len = total_data;

    /* Check whether data crosses the end of the ring. */
    data = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(cons);
    if (len < data)
        /* Data within the remaining part of the ring. */
        memcpy(buffer, intf->req + MASK_XENSTORE_IDX(cons), len);
    else {
        /* Data crosses the ring boundary. Read both halves. */
        memcpy(buffer, intf->req + MASK_XENSTORE_IDX(cons), data);
        memcpy(buffer + data, intf->req, len - data);
    }

    xen_mb();  /* finish reading the payload before freeing the slots */
    intf->req_cons += len;
    result = len;
exit:
    ml_result = Val_int(result);
    CAMLreturn(ml_result);
}
/*
 * OCaml stub: write up to [ml_len] bytes of [ml_buffer] into the xenstore
 * response ring.  Returns the number of bytes actually written (0 when the
 * ring is full), raises Xb.Reconnect if the peer is not CONNECTED, and
 * fails with "bad connection" on a corrupt index pair.
 *
 * This variant can use the entire ring: a write that wraps past the end of
 * the ring is split into two memcpys.
 */
CAMLprim value ml_interface_write(value ml_interface, value ml_buffer, value ml_len)
{
    CAMLparam3(ml_interface, ml_buffer, ml_len);
    CAMLlocal1(ml_result);
    struct mmap_interface *interface = GET_C_STRUCT(ml_interface);
    char *buffer = String_val(ml_buffer);
    int len = Int_val(ml_len);
    int result;
    struct xenstore_domain_interface *intf = interface->addr;
    XENSTORE_RING_IDX cons, prod;
    int total_space, space;
    uint32_t connection;

    /* Snapshot the shared indexes once, through volatile loads, so the
     * untrusted peer cannot change them under us mid-computation. */
    cons = *(volatile uint32_t*)&intf->rsp_cons;
    prod = *(volatile uint32_t*)&intf->rsp_prod;
    connection = *(volatile uint32_t*)&intf->connection;

    if (connection != XENSTORE_CONNECTED)
        caml_raise_constant(*caml_named_value("Xb.Reconnect"));

    xen_mb();  /* index reads complete before any ring access */

    /* A producer more than a full ring ahead of the consumer can only
     * mean a corrupt (or malicious) peer. */
    if ((prod - cons) > XENSTORE_RING_SIZE)
        caml_failwith("bad connection");

    /* Check for space to write the full message. */
    total_space = XENSTORE_RING_SIZE - (prod - cons);
    if (total_space == 0) {
        /* No space at all - exit having done nothing. */
        result = 0;
        goto exit;
    } else if (total_space < len)
        /* Some space - make a partial write. */
        len = total_space;

    /* Check for space until the ring wraps. */
    space = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(prod);
    if (len < space)
        /* Message fits inside the remaining part of the ring. */
        memcpy(intf->rsp + MASK_XENSTORE_IDX(prod), buffer, len);
    else {
        /* Message wraps around the end of the ring. Write both halves. */
        memcpy(intf->rsp + MASK_XENSTORE_IDX(prod), buffer, space);
        memcpy(intf->rsp, buffer + space, len - space);
    }

    xen_mb();  /* payload visible before the producer index moves */
    intf->rsp_prod += len;
    result = len;
exit:
    ml_result = Val_int(result);
    CAMLreturn(ml_result);
}
/*
 * OCaml stub: read up to [ml_len] bytes of pending xenstore requests from
 * the shared ring into [ml_buffer].  Returns the number of bytes copied
 * (0 if none pending), raises Xb.Reconnect if the peer is not CONNECTED.
 *
 * This variant only copies the contiguous chunk up to the end of the ring;
 * data that wraps is left for a subsequent call (the caller is presumably
 * expected to loop — confirm against the OCaml side).
 */
CAMLprim value ml_interface_read(value ml_interface, value ml_buffer, value ml_len)
{
    CAMLparam3(ml_interface, ml_buffer, ml_len);
    CAMLlocal1(ml_result);
    struct mmap_interface *interface = GET_C_STRUCT(ml_interface);
    /* NOTE(review): buffer is written below; with OCaml safe strings this
     * would want Bytes_val — confirm the target OCaml version. */
    char *buffer = String_val(ml_buffer);
    int len = Int_val(ml_len);
    int result;
    struct xenstore_domain_interface *intf = interface->addr;
    XENSTORE_RING_IDX cons, prod; /* offsets only */
    int to_read;
    uint32_t connection;

    /* Snapshot the shared indexes once, through volatile loads. */
    cons = *(volatile uint32_t*)&intf->req_cons;
    prod = *(volatile uint32_t*)&intf->req_prod;
    connection = *(volatile uint32_t*)&intf->connection;

    if (connection != XENSTORE_CONNECTED)
        caml_raise_constant(*caml_named_value("Xb.Reconnect"));

    xen_mb();  /* index reads complete before the ring contents are read */

    /* Reject a corrupt index pair from an untrusted peer. */
    if ((prod - cons) > XENSTORE_RING_SIZE)
        caml_failwith("bad connection");

    if (prod == cons) {
        result = 0;
        goto exit;
    }

    /* Work on masked offsets from here on. */
    cons = MASK_XENSTORE_IDX(cons);
    prod = MASK_XENSTORE_IDX(prod);
    if (prod > cons)
        to_read = prod - cons;
    else
        /* Data wraps: only take the chunk up to the end of the ring. */
        to_read = XENSTORE_RING_SIZE - cons;
    if (to_read < len)
        len = to_read;

    memcpy(buffer, intf->req + cons, len);

    xen_mb();  /* finish reading the payload before freeing the slots */
    intf->req_cons += len;
    result = len;
exit:
    ml_result = Val_int(result);
    CAMLreturn(ml_result);
}
/*
 * OCaml stub: store the given request-consumer value into the shared
 * ring's req_event field.  The full barrier makes the store globally
 * visible before the caller goes on to re-examine any ring indexes.
 */
CAMLprim value caml_sring_set_req_event(value v_sring, value v_req_cons)
{
    struct sring *ring = SRING_VAL(v_sring);

    ring->req_event = Int_val(v_req_cons);
    xen_mb();

    return Val_unit;
}
/*
 * OCaml stub: write up to [ml_len] bytes of [ml_buffer] into the xenstore
 * response ring.  Returns the number of bytes actually written (0 when
 * full), raises Xb.Reconnect if the peer is not CONNECTED.
 *
 * This variant only writes the contiguous chunk up to the end of the ring;
 * a wrapping write is left for a subsequent call.
 */
CAMLprim value ml_interface_write(value ml_interface, value ml_buffer, value ml_len)
{
    CAMLparam3(ml_interface, ml_buffer, ml_len);
    CAMLlocal1(ml_result);
    struct mmap_interface *interface = GET_C_STRUCT(ml_interface);
    char *buffer = String_val(ml_buffer);
    int len = Int_val(ml_len);
    int result;
    struct xenstore_domain_interface *intf = interface->addr;
    XENSTORE_RING_IDX cons, prod;
    int can_write;
    uint32_t connection;

    /* Snapshot the shared indexes once, through volatile loads. */
    cons = *(volatile uint32_t*)&intf->rsp_cons;
    prod = *(volatile uint32_t*)&intf->rsp_prod;
    connection = *(volatile uint32_t*)&intf->connection;

    if (connection != XENSTORE_CONNECTED)
        caml_raise_constant(*caml_named_value("Xb.Reconnect"));

    xen_mb();  /* index reads complete before any ring access */

    /* Ring full (or corrupt indexes): nothing we can write. */
    if ( (prod - cons) >= XENSTORE_RING_SIZE ) {
        result = 0;
        goto exit;
    }
    if (MASK_XENSTORE_IDX(prod) >= MASK_XENSTORE_IDX(cons))
        /* Free space runs to the end of the ring. */
        can_write = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(prod);
    else
        /* Free space is the gap between producer and consumer. */
        can_write = MASK_XENSTORE_IDX(cons) - MASK_XENSTORE_IDX(prod);
    if (can_write < len)
        len = can_write;

    memcpy(intf->rsp + MASK_XENSTORE_IDX(prod), buffer, len);

    xen_mb();  /* payload visible before the producer index moves */
    intf->rsp_prod += len;
    result = len;
exit:
    ml_result = Val_int(result);
    CAMLreturn(ml_result);
}
/*
 * Drain the console's shared output ring into the local growable buffer,
 * advance the guest-visible consumer index, and notify the guest.  If the
 * buffer exceeds its configured maximum, the overflow is discarded from
 * the middle so both the oldest and the newest data survive.
 */
static void buffer_append(struct XenConsole *con)
{
    struct buffer *buffer = &con->buffer;
    XENCONS_RING_IDX cons, prod, size;
    struct xencons_interface *intf = con->sring;

    cons = intf->out_cons;
    prod = intf->out_prod;
    xen_mb();  /* index reads complete before the ring contents are read */

    /* size > sizeof(intf->out) means the guest corrupted the indexes:
     * ignore the ring rather than over-read it. */
    size = prod - cons;
    if ((size == 0) || (size > sizeof(intf->out)))
        return;

    /* Grow the local buffer if the pending data won't fit. */
    if ((buffer->capacity - buffer->size) < size) {
        buffer->capacity += (size + 1024);
        buffer->data = g_realloc(buffer->data, buffer->capacity);
    }

    while (cons != prod)
        buffer->data[buffer->size++] = intf->out[
            MASK_XENCONS_IDX(cons++, intf->out)];

    xen_mb();  /* finish reading the payload before freeing the slots */
    intf->out_cons = cons;
    xen_pv_send_notify(&con->xendev);

    if (buffer->max_capacity &&
        buffer->size > buffer->max_capacity) {
        /* Discard the middle of the data. */

        size_t over = buffer->size - buffer->max_capacity;
        uint8_t *maxpos = buffer->data + buffer->max_capacity;

        memmove(maxpos - over, maxpos, over);
        buffer->data = g_realloc(buffer->data, buffer->max_capacity);
        buffer->size = buffer->capacity = buffer->max_capacity;

        /* Keep the consumed cursor inside the retained region. */
        if (buffer->consumed > buffer->max_capacity - over)
            buffer->consumed = buffer->max_capacity - over;
    }
}
/*
 * Fire the event channel toward the peer if it asked for this kind of
 * notification (VCHAN_NOTIFY_READ / VCHAN_NOTIFY_WRITE in `bit`).
 * Returns the result of xc_evtchn_notify(), or 0 when no notification
 * was requested.
 */
static inline int send_notify(struct libxenvchan *ctrl, uint8_t bit)
{
    uint8_t *flag;
    uint8_t old;

    /* The caller has already advanced its ring index; the barrier orders
     * that store before we sample the shared notify byte. */
    xen_mb();

    if (ctrl->is_server)
        flag = &ctrl->ring->srv_notify;
    else
        flag = &ctrl->ring->cli_notify;

    /* Atomically clear our bit; only notify if it was previously set. */
    old = __sync_fetch_and_and(flag, ~bit);
    if (!(old & bit))
        return 0;
    return xc_evtchn_notify(ctrl->event, ctrl->event_port);
}
/*
 * Consume all pending events from the framebuffer's shared "out" ring:
 * dirty-rectangle updates (clipped and sanity-checked before use) and,
 * for RESIZE events, a reconfiguration of the local framebuffer view.
 * Finally publishes the new consumer index and notifies the guest.
 */
static void xenfb_on_fb_event(struct xenfb *xenfb)
{
    uint32_t prod, cons;
    struct xenfb_page *page = xenfb->fb.page;

    prod = page->out_prod;
    if (prod == page->out_cons)
        return;
    xen_rmb();		/* ensure we see ring contents up to prod */
    for (cons = page->out_cons; cons != prod; cons++) {
        union xenfb_out_event *event = &XENFB_OUT_RING_REF(page, cons);
        int x, y, w, h;

        switch (event->type) {
        case XENFB_TYPE_UPDATE:
            /* Clamp the guest-supplied rectangle to the visible area;
             * a rectangle entirely outside it yields w < 0 or h < 0. */
            x = MAX(event->update.x, 0);
            y = MAX(event->update.y, 0);
            w = MIN(event->update.width, xenfb->width - x);
            h = MIN(event->update.height, xenfb->height - y);
            if (w < 0 || h < 0) {
                fprintf(stderr, "%s bogus update ignored\n",
                        xenfb->fb.nodename);
                break;
            }
            if (x != event->update.x || y != event->update.y ||
                w != event->update.width ||
                h != event->update.height) {
                fprintf(stderr, "%s bogus update clipped\n",
                        xenfb->fb.nodename);
            }
            xenfb_guest_copy(xenfb, x, y, w, h);
            break;
        case XENFB_TYPE_RESIZE:
            if (xenfb_configure_fb(xenfb, xenfb->fb_len,
                                   event->resize.width,
                                   event->resize.height,
                                   event->resize.depth,
                                   xenfb->fb_len,
                                   event->resize.offset,
                                   event->resize.stride) < 0)
                break;
            /* Prefer a shared-surface resize when the display supports
             * it, so the UI can blit straight from guest memory. */
            if (xenfb->ds->dpy_resize_shared)
                dpy_resize_shared(xenfb->ds, xenfb->width, xenfb->height,
                                  xenfb->depth, xenfb->row_stride,
                                  xenfb->pixels + xenfb->offset);
            else
                dpy_resize(xenfb->ds, xenfb->width, xenfb->height);
            xenfb_invalidate(xenfb);
            break;
        }
    }
    xen_mb();		/* ensure we're done with ring contents */
    page->out_cons = cons;
    xc_evtchn_notify(xenfb->evt_xch, xenfb->fb.port);
}
/*
 * Push one event onto the framebuffer's shared "in" ring and kick the
 * guest.  The caller guarantees there is a free slot (!xenfb_queue_full()).
 */
static void xenfb_send_event(struct xenfb *xenfb, union xenfb_in_event *event)
{
    struct xenfb_page *page = xenfb->fb.page;
    uint32_t slot = page->in_prod;

    xen_mb();                   /* ensure ring space available */
    XENFB_IN_RING_REF(page, slot) = *event;
    xen_wmb();                  /* payload visible before the index moves */
    page->in_prod = slot + 1;

    xc_evtchn_notify(xenfb->evt_xch, xenfb->fb.port);
}
/*
 * Push one event onto the framebuffer's shared "in" ring and notify the
 * frontend.  The caller guarantees a free slot (!xenfb_queue_full()).
 */
static void xenfb_send_event(struct XenFB *xenfb, union xenfb_in_event *event)
{
    struct xenfb_page *page = xenfb->c.page;
    uint32_t slot = page->in_prod;

    xen_mb();                   /* ensure ring space available */
    XENFB_IN_RING_REF(page, slot) = *event;
    xen_wmb();                  /* payload visible before the index moves */
    page->in_prod = slot + 1;

    xen_be_send_notify(&xenfb->c.xendev);
}
/*
 * Number of bytes the guest may still write into the console input ring.
 * Returns 0 if the guest-controlled indexes are inconsistent (delta larger
 * than the ring itself).
 */
static int ring_free_bytes(struct XenConsole *con)
{
    struct xencons_interface *intf = con->sring;
    XENCONS_RING_IDX cons, prod, used;

    cons = intf->in_cons;
    prod = intf->in_prod;
    xen_mb();

    used = prod - cons;
    if (used > sizeof(intf->in))
        return 0; /* ring is screwed: ignore it */

    return sizeof(intf->in) - used;
}
/*
 * OCaml stub: reset the xenstore shared ring to a pristine state — zero
 * all four indexes, poison both data areas, then mark the connection
 * established.  The barrier orders the poisoning before the peer can see
 * the CONNECTED state.
 */
CAMLprim value ml_interface_close(value interface)
{
    CAMLparam1(interface);
    struct xenstore_domain_interface *intf = GET_C_STRUCT(interface)->addr;
    int idx;

    intf->req_cons = intf->req_prod = intf->rsp_cons = intf->rsp_prod = 0;

    /* Fill the unused space with bytes that parse as invalid xenstore
     * packets (XS_INVALID = 0xffff), so stale data can never be mistaken
     * for a real message. */
    for (idx = 0; idx < XENSTORE_RING_SIZE; idx++) {
        intf->req[idx] = 0xff;
        intf->rsp[idx] = 0xff;
    }

    xen_mb();
    intf->connection = XENSTORE_CONNECTED;

    CAMLreturn(Val_unit);
}
/*
 * Copy exactly `size` bytes out of the receive ring into `data` and
 * advance the consumer index.  The caller has already verified that at
 * least `size` bytes are available.  Returns `size` on success, -1 if
 * notifying the peer failed.
 */
static int do_recv(struct libxenvchan *ctrl, void *data, size_t size)
{
    /* Ring sizes are powers of two, so masking gives the real offset. */
    int real_idx = rd_cons(ctrl) & (rd_ring_size(ctrl) - 1);
    int avail_contig = rd_ring_size(ctrl) - real_idx;

    if (avail_contig > size)
        avail_contig = size;
    xen_rmb(); /* data read must happen /after/ rd_cons read */
    memcpy(data, rd_ring(ctrl) + real_idx, avail_contig);
    if (avail_contig < size) {
        // we rolled across the end of the ring
        memcpy(data + avail_contig, rd_ring(ctrl), size - avail_contig);
    }
    xen_mb(); /* consume /then/ notify */
    rd_cons(ctrl) += size;
    if (send_notify(ctrl, VCHAN_NOTIFY_READ))
        return -1;
    return size;
}
/**
 * Copy exactly `size` bytes from `data` into the transmit ring and advance
 * the producer index.  The caller has already verified that at least
 * `size` bytes of space are free.
 *
 * returns -1 on error, or size on success
 */
static int do_send(struct libxenvchan *ctrl, const void *data, size_t size)
{
    /* Ring sizes are powers of two, so masking gives the real offset. */
    int real_idx = wr_prod(ctrl) & (wr_ring_size(ctrl) - 1);
    int avail_contig = wr_ring_size(ctrl) - real_idx;

    if (avail_contig > size)
        avail_contig = size;
    xen_mb(); /* read indexes /then/ write data */
    memcpy(wr_ring(ctrl) + real_idx, data, avail_contig);
    if (avail_contig < size) {
        // we rolled across the end of the ring
        memcpy(wr_ring(ctrl), data + avail_contig, size - avail_contig);
    }
    xen_wmb(); /* write data /then/ notify */
    wr_prod(ctrl) += size;
    if (send_notify(ctrl, VCHAN_NOTIFY_WRITE))
        return -1;
    return size;
}
/* Send an event to the keyboard frontend driver */ static int xenfb_kbd_event(struct xenfb *xenfb, union xenkbd_in_event *event) { uint32_t prod; struct xenkbd_page *page = xenfb->kbd.page; if (xenfb->kbd.state != XenbusStateConnected) return 0; prod = page->in_prod; if (prod - page->in_cons == XENKBD_IN_RING_LEN) { errno = EAGAIN; return -1; } xen_mb(); /* ensure ring space available */ XENKBD_IN_RING_REF(page, prod) = *event; xen_wmb(); /* ensure ring contents visible */ page->in_prod = prod + 1; return xc_evtchn_notify(xenfb->evt_xch, xenfb->kbd.port); }
/* Send an event to the keyboard frontend driver */ static int xenfb_kbd_event(struct XenInput *xenfb, union xenkbd_in_event *event) { struct xenkbd_page *page = xenfb->c.page; uint32_t prod; if (xenfb->c.xendev.be_state != XenbusStateConnected) return 0; if (!page) return 0; prod = page->in_prod; if (prod - page->in_cons == XENKBD_IN_RING_LEN) { errno = EAGAIN; return -1; } xen_mb(); /* ensure ring space available */ XENKBD_IN_RING_REF(page, prod) = *event; xen_wmb(); /* ensure ring contents visible */ page->in_prod = prod + 1; return xen_be_send_notify(&xenfb->c.xendev); }
/**
 * monitor_tbufs - monitor the contents of tbufs
 *
 * Maps the hypervisor trace buffers, then loops until interrupted:
 * for each CPU's buffer, consumes every record between the snapshot of
 * cons and prod (handling the wrap at the end of the data area in two
 * passes), then publishes the new consumer index.  Returns 0.
 */
static int monitor_tbufs(void)
{
    int i;

    struct t_struct *tbufs;      /* Pointer to hypervisor maps */
    struct t_buf **meta;         /* pointers to the trace buffer metadata */
    unsigned char **data;        /* pointers to the trace buffer data areas
                                  * where they are mapped into user space. */
    unsigned long tbufs_mfn;     /* mfn of the tbufs */
    unsigned int  num;           /* number of trace buffers / logical CPUS */
    unsigned long tinfo_size;    /* size of t_info metadata map */
    unsigned long size;          /* size of a single trace buffer */

    unsigned long data_size, rec_size;

    /* get number of logical CPUs (and therefore number of trace buffers) */
    num = get_num_cpus();

    init_current(num);
    alloc_qos_data(num);

    printf("CPU Frequency = %7.2f\n", opts.cpu_freq);

    /* setup access to trace buffers */
    get_tbufs(&tbufs_mfn, &tinfo_size);
    tbufs = map_tbufs(tbufs_mfn, num, tinfo_size);

    size = tbufs->t_info->tbuf_size * XC_PAGE_SIZE;
    data_size = size - sizeof(struct t_buf);

    meta = tbufs->meta;
    data = tbufs->data;

    if ( eventchn_init() < 0 )
        fprintf(stderr, "Failed to initialize event channel; "
                "Using POLL method\r\n");

    /* now, scan buffers for events */
    while ( !interrupted )
    {
        for ( i = 0; (i < num) && !interrupted; i++ )
        {
            unsigned long start_offset, end_offset, cons, prod;

            /* Snapshot the indexes once; the hypervisor advances prod
             * concurrently. */
            cons = meta[i]->cons;
            prod = meta[i]->prod;
            xen_rmb(); /* read prod, then read item. */

            if ( cons == prod )
                continue;

            start_offset = cons % data_size;
            end_offset = prod % data_size;

            if ( start_offset >= end_offset )
            {
                /* Data wraps: consume up to the end of the area first,
                 * then restart from offset 0 below. */
                while ( start_offset != data_size )
                {
                    rec_size = process_record(
                        i, (struct t_rec *)(data[i] + start_offset));
                    start_offset += rec_size;
                }
                start_offset = 0;
            }
            while ( start_offset != end_offset )
            {
                rec_size = process_record(
                    i, (struct t_rec *)(data[i] + start_offset));
                start_offset += rec_size;
            }
            xen_mb(); /* read item, then update cons. */
            meta[i]->cons = prod;
        }

        wait_for_event();
        wakeups++;
    }

    /* cleanup */
    free(meta);
    free(data);
    /* don't need to munmap - cleanup is automatic */

    return 0;
}
/*
 * Consume all pending events from the framebuffer's shared "out" ring.
 * UPDATE events are clipped and accumulated into the up_rects[] batch
 * (falling back to a full-screen refresh when the queue overflows or a
 * large area changed); RESIZE events reconfigure the local framebuffer
 * view.  Finally publishes the new consumer index.
 */
static void xenfb_handle_events(struct XenFB *xenfb)
{
    uint32_t prod, cons;
    struct xenfb_page *page = xenfb->c.page;

    prod = page->out_prod;
    if (prod == page->out_cons)
	return;
    xen_rmb();		/* ensure we see ring contents up to prod */
    for (cons = page->out_cons; cons != prod; cons++) {
	union xenfb_out_event *event = &XENFB_OUT_RING_REF(page, cons);
	int x, y, w, h;

	switch (event->type) {
	case XENFB_TYPE_UPDATE:
	    /* Rectangle queue full: stop tracking individual updates and
	     * repaint everything instead. */
	    if (xenfb->up_count == UP_QUEUE)
		xenfb->up_fullscreen = 1;
	    if (xenfb->up_fullscreen)
		break;
	    /* Clamp the guest-supplied rectangle to the visible area; a
	     * rectangle entirely outside it yields w < 0 or h < 0. */
	    x = MAX(event->update.x, 0);
	    y = MAX(event->update.y, 0);
	    w = MIN(event->update.width, xenfb->width - x);
	    h = MIN(event->update.height, xenfb->height - y);
	    if (w < 0 || h < 0) {
		xen_be_printf(&xenfb->c.xendev, 1, "bogus update ignored\n");
		break;
	    }
	    if (x != event->update.x || y != event->update.y ||
		w != event->update.width ||
		h != event->update.height) {
		xen_be_printf(&xenfb->c.xendev, 1, "bogus update clipped\n");
	    }
	    if (w == xenfb->width && h > xenfb->height / 2) {
		/* scroll detector: updated more than 50% of the lines,
		 * don't bother keeping track of the rectangles then */
		xenfb->up_fullscreen = 1;
	    } else {
		xenfb->up_rects[xenfb->up_count].x = x;
		xenfb->up_rects[xenfb->up_count].y = y;
		xenfb->up_rects[xenfb->up_count].w = w;
		xenfb->up_rects[xenfb->up_count].h = h;
		xenfb->up_count++;
	    }
	    break;
#ifdef XENFB_TYPE_RESIZE
	case XENFB_TYPE_RESIZE:
	    if (xenfb_configure_fb(xenfb, xenfb->fb_len,
				   event->resize.width,
				   event->resize.height,
				   event->resize.depth,
				   xenfb->fb_len,
				   event->resize.offset,
				   event->resize.stride) < 0)
		break;
	    xenfb_invalidate(xenfb);
	    break;
#endif
	}
    }
    xen_mb();		/* ensure we're done with ring contents */
    page->out_cons = cons;
}
/*
 * OCaml stub: read the shared ring's rsp_event field.  The barrier comes
 * first so we observe the peer's most recent store to the field.
 */
CAMLprim value caml_sring_rsp_event(value v_sring)
{
    xen_mb ();

    struct sring *ring = SRING_VAL(v_sring);
    return Val_int(ring->rsp_event);
}
/*
 * Ask the peer to send us an event of the given kind (`bit` is one of
 * the VCHAN_NOTIFY_* flags) the next time it touches the ring.
 */
static inline void request_notify(struct libxenvchan *ctrl, uint8_t bit)
{
    uint8_t *flag;

    if (ctrl->is_server)
        flag = &ctrl->ring->cli_notify;
    else
        flag = &ctrl->ring->srv_notify;

    /* Atomically set our bit in the peer's notify byte, then fence so
     * the request is posted before the caller re-reads any indexes. */
    __sync_or_and_fetch(flag, bit);
    xen_mb();
}