Example #1
/* Helper functions: copy data in and out of the ring */
static void ring_write(const char *data, uint32_t len)
{
    uint32_t part;

    ASSERT(len <= XENSTORE_PAYLOAD_MAX);

    while ( len )
    {
        /* Don't overrun the consumer pointer */
        while ( (part = (XENSTORE_RING_SIZE - 1) -
                 MASK_XENSTORE_IDX(rings->req_prod - rings->req_cons)) == 0 )
            ring_wait();
        /* Don't overrun the end of the ring */
        if ( part > (XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(rings->req_prod)) )
            part = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(rings->req_prod);
        /* Don't write more than we were asked for */
        if ( part > len ) 
            part = len;

        memcpy(rings->req + MASK_XENSTORE_IDX(rings->req_prod), data, part);
        barrier(); /* = wmb before prod write, rmb before next cons read */
        rings->req_prod += part;
        len -= part;
    }
}
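For reference, every example on this page manipulates the shared XenStore ring described by Xen's public io/xs_wire.h header. An abbreviated sketch of the definitions the examples rely on (the header in your Xen tree is authoritative):

/* Abbreviated sketch of the shared-ring definitions from io/xs_wire.h.
 * The indices are free-running 32-bit counters; MASK_XENSTORE_IDX()
 * reduces one of them to an offset inside the power-of-two sized ring. */
typedef uint32_t XENSTORE_RING_IDX;

#define XENSTORE_RING_SIZE      1024
#define MASK_XENSTORE_IDX(idx)  ((idx) & (XENSTORE_RING_SIZE - 1))
#define XENSTORE_PAYLOAD_MAX    4096

struct xenstore_domain_interface {
    char req[XENSTORE_RING_SIZE];   /* requests to the xenstore daemon       */
    char rsp[XENSTORE_RING_SIZE];   /* replies and asynchronous watch events */
    XENSTORE_RING_IDX req_cons, req_prod;
    XENSTORE_RING_IDX rsp_cons, rsp_prod;
    uint32_t server_features;       /* later protocol revisions only         */
    uint32_t connection;            /* checked by the OCaml examples below   */
};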
Example #2
static void ring_read(char *data, uint32_t len)
{
    uint32_t part;

    ASSERT(len <= XENSTORE_PAYLOAD_MAX);

    while ( len )
    {
        /* Don't overrun the producer pointer */
        while ( (part = MASK_XENSTORE_IDX(rings->rsp_prod -
                                          rings->rsp_cons)) == 0 )
            ring_wait();
        /* Don't overrun the end of the ring */
        if ( part > (XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(rings->rsp_cons)) )
            part = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(rings->rsp_cons);
        /* Don't read more than we were asked for */
        if ( part > len )
            part = len;

        memcpy(data, rings->rsp + MASK_XENSTORE_IDX(rings->rsp_cons), part);
        barrier(); /* = wmb before cons write, rmb before next prod read */
        rings->rsp_cons += part;
        len -= part;
    }
}
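Both helpers compare the raw, free-running indices and apply MASK_XENSTORE_IDX() only when addressing the ring array. A small standalone check, using hypothetical counter values, of why that modulo-2^32 arithmetic stays correct even after the indices wrap:

#include <assert.h>
#include <stdint.h>

#define XENSTORE_RING_SIZE      1024
#define MASK_XENSTORE_IDX(idx)  ((idx) & (XENSTORE_RING_SIZE - 1))

int main(void)
{
    /* Hypothetical values chosen so the 32-bit counters have already wrapped. */
    uint32_t cons = 0xfffffffeu;              /* consumer index, about to wrap   */
    uint32_t prod = 0x00000005u;              /* producer index, already wrapped */

    assert(prod - cons == 7);                 /* unread bytes, modulo 2^32 */
    assert(MASK_XENSTORE_IDX(cons) == 1022);  /* offset of the oldest byte */
    return 0;
}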
Example #3
CAMLprim value ml_interface_write(value ml_interface,
                                  value ml_buffer,
                                  value ml_len)
{
	CAMLparam3(ml_interface, ml_buffer, ml_len);
	CAMLlocal1(ml_result);

	struct mmap_interface *interface = GET_C_STRUCT(ml_interface);
	char *buffer = String_val(ml_buffer);
	int len = Int_val(ml_len);
	int result;

	struct xenstore_domain_interface *intf = interface->addr;
	XENSTORE_RING_IDX cons, prod;
	int total_space, space;
	uint32_t connection;

	cons = *(volatile uint32_t*)&intf->rsp_cons;
	prod = *(volatile uint32_t*)&intf->rsp_prod;
	connection = *(volatile uint32_t*)&intf->connection;

	if (connection != XENSTORE_CONNECTED)
		caml_raise_constant(*caml_named_value("Xb.Reconnect"));

	xen_mb();

	if ((prod - cons) > XENSTORE_RING_SIZE)
		caml_failwith("bad connection");

	/* Check for space to write the full message. */
	total_space = XENSTORE_RING_SIZE - (prod - cons);
	if (total_space == 0) {
		/* No space at all - exit having done nothing. */
		result = 0;
		goto exit;
	}
	else if (total_space < len)
		/* Some space - make a partial write. */
		len = total_space;

	/* Check for space until the ring wraps. */
	space = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(prod);
	if (len < space)
		/* Message fits inside the remaining part of the ring. */
		memcpy(intf->rsp + MASK_XENSTORE_IDX(prod), buffer, len);
	else {
		/* Message wraps around the end of the ring. Write both halves. */
		memcpy(intf->rsp + MASK_XENSTORE_IDX(prod), buffer, space);
		memcpy(intf->rsp, buffer + space, len - space);
	}

	xen_mb();
	intf->rsp_prod += len;
	result = len;
exit:
	ml_result = Val_int(result);
	CAMLreturn(ml_result);
}
Example #4
CAMLprim value ml_interface_read(value ml_interface,
                                 value ml_buffer,
                                 value ml_len)
{
	CAMLparam3(ml_interface, ml_buffer, ml_len);
	CAMLlocal1(ml_result);

	struct mmap_interface *interface = GET_C_STRUCT(ml_interface);
	char *buffer = String_val(ml_buffer);
	int len = Int_val(ml_len);
	int result;

	struct xenstore_domain_interface *intf = interface->addr;
	XENSTORE_RING_IDX cons, prod; /* offsets only */
	int total_data, data;
	uint32_t connection;

	cons = *(volatile uint32_t*)&intf->req_cons;
	prod = *(volatile uint32_t*)&intf->req_prod;
	connection = *(volatile uint32_t*)&intf->connection;

	if (connection != XENSTORE_CONNECTED)
		caml_raise_constant(*caml_named_value("Xb.Reconnect"));

	xen_mb();

	if ((prod - cons) > XENSTORE_RING_SIZE)
		caml_failwith("bad connection");

	/* Check for any pending data at all. */
	total_data = prod - cons;
	if (total_data == 0) {
		/* No pending data at all. */
		result = 0;
		goto exit;
	}
	else if (total_data < len)
		/* Some data - make a partial read. */
		len = total_data;

	/* Check whether data crosses the end of the ring. */
	data = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(cons);
	if (len < data)
		/* Data within the remaining part of the ring. */
		memcpy(buffer, intf->req + MASK_XENSTORE_IDX(cons), len);
	else {
		/* Data crosses the ring boundary. Read both halves. */
		memcpy(buffer, intf->req + MASK_XENSTORE_IDX(cons), data);
		memcpy(buffer + data, intf->req, len - data);
	}

	xen_mb();
	intf->req_cons += len;
	result = len;
exit:
	ml_result = Val_int(result);
	CAMLreturn(ml_result);
}
Example #5
static const void *
get_input_chunk(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod,
    const char *buf, uint32_t *len)
{
	*len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(cons);
	if ((prod - cons) < *len)
		*len = prod - cons;
	return ((void *)(buf + MASK_XENSTORE_IDX(cons)));
}
Example #6
static void *
get_output_chunk(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod,
    char *buf, uint32_t *len)
{
	*len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(prod);
	if ((XENSTORE_RING_SIZE - (prod - cons)) < *len)
		*len = XENSTORE_RING_SIZE - (prod - cons);
	return ((void *)(buf + MASK_XENSTORE_IDX(prod)));
}
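The two chunk helpers above only return a pointer and a contiguous length; the caller is expected to loop until all data has been moved. A hypothetical caller for the output side might look like the sketch below (ring_write_some() is not part of the original code, and the wmb() write barrier and the xs_wire.h definitions are assumed):

/* Hypothetical caller sketch: copy at most one contiguous chunk into the
 * response ring, publish the new producer index after a write barrier,
 * and let the caller loop for any remainder. */
static uint32_t ring_write_some(struct xenstore_domain_interface *intf,
                                const char *data, uint32_t len)
{
	uint32_t avail;
	void *dst = get_output_chunk(intf->rsp_cons, intf->rsp_prod,
	                             intf->rsp, &avail);

	if (avail < len)
		len = avail;
	memcpy(dst, data, len);
	wmb();                  /* data must be visible before the index moves */
	intf->rsp_prod += len;
	return len;
}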
Example #7
CAMLprim value ml_interface_read(value ml_interface,
                                 value ml_buffer,
                                 value ml_len)
{
	CAMLparam3(ml_interface, ml_buffer, ml_len);
	CAMLlocal1(ml_result);

	struct mmap_interface *interface = GET_C_STRUCT(ml_interface);
	char *buffer = String_val(ml_buffer);
	int len = Int_val(ml_len);
	int result;

	struct xenstore_domain_interface *intf = interface->addr;
	XENSTORE_RING_IDX cons, prod; /* offsets only */
	int to_read;
	uint32_t connection;

	cons = *(volatile uint32_t*)&intf->req_cons;
	prod = *(volatile uint32_t*)&intf->req_prod;
	connection = *(volatile uint32_t*)&intf->connection;

	if (connection != XENSTORE_CONNECTED)
		caml_raise_constant(*caml_named_value("Xb.Reconnect"));

	xen_mb();

	if ((prod - cons) > XENSTORE_RING_SIZE)
		caml_failwith("bad connection");

	if (prod == cons) {
		result = 0;
		goto exit;
	}
	/* Work with masked offsets from here on: read only the contiguous run,
	 * stopping either at the producer or at the end of the ring. */
	cons = MASK_XENSTORE_IDX(cons);
	prod = MASK_XENSTORE_IDX(prod);
	if (prod > cons)
		to_read = prod - cons;
	else
		to_read = XENSTORE_RING_SIZE - cons;
	if (to_read < len)
		len = to_read;
	memcpy(buffer, intf->req + cons, len);
	xen_mb();
	intf->req_cons += len;
	result = len;
exit:
	ml_result = Val_int(result);
	CAMLreturn(ml_result);
}
Example #8
static uint32_t xenstore_write(uint32_t type, uint32_t len, void *inbuf)
{
  static uint32_t req_id = 1;
  struct xsd_sockmsg m;
  void *buffer, *cur;
  uint32_t prod;

  /* build out the header and adjust the final length */
  m.type   = type;
  m.req_id = req_id++;
  m.tx_id  = 0;
  m.len    = len;
  len += sizeof(struct xsd_sockmsg);

  /* wait until we can send out the data all at once */
  while( (XENSTORE_RING_SIZE - (xsint->req_prod - xsint->req_cons)) < len )
    runtime_block(1);
  assert( (xsint->req_prod + len - xsint->req_cons) <= XENSTORE_RING_SIZE);

  /* Combine the data into one block */
  cur = buffer = malloc(len);
  memcpy(buffer, &m, sizeof(struct xsd_sockmsg));
  memcpy((void*)((uintptr_t)buffer + sizeof(struct xsd_sockmsg)), inbuf, m.len);

  /* dump it out to the ring */
  prod = xsint->req_prod;
  while(len != 0) {
    uint32_t nextbit = min(len, XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(prod));
    memcpy(xsint->req + MASK_XENSTORE_IDX(prod), cur, nextbit);
    prod += nextbit;
    cur   = (void*)((uintptr_t)cur + nextbit);
    len  -= nextbit;
  }

  /* notify the other side */
  wmb();
  xsint->req_prod = prod;
  channel_send(system_start_info->store_evtchn);

  /* free our buffer and return the request id */
  free(buffer);
  return m.req_id;
}
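The header that xenstore_write() prepends here, and that xenstore_read() in the next example parses back out of the response ring, is the fixed XenStore wire header from io/xs_wire.h:

/* XenStore wire header: every request and reply on the ring starts with
 * this struct, and `len` counts only the payload bytes that follow it. */
struct xsd_sockmsg {
    uint32_t type;    /* XS_READ, XS_WRITE, XS_WATCH, ...            */
    uint32_t req_id;  /* chosen by the sender, echoed in the reply   */
    uint32_t tx_id;   /* transaction id, 0 when not in a transaction */
    uint32_t len;     /* length of the data following this header    */
};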
Example #9
static uint32_t xenstore_read(uint32_t req_id, uint32_t *rtype, void **buffer)
{
  struct xsd_sockmsg m;
  char *mbuf;
  uint32_t cons, i;

  *buffer = NULL; /* safety */
  *rtype  = 0xDEADBEEF;
again:
  /* wait until there's some data available */
  while( (xsint->rsp_prod - xsint->rsp_cons) < sizeof(struct xsd_sockmsg) )
    runtime_block(1);

  /* copy off the header */
  cons = xsint->rsp_cons;
  for(i = 0; i < sizeof(struct xsd_sockmsg); i++)
    ((char*)(&m))[i] = xsint->rsp[MASK_XENSTORE_IDX(cons++)];

  /* is this the item we were looking for? */
  if(m.req_id != req_id) {
    /* no ... so ignore this message and restart */
    cons += m.len;
    xsint->rsp_cons = cons;
    goto again;
  }

  /* it is! allocate and copy off the result */
  mbuf = malloc(m.len);
  while( (xsint->rsp_prod - cons) < m.len )
    runtime_block(1);
  for(i = 0; i < m.len; i++)
    mbuf[i] = xsint->rsp[MASK_XENSTORE_IDX(cons++)];

  /* update the consumer index and return the buffer and length */
  xsint->rsp_cons = cons;
  *buffer = mbuf;
  *rtype  = m.type;
  return m.len;
}
Example #10
/**
 * Receive XenStore response raw data
 *
 * @v xen		Xen hypervisor
 * @v data		Data buffer, or NULL to discard data
 * @v len		Length of data
 */
static void xenstore_recv ( struct xen_hypervisor *xen, void *data,
			    size_t len ) {
	struct xenstore_domain_interface *intf = xen->store.intf;
	XENSTORE_RING_IDX cons = readl ( &intf->rsp_cons );
	XENSTORE_RING_IDX prod;
	XENSTORE_RING_IDX idx;
	char *bytes = data;
	size_t offset = 0;
	size_t fill;

	DBGCP ( intf, "XENSTORE raw response:\n" );

	/* Read one byte at a time */
	while ( offset < len ) {

		/* Wait for data to be ready */
		while ( 1 ) {
			prod = readl ( &intf->rsp_prod );
			fill = ( prod - cons );
			if ( fill > 0 )
				break;
			DBGC2 ( xen, "." );
			cpu_nap();
			rmb();
		}

		/* Read byte */
		idx = MASK_XENSTORE_IDX ( cons++ );
		if ( data )
			bytes[offset++] = readb ( &intf->rsp[idx] );
	}
	if ( data )
		DBGCP_HDA ( intf, MASK_XENSTORE_IDX ( cons - len ), data, len );

	/* Update consumer counter */
	writel ( cons, &intf->rsp_cons );
	wmb();
}
Example #11
/**
 * Send XenStore request raw data
 *
 * @v xen		Xen hypervisor
 * @v data		Data buffer
 * @v len		Length of data
 */
static void xenstore_send ( struct xen_hypervisor *xen, const void *data,
			    size_t len ) {
	struct xenstore_domain_interface *intf = xen->store.intf;
	XENSTORE_RING_IDX prod = readl ( &intf->req_prod );
	XENSTORE_RING_IDX cons;
	XENSTORE_RING_IDX idx;
	const char *bytes = data;
	size_t offset = 0;
	size_t fill;

	DBGCP ( intf, "XENSTORE raw request:\n" );
	DBGCP_HDA ( intf, MASK_XENSTORE_IDX ( prod ), data, len );

	/* Write one byte at a time */
	while ( offset < len ) {

		/* Wait for space to become available */
		while ( 1 ) {
			cons = readl ( &intf->req_cons );
			fill = ( prod - cons );
			if ( fill < XENSTORE_RING_SIZE )
				break;
			DBGC2 ( xen, "." );
			cpu_nap();
			rmb();
		}

		/* Write byte */
		idx = MASK_XENSTORE_IDX ( prod++ );
		writeb ( bytes[offset++], &intf->req[idx] );
	}

	/* Update producer counter */
	wmb();
	writel ( prod, &intf->req_prod );
	wmb();
}
Example #12
CAMLprim value ml_interface_write(value ml_interface,
                                  value ml_buffer,
                                  value ml_len)
{
	CAMLparam3(ml_interface, ml_buffer, ml_len);
	CAMLlocal1(ml_result);

	struct mmap_interface *interface = GET_C_STRUCT(ml_interface);
	char *buffer = String_val(ml_buffer);
	int len = Int_val(ml_len);
	int result;

	struct xenstore_domain_interface *intf = interface->addr;
	XENSTORE_RING_IDX cons, prod;
	int can_write;
	uint32_t connection;

	cons = *(volatile uint32_t*)&intf->rsp_cons;
	prod = *(volatile uint32_t*)&intf->rsp_prod;
	connection = *(volatile uint32_t*)&intf->connection;

	if (connection != XENSTORE_CONNECTED)
		caml_raise_constant(*caml_named_value("Xb.Reconnect"));

	xen_mb();
	if ( (prod - cons) >= XENSTORE_RING_SIZE ) {
		result = 0;
		goto exit;
	}
	/* Contiguous free space: up to the end of the ring, or up to the
	 * consumer offset if the used region currently wraps. */
	if (MASK_XENSTORE_IDX(prod) >= MASK_XENSTORE_IDX(cons))
		can_write = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(prod);
	else 
		can_write = MASK_XENSTORE_IDX(cons) - MASK_XENSTORE_IDX(prod);
	if (can_write < len)
		len = can_write;
	memcpy(intf->rsp + MASK_XENSTORE_IDX(prod), buffer, len);
	xen_mb();
	intf->rsp_prod += len;
	result = len;
exit:
	ml_result = Val_int(result);
	CAMLreturn(ml_result);
}
Example #13
/* Send data to xenbus.  This can block.  All of the requests are seen
   by xenbus as if sent atomically.  The header is added
   automatically, using type %type, req_id %req_id, and trans_id
   %trans_id. */
static void xb_write(int type, int req_id, xenbus_transaction_t trans_id,
		     const struct write_req *req, int nr_reqs)
{
    XENSTORE_RING_IDX prod;
    int r;
    int len = 0;
    const struct write_req *cur_req;
    int req_off;
    int total_off;
    int this_chunk;
    struct xsd_sockmsg m = {.type = type, .req_id = req_id,
        .tx_id = trans_id };
    struct write_req header_req = { &m, sizeof(m) };

    for (r = 0; r < nr_reqs; r++)
        len += req[r].len;
    m.len = len;
    len += sizeof(m);

    cur_req = &header_req;

    BUG_ON(len > XENSTORE_RING_SIZE);
    /* Wait for the ring to drain to the point where we can send the
       message. */
    prod = xenstore_buf->req_prod;
    if (prod + len - xenstore_buf->req_cons > XENSTORE_RING_SIZE) 
    {
        /* Wait for there to be space on the ring */
        DEBUG("prod %d, len %d, cons %d, size %d; waiting.\n",
                prod, len, xenstore_buf->req_cons, XENSTORE_RING_SIZE);
        wait_event(xb_waitq,
                xenstore_buf->req_prod + len - xenstore_buf->req_cons <=
                XENSTORE_RING_SIZE);
        DEBUG("Back from wait.\n");
        prod = xenstore_buf->req_prod;
    }

    /* We're now guaranteed to be able to send the message without
       overflowing the ring.  Do so. */
    total_off = 0;
    req_off = 0;
    while (total_off < len) 
    {
        this_chunk = min(cur_req->len - req_off,
                XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(prod));
        memcpy((char *)xenstore_buf->req + MASK_XENSTORE_IDX(prod),
                (char *)cur_req->data + req_off, this_chunk);
        prod += this_chunk;
        req_off += this_chunk;
        total_off += this_chunk;
        if (req_off == cur_req->len) 
        {
            req_off = 0;
            if (cur_req == &header_req)
                cur_req = req;
            else
                cur_req++;
        }
    }

    DEBUG("Complete main loop of xb_write.\n");
    BUG_ON(req_off != 0);
    BUG_ON(total_off != len);
    BUG_ON(prod > xenstore_buf->req_cons + XENSTORE_RING_SIZE);

    /* Remote must see entire message before updating indexes */
    wmb();

    xenstore_buf->req_prod += len;

    /* Send evtchn to notify remote */
    notify_remote_via_evtchn(start_info.store_evtchn);
}
Example #14
static void xenbus_thread_func(void *ign)
{
    struct xsd_sockmsg msg;
    unsigned prod = xenstore_buf->rsp_prod;

    for (;;) 
    {
        wait_event(xb_waitq, prod != xenstore_buf->rsp_prod);
        while (1) 
        {
            prod = xenstore_buf->rsp_prod;
            DEBUG("Rsp_cons %d, rsp_prod %d.\n", xenstore_buf->rsp_cons,
                    xenstore_buf->rsp_prod);
            if (xenstore_buf->rsp_prod - xenstore_buf->rsp_cons < sizeof(msg))
                break;
            rmb();
            memcpy_from_ring(xenstore_buf->rsp,
                    &msg,
                    MASK_XENSTORE_IDX(xenstore_buf->rsp_cons),
                    sizeof(msg));
            DEBUG("Msg len %d, %d avail, id %d.\n",
                    msg.len + sizeof(msg),
                    xenstore_buf->rsp_prod - xenstore_buf->rsp_cons,
                    msg.req_id);
            if (xenstore_buf->rsp_prod - xenstore_buf->rsp_cons <
                    sizeof(msg) + msg.len)
                break;

            DEBUG("Message is good.\n");

            if(msg.type == XS_WATCH_EVENT)
            {
		struct xenbus_event *event = malloc(sizeof(*event) + msg.len);
                xenbus_event_queue *events = NULL;
		char *data = (char*)event + sizeof(*event);
                struct watch *watch;

                memcpy_from_ring(xenstore_buf->rsp,
		    data,
                    MASK_XENSTORE_IDX(xenstore_buf->rsp_cons + sizeof(msg)),
                    msg.len);

		event->path = data;
		event->token = event->path + strlen(event->path) + 1;

                xenstore_buf->rsp_cons += msg.len + sizeof(msg);

                for (watch = watches; watch; watch = watch->next)
                    if (!strcmp(watch->token, event->token)) {
                        events = watch->events;
                        break;
                    }

                if (events) {
                    event->next = *events;
                    *events = event;
                    wake_up(&xenbus_watch_queue);
                } else {
                    printk("unexpected watch token %s\n", event->token);
                    free(event);
                }
            }

            else
            {
                req_info[msg.req_id].reply = malloc(sizeof(msg) + msg.len);
                memcpy_from_ring(xenstore_buf->rsp,
                    req_info[msg.req_id].reply,
                    MASK_XENSTORE_IDX(xenstore_buf->rsp_cons),
                    msg.len + sizeof(msg));
                xenstore_buf->rsp_cons += msg.len + sizeof(msg);
                wake_up(&req_info[msg.req_id].waitq);
            }
        }
    }
}
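Example #14 above (and #15 below) relies on a memcpy_from_ring() helper that is not reproduced on this page. A sketch (not the original implementation) of what such a helper has to do, given the call sites that pass an already-masked offset:

/* Sketch of a memcpy_from_ring()-style helper: copy len bytes out of the
 * response ring starting at the masked offset off, splitting the copy in
 * two when the region wraps past the end of the ring. Assumes the
 * XENSTORE_RING_SIZE definition sketched earlier. */
static void memcpy_from_ring(const char *ring, void *dest, int off, int len)
{
    int c1 = len, c2 = 0;

    if (c1 > XENSTORE_RING_SIZE - off) {       /* region wraps around       */
        c1 = XENSTORE_RING_SIZE - off;
        c2 = len - c1;
    }
    memcpy(dest, ring + off, c1);              /* up to the end of the ring */
    memcpy((char *)dest + c1, ring, c2);       /* remainder from the start  */
}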
Example #15
static void xenbus_thread_func(void *ign)
{
    struct xsd_sockmsg msg;
    unsigned prod = xenstore_buf->rsp_prod;

    for (;;) 
    {
        minios_wait_event(xb_waitq, prod != xenstore_buf->rsp_prod);
        while (1) 
        {
            prod = xenstore_buf->rsp_prod;
            DEBUG("Rsp_cons %d, rsp_prod %d.\n", xenstore_buf->rsp_cons,
                    xenstore_buf->rsp_prod);
            if (xenstore_buf->rsp_prod - xenstore_buf->rsp_cons < sizeof(msg)) {
                minios_notify_remote_via_evtchn(start_info.store_evtchn);
                break;
            }
            rmb();
            memcpy_from_ring(xenstore_buf->rsp,
                    &msg,
                    MASK_XENSTORE_IDX(xenstore_buf->rsp_cons),
                    sizeof(msg));
            DEBUG("Msg len %d, %d avail, id %d.\n",
                    msg.len + sizeof(msg),
                    xenstore_buf->rsp_prod - xenstore_buf->rsp_cons,
                    msg.req_id);
            if (xenstore_buf->rsp_prod - xenstore_buf->rsp_cons <
                    sizeof(msg) + msg.len) {
                minios_notify_remote_via_evtchn(start_info.store_evtchn);
                break;
            }

            DEBUG("Message is good.\n");

            if(msg.type == XS_WATCH_EVENT)
            {
		struct xenbus_event *event
		    = bmk_xmalloc_bmk(sizeof(*event) + msg.len);
                struct xenbus_event_queue *events = NULL;
		char *data = (char*)event + sizeof(*event);
                struct xenbus_watch *watch;

                memcpy_from_ring(xenstore_buf->rsp,
		    data,
                    MASK_XENSTORE_IDX(xenstore_buf->rsp_cons + sizeof(msg)),
                    msg.len);

		event->path = data;
		event->token = event->path + bmk_strlen(event->path) + 1;

                mb();
                xenstore_buf->rsp_cons += msg.len + sizeof(msg);

                spin_lock(&xenbus_req_lock);

                MINIOS_LIST_FOREACH(watch, &watches, entry)
                    if (!bmk_strcmp(watch->token, event->token)) {
                        event->watch = watch;
                        events = watch->events;
                        break;
                    }

                if (events) {
                    queue_event(events, event);
                } else {
                    minios_printk("unexpected watch token %s\n", event->token);
                    bmk_memfree(event, BMK_MEMWHO_WIREDBMK);
                }

                spin_unlock(&xenbus_req_lock);
            }

            else
            {
                req_info[msg.req_id].for_queue->reply =
                    bmk_xmalloc_bmk(sizeof(msg) + msg.len);
                memcpy_from_ring(xenstore_buf->rsp,
                    req_info[msg.req_id].for_queue->reply,
                    MASK_XENSTORE_IDX(xenstore_buf->rsp_cons),
                    msg.len + sizeof(msg));
                mb();
                xenstore_buf->rsp_cons += msg.len + sizeof(msg);
                spin_lock(&xenbus_req_lock);
                queue_event(req_info[msg.req_id].reply_queue,
                            req_info[msg.req_id].for_queue);
                spin_unlock(&xenbus_req_lock);
            }

            wmb();
            minios_notify_remote_via_evtchn(start_info.store_evtchn);
        }
    }
}