Code Example #1
File: xencons_ring.c  Project: talex5/mini-os
static inline void notify_daemon(struct consfront_dev *dev)
{
    /* Use evtchn: this is called early, before irq is set up. */
    if (!dev)
        notify_remote_via_evtchn(start_info.console.domU.evtchn);
    else
        notify_remote_via_evtchn(dev->evtchn);
}
Code Example #2
void write_data()
{
	int count;

	printk("============== TRANSACTION BEGINS ==============\n");

	/* Cycle through the pool of 10 pre-built messages. */
	id_selection++;
	if (id_selection == 10) {
		id_selection = 0;
	}

	printk("> Sending Encrypted MSG #%d : 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", message_no,
		id_pool[id_selection][0],
		id_pool[id_selection][1],
		id_pool[id_selection][2],
		id_pool[id_selection][3],
		id_pool[id_selection][4],
		id_pool[id_selection][5],
		id_pool[id_selection][6],
		id_pool[id_selection][7]
	);

	/* Copy the selected message into the shared ring and terminate it. */
	for (count = 0; count < MAX_MESSAGE_LENGTH; count++) {
		shared_ring->ring[count] = id_pool[id_selection][count];
	}

	shared_ring->message_no = message_no;
	shared_ring->ring[MAX_MESSAGE_LENGTH] = '\0';

	message_no += 2;

	/* Kick the backend; event channel 3 is hard-coded in this example. */
	notify_remote_via_evtchn(3);
}
Code Example #3
File: xen-tpmfront.c  Project: fromfuture/Elizium
static void vtpm_cancel(struct tpm_chip *chip)
{
	struct tpm_private *priv = TPM_VPRIV(chip);
	priv->shr->state = VTPM_STATE_CANCEL;
	wmb();
	notify_remote_via_evtchn(priv->evtchn);
}
Code Example #4
void notify_remote_via_irq(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		notify_remote_via_evtchn(evtchn);
}
Code Example #5
File: xen-tpmfront.c  Project: Anjali05/linux
static void vtpm_cancel(struct tpm_chip *chip)
{
	struct tpm_private *priv = dev_get_drvdata(&chip->dev);
	priv->shr->state = VTPM_STATE_CANCEL;
	wmb();
	notify_remote_via_evtchn(priv->evtchn);
}
Code Example #6
File: smp.c  Project: SnakeDoc/GuestVM
void smp_signal_cpu(int cpu)
{
    if (smp_init_completed) {
	if(cpu != smp_processor_id() && per_cpu(cpu, cpu_state) == CPU_SLEEPING)
	    notify_remote_via_evtchn(per_cpu(cpu, ipi_port));
    }
}
Code Example #7
int 
xencons_ring_send(const char *data, unsigned len)
{
	struct xencons_interface *intf; 
	XENCONS_RING_IDX cons, prod;
	int sent;

	intf = xencons_interface();
	cons = intf->out_cons;
	prod = intf->out_prod;
	sent = 0;

	mb();
	KASSERT((prod - cons) <= sizeof(intf->out),
		("console send ring inconsistent"));
	
	while ((sent < len) && ((prod - cons) < sizeof(intf->out)))
		intf->out[MASK_XENCONS_IDX(prod++, intf->out)] = data[sent++];

	wmb();
	intf->out_prod = prod;

	notify_remote_via_evtchn(xen_start_info->console_evtchn);

	return sent;
}
Code Example #8
File: fbfront.c  Project: farewellkou/xen
int fbfront_receive(struct fbfront_dev *dev, union xenfb_in_event *buf, int n)
{
    struct xenfb_page *page = dev->page;
    uint32_t prod, cons;
    int i;

#ifdef HAVE_LIBC
    if (dev->fd != -1) {
        files[dev->fd].read = 0;
        mb(); /* Make sure to let the handler set read to 1 before we start looking at the ring */
    }
#endif

    prod = page->in_prod;

    if (prod == page->in_cons)
        return 0;

    rmb();      /* ensure we see ring contents up to prod */

    for (i = 0, cons = page->in_cons; i < n && cons != prod; i++, cons++)
        memcpy(buf + i, &XENFB_IN_RING_REF(page, cons), sizeof(*buf));

    mb();       /* ensure we got ring contents */
    page->in_cons = cons;
    notify_remote_via_evtchn(dev->evtchn);

#ifdef HAVE_LIBC
    if (cons != prod && dev->fd != -1)
        /* still some events to read */
        files[dev->fd].read = 1;
#endif

    return i;
}
Code Example #9
File: netfront.c  Project: a2k2/xen-unstable
void init_rx_buffers(struct netfront_dev *dev)
{
    int i, requeue_idx;
    netif_rx_request_t *req;
    int notify;

    /* Rebuild the RX buffer freelist and the RX ring itself. */
    for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) 
    {
        struct net_buffer* buf = &dev->rx_buffers[requeue_idx];
        req = RING_GET_REQUEST(&dev->rx, requeue_idx);

        buf->gref = req->gref = 
            gnttab_grant_access(dev->dom,virt_to_mfn(buf->page),0);

        req->id = requeue_idx;

        requeue_idx++;
    }

    dev->rx.req_prod_pvt = requeue_idx;

    RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&dev->rx, notify);

    if (notify) 
        notify_remote_via_evtchn(dev->evtchn);

    dev->rx.sring->rsp_event = dev->rx.rsp_cons + 1;
}
Code Example #10
void 
xencons_handle_input(void *unused)
{
	struct xencons_interface *intf;
	XENCONS_RING_IDX cons, prod;

	CN_LOCK(cn_mtx);
	intf = xencons_interface();

	cons = intf->in_cons;
	prod = intf->in_prod;
	CN_UNLOCK(cn_mtx);
	
	/* XXX needs locking */
	while (cons != prod) {
		xencons_rx(intf->in + MASK_XENCONS_IDX(cons, intf->in), 1);
		cons++;
	}

	mb();
	intf->in_cons = cons;

	CN_LOCK(cn_mtx);
	notify_remote_via_evtchn(xen_start_info->console_evtchn);

	xencons_tx();
	CN_UNLOCK(cn_mtx);
}
Code Example #11
File: blkfront.c  Project: jonludlam/mini-os
/* Issue an aio */
void blkfront_aio(struct blkfront_aiocb *aiocbp, int write)
{
    struct blkfront_dev *dev = aiocbp->aio_dev;
    struct blkif_request *req;
    RING_IDX i;
    int notify;
    int n, j;
    uintptr_t start, end;

    // Can't io at non-sector-aligned location
    ASSERT(!(aiocbp->aio_offset & (dev->info.sector_size-1)));
    // Can't io non-sector-sized amounts
    ASSERT(!(aiocbp->aio_nbytes & (dev->info.sector_size-1)));
    // Can't io non-sector-aligned buffer
    ASSERT(!((uintptr_t) aiocbp->aio_buf & (dev->info.sector_size-1)));

    start = (uintptr_t)aiocbp->aio_buf & PAGE_MASK;
    end = ((uintptr_t)aiocbp->aio_buf + aiocbp->aio_nbytes + PAGE_SIZE - 1) & PAGE_MASK;
    aiocbp->n = n = (end - start) / PAGE_SIZE;

    /* qemu's IDE max multsect is 16 (8KB) and SCSI max DMA was set to 32KB,
     * so max 44KB can't happen */
    ASSERT(n <= BLKIF_MAX_SEGMENTS_PER_REQUEST);

    blkfront_wait_slot(dev);
    i = dev->ring.req_prod_pvt;
    req = RING_GET_REQUEST(&dev->ring, i);

    req->operation = write ? BLKIF_OP_WRITE : BLKIF_OP_READ;
    req->nr_segments = n;
    req->handle = dev->handle;
    req->id = (uintptr_t) aiocbp;
    req->sector_number = aiocbp->aio_offset / 512;

    for (j = 0; j < n; j++) {
        req->seg[j].first_sect = 0;
        req->seg[j].last_sect = PAGE_SIZE / 512 - 1;
    }
    req->seg[0].first_sect = ((uintptr_t)aiocbp->aio_buf & ~PAGE_MASK) / 512;
    req->seg[n-1].last_sect = (((uintptr_t)aiocbp->aio_buf + aiocbp->aio_nbytes - 1) & ~PAGE_MASK) / 512;
    for (j = 0; j < n; j++) {
	uintptr_t data = start + j * PAGE_SIZE;
        if (!write) {
            /* Trigger CoW if needed */
            *(char*)(data + (req->seg[j].first_sect << 9)) = 0;
            barrier();
        }
	aiocbp->gref[j] = req->seg[j].gref =
            gnttab_grant_access(dev->dom, virtual_to_mfn(data), write);
    }

    dev->ring.req_prod_pvt = i + 1;

    wmb();
    RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&dev->ring, notify);

    if(notify) notify_remote_via_evtchn(dev->evtchn);
}
Code Example #12
File: netfront.c  Project: cnplab/mini-os
static inline void netfront_xmit_notify(struct netfront_dev *dev)
{
	int notify;

	/* Push the new requests so the backend sees them, and check whether it needs to be notified */
	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&dev->tx, notify);
	if (notify)
		notify_remote_via_evtchn(dev->tx_evtchn);
}
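
The comment in the example above points at the pattern shared by most of the transmit paths on this page: publish requests on the shared ring, advance the private producer, then let RING_PUSH_REQUESTS_AND_CHECK_NOTIFY decide whether the backend actually needs an event before calling notify_remote_via_evtchn. A minimal, generic sketch follows; my_front_dev and my_request_t are illustrative names, and the standard <xen/interface/io/ring.h> macros are assumed.

/* Illustrative sketch only -- not taken from any of the listed projects. */
static void my_front_submit(struct my_front_dev *dev, const my_request_t *src)
{
    RING_IDX i = dev->ring.req_prod_pvt;
    my_request_t *req = RING_GET_REQUEST(&dev->ring, i);
    int notify;

    *req = *src;                    /* publish the request payload */
    dev->ring.req_prod_pvt = i + 1; /* advance the private producer index */

    wmb();                          /* make the request visible before pushing the index */
    RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&dev->ring, notify);
    if (notify)
        notify_remote_via_evtchn(dev->evtchn);
}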
Code Example #13
File: xc_minios.c  Project: amodj/Utopia
int xc_evtchn_notify(int xce_handle, evtchn_port_t port)
{
    int ret;

    ret = notify_remote_via_evtchn(port);

    if (ret < 0) {
	errno = -ret;
	ret = -1;
    }
    return ret;
}
Code Example #14
void create_client()
{
	state = THREAD_STATE_IDLE;

	/* Set up event channel connections to the remote domain. */
	bind_interdomain(0x0, 0x03, &service_consumer);
	bind_interdomain(0x0, 0x04, &grant_map);

	/* Allocate a fresh unbound event channel port. */
	alloc_unbound_port(0x00, &crash_domain);

	/* Kick event channel 4; the port number is hard-coded in this example. */
	notify_remote_via_evtchn(4);

	create_thread("ClientThread", client_thread, NULL);
}
Code Example #15
File: netfront.c  Project: cnplab/mini-os
static void netfront_fillup_rx_buffers(struct netfront_dev *dev)
{
	RING_IDX prod;
	struct netif_rx_request *req;
	grant_ref_t ref;
	unsigned short id;
	int notify;
#ifndef CONFIG_NETFRONT_PERSISTENT_GRANTS
	struct net_rxbuffer* buf;
	int flags;
#endif

#ifndef CONFIG_NETFRONT_PERSISTENT_GRANTS
	local_irq_save(flags);
#endif
	/* fill-up slots again */
	for (prod = dev->rx.req_prod_pvt;
	     prod - dev->rx.rsp_cons < NET_RX_RING_SIZE;
	     prod++) {
		id = netfront_rxidx(prod);
#ifndef CONFIG_NETFRONT_PERSISTENT_GRANTS
		buf = netfront_get_rxbuffer(dev);
		if (buf == NULL)
			break; /* out of rx buffers */
		BUG_ON(buf->page == NULL);
		ref = gnttab_grant_access(dev->dom,virt_to_mfn(buf->page),0);
		buf->gref = ref;
		BUG_ON(ref == GRANT_INVALID_REF);
		dev->rx_buffers[id] = buf;
#else
		ref = dev->rx_buffers[id].gref;
#endif
		req = RING_GET_REQUEST(&dev->rx, prod);
		req->id = id;
		req->gref = ref;
	}
#ifndef CONFIG_NETFRONT_PERSISTENT_GRANTS
	local_irq_restore(flags);
#endif

	if (dev->rx.req_prod_pvt != prod) {
		dev->rx.req_prod_pvt = prod;
		wmb();
		RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&dev->rx, notify);
#ifdef CONFIG_SELECT_POLL
		files[dev->fd].read = 0;
#endif
		if (notify)
			notify_remote_via_evtchn(dev->rx_evtchn);
	}
}
Code Example #16
int xb_read(void *data, unsigned len)
{
	struct xenstore_domain_interface *intf = xen_store_interface;
	XENSTORE_RING_IDX cons, prod;
	int rc;

	while (len != 0) {
		unsigned int avail;
		const char *src;

		rc = wait_event_interruptible(
			xb_waitq,
			intf->rsp_cons != intf->rsp_prod);
		if (rc < 0)
			return rc;

		/* Read indexes, then verify. */
		cons = intf->rsp_cons;
		prod = intf->rsp_prod;
		mb();
		if (!check_indexes(cons, prod)) {
			intf->rsp_cons = intf->rsp_prod = 0;
			return -EIO;
		}

		src = get_input_chunk(cons, prod, intf->rsp, &avail);
		if (avail == 0)
			continue;
		if (avail > len)
			avail = len;

		/* We must read header before we read data. */
		rmb();

		memcpy(data, src, avail);
		data += avail;
		len -= avail;

		/* Other side must not see free space until we've copied out */
		mb();
		intf->rsp_cons += avail;

		pr_debug("Finished read of %i bytes (%i to go)\n", avail, len);

		/* Implies mb(): they will see new header. */
		notify_remote_via_evtchn(xen_store_evtchn);
	}

	return 0;
}
Code Example #17
File: netfront.c  Project: carriercomm/mini-os
void netfront_xmit(struct netfront_dev *dev, unsigned char* data,int len)
{
    int flags;
    struct netif_tx_request *tx;
    RING_IDX i;
    int notify;
    unsigned short id;
    struct net_buffer* buf;
    void* page;
#ifdef CONFIG_NETMAP
    if (dev->netmap) {
        netmap_netfront_xmit(dev->na, data, len);
        return;
    }
#endif

    BUG_ON(len > PAGE_SIZE);

    down(&dev->tx_sem);

    local_irq_save(flags);
    id = get_id_from_freelist(dev->tx_freelist);
    local_irq_restore(flags);

    buf = &dev->tx_buffers[id];
    page = buf->page;

    i = dev->tx.req_prod_pvt;
    tx = RING_GET_REQUEST(&dev->tx, i);

    memcpy(page,data,len);

    tx->gref = buf->gref;

    tx->offset=0;
    tx->size = len;
    tx->flags=0;
    tx->id = id;
    dev->tx.req_prod_pvt = i + 1;

    wmb();

    RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&dev->tx, notify);

    if(notify) notify_remote_via_evtchn(dev->evtchn);

    local_irq_save(flags);
    network_tx_buf_gc(dev);
    local_irq_restore(flags);
}
Code Example #18
File: pcifront.c  Project: avsm/xen-unstable
void pcifront_op(struct pcifront_dev *dev, struct xen_pci_op *op)
{
    dev->info->op = *op;
    /* Make sure info is written before the flag */
    wmb();
    set_bit(_XEN_PCIF_active, (void*) &dev->info->flags);
    notify_remote_via_evtchn(dev->evtchn);

    wait_event(pcifront_queue, !test_bit(_XEN_PCIF_active, (void*) &dev->info->flags));

    /* Make sure flag is read before info */
    rmb();
    *op = dev->info->op;
}
Code Example #19
File: tpmfront.c  Project: dzan/xenOnArm
int tpmfront_send(struct tpmfront_dev* dev, const uint8_t* msg, size_t length)
{
   unsigned int offset;
   vtpm_shared_page_t* shr = NULL;
#ifdef TPMFRONT_PRINT_DEBUG
   int i;
#endif
   /* Error Checking */
   if(dev == NULL || dev->state != XenbusStateConnected) {
      TPMFRONT_ERR("Tried to send message through disconnected frontend\n");
      return -1;
   }
   shr = dev->page;

#ifdef TPMFRONT_PRINT_DEBUG
   TPMFRONT_DEBUG("Sending Msg to backend size=%u", (unsigned int) length);
   for(i = 0; i < length; ++i) {
      if(!(i % 30)) {
	 TPMFRONT_DEBUG_MORE("\n");
      }
      TPMFRONT_DEBUG_MORE("%02X ", msg[i]);
   }
   TPMFRONT_DEBUG_MORE("\n");
#endif

   /* Copy to shared pages now */
   offset = sizeof(*shr);
   if (length + offset > PAGE_SIZE) {
      TPMFRONT_ERR("Message too long for shared page\n");
      return -1;
   }
   memcpy(offset + (uint8_t*)shr, msg, length);
   shr->length = length;
   barrier();
   shr->state = VTPM_STATE_SUBMIT;

   dev->waiting = 1;
   dev->resplen = 0;
#ifdef HAVE_LIBC
   if(dev->fd >= 0) {
      files[dev->fd].read = 0;
      files[dev->fd].tpmfront.respgot = 0;
      files[dev->fd].tpmfront.offset = 0;
   }
#endif
   wmb();
   notify_remote_via_evtchn(dev->evtchn);
   return 0;
}
Code Example #20
File: netfront.c  Project: farewellkou/xen
void netfront_xmit(struct netfront_dev *dev, unsigned char* data,int len)
{
    int flags;
    struct netif_tx_request *tx;
    RING_IDX i;
    int notify;
    unsigned short id;
    struct net_buffer* buf;
    void* page;

    //printf("netfront_xmit\n"); //farewellkou

    BUG_ON(len > PAGE_SIZE);

    down(&dev->tx_sem);

    local_irq_save(flags);
    id = get_id_from_freelist(dev->tx_freelist);
    local_irq_restore(flags);

    buf = &dev->tx_buffers[id];
    page = buf->page;
    if (!page)
	page = buf->page = (char*) alloc_page();

    i = dev->tx.req_prod_pvt;
    tx = RING_GET_REQUEST(&dev->tx, i);

    memcpy(page,data,len);

    buf->gref = 
        tx->gref = gnttab_grant_access(dev->dom,virt_to_mfn(page),1);

    tx->offset=0;
    tx->size = len;
    tx->flags=0;
    tx->id = id;
    dev->tx.req_prod_pvt = i + 1;

    wmb();

    RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&dev->tx, notify);

    if(notify) notify_remote_via_evtchn(dev->evtchn);

    local_irq_save(flags);
    network_tx_buf_gc(dev);
    local_irq_restore(flags);
}
Code Example #21
/**
 * xb_write - low level write
 * @data: buffer to send
 * @len: length of buffer
 *
 * Returns 0 on success, error otherwise.
 */
int xb_write(const void *data, unsigned len)
{
	struct xenstore_domain_interface *intf = xen_store_interface;
	XENSTORE_RING_IDX cons, prod;
	int rc;

	while (len != 0) {
		void *dst;
		unsigned int avail;

		rc = wait_event_interruptible(
			xb_waitq,
			(intf->req_prod - intf->req_cons) !=
			XENSTORE_RING_SIZE);
		if (rc < 0)
			return rc;

		/* Read indexes, then verify. */
		cons = intf->req_cons;
		prod = intf->req_prod;
		if (!check_indexes(cons, prod)) {
			intf->req_cons = intf->req_prod = 0;
			return -EIO;
		}

		dst = get_output_chunk(cons, prod, intf->req, &avail);
		if (avail == 0)
			continue;
		if (avail > len)
			avail = len;

		/* Must write data /after/ reading the consumer index. */
		mb();

		memcpy(dst, data, avail);
		data += avail;
		len -= avail;

		/* Other side must not see new producer until data is there. */
		wmb();
		intf->req_prod += avail;

		/* Implies mb(): other side will see the updated producer. */
		notify_remote_via_evtchn(xen_store_evtchn);
	}

	return 0;
}
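
xb_write and xb_read move raw bytes; a xenstore request is normally framed with a struct xsd_sockmsg header in front of the payload. A hedged caller sketch under that assumption (the request type, id, and helper name are illustrative, and the real drivers serialize these writes under a request mutex):

/* Hypothetical helper: frame one request as header + payload, assuming the
 * xsd_sockmsg layout from <xen/interface/io/xs_wire.h>. */
static int example_send_request(const void *payload, unsigned int len)
{
	struct xsd_sockmsg hdr = {
		.type   = XS_WRITE,	/* illustrative request type */
		.req_id = 1,		/* echoed back in the reply */
		.tx_id  = 0,		/* no transaction */
		.len    = len,		/* payload length */
	};
	int rc;

	/* Each xb_write copies through the shared ring and notifies the
	 * store via notify_remote_via_evtchn(xen_store_evtchn). */
	rc = xb_write(&hdr, sizeof(hdr));
	if (rc < 0)
		return rc;
	return xb_write(payload, len);
}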
Code Example #22
File: xenbus_comms.c  Project: 020gzh/linux
int xb_read(void *data, unsigned len)
{
	struct xenstore_domain_interface *intf = xen_store_interface;
	XENSTORE_RING_IDX cons, prod;
	int rc;

	while (len != 0) {
		unsigned int avail;
		const char *src;

		rc = xb_wait_for_data_to_read();
		if (rc < 0)
			return rc;

		/* Read indexes, then verify. */
		cons = intf->rsp_cons;
		prod = intf->rsp_prod;
		if (!check_indexes(cons, prod)) {
			intf->rsp_cons = intf->rsp_prod = 0;
			return -EIO;
		}

		src = get_input_chunk(cons, prod, intf->rsp, &avail);
		if (avail == 0)
			continue;
		if (avail > len)
			avail = len;

		/* Must read data /after/ reading the producer index. */
		virt_rmb();

		memcpy(data, src, avail);
		data += avail;
		len -= avail;

		/* Other side must not see free space until we've copied out */
		virt_mb();
		intf->rsp_cons += avail;

		pr_debug("Finished read of %i bytes (%i to go)\n", avail, len);

		/* Implies mb(): other side will see the updated consumer. */
		notify_remote_via_evtchn(xen_store_evtchn);
	}

	return 0;
}
Code Example #23
File: fbfront.c  Project: farewellkou/xen
static void fbfront_out_event(struct fbfront_dev *dev, union xenfb_out_event *event)
{
    struct xenfb_page *page = dev->page;
    uint32_t prod;
    DEFINE_WAIT(w);

    add_waiter(w, fbfront_queue);
    while (page->out_prod - page->out_cons == XENFB_OUT_RING_LEN)
        schedule();
    remove_waiter(w, fbfront_queue);

    prod = page->out_prod;
    mb(); /* ensure ring space available */
    XENFB_OUT_RING_REF(page, prod) = *event;
    wmb(); /* ensure ring contents visible */
    page->out_prod = prod + 1;
    notify_remote_via_evtchn(dev->evtchn);
}
Code Example #24
/* Process PCI operation */
static int
do_pci_op(struct pcifront_device *pdev, struct xen_pci_op *op)
{
	int err = 0;
	struct xen_pci_op *active_op = &pdev->sh_info->op;
	evtchn_port_t port = pdev->evtchn;
	time_t timeout;

	mtx_lock(&pdev->sh_info_lock);

	memcpy(active_op, op, sizeof(struct xen_pci_op));

	/* Go */
	wmb();
	set_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags);
	notify_remote_via_evtchn(port);

	timeout = time_uptime + 2;

	clear_evtchn(port);

	/* Spin while waiting for the answer */
	while (test_bit
	       (_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags)) {
		int err = HYPERVISOR_poll(&port, 1, 3 * hz);
		if (err)
			panic("Failed HYPERVISOR_poll: err=%d", err);
		clear_evtchn(port);
		if (time_uptime > timeout) {
			WPRINTF("pciback not responding!!!\n");
			clear_bit(_XEN_PCIF_active,
				  (unsigned long *)&pdev->sh_info->flags);
			err = XEN_PCI_ERR_dev_not_found;
			goto out;
		}
	}

	memcpy(op, active_op, sizeof(struct xen_pci_op));

	err = op->err;
 out:
	mtx_unlock(&pdev->sh_info_lock);
	return err;
}
Code Example #25
File: blkfront.c  Project: jonludlam/mini-os
static void blkfront_push_operation(struct blkfront_dev *dev, uint8_t op, uint64_t id)
{
    int i;
    struct blkif_request *req;
    int notify;

    blkfront_wait_slot(dev);
    i = dev->ring.req_prod_pvt;
    req = RING_GET_REQUEST(&dev->ring, i);
    req->operation = op;
    req->nr_segments = 0;
    req->handle = dev->handle;
    req->id = id;
    /* Not needed anyway, but the backend will check it */
    req->sector_number = 0;
    dev->ring.req_prod_pvt = i + 1;
    wmb();
    RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&dev->ring, notify);
    if (notify) notify_remote_via_evtchn(dev->evtchn);
}
Code Example #26
File: xen-tpmfront.c  Project: Anjali05/linux
static int vtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
{
	struct tpm_private *priv = dev_get_drvdata(&chip->dev);
	struct vtpm_shared_page *shr = priv->shr;
	unsigned int offset = shr_data_offset(shr);

	u32 ordinal;
	unsigned long duration;

	if (offset > PAGE_SIZE)
		return -EINVAL;

	if (offset + count > PAGE_SIZE)
		return -EINVAL;

	/* Wait for completion of any existing command or cancellation */
	if (wait_for_tpm_stat(chip, VTPM_STATUS_IDLE, chip->timeout_c,
			&priv->read_queue, true) < 0) {
		vtpm_cancel(chip);
		return -ETIME;
	}

	memcpy(offset + (u8 *)shr, buf, count);
	shr->length = count;
	barrier();
	shr->state = VTPM_STATE_SUBMIT;
	wmb();
	notify_remote_via_evtchn(priv->evtchn);

	ordinal = be32_to_cpu(((struct tpm_header *)buf)->ordinal);
	duration = tpm_calc_ordinal_duration(chip, ordinal);

	if (wait_for_tpm_stat(chip, VTPM_STATUS_IDLE, duration,
			&priv->read_queue, true) < 0) {
		/* got a signal or timeout, try to cancel */
		vtpm_cancel(chip);
		return -ETIME;
	}

	return 0;
}
Code Example #27
File: netfront.c  Project: cnplab/mini-os
void init_rx_buffers(struct netfront_dev *dev)
{
	struct net_rxbuffer* buf;
	int i, requeue_idx;
	netif_rx_request_t *req;
	int notify;

	/* Rebuild the RX buffer freelist and the RX ring itself. */
	for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
		req = RING_GET_REQUEST(&dev->rx, requeue_idx);

#ifndef CONFIG_NETFRONT_PERSISTENT_GRANTS
		buf = netfront_get_rxbuffer(dev);
		if (buf == NULL)
			break; /* out of rx buffers */
		dev->rx_buffers[requeue_idx] = buf;
#else
		buf = &dev->rx_buffers[requeue_idx];
#endif
		buf->gref = req->gref =
			gnttab_grant_access(dev->dom,virt_to_mfn(buf->page),0);
		BUG_ON(buf->gref == GRANT_INVALID_REF);

		req->id = requeue_idx;

		requeue_idx++;
	}

	dev->rx.req_prod_pvt = requeue_idx;

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&dev->rx, notify);

	if (notify)
		notify_remote_via_evtchn(dev->rx_evtchn);

	dev->rx.sring->rsp_event = dev->rx.rsp_cons + 1;
}
Code Example #28
File: xenbus.c  Project: piyushroshan/xen-4.3.2
/* Send data to xenbus.  This can block.  All of the requests are seen
   by xenbus as if sent atomically.  The header is added
   automatically, using type %type, req_id %req_id, and trans_id
   %trans_id. */
static void xb_write(int type, int req_id, xenbus_transaction_t trans_id,
		     const struct write_req *req, int nr_reqs)
{
    XENSTORE_RING_IDX prod;
    int r;
    int len = 0;
    const struct write_req *cur_req;
    int req_off;
    int total_off;
    int this_chunk;
    struct xsd_sockmsg m = {.type = type, .req_id = req_id,
        .tx_id = trans_id };
    struct write_req header_req = { &m, sizeof(m) };

    for (r = 0; r < nr_reqs; r++)
        len += req[r].len;
    m.len = len;
    len += sizeof(m);

    cur_req = &header_req;

    BUG_ON(len > XENSTORE_RING_SIZE);
    /* Wait for the ring to drain to the point where we can send the
       message. */
    prod = xenstore_buf->req_prod;
    if (prod + len - xenstore_buf->req_cons > XENSTORE_RING_SIZE) 
    {
        /* Wait for there to be space on the ring */
        DEBUG("prod %d, len %d, cons %d, size %d; waiting.\n",
                prod, len, xenstore_buf->req_cons, XENSTORE_RING_SIZE);
        wait_event(xb_waitq,
                xenstore_buf->req_prod + len - xenstore_buf->req_cons <=
                XENSTORE_RING_SIZE);
        DEBUG("Back from wait.\n");
        prod = xenstore_buf->req_prod;
    }

    /* We're now guaranteed to be able to send the message without
       overflowing the ring.  Do so. */
    total_off = 0;
    req_off = 0;
    while (total_off < len) 
    {
        this_chunk = min(cur_req->len - req_off,
                XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(prod));
        memcpy((char *)xenstore_buf->req + MASK_XENSTORE_IDX(prod),
                (char *)cur_req->data + req_off, this_chunk);
        prod += this_chunk;
        req_off += this_chunk;
        total_off += this_chunk;
        if (req_off == cur_req->len) 
        {
            req_off = 0;
            if (cur_req == &header_req)
                cur_req = req;
            else
                cur_req++;
        }
    }

    DEBUG("Complete main loop of xb_write.\n");
    BUG_ON(req_off != 0);
    BUG_ON(total_off != len);
    BUG_ON(prod > xenstore_buf->req_cons + XENSTORE_RING_SIZE);

    /* Remote must see entire message before updating indexes */
    wmb();

    xenstore_buf->req_prod += len;

    /* Send evtchn to notify remote */
    notify_remote_via_evtchn(start_info.store_evtchn);
}
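
A hedged caller sketch for the mini-os xb_write above: the xsd_sockmsg header is built inside xb_write, so the caller only supplies the body as write_req chunks. The path/value payload and the request id of 0 are illustrative assumptions (the real mini-os xenbus code allocates request ids and waits for the reply).

/* Illustrative only: submit a two-chunk XS_WRITE-style request. */
static void example_xb_send(const char *path, const char *value)
{
    struct write_req req[] = {
        { path,  strlen(path) + 1 },   /* key, including its NUL terminator */
        { value, strlen(value) },      /* value bytes */
    };

    /* xb_write prepends the header and notifies the store event channel. */
    xb_write(XS_WRITE, 0, XBT_NIL, req, sizeof(req) / sizeof(req[0]));
}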
Code Example #29
File: hvc_xen.c  Project: CSCLOG/beaglebone
static inline void notify_daemon(void)
{
	/* Use evtchn: this is called early, before irq is set up. */
	notify_remote_via_evtchn(xen_start_info->console.domU.evtchn);
}
Code Example #30
File: netfront.c  Project: a2k2/xen-unstable
void network_rx(struct netfront_dev *dev)
{
    RING_IDX rp,cons,req_prod;
    struct netif_rx_response *rx;
    int nr_consumed, some, more, i, notify;


moretodo:
    rp = dev->rx.sring->rsp_prod;
    rmb(); /* Ensure we see queued responses up to 'rp'. */
    cons = dev->rx.rsp_cons;

    nr_consumed = 0;
    some = 0;
    while ((cons != rp) && !some)
    {
        struct net_buffer* buf;
        unsigned char* page;
        int id;

        rx = RING_GET_RESPONSE(&dev->rx, cons);

        if (rx->flags & NETRXF_extra_info)
        {
            printk("+++++++++++++++++++++ we have extras!\n");
            ++cons;    /* consume the slot so the loop makes progress */
            continue;
        }

        if (rx->status == NETIF_RSP_NULL) {
            ++cons;    /* likewise, skip the empty response */
            continue;
        }

        id = rx->id;
        BUG_ON(id >= NET_TX_RING_SIZE);

        buf = &dev->rx_buffers[id];
        page = (unsigned char*)buf->page;
        gnttab_end_access(buf->gref);

        if(rx->status>0)
        {
#ifdef HAVE_LIBC
	    if (dev->netif_rx == NETIF_SELECT_RX) {
		int len = rx->status;
		ASSERT(current == main_thread);
		if (len > dev->len)
		    len = dev->len;
		memcpy(dev->data, page+rx->offset, len);
		dev->rlen = len;
		some = 1;
	    } else
#endif
		dev->netif_rx(page+rx->offset,rx->status);
        }

        nr_consumed++;

        ++cons;
    }
    dev->rx.rsp_cons=cons;

    RING_FINAL_CHECK_FOR_RESPONSES(&dev->rx,more);
    if(more && !some) goto moretodo;

    req_prod = dev->rx.req_prod_pvt;

    for(i=0; i<nr_consumed; i++)
    {
        int id = xennet_rxidx(req_prod + i);
        netif_rx_request_t *req = RING_GET_REQUEST(&dev->rx, req_prod + i);
        struct net_buffer* buf = &dev->rx_buffers[id];
        void* page = buf->page;

        /* We are sure to have free gnttab entries since they got released above */
        buf->gref = req->gref = 
            gnttab_grant_access(dev->dom,virt_to_mfn(page),0);

        req->id = id;
    }

    wmb();

    dev->rx.req_prod_pvt = req_prod + i;
    
    RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&dev->rx, notify);
    if (notify)
        notify_remote_via_evtchn(dev->evtchn);

}