/*
 * Set up the mcd event interface for domain @d.
 *
 * Maps the ring page and a single shared page into Xen's address space,
 * allocates an unbound event channel for the calling (tool) domain, and
 * initialises the front ring and its lock.
 *
 * Returns 0 on success, 1 on any failure (callers only test for
 * non-zero; this mirrors the mem_event_enable() convention).
 */
LOCAL int mcd_event_enable(struct domain *d, mfn_t ring_mfn, mfn_t shared_mfn)
{
    int rc;

    /* Map ring and shared pages */
    d->mcd_event.ring_page = map_domain_page(mfn_x(ring_mfn));
    if ( d->mcd_event.ring_page == NULL )
        goto err;

    /* Only one shared page is used for now. */
    d->mcd_event.num_shared_page = 1;
    d->mcd_event.shared_page[0] = map_domain_page(mfn_x(shared_mfn));
    if ( d->mcd_event.shared_page[0] == NULL )
        goto err_ring;

    /*
     * Allocate event channel.
     * TODO: confirm whether a dedicated channel is needed, or whether
     * ring notifications alone suffice (ring notification may incur
     * additional delay before delivery).
     */
    rc = alloc_unbound_xen_event_channel(d->vcpu[0],
                                         current->domain->domain_id);
    if ( rc < 0 )
        goto err_shared;

    /*
     * The shared page's data area doubles as a general buffer, so the
     * port is copied into it rather than given a dedicated field, to
     * avoid future layout conflicts.
     */
    memcpy(((mcd_event_shared_page_t *)d->mcd_event.shared_page[0])->data,
           &rc, sizeof(int));
    d->mcd_event.xen_port = rc;

    /* Prepare ring buffer */
    FRONT_RING_INIT(&d->mcd_event.front_ring,
                    (mcd_event_sring_t *)d->mcd_event.ring_page,
                    PAGE_SIZE);

    mcd_event_ring_lock_init(d);

    /*
     * NOTE(review): unlike mem_event_enable(), paused VCPUs are NOT
     * woken here — presumably intentional, but worth confirming.
     */
    init_mcdctl();

    return 0;

 err_shared:
    unmap_domain_page(d->mcd_event.shared_page[0]);
    d->mcd_event.shared_page[0] = NULL;
 err_ring:
    unmap_domain_page(d->mcd_event.ring_page);
    d->mcd_event.ring_page = NULL;
 err:
    return 1;
}
int domain_vpl011_init(struct domain *d, struct vpl011_init_info *info) { int rc; struct vpl011 *vpl011 = &d->arch.vpl011; if ( vpl011->ring_buf ) return -EINVAL; /* Map the guest PFN to Xen address space. */ rc = prepare_ring_for_helper(d, gfn_x(info->gfn), &vpl011->ring_page, &vpl011->ring_buf); if ( rc < 0 ) goto out; rc = vgic_reserve_virq(d, GUEST_VPL011_SPI); if ( !rc ) { rc = -EINVAL; goto out1; } rc = alloc_unbound_xen_event_channel(d, 0, info->console_domid, vpl011_notification); if ( rc < 0 ) goto out2; vpl011->evtchn = info->evtchn = rc; spin_lock_init(&vpl011->lock); register_mmio_handler(d, &vpl011_mmio_handler, GUEST_PL011_BASE, GUEST_PL011_SIZE, NULL); return 0; out2: vgic_free_virq(d, GUEST_VPL011_SPI); out1: destroy_ring_for_helper(&vpl011->ring_buf, vpl011->ring_page); out: return rc; }
/*
 * Enable the memory event interface for domain @d.
 *
 * Maps the ring and shared pages, allocates an unbound event channel for
 * the calling (tool) domain and advertises its port through the shared
 * page, then initialises the front ring and wakes any paused VCPUs.
 *
 * Returns 0 on success, 1 on any failure (callers only test non-zero).
 */
static int mem_event_enable(struct domain *d, mfn_t ring_mfn, mfn_t shared_mfn)
{
    int rc;

    /* Map the ring page first; bail out early on failure. */
    d->mem_event.ring_page = map_domain_page(mfn_x(ring_mfn));
    if ( d->mem_event.ring_page == NULL )
        goto fail;

    /* Then map the page shared with the helper. */
    d->mem_event.shared_page = map_domain_page(mfn_x(shared_mfn));
    if ( d->mem_event.shared_page == NULL )
        goto fail_unmap_ring;

    /* Allocate event channel bound to the current (tool) domain. */
    rc = alloc_unbound_xen_event_channel(d->vcpu[0],
                                         current->domain->domain_id);
    if ( rc < 0 )
        goto fail_unmap_shared;

    /* Publish the port both to the helper and in our own state. */
    ((mem_event_shared_page_t *)d->mem_event.shared_page)->port = rc;
    d->mem_event.xen_port = rc;

    /* Initialise the front ring over the freshly mapped page. */
    FRONT_RING_INIT(&d->mem_event.front_ring,
                    (mem_event_sring_t *)d->mem_event.ring_page,
                    PAGE_SIZE);

    mem_event_ring_lock_init(d);

    /* Wake any VCPUs paused for memory events. */
    mem_event_unpause_vcpus(d);

    return 0;

 fail_unmap_shared:
    unmap_domain_page(d->mem_event.shared_page);
    d->mem_event.shared_page = NULL;
 fail_unmap_ring:
    unmap_domain_page(d->mem_event.ring_page);
    d->mem_event.ring_page = NULL;
 fail:
    return 1;
}
/*
 * Enable a memory event ring for domain @d.
 *
 * The ring GFN is taken from the HVM parameter selected by @param; the
 * helper-supplied @mec receives the allocated event channel port.  @med
 * is the per-ring state; @pause_flag is the per-ring VCPU pause bit and
 * @notification_fn the event-channel notification callback.
 *
 * Returns 0 on success, -EBUSY if a ring is already set up, -ENOSYS if
 * the ring GFN parameter was never set, or a negative errno from the
 * ring/event-channel setup.
 *
 * NOTE(review): the ring lock is initialised and taken here, and held
 * across the whole setup including the error path — presumably to
 * serialise against a concurrent enable/disable; confirm before
 * reordering any statements.
 */
static int mem_event_enable(
    struct domain *d,
    xen_domctl_mem_event_op_t *mec,
    struct mem_event_domain *med,
    int pause_flag,
    int param,
    xen_event_channel_notification_t notification_fn)
{
    int rc;
    unsigned long ring_gfn = d->arch.hvm_domain.params[param];

    /* Only one helper at a time. If the helper crashed,
     * the ring is in an undefined state and so is the guest.
     */
    if ( med->ring_page )
        return -EBUSY;

    /* The parameter defaults to zero, and it should be
     * set to something */
    if ( ring_gfn == 0 )
        return -ENOSYS;

    mem_event_ring_lock_init(med);
    mem_event_ring_lock(med);

    /* Map and share the guest-provided ring page with the helper. */
    rc = prepare_ring_for_helper(d, ring_gfn, &med->ring_pg_struct,
                                 &med->ring_page);
    if ( rc < 0 )
        goto err;

    /* Set the number of currently blocked vCPUs to 0. */
    med->blocked = 0;

    /* Allocate event channel */
    rc = alloc_unbound_xen_event_channel(d, 0, current->domain->domain_id,
                                         notification_fn);
    if ( rc < 0 )
        goto err;

    /* Publish the port both to the helper (via @mec) and locally. */
    med->xen_port = mec->port = rc;

    /* Prepare ring buffer */
    FRONT_RING_INIT(&med->front_ring,
                    (mem_event_sring_t *)med->ring_page,
                    PAGE_SIZE);

    /* Save the pause flag for this particular ring. */
    med->pause_flag = pause_flag;

    /* Initialize the last-chance wait queue. */
    init_waitqueue_head(&med->wq);

    mem_event_ring_unlock(med);
    return 0;

 err:
    /* Tears down the ring mapping; safe even though rc may come from
     * either failure point above. */
    destroy_ring_for_helper(&med->ring_page,
                            med->ring_pg_struct);
    mem_event_ring_unlock(med);
    return rc;
}