Code example #1
File: pagein.c Project: lwhibernate/xen
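/*
 * xenpaging helper thread: drains queued gfns under page_in_mutex, then
 * maps them read-only and immediately unmaps them. The mapping itself is
 * what matters: touching a paged-out gfn triggers the pager to bring the
 * page back in.
 */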
static void *page_in(void *arg)
{
    struct page_in_args *pia = arg;
    void *page;
    int i, num;
    xen_pfn_t gfns[XENPAGING_PAGEIN_QUEUE_SIZE];

    while (1)
    {
        pthread_mutex_lock(&page_in_mutex);
        while (!page_in_request)
            pthread_cond_wait(&page_in_cond, &page_in_mutex);
        num = 0;
        for (i = 0; i < XENPAGING_PAGEIN_QUEUE_SIZE; i++)
        {
            if (!pia->pagein_queue[i])
                continue;
            gfns[num] = pia->pagein_queue[i];
            pia->pagein_queue[i] = 0;
            num++;
        }
        page_in_request = 0;
        pthread_mutex_unlock(&page_in_mutex);

        /* Ignore errors */
        page = xc_map_foreign_pages(pia->xch, pia->dom, PROT_READ, gfns, num);
        if (page)
            munmap(page, PAGE_SIZE * num);
    }
    page_in_possible = 0;
    pthread_exit(NULL);
}
Code example #2
File: xc_freebsd_osdep.c Project: CPFL/xen
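/*
 * Flattens nentries chunks of chunksize bytes (starting at entries[i].mfn)
 * into one xen_pfn_t array and maps them all with a single
 * xc_map_foreign_pages() call.
 */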
static void *freebsd_privcmd_map_foreign_ranges(xc_interface *xch,
                                                xc_osdep_handle h,
                                                uint32_t dom, size_t size,
                                                int prot, size_t chunksize,
                                                privcmd_mmap_entry_t entries[],
                                                int nentries)
{
    xen_pfn_t *arr;
    int num_per_entry;
    int num;
    int i;
    int j;
    void *ret;

    num_per_entry = chunksize >> XC_PAGE_SHIFT;
    num = num_per_entry * nentries;
    arr = calloc(num, sizeof(xen_pfn_t));
    if ( arr == NULL )
        return NULL;

    for ( i = 0; i < nentries; i++ )
        for ( j = 0; j < num_per_entry; j++ )
            arr[i * num_per_entry + j] = entries[i].mfn + j;

    ret = xc_map_foreign_pages(xch, dom, prot, arr, num);
    free(arr);
    return ret;
}
Code example #3
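/*
 * Interposed shmat(): when the shmid matches the command page, builds a
 * pfn table from the supplied mfn list and maps those frames from the
 * foreign domain instead of attaching System V shared memory.
 */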
void *shmat(int shmid, const void *shmaddr, int shmflg)
{
	int i;
	xen_pfn_t *pfntable;
	char *fakeaddr;
	long fakesize;
	if (!cmd_pages || shmid != cmd_pages->shmid)
		return real_shmat(shmid, shmaddr, shmflg);
	if (cmd_pages->off >= 4096 || cmd_pages->num_mfn > MAX_MFN_COUNT
	    || cmd_pages->num_mfn == 0) {
		errno = EINVAL;
		return MAP_FAILED;
	}
	pfntable = alloca(sizeof(xen_pfn_t) * cmd_pages->num_mfn);
#ifdef DEBUG
	fprintf(stderr, "size=%d table=%p\n", cmd_pages->num_mfn,
		pfntable);
#endif
	for (i = 0; i < cmd_pages->num_mfn; i++)
		pfntable[i] = cmd_pages->mfns[i];
#ifdef BACKEND_VMM_xen
	fakeaddr =
	    xc_map_foreign_pages(xc_hnd, cmd_pages->domid, PROT_READ,
				 pfntable, cmd_pages->num_mfn);
#else
#error "shmoverride implemented only for Xen"
#endif
	fakesize = 4096 * cmd_pages->num_mfn;
#ifdef DEBUG
	fprintf(stderr, "num=%d, addr=%p, len=%d\n",
		cmd_pages->num_mfn, fakeaddr, list_len);
#endif
	if (fakeaddr && fakeaddr != MAP_FAILED) {
		list_insert(addr_list, (long) fakeaddr, (void *) fakesize);
		list_len++;
		return fakeaddr + cmd_pages->off;
	} else {
		errno = ENOMEM;
		return MAP_FAILED;
	}
}
Code example #4
File: xc_freebsd_osdep.c Project: CPFL/xen
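/*
 * Maps `size` bytes of machine memory starting at mfn: rounds the size up
 * to whole pages, builds a sequential pfn array, and delegates to
 * xc_map_foreign_pages().
 */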
static void *freebsd_privcmd_map_foreign_range(xc_interface *xch,
                                               xc_osdep_handle h,
                                               uint32_t dom, int size, int prot,
                                               unsigned long mfn)
{
    xen_pfn_t *arr;
    int num;
    int i;
    void *ret;

    num = (size + XC_PAGE_SIZE - 1) >> XC_PAGE_SHIFT;
    arr = calloc(num, sizeof(xen_pfn_t));
    if ( arr == NULL )
        return NULL;

    for ( i = 0; i < num; i++ )
        arr[i] = mfn + i;

    ret = xc_map_foreign_pages(xch, dom, prot, arr, num);
    free(arr);
    return ret;
}
Code example #5
File: xc_gnttab.c Project: CrazyXen/XEN_CODE
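/*
 * Queries the size of a domain's grant table, fetches its frame list via
 * GNTTABOP_setup_table, and maps the whole table read-only as an array of
 * grant_entry_v1_t.
 */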
static void *_gnttab_map_table(xc_interface *xch, int domid, int *gnt_num)
{
    int rc, i;
    struct gnttab_query_size query;
    struct gnttab_setup_table setup;
    DECLARE_HYPERCALL_BUFFER(unsigned long, frame_list);
    xen_pfn_t *pfn_list = NULL;
    grant_entry_v1_t *gnt = NULL;

    if ( !gnt_num )
        return NULL;

    query.dom = domid;
    rc = xc_gnttab_op(xch, GNTTABOP_query_size, &query, sizeof(query), 1);

    if ( rc || (query.status != GNTST_okay) )
    {
        ERROR("Could not query dom's grant size\n", domid);
        return NULL;
    }

    *gnt_num = query.nr_frames * (PAGE_SIZE / sizeof(grant_entry_v1_t) );

    frame_list = xc_hypercall_buffer_alloc(xch, frame_list, query.nr_frames * sizeof(unsigned long));
    if ( !frame_list )
    {
        ERROR("Could not allocate frame_list in xc_gnttab_map_table\n");
        return NULL;
    }

    pfn_list = malloc(query.nr_frames * sizeof(xen_pfn_t));
    if ( !pfn_list )
    {
        ERROR("Could not allocate pfn_list in xc_gnttab_map_table\n");
        goto err;
    }

    setup.dom = domid;
    setup.nr_frames = query.nr_frames;
    set_xen_guest_handle(setup.frame_list, frame_list);

    /* XXX Any race with other setup_table hypercall? */
    rc = xc_gnttab_op(xch, GNTTABOP_setup_table, &setup, sizeof(setup),
                      1);

    if ( rc || (setup.status != GNTST_okay) )
    {
        ERROR("Could not get grant table frame list\n");
        goto err;
    }

    for ( i = 0; i < setup.nr_frames; i++ )
        pfn_list[i] = frame_list[i];

    gnt = xc_map_foreign_pages(xch, domid, PROT_READ, pfn_list,
                               setup.nr_frames);
    if ( !gnt )
    {
        ERROR("Could not map grant table\n");
        goto err;
    }

err:
    if ( frame_list )
        xc_hypercall_buffer_free(xch, frame_list);
    if ( pfn_list )
        free(pfn_list);

    return gnt;
}
Code example #6
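/*
 * Maps the guest framebuffer: determines the page-directory word size from
 * the negotiated protocol (or by guessing), maps the directory pages
 * temporarily to collect the framebuffer mfns, then maps the pixel pages
 * read/write.
 */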
static int xenfb_map_fb(struct XenFB *xenfb)
{
    struct xenfb_page *page = xenfb->c.page;
    char *protocol = xenfb->c.xendev.protocol;
    int n_fbdirs;
    unsigned long *pgmfns = NULL;
    unsigned long *fbmfns = NULL;
    void *map, *pd;
    int mode, ret = -1;

    /* default to native */
    pd = page->pd;
    mode = sizeof(unsigned long) * 8;

    if (!protocol) {
	/*
	 * Undefined protocol, some guesswork needed.
	 *
	 * Old frontends which don't set the protocol use
	 * one page directory only, thus pd[1] must be zero.
	 * pd[1] of the 32bit struct layout and the lower
	 * 32 bits of pd[0] of the 64bit struct layout have
	 * the same location, so we can check that ...
	 */
	uint32_t *ptr32 = NULL;
	uint32_t *ptr64 = NULL;
#if defined(__i386__)
	ptr32 = (void*)page->pd;
	ptr64 = ((void*)page->pd) + 4;
#elif defined(__x86_64__)
	ptr32 = ((void*)page->pd) - 4;
	ptr64 = (void*)page->pd;
#endif
	if (ptr32) {
	    if (ptr32[1] == 0) {
		mode = 32;
		pd   = ptr32;
	    } else {
		mode = 64;
		pd   = ptr64;
	    }
	}
#if defined(__x86_64__)
    } else if (strcmp(protocol, XEN_IO_PROTO_ABI_X86_32) == 0) {
	/* 64bit dom0, 32bit domU */
	mode = 32;
	pd   = ((void*)page->pd) - 4;
#elif defined(__i386__)
    } else if (strcmp(protocol, XEN_IO_PROTO_ABI_X86_64) == 0) {
	/* 32bit dom0, 64bit domU */
	mode = 64;
	pd   = ((void*)page->pd) + 4;
#endif
    }

    if (xenfb->pixels) {
        munmap(xenfb->pixels, xenfb->fbpages * XC_PAGE_SIZE);
        xenfb->pixels = NULL;
    }

    xenfb->fbpages = (xenfb->fb_len + (XC_PAGE_SIZE - 1)) / XC_PAGE_SIZE;
    n_fbdirs = xenfb->fbpages * mode / 8;
    n_fbdirs = (n_fbdirs + (XC_PAGE_SIZE - 1)) / XC_PAGE_SIZE;

    pgmfns = qemu_mallocz(sizeof(unsigned long) * n_fbdirs);
    fbmfns = qemu_mallocz(sizeof(unsigned long) * xenfb->fbpages);

    xenfb_copy_mfns(mode, n_fbdirs, pgmfns, pd);
    map = xc_map_foreign_pages(xen_xc, xenfb->c.xendev.dom,
			       PROT_READ, pgmfns, n_fbdirs);
    if (map == NULL)
	goto out;
    xenfb_copy_mfns(mode, xenfb->fbpages, fbmfns, map);
    munmap(map, n_fbdirs * XC_PAGE_SIZE);

    xenfb->pixels = xc_map_foreign_pages(xen_xc, xenfb->c.xendev.dom,
					 PROT_READ | PROT_WRITE, fbmfns, xenfb->fbpages);
    if (xenfb->pixels == NULL)
	goto out;

    ret = 0; /* all is fine */

out:
    qemu_free(pgmfns);
    qemu_free(fbmfns);
    return ret;
}
Code example #7
File: xenbaked.c Project: jctemkin/xen
/**
 * map_tbufs - memory map Xen trace buffers into user space
 * @tbufs_mfn: mfn of the trace buffers
 * @num:       number of trace buffers to map
 * @tinfo_size: size of the t_info metadata area to map
 *
 * Maps the Xen trace buffers into the process address space.
 */
static struct t_struct *map_tbufs(unsigned long tbufs_mfn, unsigned int num,
                                  unsigned long tinfo_size)
{
    xc_interface *xc_handle;
    static struct t_struct tbufs = { 0 };
    int i;

    xc_handle = xc_interface_open(0,0,0);
    if ( !xc_handle ) 
    {
        exit(EXIT_FAILURE);
    }

    /* Map t_info metadata structure */
    tbufs.t_info = xc_map_foreign_range(xc_handle, DOMID_XEN, tinfo_size,
                                        PROT_READ, tbufs_mfn);

    if ( tbufs.t_info == 0 ) 
    {
        PERROR("Failed to mmap trace buffers");
        exit(EXIT_FAILURE);
    }

    if ( tbufs.t_info->tbuf_size == 0 )
    {
        fprintf(stderr, "%s: tbuf_size 0!\n", __func__);
        exit(EXIT_FAILURE);
    }

    /* Map per-cpu buffers */
    tbufs.meta = (struct t_buf **)calloc(num, sizeof(struct t_buf *));
    tbufs.data = (unsigned char **)calloc(num, sizeof(unsigned char *));
    if ( tbufs.meta == NULL || tbufs.data == NULL )
    {
        PERROR( "Failed to allocate memory for buffer pointers\n");
        exit(EXIT_FAILURE);
    }

    for ( i = 0; i < num; i++ )
    {
        const uint32_t *mfn_list = (const uint32_t *)tbufs.t_info
                                   + tbufs.t_info->mfn_offset[i];
        int j;
        xen_pfn_t pfn_list[tbufs.t_info->tbuf_size];

        for ( j=0; j<tbufs.t_info->tbuf_size; j++)
            pfn_list[j] = (xen_pfn_t)mfn_list[j];

        tbufs.meta[i] = xc_map_foreign_pages(xc_handle, DOMID_XEN,
                                             PROT_READ | PROT_WRITE,
                                             pfn_list,
                                             tbufs.t_info->tbuf_size);
        if ( tbufs.meta[i] == NULL )
        {
            PERROR("Failed to map cpu buffer!");
            exit(EXIT_FAILURE);
        }
        tbufs.data[i] = (unsigned char *)(tbufs.meta[i]+1);
    }

    xc_interface_close(xc_handle);

    return &tbufs;
}
Code example #8
File: xc_vm_event.c Project: 0day-ci/xen
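/*
 * Sets up a vm_event ring: pauses the domain, locates (or populates) the
 * ring gfn, maps it read/write, enables the requested vm_event subsystem,
 * and finally removes the ring page from the guest's physmap.
 */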
void *xc_vm_event_enable(xc_interface *xch, domid_t domain_id, int param,
                         uint32_t *port)
{
    void *ring_page = NULL;
    uint64_t pfn;
    xen_pfn_t ring_pfn, mmap_pfn;
    unsigned int op, mode;
    int rc1, rc2, saved_errno;

    if ( !port )
    {
        errno = EINVAL;
        return NULL;
    }

    /* Pause the domain for ring page setup */
    rc1 = xc_domain_pause(xch, domain_id);
    if ( rc1 != 0 )
    {
        PERROR("Unable to pause domain\n");
        return NULL;
    }

    /* Get the pfn of the ring page */
    rc1 = xc_hvm_param_get(xch, domain_id, param, &pfn);
    if ( rc1 != 0 )
    {
        PERROR("Failed to get pfn of ring page\n");
        goto out;
    }

    ring_pfn = pfn;
    mmap_pfn = pfn;
    rc1 = xc_get_pfn_type_batch(xch, domain_id, 1, &mmap_pfn);
    if ( rc1 || mmap_pfn & XEN_DOMCTL_PFINFO_XTAB )
    {
        /* Page not in the physmap, try to populate it */
        rc1 = xc_domain_populate_physmap_exact(xch, domain_id, 1, 0, 0,
                                              &ring_pfn);
        if ( rc1 != 0 )
        {
            PERROR("Failed to populate ring pfn\n");
            goto out;
        }
    }

    mmap_pfn = ring_pfn;
    ring_page = xc_map_foreign_pages(xch, domain_id, PROT_READ | PROT_WRITE,
                                         &mmap_pfn, 1);
    if ( !ring_page )
    {
        PERROR("Could not map the ring page\n");
        goto out;
    }

    switch ( param )
    {
    case HVM_PARAM_PAGING_RING_PFN:
        op = XEN_VM_EVENT_ENABLE;
        mode = XEN_DOMCTL_VM_EVENT_OP_PAGING;
        break;

    case HVM_PARAM_MONITOR_RING_PFN:
        op = XEN_VM_EVENT_ENABLE;
        mode = XEN_DOMCTL_VM_EVENT_OP_MONITOR;
        break;

    case HVM_PARAM_SHARING_RING_PFN:
        op = XEN_VM_EVENT_ENABLE;
        mode = XEN_DOMCTL_VM_EVENT_OP_SHARING;
        break;

    /*
     * This covers the outside chance that the HVM_PARAM is valid but
     * invalid as far as vm_event is concerned.
     */
    default:
        errno = EINVAL;
        rc1 = -1;
        goto out;
    }

    rc1 = xc_vm_event_control(xch, domain_id, op, mode, port);
    if ( rc1 != 0 )
    {
        PERROR("Failed to enable vm_event\n");
        goto out;
    }

    /* Remove the ring_pfn from the guest's physmap */
    rc1 = xc_domain_decrease_reservation_exact(xch, domain_id, 1, 0, &ring_pfn);
    if ( rc1 != 0 )
        PERROR("Failed to remove ring page from guest physmap");

 out:
    saved_errno = errno;

    rc2 = xc_domain_unpause(xch, domain_id);
    if ( rc1 != 0 || rc2 != 0 )
    {
        if ( rc2 != 0 )
        {
            if ( rc1 == 0 )
                saved_errno = errno;
            PERROR("Unable to unpause domain");
        }

        if ( ring_page )
            xenforeignmemory_unmap(xch->fmem, ring_page, 1);
        ring_page = NULL;

        errno = saved_errno;
    }

    return ring_page;
}
Code example #9
File: xc_resume.c Project: Fantu/Xen
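/*
 * Non-cooperative resume path: on x86 PV guests, rewrites the store and
 * console frame numbers in start_info from PFNs back to MFNs via the
 * guest's p2m, resets all secondary vcpus, then issues
 * XEN_DOMCTL_resumedomain.
 */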
static int xc_domain_resume_any(xc_interface *xch, uint32_t domid)
{
    DECLARE_DOMCTL;
    xc_dominfo_t info;
    int i, rc = -1;
#if defined(__i386__) || defined(__x86_64__)
    struct domain_info_context _dinfo = { .guest_width = 0,
                                          .p2m_size = 0 };
    struct domain_info_context *dinfo = &_dinfo;
    unsigned long mfn;
    vcpu_guest_context_any_t ctxt;
    start_info_t *start_info;
    shared_info_t *shinfo = NULL;
    xen_pfn_t *p2m_frame_list_list = NULL;
    xen_pfn_t *p2m_frame_list = NULL;
    xen_pfn_t *p2m = NULL;
#endif

    if ( xc_domain_getinfo(xch, domid, 1, &info) != 1 )
    {
        PERROR("Could not get domain info");
        return rc;
    }

    /*
     * (x86 only) Rewrite store_mfn and console_mfn back to MFN (from PFN).
     */
#if defined(__i386__) || defined(__x86_64__)
    if ( info.hvm )
    {
        ERROR("Cannot resume uncooperative HVM guests");
        return rc;
    }

    if ( xc_domain_get_guest_width(xch, domid, &dinfo->guest_width) != 0 )
    {
        PERROR("Could not get domain width");
        return rc;
    }
    if ( dinfo->guest_width != sizeof(long) )
    {
        ERROR("Cannot resume uncooperative cross-address-size guests");
        return rc;
    }

    /* Map the shared info frame */
    shinfo = xc_map_foreign_range(xch, domid, PAGE_SIZE,
                                  PROT_READ, info.shared_info_frame);
    if ( shinfo == NULL )
    {
        ERROR("Couldn't map shared info");
        goto out;
    }

    dinfo->p2m_size = shinfo->arch.max_pfn;

    p2m_frame_list_list =
        xc_map_foreign_range(xch, domid, PAGE_SIZE, PROT_READ,
                             shinfo->arch.pfn_to_mfn_frame_list_list);
    if ( p2m_frame_list_list == NULL )
    {
        ERROR("Couldn't map p2m_frame_list_list");
        goto out;
    }

    p2m_frame_list = xc_map_foreign_pages(xch, domid, PROT_READ,
                                          p2m_frame_list_list,
                                          P2M_FLL_ENTRIES);
    if ( p2m_frame_list == NULL )
    {
        ERROR("Couldn't map p2m_frame_list");
        goto out;
    }

    /* Map all the frames of the pfn->mfn table. For migrate to succeed,
       the guest must not change which frames are used for this purpose.
       (It's not clear why it would want to change them, and we'll be OK
       from a safety POV anyhow.) */
    p2m = xc_map_foreign_pages(xch, domid, PROT_READ,
                               p2m_frame_list,
                               P2M_FL_ENTRIES);
    if ( p2m == NULL )
    {
        ERROR("Couldn't map p2m table");
        goto out;
    }

    if ( xc_vcpu_getcontext(xch, domid, 0, &ctxt) )
    {
        ERROR("Could not get vcpu context");
        goto out;
    }

    mfn = GET_FIELD(&ctxt, user_regs.edx, dinfo->guest_width);

    start_info = xc_map_foreign_range(xch, domid, PAGE_SIZE,
                                      PROT_READ | PROT_WRITE, mfn);
    if ( start_info == NULL )
    {
        ERROR("Couldn't map start_info");
        goto out;
    }

    start_info->store_mfn        = p2m[start_info->store_mfn];
    start_info->console.domU.mfn = p2m[start_info->console.domU.mfn];

    munmap(start_info, PAGE_SIZE);
#endif /* defined(__i386__) || defined(__x86_64__) */

    /* Reset all secondary CPU states. */
    for ( i = 1; i <= info.max_vcpu_id; i++ )
        if ( xc_vcpu_setcontext(xch, domid, i, NULL) != 0 )
        {
            ERROR("Couldn't reset vcpu state");
            goto out;
        }

    /* Ready to resume domain execution now. */
    domctl.cmd = XEN_DOMCTL_resumedomain;
    domctl.domain = domid;
    rc = do_domctl(xch, &domctl);

out:
#if defined(__i386__) || defined(__x86_64__)
    if (p2m)
        munmap(p2m, P2M_FL_ENTRIES*PAGE_SIZE);
    if (p2m_frame_list)
        munmap(p2m_frame_list, P2M_FLL_ENTRIES*PAGE_SIZE);
    if (p2m_frame_list_list)
        munmap(p2m_frame_list_list, PAGE_SIZE);
    if (shinfo)
        munmap(shinfo, PAGE_SIZE);
#endif

    return rc;
}

/*
 * Resume execution of a domain after suspend shutdown.
 * This can happen in one of two ways:
 *  1. Resume with special return code.
 *  2. Reset guest environment so it believes it is resumed in a new
 *     domain context.
 * (2) should be used only for guests which cannot handle the special
 * new return code. (1) is always safe (but slower).
 */
int xc_domain_resume(xc_interface *xch, uint32_t domid, int fast)
{
    return (fast
            ? xc_domain_resume_cooperative(xch, domid)
            : xc_domain_resume_any(xch, domid));
}