Example 1
static struct page_info * __init alloc_chunk(
    struct domain *d, unsigned long max_pages)
{
    struct page_info *page;
    unsigned int order;
    /*
     * Allocate up to 2MB at a time: this prevents allocating very large
     * chunks from DMA pools before the >4GB pool is fully depleted.
     */
    if ( max_pages > (2UL << (20 - PAGE_SHIFT)) )
        max_pages = 2UL << (20 - PAGE_SHIFT);
    order = get_order_from_pages(max_pages);
    /*
     * get_order_from_pages() rounds up; round back down for non-power-of-two
     * counts so the chunk never exceeds max_pages.
     */
    if ( (max_pages & (max_pages - 1)) != 0 )
        order--;
    /* Fall back to progressively smaller orders until one succeeds. */
    while ( (page = alloc_domheap_pages(d, order, 0)) == NULL )
        if ( order-- == 0 )
            break;
    return page;
}
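Every example here leans on get_order_from_pages(), which converts a page count into the smallest power-of-two order that covers it. As a point of reference, a minimal sketch consistent with the Xen definition (the canonical version lives in the arch headers):

static inline unsigned int get_order_from_pages(unsigned long nr_pages)
{
    unsigned int order;

    nr_pages--;
    for ( order = 0; nr_pages; order++ )
        nr_pages >>= 1;
    return order;
}

Because the result is rounded up (e.g. 3 pages yield order 2, i.e. 4 pages), alloc_chunk() decrements it again for non-power-of-two counts so the allocation never overshoots max_pages.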
Example 2
static int __init
nestedhvm_setup(void)
{
    /* Same format and size as hvm_io_bitmap (Intel needs only 2 pages). */
    unsigned int nr = (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) ? 2 : 3;
    unsigned int i, order = get_order_from_pages(nr);

    if ( !hvm_funcs.name )
        return 0;

    /*
     * shadow_io_bitmaps can't be declared static because they must fulfill
     * hw requirements (page aligned section) and doing so triggers the
     * ASSERT(va >= XEN_VIRT_START) in __virt_to_maddr().
     *
     * So, as a compromise, pre-allocate them when Xen boots.  This function
     * must be called from within start_xen() when it is valid to use
     * _xmalloc().
     */

    for ( i = 0; i < ARRAY_SIZE(shadow_io_bitmap); i++ )
    {
        shadow_io_bitmap[i] = alloc_xenheap_pages(order, 0);
        if ( !shadow_io_bitmap[i] )
        {
            while ( i-- )
            {
                free_xenheap_pages(shadow_io_bitmap[i], order);
                shadow_io_bitmap[i] = NULL;
            }
            return -ENOMEM;
        }
        memset(shadow_io_bitmap[i], ~0U, nr << PAGE_SHIFT);
    }

    /* Pass through the POST-code port (0x80) and the I/O delay port (0xED). */
    __clear_bit(0x80, shadow_io_bitmap[0]);
    __clear_bit(0xed, shadow_io_bitmap[1]);

    return 0;
}
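The error path above is the classic while ( i-- ) unwind idiom: on failure, free exactly the buffers that were already allocated, in reverse order, and leave the array in a clean state. A self-contained sketch of the same pattern against the standard C library (alloc_all, NR_BUFS and bufs are hypothetical names):

#include <stdlib.h>

#define NR_BUFS 3

static void *bufs[NR_BUFS];

static int alloc_all(size_t size)
{
    unsigned int i;

    for ( i = 0; i < NR_BUFS; i++ )
    {
        bufs[i] = malloc(size);
        if ( bufs[i] == NULL )
        {
            /* Roll back every allocation that already succeeded. */
            while ( i-- )
            {
                free(bufs[i]);
                bufs[i] = NULL;
            }
            return -1;
        }
    }

    return 0;
}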
Example 3
/**
 * alloc_trace_bufs - performs initialization of the per-cpu trace buffers.
 *
 * This function is called at start of day in order to initialize the per-cpu
 * trace buffers.  The trace buffers are then available for debugging use, via
 * the %TRACE_xD macros exported in <xen/trace.h>.
 *
 * This function may also be called later when enabling trace buffers 
 * via the SET_SIZE hypercall.
 */
static int alloc_trace_bufs(void)
{
    int           i, order;
    unsigned long nr_pages;
    char         *rawbuf;
    struct t_buf *buf;

    if ( opt_tbuf_size == 0 )
        return -EINVAL;

    nr_pages = num_online_cpus() * opt_tbuf_size;
    order    = get_order_from_pages(nr_pages);
    data_size = (opt_tbuf_size * PAGE_SIZE) - sizeof(struct t_buf);

    if ( (rawbuf = alloc_xenheap_pages(order, 0)) == NULL )
    {
        printk("Xen trace buffers: memory allocation failed\n");
        opt_tbuf_size = 0;
        return -EINVAL;
    }

    /* Share pages so that xentrace can map them. */
    for ( i = 0; i < nr_pages; i++ )
        share_xen_page_with_privileged_guests(
            virt_to_page(rawbuf) + i, XENSHARE_writable);

    for_each_online_cpu ( i )
    {
        buf = per_cpu(t_bufs, i) = (struct t_buf *)
            &rawbuf[i*opt_tbuf_size*PAGE_SIZE];
        buf->cons = buf->prod = 0;
        per_cpu(t_data, i) = (unsigned char *)(buf + 1);
    }

    t_buf_highwater = data_size >> 1; /* 50% high water */

    return 0;
}
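alloc_trace_bufs() makes a single contiguous allocation and then slices it into per-cpu buffers, each beginning with a struct t_buf header followed by its data area. A hedged, standalone sketch of that carving pattern (hdr_t, NCPUS and BUF_SIZE are made-up stand-ins for struct t_buf, num_online_cpus() and opt_tbuf_size * PAGE_SIZE):

#include <stdlib.h>
#include <string.h>

typedef struct { unsigned long cons, prod; } hdr_t;

#define NCPUS    4
#define BUF_SIZE 4096UL

int main(void)
{
    char *raw = malloc(NCPUS * BUF_SIZE);
    unsigned int i;

    if ( raw == NULL )
        return 1;

    for ( i = 0; i < NCPUS; i++ )
    {
        /* Header sits at the start of each slice; data follows it. */
        hdr_t *hdr = (hdr_t *)&raw[i * BUF_SIZE];

        hdr->cons = hdr->prod = 0;
        memset(hdr + 1, 0, BUF_SIZE - sizeof(*hdr));
    }

    free(raw);
    return 0;
}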
Example 4
static int alloc_xenoprof_struct(
    struct domain *d, int max_samples, int is_passive)
{
    struct vcpu *v;
    int nvcpu, npages, bufsize, max_bufsize;
    unsigned max_max_samples;
    int i;

    d->xenoprof = xmalloc(struct xenoprof);

    if ( d->xenoprof == NULL )
    {
        printk("alloc_xenoprof_struct(): memory allocation failed\n");
        return -ENOMEM;
    }

    memset(d->xenoprof, 0, sizeof(*d->xenoprof));

    d->xenoprof->vcpu = xmalloc_array(struct xenoprof_vcpu, d->max_vcpus);
    if ( d->xenoprof->vcpu == NULL )
    {
        xfree(d->xenoprof);
        d->xenoprof = NULL;
        printk("alloc_xenoprof_struct(): vcpu array allocation failed\n");
        return -ENOMEM;
    }

    memset(d->xenoprof->vcpu, 0, d->max_vcpus * sizeof(*d->xenoprof->vcpu));

    nvcpu = 0;
    for_each_vcpu ( d, v )
        nvcpu++;

    bufsize = sizeof(struct xenoprof_buf);
    i = sizeof(struct event_log);
#ifdef CONFIG_COMPAT
    d->xenoprof->is_compat = is_pv_32on64_domain(is_passive ? dom0 : d);
    if ( XENOPROF_COMPAT(d->xenoprof) )
    {
        bufsize = sizeof(struct compat_oprof_buf);
        i = sizeof(struct compat_event_log);
    }
#endif

    /* reduce max_samples if necessary to limit pages allocated */
    max_bufsize = (MAX_OPROF_SHARED_PAGES * PAGE_SIZE) / nvcpu;
    max_max_samples = (max_bufsize - bufsize) / i + 1;
    if ( (unsigned)max_samples > max_max_samples )
        max_samples = max_max_samples;

    bufsize += (max_samples - 1) * i;
    npages = (nvcpu * bufsize - 1) / PAGE_SIZE + 1;

    d->xenoprof->rawbuf = alloc_xenheap_pages(get_order_from_pages(npages), 0);
    if ( d->xenoprof->rawbuf == NULL )
    {
        xfree(d->xenoprof->vcpu);
        xfree(d->xenoprof);
        d->xenoprof = NULL;
        return -ENOMEM;
    }

    d->xenoprof->npages = npages;
    d->xenoprof->nbuf = nvcpu;
    d->xenoprof->bufsize = bufsize;
    d->xenoprof->domain_ready = 0;
    d->xenoprof->domain_type = XENOPROF_DOMAIN_IGNORED;

    /* Update buffer pointers for active vcpus */
    i = 0;
    for_each_vcpu ( d, v )
    {
        xenoprof_buf_t *buf = (xenoprof_buf_t *)
            &d->xenoprof->rawbuf[i * bufsize];

        d->xenoprof->vcpu[v->vcpu_id].event_size = max_samples;
        d->xenoprof->vcpu[v->vcpu_id].buffer = buf;
        xenoprof_buf(d, buf, event_size) = max_samples;
        xenoprof_buf(d, buf, vcpu_id) = v->vcpu_id;

        i++;
        /* in the unlikely case that the number of active vcpus changes */
        if ( i >= nvcpu )
            break;
    }

    return 0;
}
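The clamping logic above guarantees the shared buffer never exceeds MAX_OPROF_SHARED_PAGES. A standalone check of that arithmetic with hypothetical sizes (the 32-byte header, 16-byte samples, 64-page limit and 4 KiB pages are all made up for illustration):

#include <stdio.h>

int main(void)
{
    unsigned long page_size = 4096, max_shared_pages = 64;
    unsigned long nvcpu = 2, bufsize = 32, samp_size = 16;
    unsigned long max_samples = 100000;   /* deliberately oversized request */
    unsigned long max_bufsize, max_max_samples, npages;

    max_bufsize = (max_shared_pages * page_size) / nvcpu;
    max_max_samples = (max_bufsize - bufsize) / samp_size + 1;
    if ( max_samples > max_max_samples )
        max_samples = max_max_samples;              /* clamp: 8191 here */

    bufsize += (max_samples - 1) * samp_size;       /* header + samples */
    npages = (nvcpu * bufsize - 1) / page_size + 1; /* ceiling division */

    /* Prints max_samples=8191 bufsize=131072 npages=64: exactly the cap. */
    printf("max_samples=%lu bufsize=%lu npages=%lu\n",
           max_samples, bufsize, npages);
    return 0;
}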