Example #1
static int process_portio_intercept(portio_action_t action, ioreq_t *p)
{
    int rc = X86EMUL_OKAY, i, sign = p->df ? -1 : 1;
    uint32_t data;

    if ( !p->data_is_ptr )
    {
        if ( p->dir == IOREQ_READ )
        {
            rc = action(IOREQ_READ, p->addr, p->size, &data);
            p->data = data;
        }
        else
        {
            data = p->data;
            rc = action(IOREQ_WRITE, p->addr, p->size, &data);
        }
        return rc;
    }

    if ( p->dir == IOREQ_READ )
    {
        for ( i = 0; i < p->count; i++ )
        {
            rc = action(IOREQ_READ, p->addr, p->size, &data);
            if ( rc != X86EMUL_OKAY )
                break;
            (void)hvm_copy_to_guest_phys(p->data + sign*i*p->size,
                                         &data, p->size);
        }
    }
    else /* p->dir == IOREQ_WRITE */
    {
        for ( i = 0; i < p->count; i++ )
        {
            data = 0;
            (void)hvm_copy_from_guest_phys(&data, p->data + sign*i*p->size,
                                           p->size);
            rc = action(IOREQ_WRITE, p->addr, p->size, &data);
            if ( rc != X86EMUL_OKAY )
                break;
        }
    }

    if ( i != 0 )
    {
        p->count = i;
        rc = X86EMUL_OKAY;
    }

    return rc;
}
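Worth noting in this first variant (and in the ones that follow): when a rep instruction fails part-way through, the handler truncates p->count to the number of iterations that completed and reports success for that prefix, so the emulator can re-issue the remainder. A minimal standalone sketch of that protocol, using simplified stand-ins (struct req, act) rather than the Xen ioreq_t/portio_action_t types:

#include <stdio.h>

#define OKAY  0
#define ERROR 1

/* Simplified stand-ins for ioreq_t and the per-iteration action. */
struct req { int count; };

static int act(int iter) { return iter < 3 ? OKAY : ERROR; /* fail on 4th */ }

static int process(struct req *p)
{
    int rc = OKAY, i;

    for ( i = 0; i < p->count; i++ )
    {
        rc = act(i);
        if ( rc != OKAY )
            break;
    }

    /* Report the completed prefix as a success; the caller retries the rest. */
    if ( i != 0 )
    {
        p->count = i;
        rc = OKAY;
    }
    return rc;
}

int main(void)
{
    struct req p = { .count = 8 };
    int rc = process(&p);
    printf("rc=%d completed=%d\n", rc, p.count); /* prints: rc=0 completed=3 */
    return 0;
}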
Example #2
static int hvm_mmio_access(struct vcpu *v,
                           ioreq_t *p,
                           hvm_mmio_read_t read_handler,
                           hvm_mmio_write_t write_handler)
{
    unsigned long data;
    int rc = X86EMUL_OKAY, i, sign = p->df ? -1 : 1;

    if ( !p->data_is_ptr )
    {
        if ( p->dir == IOREQ_READ )
        {
            rc = read_handler(v, p->addr, p->size, &data);
            p->data = data;
        }
        else /* p->dir == IOREQ_WRITE */
            rc = write_handler(v, p->addr, p->size, p->data);
        return rc;
    }

    if ( p->dir == IOREQ_READ )
    {
        for ( i = 0; i < p->count; i++ )
        {
            int ret;

            rc = read_handler(v, p->addr + (sign * i * p->size), p->size,
                              &data);
            if ( rc != X86EMUL_OKAY )
                break;
            ret = hvm_copy_to_guest_phys(p->data + (sign * i * p->size),
                                         &data,
                                         p->size);
            if ( (ret == HVMCOPY_gfn_paged_out) ||
                 (ret == HVMCOPY_gfn_shared) )
            {
                rc = X86EMUL_RETRY;
                break;
            }
        }
    }
    else
    {
        for ( i = 0; i < p->count; i++ )
        {
            switch ( hvm_copy_from_guest_phys(&data,
                                              p->data + sign * i * p->size,
                                              p->size) )
            {
            case HVMCOPY_okay:
                break;
            case HVMCOPY_gfn_paged_out:
            case HVMCOPY_gfn_shared:
                rc = X86EMUL_RETRY;
                break;
            case HVMCOPY_bad_gfn_to_mfn:
                data = ~0;
                break;
            case HVMCOPY_bad_gva_to_gfn:
                ASSERT(0);
                /* fall through */
            default:
                rc = X86EMUL_UNHANDLEABLE;
                break;
            }
            if ( rc != X86EMUL_OKAY )
                break;
            rc = write_handler(v, p->addr + (sign * i * p->size), p->size,
                               data);
            if ( rc != X86EMUL_OKAY )
                break;
        }
    }

    if ( i != 0 )
    {
        p->count = i;
        rc = X86EMUL_OKAY;
    }

    return rc;
}
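The switch in the write loop is the canonical mapping from hvm_copy_*_guest_phys results to emulator return codes: paged-out or shared frames become X86EMUL_RETRY, an unbacked frame is read as all ones (as real hardware would return), and HVMCOPY_bad_gva_to_gfn cannot occur for physical-address copies, hence the ASSERT. A compilable sketch of that mapping pulled out into a helper (fold_copy_result is a hypothetical name, not a Xen function):

#include <assert.h>
#include <stdio.h>

/* Stand-ins for Xen's enumerations. */
enum hvmcopy { HVMCOPY_okay, HVMCOPY_gfn_paged_out, HVMCOPY_gfn_shared,
               HVMCOPY_bad_gfn_to_mfn, HVMCOPY_bad_gva_to_gfn };
enum x86emul { X86EMUL_OKAY, X86EMUL_RETRY, X86EMUL_UNHANDLEABLE };

/* Hypothetical helper folding a physical-copy result into an emulator
 * return code; unbacked frames read as all ones, as on real hardware. */
static enum x86emul fold_copy_result(enum hvmcopy res, unsigned long *data)
{
    switch ( res )
    {
    case HVMCOPY_okay:
        return X86EMUL_OKAY;
    case HVMCOPY_gfn_paged_out:
    case HVMCOPY_gfn_shared:
        return X86EMUL_RETRY;         /* page must be brought back first */
    case HVMCOPY_bad_gfn_to_mfn:
        *data = ~0ul;                 /* unbacked frame: reads return ~0 */
        return X86EMUL_OKAY;
    case HVMCOPY_bad_gva_to_gfn:      /* impossible for *_phys copies */
        assert(0);
        /* fall through */
    default:
        return X86EMUL_UNHANDLEABLE;
    }
}

int main(void)
{
    unsigned long data = 0;
    enum x86emul rc = fold_copy_result(HVMCOPY_bad_gfn_to_mfn, &data);
    printf("rc=%d data=%#lx\n", rc, data);   /* rc=0, data = all ones */
    return 0;
}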
Example #3
static int process_portio_intercept(portio_action_t action, ioreq_t *p)
{
    int rc = X86EMUL_OKAY, i, sign = p->df ? -1 : 1;
    uint32_t data;

    if ( !p->data_is_ptr )
    {
        if ( p->dir == IOREQ_READ )
        {
            rc = action(IOREQ_READ, p->addr, p->size, &data);
            p->data = data;
        }
        else
        {
            data = p->data;
            rc = action(IOREQ_WRITE, p->addr, p->size, &data);
        }
        return rc;
    }

    if ( p->dir == IOREQ_READ )
    {
        for ( i = 0; i < p->count; i++ )
        {
            rc = action(IOREQ_READ, p->addr, p->size, &data);
            if ( rc != X86EMUL_OKAY )
                break;
            (void)hvm_copy_to_guest_phys(p->data + sign*i*p->size,
                                         &data, p->size);
        }
    }
    else /* p->dir == IOREQ_WRITE */
    {
        for ( i = 0; i < p->count; i++ )
        {
            data = 0;
            switch ( hvm_copy_from_guest_phys(&data,
                                              p->data + sign * i * p->size,
                                              p->size) )
            {
            case HVMCOPY_okay:
                break;
            case HVMCOPY_gfn_paged_out:
            case HVMCOPY_gfn_shared:
                rc = X86EMUL_RETRY;
                break;
            case HVMCOPY_bad_gfn_to_mfn:
                data = ~0;
                break;
            case HVMCOPY_bad_gva_to_gfn:
                ASSERT(0);
                /* fall through */
            default:
                rc = X86EMUL_UNHANDLEABLE;
                break;
            }
            if ( rc != X86EMUL_OKAY )
                break;
            rc = action(IOREQ_WRITE, p->addr, p->size, &data);
            if ( rc != X86EMUL_OKAY )
                break;
        }
    }

    if ( i != 0 )
    {
        p->count = i;
        rc = X86EMUL_OKAY;
    }

    return rc;
}
Example #4
static void realmode_deliver_exception(
    unsigned int vector,
    unsigned int insn_len,
    struct hvm_emulate_ctxt *hvmemul_ctxt)
{
    struct segment_register *idtr, *csr;
    struct cpu_user_regs *regs = hvmemul_ctxt->ctxt.regs;
    uint32_t cs_eip, pstk;
    uint16_t frame[3];
    unsigned int last_byte;

    idtr = hvmemul_get_seg_reg(x86_seg_idtr, hvmemul_ctxt);
    csr  = hvmemul_get_seg_reg(x86_seg_cs,   hvmemul_ctxt);
    __set_bit(x86_seg_cs, &hvmemul_ctxt->seg_reg_dirty);

 again:
    last_byte = (vector * 4) + 3;
    if ( idtr->limit < last_byte ||
         hvm_copy_from_guest_phys(&cs_eip, idtr->base + vector * 4, 4) !=
         HVMCOPY_okay )
    {
        /* Software interrupt? */
        if ( insn_len != 0 )
        {
            insn_len = 0;
            vector = TRAP_gp_fault;
            goto again;
        }

        /* Exception or hardware interrupt. */
        switch ( vector )
        {
        case TRAP_double_fault:
            hvm_triple_fault();
            return;
        case TRAP_gp_fault:
            vector = TRAP_double_fault;
            goto again;
        default:
            vector = TRAP_gp_fault;
            goto again;
        }
    }

    frame[0] = regs->eip + insn_len;
    frame[1] = csr->sel;
    frame[2] = regs->eflags & ~X86_EFLAGS_RF;

    /* We can't test hvmemul_ctxt->ctxt.sp_size: it may not be initialised. */
    if ( hvmemul_ctxt->seg_reg[x86_seg_ss].attr.fields.db )
    {
        regs->esp -= 6;
        pstk = regs->esp;
    }
    else
    {
        pstk = (uint16_t)(regs->esp - 6);
        regs->esp &= ~0xffff;
        regs->esp |= pstk;
    }

    pstk += hvmemul_get_seg_reg(x86_seg_ss, hvmemul_ctxt)->base;
    (void)hvm_copy_to_guest_phys(pstk, frame, sizeof(frame));

    csr->sel  = cs_eip >> 16;
    csr->base = (uint32_t)csr->sel << 4;
    regs->eip = (uint16_t)cs_eip;
    regs->eflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_IF | X86_EFLAGS_RF);

    /* Exception delivery clears STI and MOV-SS blocking. */
    if ( hvmemul_ctxt->intr_shadow &
         (VMX_INTR_SHADOW_STI|VMX_INTR_SHADOW_MOV_SS) )
    {
        hvmemul_ctxt->intr_shadow &=
            ~(VMX_INTR_SHADOW_STI|VMX_INTR_SHADOW_MOV_SS);
        __vmwrite(GUEST_INTERRUPTIBILITY_INFO, hvmemul_ctxt->intr_shadow);
    }
}
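Real-mode delivery fetches a 4-byte IVT entry at idtr->base + vector * 4: the low word is the new IP, the high word the new CS selector, and the code segment base is the selector shifted left by 4 (as the function does just before returning). A standalone sketch of the decode, using an illustrative entry value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* An illustrative 4-byte IVT entry: high word = CS selector,
     * low word = IP of the handler. */
    uint32_t cs_eip = 0xF000E987;

    uint16_t sel  = cs_eip >> 16;          /* 0xF000 */
    uint16_t ip   = (uint16_t)cs_eip;      /* 0xE987 */
    uint32_t base = (uint32_t)sel << 4;    /* 0xF0000 */

    /* Linear address of the first instruction of the handler. */
    printf("%04x:%04x -> linear %05x\n", sel, ip, base + ip);
    return 0;
}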
Example #5
File: viridian.c Project: djs55/xen
int viridian_hypercall(struct cpu_user_regs *regs)
{
    struct vcpu *curr = current;
    struct domain *currd = curr->domain;
    int mode = hvm_guest_x86_mode(curr);
    unsigned long input_params_gpa, output_params_gpa;
    uint16_t status = HV_STATUS_SUCCESS;

    union hypercall_input {
        uint64_t raw;
        struct {
            uint16_t call_code;
            uint16_t fast:1;
            uint16_t rsvd1:15;
            uint16_t rep_count:12;
            uint16_t rsvd2:4;
            uint16_t rep_start:12;
            uint16_t rsvd3:4;
        };
    } input;

    union hypercall_output {
        uint64_t raw;
        struct {
            uint16_t result;
            uint16_t rsvd1;
            uint32_t rep_complete:12;
            uint32_t rsvd2:20;
        };
    } output = { 0 };

    ASSERT(is_viridian_domain(currd));

    switch ( mode )
    {
    case 8:
        input.raw = regs->rcx;
        input_params_gpa = regs->rdx;
        output_params_gpa = regs->r8;
        break;
    case 4:
        input.raw = (regs->rdx << 32) | regs->_eax;
        input_params_gpa = (regs->rbx << 32) | regs->_ecx;
        output_params_gpa = (regs->rdi << 32) | regs->_esi;
        break;
    default:
        goto out;
    }

    switch ( input.call_code )
    {
    case HvNotifyLongSpinWait:
        /*
         * See Microsoft Hypervisor Top Level Spec. section 18.5.1.
         */
        perfc_incr(mshv_call_long_wait);
        do_sched_op(SCHEDOP_yield, guest_handle_from_ptr(NULL, void));
        status = HV_STATUS_SUCCESS;
        break;

    case HvFlushVirtualAddressSpace:
    case HvFlushVirtualAddressList:
    {
        cpumask_t *pcpu_mask;
        struct vcpu *v;
        struct {
            uint64_t address_space;
            uint64_t flags;
            uint64_t vcpu_mask;
        } input_params;

        /*
         * See Microsoft Hypervisor Top Level Spec. sections 12.4.2
         * and 12.4.3.
         */
        perfc_incr(mshv_call_flush);

        /* These hypercalls should never use the fast-call convention. */
        status = HV_STATUS_INVALID_PARAMETER;
        if ( input.fast )
            break;

        /* Get input parameters. */
        if ( hvm_copy_from_guest_phys(&input_params, input_params_gpa,
                                      sizeof(input_params)) != HVMCOPY_okay )
            break;

        /*
         * It is not clear from the spec whether we are supposed to
         * include the current virtual CPU in the set or not in this
         * case, so err on the safe side.
         */
        if ( input_params.flags & HV_FLUSH_ALL_PROCESSORS )
            input_params.vcpu_mask = ~0ul;

        pcpu_mask = &this_cpu(ipi_cpumask);
        cpumask_clear(pcpu_mask);

        /*
         * For each specified virtual CPU flush all ASIDs to invalidate
         * TLB entries the next time it is scheduled and then, if it
         * is currently running, add its physical CPU to a mask of
         * those which need to be interrupted to force a flush.
         */
        for_each_vcpu ( currd, v )
        {
            if ( v->vcpu_id >= (sizeof(input_params.vcpu_mask) * 8) )
                break;

            if ( !(input_params.vcpu_mask & (1ul << v->vcpu_id)) )
                continue;

            hvm_asid_flush_vcpu(v);
            if ( v != curr && v->is_running )
                __cpumask_set_cpu(v->processor, pcpu_mask);
        }

        /*
         * Since ASIDs have now been flushed it just remains to
         * force any CPUs currently running target vCPUs out of non-
         * root mode. It's possible that re-scheduling has taken place
         * so we may unnecessarily IPI some CPUs.
         */
        if ( !cpumask_empty(pcpu_mask) )
            smp_send_event_check_mask(pcpu_mask);

        output.rep_complete = input.rep_count;

        status = HV_STATUS_SUCCESS;
        break;
    }

    default:
        status = HV_STATUS_INVALID_HYPERCALL_CODE;
        break;
    }

out:
    output.result = status;
    switch ( mode )
    {
    case 8:
        regs->rax = output.raw;
        break;
    default:
        regs->rdx = output.raw >> 32;
        regs->rax = (uint32_t)output.raw;
        break;
    }

    return HVM_HCALL_completed;
}
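The input union mirrors the Hyper-V hypercall calling convention: bits 0-15 of the register carry the call code, bit 16 the fast-call flag, bits 32-43 the rep count, and bits 48-59 the rep start index. A standalone sketch decoding a raw value with explicit shifts and masks instead of bitfields (whose layout is compiler-dependent), using illustrative values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Illustrative raw hypercall-input register value:
     * call code 0x0002, fast bit clear, rep_count 5, rep_start 0. */
    uint64_t raw = 0x0002ull | (5ull << 32);

    uint16_t call_code = raw & 0xffff;                    /* bits  0..15 */
    unsigned fast      = (unsigned)((raw >> 16) & 0x1);   /* bit   16    */
    unsigned rep_count = (unsigned)((raw >> 32) & 0xfff); /* bits 32..43 */
    unsigned rep_start = (unsigned)((raw >> 48) & 0xfff); /* bits 48..59 */

    printf("code=%#x fast=%u rep_count=%u rep_start=%u\n",
           call_code, fast, rep_count, rep_start);
    return 0;
}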
Example #6
static int hvm_mmio_access(struct vcpu *v,
                           ioreq_t *p,
                           hvm_mmio_read_t read_handler,
                           hvm_mmio_write_t write_handler)
{
    unsigned long data;
    int rc = X86EMUL_OKAY, i, sign = p->df ? -1 : 1;

    if ( !p->data_is_ptr )
    {
        if ( p->dir == IOREQ_READ )
        {
            rc = read_handler(v, p->addr, p->size, &data);
            p->data = data;
        }
        else /* p->dir == IOREQ_WRITE */
            rc = write_handler(v, p->addr, p->size, p->data);
        return rc;
    }

    if ( p->dir == IOREQ_READ )
    {
        for ( i = 0; i < p->count; i++ )
        {
            int ret;

            rc = read_handler(v, p->addr + (sign * i * p->size), p->size,
                              &data);
            if ( rc != X86EMUL_OKAY )
                break;
            ret = hvm_copy_to_guest_phys(p->data + (sign * i * p->size),
                                         &data,
                                         p->size);
            if ( (ret == HVMCOPY_gfn_paged_out) || 
                 (ret == HVMCOPY_gfn_shared) )
            {
                rc = X86EMUL_RETRY;
                break;
            }
        }
    }
    else
    {
        for ( i = 0; i < p->count; i++ )
        {
            int ret;

            ret = hvm_copy_from_guest_phys(&data,
                                           p->data + (sign * i * p->size),
                                           p->size);
            if ( (ret == HVMCOPY_gfn_paged_out) || 
                 (ret == HVMCOPY_gfn_shared) )
            {
                rc = X86EMUL_RETRY;
                break;
            }
            rc = write_handler(v, p->addr + (sign * i * p->size), p->size,
                               data);
            if ( rc != X86EMUL_OKAY )
                break;
        }
    }

    if ( i != 0 )
    {
        p->count = i;
        rc = X86EMUL_OKAY;
    }

    return rc;
}
Example #7
int hvm_process_io_intercept(const struct hvm_io_handler *handler,
                             ioreq_t *p)
{
    const struct hvm_io_ops *ops = handler->ops;
    int rc = X86EMUL_OKAY, i, step = p->df ? -p->size : p->size;
    uint64_t data;
    uint64_t addr;

    if ( p->dir == IOREQ_READ )
    {
        for ( i = 0; i < p->count; i++ )
        {
            addr = (p->type == IOREQ_TYPE_COPY) ?
                   p->addr + step * i :
                   p->addr;
            rc = ops->read(handler, addr, p->size, &data);
            if ( rc != X86EMUL_OKAY )
                break;

            if ( p->data_is_ptr )
            {
                switch ( hvm_copy_to_guest_phys(p->data + step * i,
                                                &data, p->size) )
                {
                case HVMCOPY_okay:
                    break;
                case HVMCOPY_bad_gfn_to_mfn:
                    /* Drop the write as real hardware would. */
                    continue;
                case HVMCOPY_bad_gva_to_gfn:
                case HVMCOPY_gfn_paged_out:
                case HVMCOPY_gfn_shared:
                    ASSERT_UNREACHABLE();
                    /* fall through */
                default:
                    domain_crash(current->domain);
                    return X86EMUL_UNHANDLEABLE;
                }
            }
            else
                p->data = data;
        }
    }
    else /* p->dir == IOREQ_WRITE */
    {
        for ( i = 0; i < p->count; i++ )
        {
            if ( p->data_is_ptr )
            {
                switch ( hvm_copy_from_guest_phys(&data, p->data + step * i,
                                                  p->size) )
                {
                case HVMCOPY_okay:
                    break;
                case HVMCOPY_bad_gfn_to_mfn:
                    data = ~0;
                    break;
                case HVMCOPY_bad_gva_to_gfn:
                case HVMCOPY_gfn_paged_out:
                case HVMCOPY_gfn_shared:
                    ASSERT_UNREACHABLE();
                    /* fall through */
                default:
                    domain_crash(current->domain);
                    return X86EMUL_UNHANDLEABLE;
                }
            }
            else
                data = p->data;

            addr = (p->type == IOREQ_TYPE_COPY) ?
                   p->addr + step * i :
                   p->addr;
            rc = ops->write(handler, addr, p->size, data);
            if ( rc != X86EMUL_OKAY )
                break;
        }
    }

    if ( i )
    {
        p->count = i;
        rc = X86EMUL_OKAY;
    }
    else if ( rc == X86EMUL_UNHANDLEABLE )
    {
        /*
         * Don't forward entire batches to the device model: this would
         * prevent the internal handlers from seeing subsequent iterations
         * of the request.
         */
        p->count = 1;
    }

    return rc;
}
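This consolidated handler replaces the sign * i * p->size arithmetic of the earlier variants with a signed step, and only advances the device-side address for MMIO copies (IOREQ_TYPE_COPY): rep port I/O keeps hitting the same port while the guest-memory pointer moves. A standalone sketch of the per-iteration address computation, with plain stand-ins for the ioreq fields:

#include <stdint.h>
#include <stdio.h>

#define IOREQ_TYPE_PIO  0
#define IOREQ_TYPE_COPY 1

int main(void)
{
    /* Illustrative rep request: 4-byte elements, direction flag set
     * (EFLAGS.DF = 1), so the guest buffer walks downwards. */
    unsigned type = IOREQ_TYPE_COPY, size = 4, count = 3, df = 1;
    uint64_t addr = 0xfee000f0, buf = 0x12340;

    int step = df ? -(int)size : (int)size;

    for ( unsigned i = 0; i < count; i++ )
    {
        /* Device address only moves for MMIO copies, never for ports. */
        uint64_t a = (type == IOREQ_TYPE_COPY) ? addr + step * (int)i : addr;
        uint64_t g = buf + step * (int)i;
        printf("iter %u: device %#llx guest %#llx\n",
               i, (unsigned long long)a, (unsigned long long)g);
    }
    return 0;
}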
Example #8
File: intercept.c Project: CPFL/xen
static int hvm_mmio_access(struct vcpu *v,
                           ioreq_t *p,
                           hvm_mmio_read_t read_handler,
                           hvm_mmio_write_t write_handler)
{
    struct hvm_vcpu_io *vio = &v->arch.hvm_vcpu.hvm_io;
    unsigned long data;
    int rc = X86EMUL_OKAY, i, step = p->df ? -p->size : p->size;

    if ( !p->data_is_ptr )
    {
        if ( p->dir == IOREQ_READ )
        {
            if ( vio->mmio_retrying )
            {
                if ( vio->mmio_large_read_bytes != p->size )
                    return X86EMUL_UNHANDLEABLE;
                memcpy(&data, vio->mmio_large_read, p->size);
                vio->mmio_large_read_bytes = 0;
                vio->mmio_retrying = 0;
            }
            else
                rc = read_handler(v, p->addr, p->size, &data);
            p->data = data;
        }
        else /* p->dir == IOREQ_WRITE */
            rc = write_handler(v, p->addr, p->size, p->data);
        return rc;
    }

    if ( p->dir == IOREQ_READ )
    {
        for ( i = 0; i < p->count; i++ )
        {
            if ( vio->mmio_retrying )
            {
                if ( vio->mmio_large_read_bytes != p->size )
                    return X86EMUL_UNHANDLEABLE;
                memcpy(&data, vio->mmio_large_read, p->size);
                vio->mmio_large_read_bytes = 0;
                vio->mmio_retrying = 0;
            }
            else
            {
                rc = read_handler(v, p->addr + step * i, p->size, &data);
                if ( rc != X86EMUL_OKAY )
                    break;
            }
            switch ( hvm_copy_to_guest_phys(p->data + step * i,
                                            &data, p->size) )
            {
            case HVMCOPY_okay:
                break;
            case HVMCOPY_gfn_paged_out:
            case HVMCOPY_gfn_shared:
                rc = X86EMUL_RETRY;
                break;
            case HVMCOPY_bad_gfn_to_mfn:
                /* Drop the write as real hardware would. */
                continue;
            case HVMCOPY_bad_gva_to_gfn:
                ASSERT(0);
                /* fall through */
            default:
                rc = X86EMUL_UNHANDLEABLE;
                break;
            }
            if ( rc != X86EMUL_OKAY )
                break;
        }

        if ( rc == X86EMUL_RETRY )
        {
            vio->mmio_retry = 1;
            vio->mmio_large_read_bytes = p->size;
            memcpy(vio->mmio_large_read, &data, p->size);
        }
    }
    else
    {
        for ( i = 0; i < p->count; i++ )
        {
            switch ( hvm_copy_from_guest_phys(&data, p->data + step * i,
                                              p->size) )
            {
            case HVMCOPY_okay:
                break;
            case HVMCOPY_gfn_paged_out:
            case HVMCOPY_gfn_shared:
                rc = X86EMUL_RETRY;
                break;
            case HVMCOPY_bad_gfn_to_mfn:
                data = ~0;
                break;
            case HVMCOPY_bad_gva_to_gfn:
                ASSERT(0);
                /* fall through */
            default:
                rc = X86EMUL_UNHANDLEABLE;
                break;
            }
            if ( rc != X86EMUL_OKAY )
                break;
            rc = write_handler(v, p->addr + step * i, p->size, data);
            if ( rc != X86EMUL_OKAY )
                break;
        }

        if ( rc == X86EMUL_RETRY )
            vio->mmio_retry = 1;
    }

    if ( i != 0 )
    {
        p->count = i;
        rc = X86EMUL_OKAY;
    }

    return rc;
}
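This variant adds a replay buffer: if a rep read has to be abandoned with X86EMUL_RETRY after the device read succeeded but before the data reached guest memory, the bytes are stashed in vio->mmio_large_read; the retried instruction then replays the stashed bytes instead of re-reading the device, so a register with read side effects is not read twice. A stripped-down sketch of that stash-and-replay idea, with plain names in place of the Xen per-vCPU state:

#include <stdio.h>
#include <string.h>

#define OKAY  0
#define RETRY 1

static struct {
    unsigned char buf[8];
    unsigned bytes;          /* 0 = nothing stashed */
} replay;

static unsigned reads;       /* counts real device reads */

static void device_read(void *out, unsigned size)
{
    reads++;
    memset(out, 0xab, size); /* pretend register data */
}

/* One attempt: read (or replay), then possibly fail before the data
 * can be consumed, forcing a retry of the whole instruction. */
static int attempt(unsigned size, int fail_after_read)
{
    unsigned char data[8];

    if ( replay.bytes == size )
    {
        memcpy(data, replay.buf, size);   /* replay: no device side effects */
        replay.bytes = 0;
    }
    else
        device_read(data, size);

    if ( fail_after_read )
    {
        memcpy(replay.buf, data, size);   /* stash for the retry */
        replay.bytes = size;
        return RETRY;
    }
    return OKAY;
}

int main(void)
{
    int rc = attempt(4, 1);               /* first try: forced retry */
    if ( rc == RETRY )
        rc = attempt(4, 0);               /* retry: replays the stash */
    printf("rc=%d device reads=%u\n", rc, reads);  /* one read, not two */
    return 0;
}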
Example #9
File: intercept.c Project: CPFL/xen
static int process_portio_intercept(portio_action_t action, ioreq_t *p)
{
    struct hvm_vcpu_io *vio = &current->arch.hvm_vcpu.hvm_io;
    int rc = X86EMUL_OKAY, i, step = p->df ? -p->size : p->size;
    uint32_t data;

    if ( !p->data_is_ptr )
    {
        if ( p->dir == IOREQ_READ )
        {
            if ( vio->mmio_retrying )
            {
                if ( vio->mmio_large_read_bytes != p->size )
                    return X86EMUL_UNHANDLEABLE;
                memcpy(&data, vio->mmio_large_read, p->size);
                vio->mmio_large_read_bytes = 0;
                vio->mmio_retrying = 0;
            }
            else
                rc = action(IOREQ_READ, p->addr, p->size, &data);
            p->data = data;
        }
        else
        {
            data = p->data;
            rc = action(IOREQ_WRITE, p->addr, p->size, &data);
        }
        return rc;
    }

    if ( p->dir == IOREQ_READ )
    {
        for ( i = 0; i < p->count; i++ )
        {
            if ( vio->mmio_retrying )
            {
                if ( vio->mmio_large_read_bytes != p->size )
                    return X86EMUL_UNHANDLEABLE;
                memcpy(&data, vio->mmio_large_read, p->size);
                vio->mmio_large_read_bytes = 0;
                vio->mmio_retrying = 0;
            }
            else
            {
                rc = action(IOREQ_READ, p->addr, p->size, &data);
                if ( rc != X86EMUL_OKAY )
                    break;
            }
            switch ( hvm_copy_to_guest_phys(p->data + step * i,
                                            &data, p->size) )
            {
            case HVMCOPY_okay:
                break;
            case HVMCOPY_gfn_paged_out:
            case HVMCOPY_gfn_shared:
                rc = X86EMUL_RETRY;
                break;
            case HVMCOPY_bad_gfn_to_mfn:
                /* Drop the write as real hardware would. */
                continue;
            case HVMCOPY_bad_gva_to_gfn:
                ASSERT(0);
                /* fall through */
            default:
                rc = X86EMUL_UNHANDLEABLE;
                break;
            }
            if ( rc != X86EMUL_OKAY )
                break;
        }

        if ( rc == X86EMUL_RETRY )
        {
            vio->mmio_retry = 1;
            vio->mmio_large_read_bytes = p->size;
            memcpy(vio->mmio_large_read, &data, p->size);
        }
    }
    else /* p->dir == IOREQ_WRITE */
    {
        for ( i = 0; i < p->count; i++ )
        {
            data = 0;
            switch ( hvm_copy_from_guest_phys(&data, p->data + step * i,
                                              p->size) )
            {
            case HVMCOPY_okay:
                break;
            case HVMCOPY_gfn_paged_out:
            case HVMCOPY_gfn_shared:
                rc = X86EMUL_RETRY;
                break;
            case HVMCOPY_bad_gfn_to_mfn:
                data = ~0;
                break;
            case HVMCOPY_bad_gva_to_gfn:
                ASSERT(0);
                /* fall through */
            default:
                rc = X86EMUL_UNHANDLEABLE;
                break;
            }
            if ( rc != X86EMUL_OKAY )
                break;
            rc = action(IOREQ_WRITE, p->addr, p->size, &data);
            if ( rc != X86EMUL_OKAY )
                break;
        }

        if ( rc == X86EMUL_RETRY )
            vio->mmio_retry = 1;
    }

    if ( i != 0 )
    {
        p->count = i;
        rc = X86EMUL_OKAY;
    }

    return rc;
}
Example #10
static inline void hvm_mmio_access(struct vcpu *v,
                                   ioreq_t *p,
                                   hvm_mmio_read_t read_handler,
                                   hvm_mmio_write_t write_handler)
{
    unsigned int tmp1, tmp2;
    unsigned long data;

    switch ( p->type ) {
    case IOREQ_TYPE_COPY:
    {
        if ( !p->data_is_ptr ) {
            if ( p->dir == IOREQ_READ )
                p->data = read_handler(v, p->addr, p->size);
            else    /* p->dir == IOREQ_WRITE */
                write_handler(v, p->addr, p->size, p->data);
        } else {    /* p->data_is_ptr */
            int i, sign = (p->df) ? -1 : 1;

            if ( p->dir == IOREQ_READ ) {
                for ( i = 0; i < p->count; i++ ) {
                    data = read_handler(v,
                        p->addr + (sign * i * p->size),
                        p->size);
                    (void)hvm_copy_to_guest_phys(
                        p->data + (sign * i * p->size),
                        &data,
                        p->size);
                }
            } else {/* p->dir == IOREQ_WRITE */
                for ( i = 0; i < p->count; i++ ) {
                    (void)hvm_copy_from_guest_phys(
                        &data,
                        p->data + (sign * i * p->size),
                        p->size);
                    write_handler(v,
                        p->addr + (sign * i * p->size),
                        p->size, data);
                }
            }
        }
        break;
    }

    case IOREQ_TYPE_AND:
        tmp1 = read_handler(v, p->addr, p->size);
        if ( p->dir == IOREQ_WRITE ) {
            tmp2 = tmp1 & (unsigned long) p->data;
            write_handler(v, p->addr, p->size, tmp2);
        }
        p->data = tmp1;
        break;

    case IOREQ_TYPE_ADD:
        tmp1 = read_handler(v, p->addr, p->size);
        if (p->dir == IOREQ_WRITE) {
            tmp2 = tmp1 + (unsigned long) p->data;
            write_handler(v, p->addr, p->size, tmp2);
        }
        p->data = tmp1;
        break;

    case IOREQ_TYPE_OR:
        tmp1 = read_handler(v, p->addr, p->size);
        if ( p->dir == IOREQ_WRITE ) {
            tmp2 = tmp1 | (unsigned long) p->data;
            write_handler(v, p->addr, p->size, tmp2);
        }
        p->data = tmp1;
        break;

    case IOREQ_TYPE_XOR:
        tmp1 = read_handler(v, p->addr, p->size);
        if ( p->dir == IOREQ_WRITE ) {
            tmp2 = tmp1 ^ (unsigned long) p->data;
            write_handler(v, p->addr, p->size, tmp2);
        }
        p->data = tmp1;
        break;

    case IOREQ_TYPE_XCHG:
        /*
         * Note that we don't need to be atomic here since VCPU is accessing
         * its own local APIC.
         */
        tmp1 = read_handler(v, p->addr, p->size);
        write_handler(v, p->addr, p->size, (unsigned long) p->data);
        p->data = tmp1;
        break;

    case IOREQ_TYPE_SUB:
        tmp1 = read_handler(v, p->addr, p->size);
        if ( p->dir == IOREQ_WRITE ) {
            tmp2 = tmp1 - (unsigned long) p->data;
            write_handler(v, p->addr, p->size, tmp2);
        }
        p->data = tmp1;
        break;

    default:
        printk("hvm_mmio_access: error ioreq type %x\n", p->type);
        domain_crash_synchronous();
        break;
    }
}
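Besides plain copies, this oldest variant also services read-modify-write request types: each one reads the current value, optionally writes the combined value back, and returns the old value in p->data, matching the destination-gets-old-value semantics of x86 ops like XCHG against the local APIC. A condensed sketch of the pattern on a plain memory word:

#include <stdio.h>

enum op { OP_AND, OP_OR, OP_XOR, OP_ADD, OP_SUB, OP_XCHG };

/* RMW against a plain word: write the combined value, return the old one,
 * mirroring how the handler services IOREQ_TYPE_AND/OR/XOR/ADD/SUB/XCHG. */
static unsigned long rmw(unsigned long *reg, enum op op, unsigned long data)
{
    unsigned long old = *reg;

    switch ( op )
    {
    case OP_AND:  *reg = old & data; break;
    case OP_OR:   *reg = old | data; break;
    case OP_XOR:  *reg = old ^ data; break;
    case OP_ADD:  *reg = old + data; break;
    case OP_SUB:  *reg = old - data; break;
    case OP_XCHG: *reg = data;       break;
    }
    return old;   /* the instruction's destination gets the old value */
}

int main(void)
{
    unsigned long reg = 0xff;
    unsigned long old = rmw(&reg, OP_AND, 0x0f);
    printf("old=%#lx new=%#lx\n", old, reg);  /* old=0xff new=0xf */
    return 0;
}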