/*
 * Check if the request is handled inside xen.
 * return value: X86EMUL_UNHANDLEABLE --not handled;
 *               X86EMUL_OKAY / X86EMUL_RETRY (or the registered
 *               handler's own result) --handled.
 * (The old "0 --not handled; 1 --handled" convention no longer applies:
 *  every return path below uses X86EMUL_* codes.)
 */
int hvm_io_intercept(ioreq_t *p, int type)
{
    struct vcpu *v = current;
    struct hvm_io_handler *handler = v->domain->arch.hvm_domain.io_handler;
    int i;
    unsigned long addr, size;

    if ( type == HVM_PORTIO )
    {
        /* Give passed-through device port ranges first refusal. */
        int rc = dpci_ioport_intercept(p);
        if ( (rc == X86EMUL_OKAY) || (rc == X86EMUL_RETRY) )
            return rc;
    }

    for ( i = 0; i < handler->num_slot; i++ )
    {
        if ( type != handler->hdl_list[i].type )
            continue;
        addr = handler->hdl_list[i].addr;
        size = handler->hdl_list[i].size;
        /* Dispatch only if the access lies wholly within the slot's range. */
        if ( (p->addr >= addr) &&
             ((p->addr + p->size) <= (addr + size)) )
        {
            if ( type == HVM_PORTIO )
                return process_portio_intercept(
                    handler->hdl_list[i].action.portio, p);
            return handler->hdl_list[i].action.mmio(p);
        }
    }

    return X86EMUL_UNHANDLEABLE;
}
/*
 * Check if the request is handled inside xen.
 * return value: 0 --not handled; 1 --handled
 * (A matching slot handler's own result is returned directly.)
 */
int hvm_io_intercept(ioreq_t *p, int type)
{
    struct vcpu *curr = current;
    struct hvm_io_handler *hio = &curr->domain->arch.hvm_domain.io_handler;
    unsigned long start, len;
    int slot;

    /* Passed-through device port ranges get first refusal on port I/O. */
    if ( (type == HVM_PORTIO) && (dpci_ioport_intercept(p)) )
        return 1;

    for ( slot = 0; slot < hio->num_slot; slot++ )
    {
        if ( hio->hdl_list[slot].type != type )
            continue;

        start = hio->hdl_list[slot].addr;
        len   = hio->hdl_list[slot].size;

        /* Skip unless the access lies wholly inside this slot's range. */
        if ( (p->addr < start) || ((p->addr + p->size) > (start + len)) )
            continue;

        if ( type == HVM_PORTIO )
            return process_portio_intercept(
                hio->hdl_list[slot].action.portio, p);
        return hio->hdl_list[slot].action.mmio(p);
    }

    return 0;
}