static void
echo_server (void)
{
    T_IO_REQUEST rcv_packet;
    T_IO_RESPONSE res_packet;
    ER err;

    printf ("ECHO: server start.\n");
    for (;;) {
        err = get_ioreq (deviceid, &rcv_packet);
        if (err != E_OK)
            continue;   /* nothing was received, so there is nothing to answer */
        printf ("ECHO: Received request; command = %d\n", rcv_packet.command);
        /* Interpret the command. */
        switch (rcv_packet.command) {
        /* Nothing to do for IO_NULL, IO_OPEN and IO_CLOSE. */
        case IO_NULL:
        case IO_OPEN:
        case IO_CLOSE:
            res_packet.stat = E_OK;
            break;
        case IO_READ:
            res_packet.stat = E_OK;
            break;
        case IO_WRITE:
            {
                W i;

                printf ("ECHO: write size = %d\n",
                        rcv_packet.s.write_pack.size);
                for (i = 0; i < rcv_packet.s.write_pack.size; i++) {
                    putchar (((B *) (rcv_packet.s.write_pack.bufp))[i]);
                }
            }
            res_packet.stat = E_OK;
            break;
        case IO_STAT:
            res_packet.stat = E_OK;
            break;
        case IO_CONTROL:
            res_packet.stat = E_OK;
            break;
        default:
            printf ("ECHO: parameter error\n");
            res_packet.stat = E_PAR;
            break;
        }
        put_res (deviceid, &rcv_packet, &res_packet);
    }
}
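The client side of this request/response protocol is not shown in the listing. Below is a minimal sketch of what a caller of echo_server might look like. It assumes a send_ioreq ()/wait_res () pair symmetric to the server's get_ioreq ()/put_res () (both names are hypothetical, not taken from the listing) and reuses the T_IO_REQUEST/T_IO_RESPONSE types and the s.write_pack fields that the server above actually touches.

/*
 * Hypothetical client for echo_server: a sketch only.
 * send_ioreq () and wait_res () are assumed names; the request fields
 * (command, s.write_pack.bufp, s.write_pack.size) are the ones the
 * server above reads.
 */
static void
echo_client (void)
{
    T_IO_REQUEST req;
    T_IO_RESPONSE res;
    static const B msg[] = "hello";

    req.command = IO_WRITE;
    req.s.write_pack.bufp = (B *) msg;
    req.s.write_pack.size = sizeof (msg) - 1;   /* exclude the NUL */

    send_ioreq (deviceid, &req);    /* hypothetical: queue the request */
    wait_res (deviceid, &res);      /* hypothetical: block for the reply */
    if (res.stat != E_OK)
        printf ("echo_client: request failed (%d)\n", res.stat);
}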
static void
gdc_server (void)
{
    T_IO_REQUEST rcv_packet;
    T_IO_RESPONSE res_packet;
    ER err;

    printf ("PD7220(GDC): server start.\n");
    for (;;) {
        err = get_ioreq (deviceid, &rcv_packet);
        if (err != E_OK)
            continue;   /* nothing was received, so there is nothing to answer */
        printf ("GDC: Received request %d\n", rcv_packet.command);
        /* Interpret the command. */
        switch (rcv_packet.command) {
        /* Nothing to do for IO_NULL, IO_OPEN and IO_CLOSE. */
        case IO_NULL:
        case IO_OPEN:
        case IO_CLOSE:
            res_packet.stat = E_OK;
            break;
        case IO_READ:
        case IO_WRITE:
        case IO_STAT:
            res_packet.stat = E_NOSPT;  /* not supported by this device */
            break;
        case IO_CONTROL:
            res_packet.stat = gdc_control (&(rcv_packet.s));
            break;
        default:
            res_packet.stat = E_PAR;
            break;
        }
        put_res (deviceid, &rcv_packet, &res_packet);
    }
}
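The gdc_control () handler that the IO_CONTROL case dispatches to is not part of the listing; only its call shape is visible (it takes a pointer to the request's parameter union rcv_packet.s and returns an ER status). A hedged sketch of that shape follows. The union tag, the subcommand field, and the GDC_* constant are all hypothetical.

/*
 * Sketch of gdc_control (): everything below except the ER return type
 * and the by-pointer union argument is an assumption.
 */
static ER
gdc_control (union io_param *param)     /* union tag assumed */
{
    switch (param->ctl_pack.cmd) {      /* field names assumed */
    case GDC_RESET:                     /* hypothetical subcommand */
        /* ... program the PD7220 command FIFO here ... */
        return E_OK;
    default:
        return E_PAR;                   /* unknown subcommand */
    }
}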
static int hvmemul_do_io(
    int is_mmio, paddr_t addr, unsigned long *reps, int size,
    paddr_t ram_gpa, int dir, int df, void *p_data)
{
    paddr_t value = ram_gpa;
    int value_is_ptr = (p_data == NULL);
    struct vcpu *curr = current;
    struct p2m_domain *p2m = p2m_get_hostp2m(curr->domain);
    ioreq_t *p = get_ioreq(curr);
    unsigned long ram_gfn = paddr_to_pfn(ram_gpa);
    p2m_type_t p2mt;
    mfn_t ram_mfn;
    int rc;

    /* Check for paged out page */
    ram_mfn = gfn_to_mfn_unshare(p2m, ram_gfn, &p2mt, 0);
    if ( p2m_is_paging(p2mt) )
    {
        p2m_mem_paging_populate(p2m, ram_gfn);
        return X86EMUL_RETRY;
    }
    if ( p2m_is_shared(p2mt) )
        return X86EMUL_RETRY;

    /*
     * Weird-sized accesses have undefined behaviour: we discard writes
     * and read all-ones.
     */
    if ( unlikely((size > sizeof(long)) || (size & (size - 1))) )
    {
        gdprintk(XENLOG_WARNING, "bad mmio size %d\n", size);
        ASSERT(p_data != NULL); /* cannot happen with a REP prefix */
        if ( dir == IOREQ_READ )
            memset(p_data, ~0, size);
        return X86EMUL_UNHANDLEABLE;
    }

    if ( (p_data != NULL) && (dir == IOREQ_WRITE) )
    {
        memcpy(&value, p_data, size);
        p_data = NULL;
    }

    if ( is_mmio && !value_is_ptr )
    {
        /* Part of a multi-cycle read or write? */
        if ( dir == IOREQ_WRITE )
        {
            paddr_t pa = curr->arch.hvm_vcpu.mmio_large_write_pa;
            unsigned int bytes = curr->arch.hvm_vcpu.mmio_large_write_bytes;
            if ( (addr >= pa) && ((addr + size) <= (pa + bytes)) )
                return X86EMUL_OKAY;
        }
        else
        {
            paddr_t pa = curr->arch.hvm_vcpu.mmio_large_read_pa;
            unsigned int bytes = curr->arch.hvm_vcpu.mmio_large_read_bytes;
            if ( (addr >= pa) && ((addr + size) <= (pa + bytes)) )
            {
                memcpy(p_data,
                       &curr->arch.hvm_vcpu.mmio_large_read[addr - pa],
                       size);
                return X86EMUL_OKAY;
            }
        }
    }

    switch ( curr->arch.hvm_vcpu.io_state )
    {
    case HVMIO_none:
        break;
    case HVMIO_completed:
        curr->arch.hvm_vcpu.io_state = HVMIO_none;
        if ( p_data == NULL )
            return X86EMUL_UNHANDLEABLE;
        goto finish_access;
    case HVMIO_dispatched:
        /* May have to wait for previous cycle of a multi-write to complete. */
        if ( is_mmio && !value_is_ptr && (dir == IOREQ_WRITE) &&
             (addr == (curr->arch.hvm_vcpu.mmio_large_write_pa +
                       curr->arch.hvm_vcpu.mmio_large_write_bytes)) )
            return X86EMUL_RETRY;
        /* Fall through: any other pending request cannot be handled here. */
    default:
        return X86EMUL_UNHANDLEABLE;
    }

    if ( p->state != STATE_IOREQ_NONE )
    {
        gdprintk(XENLOG_WARNING, "WARNING: io already pending (%d)?\n",
                 p->state);
        return X86EMUL_UNHANDLEABLE;
    }

    curr->arch.hvm_vcpu.io_state =
        (p_data == NULL) ? HVMIO_dispatched : HVMIO_awaiting_completion;
    curr->arch.hvm_vcpu.io_size = size;

    p->dir = dir;
    p->data_is_ptr = value_is_ptr;
    p->type = is_mmio ? IOREQ_TYPE_COPY : IOREQ_TYPE_PIO;
    p->size = size;
    p->addr = addr;
    p->count = *reps;
    p->df = df;
    p->data = value;

    hvmtrace_io_assist(is_mmio, p);

    if ( is_mmio )
    {
        rc = hvm_mmio_intercept(p);
        if ( rc == X86EMUL_UNHANDLEABLE )
            rc = hvm_buffered_io_intercept(p);
    }
    else
    {
        rc = hvm_portio_intercept(p);
    }

    switch ( rc )
    {
    case X86EMUL_OKAY:
    case X86EMUL_RETRY:
        *reps = p->count;
        p->state = STATE_IORESP_READY;
        hvm_io_assist();
        curr->arch.hvm_vcpu.io_state = HVMIO_none;
        break;
    case X86EMUL_UNHANDLEABLE:
        rc = X86EMUL_RETRY;
        if ( !hvm_send_assist_req(curr) )
            curr->arch.hvm_vcpu.io_state = HVMIO_none;
        else if ( p_data == NULL )
            rc = X86EMUL_OKAY;
        break;
    default:
        BUG();
    }

    if ( rc != X86EMUL_OKAY )
        return rc;

 finish_access:
    if ( p_data != NULL )
        memcpy(p_data, &curr->arch.hvm_vcpu.io_data, size);

    if ( is_mmio && !value_is_ptr )
    {
        /* Part of a multi-cycle read or write? */
        if ( dir == IOREQ_WRITE )
        {
            paddr_t pa = curr->arch.hvm_vcpu.mmio_large_write_pa;
            unsigned int bytes = curr->arch.hvm_vcpu.mmio_large_write_bytes;
            if ( bytes == 0 )
                pa = curr->arch.hvm_vcpu.mmio_large_write_pa = addr;
            if ( addr == (pa + bytes) )
                curr->arch.hvm_vcpu.mmio_large_write_bytes += size;
        }
        else
        {
            paddr_t pa = curr->arch.hvm_vcpu.mmio_large_read_pa;
            unsigned int bytes = curr->arch.hvm_vcpu.mmio_large_read_bytes;
            if ( bytes == 0 )
                pa = curr->arch.hvm_vcpu.mmio_large_read_pa = addr;
            if ( (addr == (pa + bytes)) &&
                 ((bytes + size) <
                  sizeof(curr->arch.hvm_vcpu.mmio_large_read)) )
            {
                memcpy(&curr->arch.hvm_vcpu.mmio_large_read[addr - pa],
                       p_data, size);
                curr->arch.hvm_vcpu.mmio_large_read_bytes += size;
            }
        }
    }

    return X86EMUL_OKAY;
}
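The mmio_large_write_* / mmio_large_read_* bookkeeping in hvmemul_do_io() coalesces a multi-cycle MMIO access (for example, a wide or REP-prefixed access replayed one cycle at a time) so that cycles already covered by a dispatched run return immediately instead of being forwarded again. Below is a standalone model of the write-side logic only; it is not Xen code, and the struct and function names are illustrative.

#include <stdint.h>
#include <stdbool.h>

struct mmio_write_state {
    uint64_t pa;         /* start of the contiguous run being coalesced */
    unsigned int bytes;  /* bytes already covered by the run */
};

/* Mirrors the early-exit check: is this write already inside a
 * previously dispatched run? */
static bool
mmio_write_covered(const struct mmio_write_state *st,
                   uint64_t addr, unsigned int size)
{
    return (addr >= st->pa) && ((addr + size) <= (st->pa + st->bytes));
}

/* Mirrors the tail of hvmemul_do_io(): after a write completes, start a
 * new run if none is in progress, and extend it when the next write is
 * exactly contiguous with the run's end. */
static void
mmio_write_record(struct mmio_write_state *st,
                  uint64_t addr, unsigned int size)
{
    if ( st->bytes == 0 )
        st->pa = addr;
    if ( addr == (st->pa + st->bytes) )
        st->bytes += size;
}

The read side follows the same pattern, except that completed reads are additionally cached in mmio_large_read[] (bounded by its size) so that later cycles of the same access can be satisfied from the buffer without another round trip.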