// Deliver interrupt 'vector' while the CPU is in real mode: push
// FLAGS/CS/IP, then load CS:IP from the 4-byte IVT entry at
// idtr.base + vector*4 and clear IF/TF (and AC/RF where applicable).
// NOTE(review): push_error and error_code are unused in this variant;
// real-mode interrupt delivery never pushes an error code.
void BX_CPU_C::real_mode_int(Bit8u vector, bx_bool push_error, Bit16u error_code)
{
  // The whole 4-byte vector entry must fit below the IDT limit, else #GP(0).
  if ((vector*4+3) > BX_CPU_THIS_PTR idtr.limit) {
    BX_ERROR(("interrupt(real mode) vector > idtr.limit"));
    exception(BX_GP_EXCEPTION, 0);
  }

  // Save return context: FLAGS, then CS, then IP.
  push_16((Bit16u) read_eflags());
  push_16(BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value);
  push_16(IP);

  Bit16u new_ip = system_read_word(BX_CPU_THIS_PTR idtr.base + 4 * vector);
  // CS.LIMIT can't change when in real/v8086 mode
  if (new_ip > BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled) {
    BX_ERROR(("interrupt(real mode): instruction pointer not within code segment limits"));
    exception(BX_GP_EXCEPTION, 0);
  }

  Bit16u cs_selector = system_read_word(BX_CPU_THIS_PTR idtr.base + 4 * vector + 2);
  load_seg_reg(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS], cs_selector);
  EIP = new_ip;

  /* INT affects the following flags: I,T */
  BX_CPU_THIS_PTR clear_IF();
  BX_CPU_THIS_PTR clear_TF();
#if BX_CPU_LEVEL >= 4
  BX_CPU_THIS_PTR clear_AC();
#endif
  BX_CPU_THIS_PTR clear_RF();
}
int bx_sound_linux_c::alsa_pcm_write() { int ret; if (alsa_pcm[0].buffer == NULL) { alsa_pcm[0].buffer = (char *)malloc(alsa_pcm[0].alsa_bufsize); } while (alsa_pcm[0].audio_bufsize >= alsa_pcm[0].alsa_bufsize) { memcpy(alsa_pcm[0].buffer, audio_buffer[0], alsa_pcm[0].alsa_bufsize); ret = snd_pcm_writei(alsa_pcm[0].handle, alsa_pcm[0].buffer, alsa_pcm[0].frames); if (ret == -EAGAIN) continue; if (ret == -EPIPE) { /* EPIPE means underrun */ BX_ERROR(("ALSA: underrun occurred")); snd_pcm_prepare(alsa_pcm[0].handle); } else if (ret < 0) { BX_ERROR(("ALSA: error from writei: %s", snd_strerror(ret))); } else if (ret != (int)alsa_pcm[0].frames) { BX_ERROR(("ALSA: short write, write %d frames", ret)); } alsa_pcm[0].audio_bufsize -= alsa_pcm[0].alsa_bufsize; memcpy(audio_buffer[0], audio_buffer[0]+alsa_pcm[0].alsa_bufsize, alsa_pcm[0].audio_bufsize); } if ((alsa_pcm[0].audio_bufsize == 0) && (alsa_pcm[0].buffer != NULL)) { free(alsa_pcm[0].buffer); alsa_pcm[0].buffer = NULL; } return BX_SOUNDLOW_OK; }
// Guest read from memory-mapped PCI device space: forward the access to
// the host pcidev kernel driver with the ioctl matching the access width
// and copy the result back into 'data'.
// Returns false only when the driver fd is unavailable, so a default
// handler can service the access instead.
static bx_bool pcidev_mem_read_handler(bx_phy_address addr, unsigned len, void *data, void *param)
{
  struct region_struct *region = (struct region_struct *)param;
  bx_pcidev_c *pcidev = region->pcidev;
  int fd = pcidev->pcidev_fd;
  int status = -1;

  if (fd == -1) {
    /* we failed to handle the request, so let a default handler do it for us */
    return false;
  }

  BX_INFO(("Reading I/O memory at 0x%08x", (unsigned)addr));

  // Translate the guest physical address into the host BAR address.
  struct pcidev_io_struct req;
  req.address = addr + region->host_start - region->start;

  switch (len) {
    case 1:
      status = ioctl(fd, PCIDEV_IOCTL_READ_MEM_BYTE, &req);
      *(Bit8u *)data = req.value;
      break;
    case 2:
      status = ioctl(fd, PCIDEV_IOCTL_READ_MEM_WORD, &req);
      *(Bit16u *)data = req.value;
      break;
    case 4:
      status = ioctl(fd, PCIDEV_IOCTL_READ_MEM_DWORD, &req);
      *(Bit32u *)data = req.value;
      break;
    default:
      BX_ERROR(("Unsupported pcidev read mem operation"));
      break;
  }
  if (status == -1) {
    BX_ERROR(("pcidev read mem error"));
  }
  return true; // ok, we handled the request
}
// Far branch commit for 64-bit capable CPUs: validate the target RIP
// against the destination code segment (canonical check for 64-bit
// segments, limit check otherwise), then load CS and RIP.
void BX_CPU_C::branch_far64(bx_selector_t *selector, bx_descriptor_t *descriptor, bx_address rip, Bit8u cpl)
{
#if BX_SUPPORT_X86_64
  if (descriptor->u.segment.l) {
    // 64-bit code segment: no limit check, but RIP must be canonical.
    if (! IsCanonical(rip)) {
      BX_ERROR(("branch_far: canonical RIP violation"));
      exception(BX_GP_EXCEPTION, 0, 0);
    }
  }
  else
#endif
  {
    /* instruction pointer must be in code segment limit else #GP(0) */
    if (rip > descriptor->u.segment.limit_scaled) {
      BX_ERROR(("branch_far: RIP > limit"));
      exception(BX_GP_EXCEPTION, 0, 0);
    }
  }

  /* Load CS:IP from destination pointer */
  /* Load CS-cache with new segment descriptor */
  load_cs(selector, descriptor, cpl);

  /* Change the RIP value */
  RIP = rip;
}
int bx_sound_linux_c::sendwavepacket(int length, Bit8u data[]) { #if BX_HAVE_ALSASOUND if (use_alsa_pcm) { if ((alsa_pcm[0].audio_bufsize+length) <= BX_SOUND_LINUX_BUFSIZE) { memcpy(audio_buffer[0]+alsa_pcm[0].audio_bufsize, data, length); alsa_pcm[0].audio_bufsize += length; } else { BX_ERROR(("ALSA: audio buffer overflow")); return BX_SOUNDLOW_ERR; } if (alsa_pcm[0].audio_bufsize < alsa_pcm[0].alsa_bufsize) { return BX_SOUNDLOW_OK; } else { return alsa_pcm_write(); } } #endif int ret = write(wave_fd[0], data, length); if (ret == length) { return BX_SOUNDLOW_OK; } else { BX_ERROR(("OSS: write error")); return BX_SOUNDLOW_ERR; } }
// Periodic receive poll for the TAP interface: read one frame from the
// tap fd, strip the 2-byte prefix on platforms that prepend one, and
// hand the frame to the registered receive handler.
void bx_tap_pktmover_c::rx_timer()
{
  int nbytes;
  Bit8u buf[BX_PACKET_BUFSIZE];
  Bit8u *rxbuf;
  if (fd<0) return;
#if defined(__sun__)
  // Solaris: the tap stream is read with getmsg() instead of read().
  struct strbuf sbuf;
  int f = 0;
  sbuf.maxlen = sizeof(buf);
  sbuf.buf = (char *)buf;
  nbytes = getmsg(fd, NULL, &sbuf, &f) >=0 ? sbuf.len : -1;
#else
  nbytes = read (fd, buf, sizeof(buf));
#endif

  // hack: discard first two bytes
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__APPLE__) || defined(__sun__) // Should be fixed for other *BSD
  rxbuf = buf;
#else
  rxbuf = buf+2;
  nbytes-=2;
#endif

#if defined(__linux__)
  // hack: TAP device likes to create an ethernet header which has
  // the same source and destination address FE:FD:00:00:00:00.
  // Change the dest address to FE:FD:00:00:00:01.
  // NOTE(review): rxbuf is inspected before the nbytes<0 check below,
  // so on a failed read this compares uninitialized stack bytes; verify.
  if (!memcmp(&rxbuf[0], &rxbuf[6], 6)) {
    rxbuf[5] = guest_macaddr[5];
  }
#endif

  if (nbytes>0)
    BX_DEBUG(("tap read returned %d bytes", nbytes));
  if (nbytes<0) {
    if (errno != EAGAIN)
      BX_ERROR(("tap read error: %s", strerror(errno)));
    return;
  }
#if BX_ETH_TAP_LOGGING
  if (nbytes > 0) {
    BX_DEBUG(("receive packet length %u", nbytes));
    // dump raw bytes to a file, eventually dump in pcap format so that
    // tcpdump -r FILE can interpret them for us.
    int n = fwrite(rxbuf, nbytes, 1, rxlog);
    if (n != 1) BX_ERROR(("fwrite to rxlog failed, nbytes = %d", nbytes));
    // dump packet in hex into an ascii log file
    write_pktlog_txt(rxlog_txt, rxbuf, nbytes, 1);
    // flush log so that we see the packets as they arrive w/o buffering
    fflush(rxlog);
  }
#endif
  BX_DEBUG(("eth_tap: got packet: %d bytes, dst=%x:%x:%x:%x:%x:%x, src=%x:%x:%x:%x:%x:%x\n", nbytes, rxbuf[0], rxbuf[1], rxbuf[2], rxbuf[3], rxbuf[4], rxbuf[5], rxbuf[6], rxbuf[7], rxbuf[8], rxbuf[9], rxbuf[10], rxbuf[11]));
  if (nbytes < 60) {
    // Pad short frames to the ethernet minimum of 60 bytes.
    // NOTE(review): the padding bytes come from the uninitialized stack
    // buffer -- consider zero-filling; verify intent.
    BX_INFO(("packet too short (%d), padding to 60", nbytes));
    nbytes = 60;
  }
  (*rxh)(netdev, rxbuf, nbytes);
}
// Initialize the USB mass-storage device: open the backing disk or
// cdrom image, build the SCSI device on top of it, register its state,
// and mark the USB device connected. Returns 1 on success, 0 on error.
bx_bool usb_msd_device_c::init()
{
  if (d.type == USB_DEV_TYPE_DISK) {
    s.hdimage = DEV_hdimage_init_image(s.image_mode, 0, s.journal);
    // NOTE(review): s.hdimage is dereferenced without a NULL check --
    // assumes DEV_hdimage_init_image never returns NULL; verify.
    if (s.hdimage->open(s.fname) < 0) {
      BX_ERROR(("could not open hard drive image file '%s'", s.fname));
      return 0;
    } else {
      s.scsi_dev = new scsi_device_t(s.hdimage, 0, usb_msd_command_complete, (void*)this);
    }
    sprintf(s.info_txt, "USB HD: path='%s', mode='%s'", s.fname, hdimage_mode_names[s.image_mode]);
  } else if (d.type == USB_DEV_TYPE_CDROM) {
#ifdef LOWLEVEL_CDROM
    s.cdrom = DEV_hdimage_init_cdrom(s.fname);
    if (!s.cdrom->insert_cdrom()) {
      BX_ERROR(("could not open cdrom image file '%s'", s.fname));
      return 0;
    } else {
      s.scsi_dev = new scsi_device_t(s.cdrom, 0, usb_msd_command_complete, (void*)this);
    }
    sprintf(s.info_txt, "USB CD: path='%s'", s.fname);
#else
    BX_PANIC(("missing LOWLEVEL_CDROM support"));
    return 0;
#endif
  }
  // NOTE(review): if d.type is neither DISK nor CDROM, s.scsi_dev is
  // used here without having been assigned in this function; verify
  // callers guarantee one of the two types.
  s.scsi_dev->register_state(s.sr_list, "scsidev");
  s.mode = USB_MSDM_CBW;
  d.connected = 1;
  return 1;
}
// Read a 32-bit operand for a read-modify-write instruction. On the TLB
// fast path the translated host address is remembered in
// address_xlation.pages so the matching RMW write can store through it.
BX_CPU_C::read_RMW_virtual_dword(unsigned s, bx_address offset)
{
  bx_address laddr;
  bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];
  Bit32u data;
  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 4, BX_RW);

  if (seg->cache.valid & SegAccessWOK4G) {
accessOK:
    laddr = BX_CPU_THIS_PTR get_laddr(s, offset);
#if BX_SupportGuest2HostTLB
    unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3);
    bx_address lpf = AlignedAccessLPFOf(laddr, 3);
    bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
    if (tlbEntry->lpf == lpf) {
      // See if the TLB entry privilege level allows us write access
      // from this CPL.
      if (tlbEntry->accessBits & (0x10 << CPL)) {
        bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
        Bit32u pageOffset = PAGE_OFFSET(laddr);
        BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 4, BX_RW);
        Bit32u *hostAddr = (Bit32u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
        pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
        ReadHostDWordFromLittleEndian(hostAddr, data);
        // Remember the host pointer for the write half of the RMW pair.
        BX_CPU_THIS_PTR address_xlation.pages = (bx_ptr_equiv_t) hostAddr;
        BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 4, CPL, BX_READ, (Bit8u*) &data);
        return data;
      }
    }
#endif
#if BX_SUPPORT_X86_64
    if (! IsCanonical(laddr)) {
      BX_ERROR(("read_RMW_virtual_dword(): canonical failure"));
      exception(int_number(seg), 0, 0);
    }
#endif
#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
    if (BX_CPU_THIS_PTR alignment_check()) {
      if (laddr & 3) {
        BX_ERROR(("read_RMW_virtual_dword(): #AC misaligned access"));
        exception(BX_AC_EXCEPTION, 0, 0);
      }
    }
#endif
    access_read_linear(laddr, 4, CPL, BX_RW, (void *) &data);
    return data;
  }

  if (seg->cache.valid & SegAccessWOK) {
    // A 4-byte access needs offset+3 <= limit, i.e. offset < limit-2.
    if (Is64BitMode() || (offset < (seg->cache.u.segment.limit_scaled-2)))
      goto accessOK;
  }
  write_virtual_checks(seg, offset, 4);
  goto accessOK;
}
// VME (CR4.VME) software-interrupt redirection for virtual-8086 mode:
// consult the redirection bitmap that precedes the I/O permission bitmap
// in the TSS; when the vector's bit is clear, deliver the interrupt
// through the virtual-mode IVT (with IOPL/VIF flag fix-ups) and return 1.
// NOTE(review): this fragment appears truncated -- the closing #endif
// and the non-redirected return path are not visible here.
int BX_CPU_C::v86_redirect_interrupt(Bit8u vector)
{
#if BX_CPU_LEVEL >= 5
  if (BX_CPU_THIS_PTR cr4.get_VME())
  {
    bx_address tr_base = BX_CPU_THIS_PTR tr.cache.u.segment.base;
    // The TSS must be large enough to hold the I/O base word at offset 102.
    if (BX_CPU_THIS_PTR tr.cache.u.segment.limit_scaled < 103) {
      BX_ERROR(("INT_Ib(): TR.limit < 103 in VME"));
      exception(BX_GP_EXCEPTION, 0);
    }
    // The redirection bitmap sits 32 bytes before the I/O bitmap.
    Bit32u io_base = system_read_word(tr_base + 102), offset = io_base - 32 + (vector >> 3);
    if (offset > BX_CPU_THIS_PTR tr.cache.u.segment.limit_scaled) {
      BX_ERROR(("INT_Ib(): failed to fetch VME redirection bitmap"));
      exception(BX_GP_EXCEPTION, 0);
    }
    Bit8u vme_redirection_bitmap = system_read_byte(tr_base + offset);
    if (!(vme_redirection_bitmap & (1 << (vector & 7))))
    {
      // redirect interrupt through virtual-mode idt
      Bit16u temp_flags = (Bit16u) read_eflags();
      Bit16u temp_CS = system_read_word(vector*4 + 2);
      Bit16u temp_IP = system_read_word(vector*4);
      // With IOPL < 3 the pushed image reports IOPL=3 and reflects VIF
      // as IF, so the v86 monitor's virtual interrupt state is preserved.
      if (BX_CPU_THIS_PTR get_IOPL() < 3) {
        temp_flags |= EFlagsIOPLMask;
        if (BX_CPU_THIS_PTR get_VIF())
          temp_flags |= EFlagsIFMask;
        else
          temp_flags &= ~EFlagsIFMask;
      }
      Bit16u old_IP = IP;
      Bit16u old_CS = BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value;
      push_16(temp_flags);
      // push return address onto new stack
      push_16(old_CS);
      push_16(old_IP);
      load_seg_reg(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS], (Bit16u) temp_CS);
      EIP = temp_IP;
      BX_CPU_THIS_PTR clear_TF();
      BX_CPU_THIS_PTR clear_RF();
      if (BX_CPU_THIS_PTR get_IOPL() == 3)
        BX_CPU_THIS_PTR clear_IF();
      else
        BX_CPU_THIS_PTR clear_VIF();
      return 1;
    }
  }
int bx_sound_linux_c::alsa_seq_open(const char *alsadev) { char *mididev, *ptr; int client, port, ret = 0; int length = strlen(alsadev) + 1; mididev = new char[length]; if (mididev == NULL) return BX_SOUNDLOW_ERR; strcpy(mididev, alsadev); ptr = strtok(mididev, ":"); if (ptr == NULL) { BX_ERROR(("ALSA sequencer setup: missing client parameters")); return BX_SOUNDLOW_ERR; } client = atoi(ptr); ptr = strtok(NULL, ":"); if (ptr == NULL) { BX_ERROR(("ALSA sequencer setup: missing port parameter")); return BX_SOUNDLOW_ERR; } port = atoi(ptr); delete(mididev); if (snd_seq_open(&alsa_seq.handle, "default", SND_SEQ_OPEN_OUTPUT, 0) < 0) { BX_ERROR(("Couldn't open ALSA sequencer for midi output")); return BX_SOUNDLOW_ERR; } ret = snd_seq_create_simple_port(alsa_seq.handle, NULL, SND_SEQ_PORT_CAP_WRITE | SND_SEQ_PORT_CAP_SUBS_WRITE | SND_SEQ_PORT_CAP_READ, SND_SEQ_PORT_TYPE_MIDI_GENERIC); if (ret < 0) { BX_ERROR(("ALSA sequencer: error creating port %s", snd_strerror(errno))); } else { alsa_seq.source_port = ret; ret = snd_seq_connect_to(alsa_seq.handle, alsa_seq.source_port, client, port); if (ret < 0) { BX_ERROR(("ALSA sequencer: could not connect to port %d:%d", client, port)); } } if (ret < 0) { snd_seq_close(alsa_seq.handle); return BX_SOUNDLOW_ERR; } else { return BX_SOUNDLOW_OK; } }
// Read a 64-bit operand for a read-modify-write instruction in long
// mode. On the TLB fast path the translated host address is remembered
// in address_xlation.pages so the matching RMW write can reuse it.
BX_CPU_C::read_RMW_virtual_qword_64(unsigned s, Bit64u offset)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64);

  bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];
  Bit64u data;
  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 8, BX_RW);

  Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
#if BX_SupportGuest2HostTLB
  unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 7);
  Bit64u lpf = AlignedAccessLPFOf(laddr, 7);
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us write access
    // from this CPL.
    if (tlbEntry->accessBits & (0x10 << CPL)) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 8, BX_RW);
      Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
      pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
      ReadHostQWordFromLittleEndian(hostAddr, data);
      // Remember the host pointer for the write half of the RMW pair.
      BX_CPU_THIS_PTR address_xlation.pages = (bx_ptr_equiv_t) hostAddr;
      BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 8, CPL, BX_READ, (Bit8u*) &data);
      return data;
    }
  }
#endif

  if (! IsCanonical(laddr)) {
    BX_ERROR(("read_RMW_virtual_qword_64(): canonical failure"));
    exception(int_number(seg), 0, 0);
  }

#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
  if (BX_CPU_THIS_PTR alignment_check()) {
    if (laddr & 7) {
      BX_ERROR(("read_RMW_virtual_qword_64(): #AC misaligned access"));
      exception(BX_AC_EXCEPTION, 0, 0);
    }
  }
#endif

  access_read_linear(laddr, 8, CPL, BX_RW, (void *) &data);
  return data;
}
// BSWAP with a 16-bit operand size is architecturally undefined; this
// implementation logs the condition and writes 0 to the 16-bit register.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::BSWAP_RX(bxInstruction_c *i)
{
  BX_ERROR(("BSWAP with 16-bit opsize: undefined behavior !"));
  BX_WRITE_16BIT_REG(i->rm(), 0);

  BX_NEXT_INSTR(i);
}
// Send one byte out of the raw serial port (Win32 only; a no-op on
// other platforms or when the port is absent). Uses an overlapped
// WriteFile and waits up to 100 ms for completion.
void serial_raw::transmit(Bit8u byte)
{
#ifdef WIN32
  DWORD DErr;
  // Initialize Len2: it is checked below even when WriteFile fails with
  // a non-pending error or the completion wait times out, in which case
  // it was previously read uninitialized.
  DWORD Len2 = 0;
  OVERLAPPED tx_ovl;
#endif

  BX_DEBUG (("transmit %d", byte));
  if (present) {
#ifdef WIN32
    // Re-apply port settings if the DCB changed, otherwise just clear
    // any pending communication error state.
    if (DCBchanged) {
      setup_port();
    } else {
      ClearCommError(hCOM, &DErr, NULL);
    }
    memset(&tx_ovl, 0, sizeof(OVERLAPPED));
    tx_ovl.hEvent = CreateEvent(NULL,TRUE,TRUE,"transmit");
    if (!WriteFile(hCOM, &byte, 1, &Len2, &tx_ovl)) {
      if (GetLastError() == ERROR_IO_PENDING) {
        // Wait up to 100 ms for the overlapped write to finish.
        if (WaitForSingleObject(tx_ovl.hEvent, 100) == WAIT_OBJECT_0) {
          GetOverlappedResult(hCOM, &tx_ovl, &Len2, FALSE);
        }
      }
    }
    if (Len2 != 1) BX_ERROR(("transmit failed: len = %d", Len2));
    ClearCommError(hCOM, &DErr, NULL);
    CloseHandle(tx_ovl.hEvent);
#endif
  }
}
// Real-mode interrupt delivery (variant that reads the IVT through
// access_read_linear): push FLAGS/CS/IP, fetch the new CS:IP from the
// interrupt vector table, and clear IF/TF (and AC/RF where applicable).
// NOTE(review): is_INT, is_error_code and error_code are unused in this
// variant; real-mode interrupts never push an error code.
void BX_CPU_C::real_mode_int(Bit8u vector, bx_bool is_INT, bx_bool is_error_code, Bit16u error_code)
{
  // real mode interrupt
  Bit16u cs_selector, ip;

  // The 4-byte IVT entry must lie within the IDT limit, else #GP(0).
  if ((vector*4+3) > BX_CPU_THIS_PTR idtr.limit) {
    BX_ERROR(("interrupt(real mode) vector > idtr.limit"));
    exception(BX_GP_EXCEPTION, 0, 0);
  }

  // Save return context: FLAGS, then CS, then IP.
  push_16((Bit16u) read_eflags());
  cs_selector = BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value;
  push_16(cs_selector);
  ip = EIP;
  push_16(ip);

  access_read_linear(BX_CPU_THIS_PTR idtr.base + 4 * vector, 2, 0, BX_READ, &ip);
  EIP = (Bit32u) ip;
  access_read_linear(BX_CPU_THIS_PTR idtr.base + 4 * vector + 2, 2, 0, BX_READ, &cs_selector);
  load_seg_reg(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS], cs_selector);

  /* INT affects the following flags: I,T */
  BX_CPU_THIS_PTR clear_IF();
  BX_CPU_THIS_PTR clear_TF();
#if BX_CPU_LEVEL >= 4
  BX_CPU_THIS_PTR clear_AC();
#endif
  BX_CPU_THIS_PTR clear_RF();
}
void BX_CPU_C::task_switch_load_selector(bx_segment_reg_t *seg, bx_selector_t *selector, Bit16u raw_selector, Bit8u cs_rpl) { bx_descriptor_t descriptor; Bit32u dword1, dword2; // NULL selector is OK, will leave cache invalid if ((raw_selector & 0xfffc) != 0) { bx_bool good = fetch_raw_descriptor2(selector, &dword1, &dword2); if (!good) { BX_ERROR(("task_switch(%s): bad selector fetch !", strseg(seg))); exception(BX_TS_EXCEPTION, raw_selector & 0xfffc, 0); } parse_descriptor(dword1, dword2, &descriptor); /* AR byte must indicate data or readable code segment else #TS(selector) */ if (descriptor.segment==0 || (IS_CODE_SEGMENT(descriptor.type) && IS_CODE_SEGMENT_READABLE(descriptor.type) == 0)) { BX_ERROR(("task_switch(%s): not data or readable code !", strseg(seg))); exception(BX_TS_EXCEPTION, raw_selector & 0xfffc, 0); } /* If data or non-conforming code, then both the RPL and the CPL * must be less than or equal to DPL in AR byte else #GP(selector) */ if (IS_DATA_SEGMENT(descriptor.type) || IS_CODE_SEGMENT_NON_CONFORMING(descriptor.type)) { if ((selector->rpl > descriptor.dpl) || (cs_rpl > descriptor.dpl)) { BX_ERROR(("load_seg_reg(%s): RPL & CPL must be <= DPL", strseg(seg))); exception(BX_TS_EXCEPTION, raw_selector & 0xfffc, 0); } } if (! IS_PRESENT(descriptor)) { BX_ERROR(("task_switch(%s): descriptor not present !", strseg(seg))); exception(BX_NP_EXCEPTION, raw_selector & 0xfffc, 0); } // All checks pass, fill in shadow cache seg->cache = descriptor; } }
int bx_sound_linux_c::getwavepacket(int length, Bit8u data[]) { int ret; #if BX_HAVE_ALSASOUND if (use_alsa_pcm) { if (alsa_pcm[1].buffer == NULL) { alsa_pcm[1].buffer = (char *)malloc(alsa_pcm[1].alsa_bufsize); } while (alsa_pcm[1].audio_bufsize < length) { ret = snd_pcm_readi(alsa_pcm[1].handle, alsa_pcm[1].buffer, alsa_pcm[1].frames); if (ret == -EAGAIN) continue; if (ret == -EPIPE) { /* EPIPE means overrun */ BX_ERROR(("overrun occurred")); snd_pcm_prepare(alsa_pcm[1].handle); } else if (ret < 0) { BX_ERROR(("error from read: %s", snd_strerror(ret))); } else if (ret != (int)alsa_pcm[1].frames) { BX_ERROR(("short read, read %d frames", ret)); } memcpy(audio_buffer[1]+alsa_pcm[1].audio_bufsize, alsa_pcm[1].buffer, alsa_pcm[1].alsa_bufsize); alsa_pcm[1].audio_bufsize += alsa_pcm[1].alsa_bufsize; } memcpy(data, audio_buffer[1], length); alsa_pcm[1].audio_bufsize -= length; if ((alsa_pcm[1].audio_bufsize <= 0) && (alsa_pcm[1].buffer != NULL)) { free(alsa_pcm[1].buffer); alsa_pcm[1].buffer = NULL; } return BX_SOUNDLOW_OK; } #endif ret = read(wave_fd[1], data, length); if (ret == length) { return BX_SOUNDLOW_OK; } else { BX_ERROR(("OSS: write error")); return BX_SOUNDLOW_ERR; } }
// Near branch within the current code segment: validate the target EIP
// against CS.limit (in every mode, not only protected mode) and commit.
BX_CPU_C::branch_near32(Bit32u new_EIP)
{
  Bit32u cs_limit = BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled;

  if (new_EIP > cs_limit) {
    BX_ERROR(("branch_near: offset outside of CS limits"));
    exception(BX_GP_EXCEPTION, 0, 0);
  }

  EIP = new_EIP;
}
// IRET with 32-bit operand size (legacy bx_cpu_c code path). Dispatches
// to the v8086 or protected-mode return routines; the plain real-mode
// fall-through is flagged as unverified and panics before executing.
void bx_cpu_c::IRET32(BxInstruction_t *i)
{
  Bit32u eip, ecs_raw, eflags;

#if BX_DEBUGGER
  // Let the debugger know an IRET happened and where.
  bx_cpu. show_flag |= Flag_iret;
  bx_cpu. show_eip = bx_cpu. eip;
#endif

  // CS:EIP is about to change: prefetched instruction bytes are stale.
  invalidate_prefetch_q();

  if (v8086_mode()) {
    // IOPL check in stack_return_from_v86()
    stack_return_from_v86(i);
    goto done;
  }

#if BX_CPU_LEVEL >= 2
  if (bx_cpu. cr0.pe) {
    iret_protected(i);
    goto done;
  }
#endif

  BX_ERROR(("IRET32 called when you're not in vm8086 mode or protected mode."));
  BX_ERROR(("IRET32 may not be implemented right, since it doesn't check anything."));
  BX_PANIC(("Please report that you have found a test case for bx_cpu_c::IRET32."));

  // Unchecked real-mode 32-bit IRET: pop EIP, CS, EFLAGS.
  pop_32(&eip);
  pop_32(&ecs_raw);
  pop_32(&eflags);

  load_seg_reg(&bx_cpu. sregs[BX_SEG_REG_CS], (Bit16u) ecs_raw);
  bx_cpu. eip = eip;
  //FIXME: this should do (eflags & 0x257FD5) | (EFLAGS | 0x1A0000)
  write_eflags(eflags, /* change IOPL? */ 1, /* change IF? */ 1, 0, 1);

done:
  // Notify instrumentation of the far control transfer.
  BX_INSTR_FAR_BRANCH(BX_INSTR_IS_IRET, bx_cpu. sregs[BX_SEG_REG_CS].selector.value, bx_cpu. eip);
  return;
}
// Store a qword onto a (new) 64-bit stack at linear address 'laddr',
// performing the access at privilege 'curr_pl'. Raises #SS(0) for a
// non-canonical address and #AC for a misaligned user-mode access when
// alignment checking is enabled.
void BX_CPU_C::write_new_stack_qword_64(Bit64u laddr, unsigned curr_pl, Bit64u data)
{
#if BX_SupportGuest2HostTLB
  // Fast path: write directly through the host page cached in the TLB.
  unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 7);
  Bit64u lpf = AlignedAccessLPFOf(laddr, 7);
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us write access
    // from this CPL.
    if (tlbEntry->accessBits & (0x10 << CPL)) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 8, BX_WRITE);
      BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 8, curr_pl, BX_WRITE, (Bit8u*) &data);
      Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
      pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
      WriteHostQWordToLittleEndian(hostAddr, data);
      return;
    }
  }
#endif

  if (! IsCanonical(laddr)) {
    BX_ERROR(("write_new_stack_qword_64(): canonical failure"));
    exception(BX_SS_EXCEPTION, 0, 0);
  }

#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
  if (BX_CPU_THIS_PTR alignment_check() && curr_pl == 3) {
    if (laddr & 7) {
      BX_ERROR(("write_new_stack_qword_64(): #AC misaligned access"));
      exception(BX_AC_EXCEPTION, 0, 0);
    }
  }
#endif

  // Slow path: full linear-address write with paging/permission checks.
  access_write_linear(laddr, 8, curr_pl, (void *) &data);
}
// Construct the raw serial backend. On Win32, open the named COM port in
// overlapped mode, configure a default 115200 8N1 DCB and (when built
// with WIN32_RECEIVE_RAW) start the receive thread. On other platforms
// the device is simply marked absent.
serial_raw::serial_raw(const char *devname)
{
#ifdef WIN32
  char portstr[MAX_PATH];
#ifdef WIN32_RECEIVE_RAW
  DWORD threadID;
#endif
#endif

  put ("SERR");
  settype (SERRLOG);
#ifdef WIN32
  // Default line settings: binary mode, DTR/RTS asserted, 115200 8N1.
  memset(&dcb, 0, sizeof(DCB));
  dcb.DCBlength = sizeof(DCB);
  dcb.fBinary = 1;
  dcb.fDtrControl = DTR_CONTROL_ENABLE;
  dcb.fRtsControl = RTS_CONTROL_ENABLE;
  dcb.Parity = NOPARITY;
  dcb.ByteSize = 8;
  dcb.StopBits = ONESTOPBIT;
  dcb.BaudRate = CBR_115200;
  DCBchanged = FALSE;
  if (lstrlen(devname) > 0) {
    // Build the "\\.\<dev>" device path expected by CreateFile.
    wsprintf(portstr, "\\\\.\\%s", devname);
    hCOM = CreateFile(portstr, GENERIC_READ|GENERIC_WRITE, 0, NULL, OPEN_EXISTING, FILE_FLAG_OVERLAPPED, NULL);
    if (hCOM != INVALID_HANDLE_VALUE) {
      present = 1;
      GetCommModemStatus(hCOM, &MSR_value);
      SetupComm(hCOM, 8192, 2048);
      // Drop anything queued from before we owned the port.
      PurgeComm(hCOM, PURGE_TXABORT | PURGE_RXABORT | PURGE_TXCLEAR | PURGE_RXCLEAR);
#ifdef WIN32_RECEIVE_RAW
      SetCommMask(hCOM, EV_BREAK | EV_CTS | EV_DSR | EV_ERR | EV_RING | EV_RLSD | EV_RXCHAR);
      memset(&rx_ovl, 0, sizeof(OVERLAPPED));
      rx_ovl.hEvent = CreateEvent(NULL,TRUE,FALSE,"receive");
      hRawSerialThread = CreateThread(NULL, 0, RawSerialThread, this, 0, &threadID);
#endif
    } else {
      present = 0;
      BX_ERROR(("Raw device '%s' not present", devname));
    }
  } else {
    present = 0;
  }
#else
  present = 0;
#endif
  // Common initial state regardless of platform.
  set_modem_control(0x00);
  set_break(0);
  rxdata_count = 0;
}
// Read a byte operand for a read-modify-write instruction. On the TLB
// fast path the translated host address is remembered in
// address_xlation.pages so the matching RMW write can store through it.
BX_CPU_C::read_RMW_virtual_byte(unsigned s, bx_address offset)
{
  bx_address laddr;
  bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];
  Bit8u data;
  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 1, BX_RW);

  if (seg->cache.valid & SegAccessWOK4G) {
accessOK:
    laddr = BX_CPU_THIS_PTR get_laddr(s, offset);
#if BX_SupportGuest2HostTLB
    unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0);
    bx_address lpf = LPFOf(laddr);
    bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
    if (tlbEntry->lpf == lpf) {
      // See if the TLB entry privilege level allows us write access
      // from this CPL.
      if (tlbEntry->accessBits & (0x10 << CPL)) {
        bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
        Bit32u pageOffset = PAGE_OFFSET(laddr);
        BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 1, BX_RW);
        Bit8u *hostAddr = (Bit8u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
        pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
        data = *hostAddr;
        // Remember the host pointer for the write half of the RMW pair.
        BX_CPU_THIS_PTR address_xlation.pages = (bx_ptr_equiv_t) hostAddr;
        BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 1, CPL, BX_READ, (Bit8u*) &data);
        return data;
      }
    }
#endif
    // Accelerated attempt falls through to long path. Do it the
    // old fashioned way...
#if BX_SUPPORT_X86_64
    if (! IsCanonical(laddr)) {
      BX_ERROR(("read_RMW_virtual_byte(): canonical failure"));
      exception(int_number(seg), 0, 0);
    }
#endif
    access_read_linear(laddr, 1, CPL, BX_RW, (void *) &data);
    return data;
  }

  if (seg->cache.valid & SegAccessWOK) {
    // A 1-byte access only needs offset <= limit.
    if (Is64BitMode() || (offset <= seg->cache.u.segment.limit_scaled))
      goto accessOK;
  }
  write_virtual_checks(seg, offset, 1);
  goto accessOK;
}
// Transmit one guest frame through the TAP device. BSD/Apple/Sun
// variants write the frame as-is (Solaris retries interrupted writes);
// other platforms prepend a 2-byte zero pad before the frame.
void bx_tap_pktmover_c::sendpkt(void *buf, unsigned io_len)
{
  Bit8u txbuf[BX_PACKET_BUFSIZE];
  txbuf[0] = 0;
  txbuf[1] = 0;
  unsigned int size;
  // NOTE(review): io_len (or io_len+2) is assumed to fit in txbuf --
  // presumably callers bound frames by BX_PACKET_BUFSIZE; verify.
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || \
    defined(__APPLE__) || defined(__OpenBSD__) || defined(__sun__) // Should be fixed for other *BSD
  memcpy(txbuf, buf, io_len);

  /* PD - for Sun variant the retry cycle from qemu - mainly to be sure
     packet is really out */
#if defined(__sun__)
  int ret;
  for(;;) {
    ret=write(fd, txbuf, io_len);
    // Retry on EINTR/EAGAIN; anything else (or success) ends the loop.
    if (ret < 0 && (errno == EINTR || errno == EAGAIN)) {
    } else {
      size=ret;
      break;
    }
  }
#else /* not defined __sun__ */
  size = write(fd, txbuf, io_len);
#endif /* whole condition about defined __sun__ */

  if (size != io_len) {
#else /* not bsd/apple/sun style */
  memcpy(txbuf+2, buf, io_len);
  size = write(fd, txbuf, io_len+2);
  if (size != io_len+2) {
#endif
    BX_PANIC(("write on tap device: %s", strerror(errno)));
  } else {
    BX_DEBUG(("wrote %d bytes + ev. 2 byte pad on tap", io_len));
  }
#if BX_ETH_TAP_LOGGING
  BX_DEBUG(("sendpkt length %u", io_len));
  // dump raw bytes to a file, eventually dump in pcap format so that
  // tcpdump -r FILE can interpret them for us.
  int n = fwrite(buf, io_len, 1, txlog);
  if (n != 1) BX_ERROR(("fwrite to txlog failed, io_len = %u", io_len));
  // dump packet in hex into an ascii log file
  write_pktlog_txt(txlog_txt, (const Bit8u *)buf, io_len, 0);
  // flush log so that we see the packets as they arrive w/o buffering
  fflush(txlog);
#endif
}

// Timer callback trampoline: recover the object pointer and poll for
// received packets.
void bx_tap_pktmover_c::rx_timer_handler(void *this_ptr)
{
  bx_tap_pktmover_c *class_ptr = (bx_tap_pktmover_c *) this_ptr;
  class_ptr->rx_timer();
}
// assuming the write happens in legacy mode void BX_CPU_C::write_new_stack_dword_32(bx_segment_reg_t *seg, bx_address offset, unsigned curr_pl, Bit32u data) { Bit32u laddr; BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64); if (seg->cache.valid & SegAccessWOK4G) { accessOK: laddr = (Bit32u)(seg->cache.u.segment.base + offset); #if BX_SupportGuest2HostTLB unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3); bx_address lpf = AlignedAccessLPFOf(laddr, 3); bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex]; if (tlbEntry->lpf == lpf) { // See if the TLB entry privilege level allows us write access // from this CPL. if (tlbEntry->accessBits & (0x10 << CPL)) { bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr; Bit32u pageOffset = PAGE_OFFSET(laddr); BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 4, BX_WRITE); BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 4, curr_pl, BX_WRITE, (Bit8u*) &data); Bit32u *hostAddr = (Bit32u*) (hostPageAddr | pageOffset); #if BX_SUPPORT_ICACHE pageWriteStampTable.decWriteStamp(tlbEntry->ppf); #endif WriteHostDWordToLittleEndian(hostAddr, data); return; } } #endif #if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK if (BX_CPU_THIS_PTR alignment_check() && curr_pl == 3) { if (laddr & 3) { BX_ERROR(("write_new_stack_dword_32(): #AC misaligned access")); exception(BX_AC_EXCEPTION, 0, 0); } } #endif access_write_linear(laddr, 4, curr_pl, (void *) &data); return; } if (seg->cache.valid & SegAccessWOK) { if (offset < (seg->cache.u.segment.limit_scaled-2)) goto accessOK; } write_virtual_checks(seg, offset, 4); goto accessOK; }
// Initialize the USB mass-storage device with the given disk image:
// open the image, build the SCSI device on top of it, register its
// state and mark the device connected. Returns 1 on success, 0 on error.
bx_bool usb_msd_device_t::init(const char *filename)
{
  s.hdimage = new default_image_t();
  if (s.hdimage->open(filename) < 0) {
    BX_ERROR(("could not open hard drive image file '%s'", filename));
    // Release the image object instead of leaking it on failure.
    delete s.hdimage;
    s.hdimage = NULL;
    return 0;
  }
  s.scsi_dev = new scsi_device_t(s.hdimage, 0, usb_msd_command_complete, (void*)this);
  s.scsi_dev->register_state(s.sr_list, "scsidev");
  s.mode = USB_MSDM_CBW;
  d.connected = 1;
  return 1;
}
// Loopback "send": instead of putting the frame on a wire, hand it to
// the internal packet maker, which generates replies back to the guest.
// Frames that do not fit the internal buffer are silently dropped.
void bx_arpback_pktmover_c::sendpkt(void *buf, unsigned io_len)
{
  if(io_len<BX_PACKET_BUFSIZE) {
    eth_packet barney;
    memcpy(barney.buf,buf,io_len);
    barney.len=io_len;
    // Only frames the packet maker knows how to answer are forwarded.
    if(packetmaker.ishandler(barney)) {
      packetmaker.sendpacket(barney);
    }
    /*
    if(( (!memcmp(buf, external_mac, 6)) || (!memcmp(buf, broadcast_macaddr, 6)) ) && (!memcmp(((Bit8u *)buf)+12, ethtype_arp, 2)) ) {
      Bit32u tempcrc;
      memcpy(arpbuf,buf,io_len); //move to temporary buffer
      memcpy(arpbuf, arpbuf+6, 6); //set destination to sender
      memcpy(arpbuf+6, external_mac, 6); //set sender to us
      memcpy(arpbuf+32, arpbuf+22, 10); //move destination to sender
      memcpy(arpbuf+22, external_mac, 6); //set sender to us
      memcpy(arpbuf+28, external_ip, 4); //set sender to us
      arpbuf[21]=2; //make this a reply and not a request
      tempcrc=mycrc.get_CRC(arpbuf,io_len);
      memcpy(arpbuf+io_len, &tempcrc, 4);
      buflen=io_len;//+4
      bufvalid=1;
    }
    */
  }
#if BX_ETH_NULL_LOGGING
  BX_DEBUG (("sendpkt length %u", io_len));
  // dump raw bytes to a file, eventually dump in pcap format so that
  // tcpdump -r FILE can interpret them for us.
  int n = fwrite (buf, io_len, 1, txlog);
  if (n != 1) BX_ERROR (("fwrite to txlog failed, length %u", io_len));
  // dump packet in hex into an ascii log file
  fprintf (txlog_txt, "NE2K transmitting a packet, length %u\n", io_len);
  Bit8u *charbuf = (Bit8u *)buf;
  for (n=0; n<(int)io_len; n++) {
    if (((n % 16) == 0) && n>0)
      fprintf (txlog_txt, "\n");
    fprintf (txlog_txt, "%02x ", charbuf[n]);
  }
  fprintf (txlog_txt, "\n--\n");
  // flush log so that we see the packets as they arrive w/o buffering
  fflush (txlog);
  fflush (txlog_txt);
#endif
}
// Relative near branch in 64-bit mode: compute the destination from the
// signed displacement, validate it for the effective operand size, and
// commit it to RIP.
BX_CPU_C::branch_near64(bxInstruction_c *i)
{
  Bit64u new_RIP = RIP + (Bit32s) i->Id();

  if (i->os32L()) {
    // Full-width target: must be a canonical address, else #GP(0).
    if (! IsCanonical(new_RIP)) {
      BX_ERROR(("branch_near64: canonical RIP violation"));
      exception(BX_GP_EXCEPTION, 0, 0);
    }
  }
  else {
    // For 16-bit opSize, upper 48 bits of RIP are cleared.
    new_RIP &= 0xffff;
  }

  RIP = new_RIP;
}
// Far branch commit for 32-bit mode: validate the target EIP against
// the destination code segment limit, then load CS and EIP.
void BX_CPU_C::branch_far32(bx_selector_t *selector, bx_descriptor_t *descriptor, Bit32u eip, Bit8u cpl)
{
  // The target instruction pointer must lie within the code segment
  // limit, otherwise fault with #GP(0).
  Bit32u limit = descriptor->u.segment.limit_scaled;
  if (eip > limit) {
    BX_ERROR(("branch_far: EIP > limit"));
    exception(BX_GP_EXCEPTION, 0, 0);
  }

  // Commit: load the CS cache from the new descriptor, then update EIP.
  load_cs(selector, descriptor, cpl);
  EIP = eip;
}
// Read one byte from seg:offset: try the TLB fast path (direct host
// memory read), otherwise fall back to the generic linear read with
// canonical and segment-limit checks.
BX_CPU_C::read_virtual_byte(unsigned s, bx_address offset)
{
  bx_address laddr;
  bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];
  Bit8u data;
  BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 1, BX_READ);

  if (seg->cache.valid & SegAccessROK4G) {
accessOK:
    laddr = BX_CPU_THIS_PTR get_laddr(s, offset);
#if BX_SupportGuest2HostTLB
    unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0);
    bx_address lpf = LPFOf(laddr);
    bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
    if (tlbEntry->lpf == lpf) {
      // See if the TLB entry privilege level allows us read access
      // from this CPL.
      if (tlbEntry->accessBits & (1<<CPL)) { // Read this pl OK.
        bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
        Bit32u pageOffset = PAGE_OFFSET(laddr);
        BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 1, BX_READ);
        Bit8u *hostAddr = (Bit8u*) (hostPageAddr | pageOffset);
        data = *hostAddr;
        BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 1, CPL, BX_READ, (Bit8u*) &data);
        return data;
      }
    }
#endif
#if BX_SUPPORT_X86_64
    if (! IsCanonical(laddr)) {
      BX_ERROR(("read_virtual_byte(): canonical failure"));
      exception(int_number(seg), 0, 0);
    }
#endif
    access_read_linear(laddr, 1, CPL, BX_READ, (void *) &data);
    return data;
  }

  if (seg->cache.valid & SegAccessROK) {
    // A 1-byte access only needs offset <= limit.
    if (Is64BitMode() || (offset <= seg->cache.u.segment.limit_scaled))
      goto accessOK;
  }
  read_virtual_checks(seg, offset, 1);
  goto accessOK;
}
// Transmit one guest frame through the TAP device (variant without the
// Solaris retry loop). BSD/Apple variants write the frame as-is; other
// platforms prepend a 2-byte zero pad before the frame.
void bx_tap_pktmover_c::sendpkt(void *buf, unsigned io_len)
{
  Bit8u txbuf[BX_PACKET_BUFSIZE];
  txbuf[0] = 0;
  txbuf[1] = 0;
  // NOTE(review): io_len (or io_len+2) is assumed to fit in txbuf --
  // presumably callers bound frames by BX_PACKET_BUFSIZE; verify.
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || \
    defined(__APPLE__) || defined(__OpenBSD__) // Should be fixed for other *BSD
  memcpy(txbuf, buf, io_len);
  unsigned int size = write(fd, txbuf, io_len);
  if (size != io_len) {
#else
  memcpy(txbuf+2, buf, io_len);
  unsigned int size = write(fd, txbuf, io_len+2);
  if (size != io_len+2) {
#endif
    BX_PANIC(("write on tap device: %s", strerror(errno)));
  } else {
    BX_DEBUG(("wrote %d bytes + 2 byte pad on tap", io_len));
  }
#if BX_ETH_TAP_LOGGING
  BX_DEBUG(("sendpkt length %u", io_len));
  // dump raw bytes to a file, eventually dump in pcap format so that
  // tcpdump -r FILE can interpret them for us.
  int n = fwrite(buf, io_len, 1, txlog);
  if (n != 1) BX_ERROR(("fwrite to txlog failed, io_len = %u", io_len));
  // dump packet in hex into an ascii log file
  fprintf(txlog_txt, "NE2K transmitting a packet, length %u\n", io_len);
  Bit8u *charbuf = (Bit8u *)buf;
  for (n=0; n<(int)io_len; n++) {
    if (((n % 16) == 0) && n>0)
      fprintf(txlog_txt, "\n");
    fprintf(txlog_txt, "%02x ", charbuf[n]);
  }
  fprintf(txlog_txt, "\n--\n");
  // flush log so that we see the packets as they arrive w/o buffering
  fflush(txlog);
  fflush(txlog_txt);
#endif
}

// Timer callback trampoline: recover the object pointer and poll for
// received packets.
void bx_tap_pktmover_c::rx_timer_handler(void *this_ptr)
{
  bx_tap_pktmover_c *class_ptr = (bx_tap_pktmover_c *) this_ptr;
  class_ptr->rx_timer();
}
/* pass zero in check_rpl if no needed selector RPL checking for
   non-conforming segments */
// Validate a code segment descriptor before a far control transfer:
// it must be a code segment, satisfy the conforming/non-conforming
// DPL/RPL rules against check_cpl/check_rpl, and be present. Failures
// raise #GP (or #NP) with the selector as the error code.
void BX_CPU_C::check_cs(bx_descriptor_t *descriptor, Bit16u cs_raw, Bit8u check_rpl, Bit8u check_cpl)
{
  // descriptor AR byte must indicate code segment else #GP(selector)
  if (descriptor->valid==0 || descriptor->segment==0 ||
      IS_DATA_SEGMENT(descriptor->type))
  {
    BX_ERROR(("check_cs: not a valid code segment !"));
    exception(BX_GP_EXCEPTION, cs_raw & 0xfffc, 0);
  }

#if BX_SUPPORT_X86_64
  if (descriptor->u.segment.l) {
    // Long-mode (L=1) code segments require EFER.LMA and forbid D=1.
    if (! BX_CPU_THIS_PTR efer.lma) {
      BX_PANIC(("check_cs: attempt to jump to long mode without enabling EFER.LMA !"));
    }
    if (descriptor->u.segment.d_b) {
      BX_ERROR(("check_cs: Both L and D bits enabled for segment descriptor !"));
      exception(BX_GP_EXCEPTION, cs_raw & 0xfffc, 0);
    }
  }
#endif

  // if non-conforming, code segment descriptor DPL must = CPL else #GP(selector)
  if (IS_CODE_SEGMENT_NON_CONFORMING(descriptor->type)) {
    if (descriptor->dpl != check_cpl) {
      BX_ERROR(("check_cs: non-conforming code seg descriptor dpl != cpl"));
      exception(BX_GP_EXCEPTION, cs_raw & 0xfffc, 0);
    }

    /* RPL of destination selector must be <= CPL else #GP(selector) */
    if (check_rpl > check_cpl) {
      BX_ERROR(("check_cs: non-conforming code seg selector rpl > cpl"));
      exception(BX_GP_EXCEPTION, cs_raw & 0xfffc, 0);
    }
  }
  // if conforming, then code segment descriptor DPL must <= CPL else #GP(selector)
  else {
    if (descriptor->dpl > check_cpl) {
      BX_ERROR(("check_cs: conforming code seg descriptor dpl > cpl"));
      exception(BX_GP_EXCEPTION, cs_raw & 0xfffc, 0);
    }
  }

  // code segment must be present else #NP(selector)
  if (! descriptor->p) {
    BX_ERROR(("check_cs: code segment not present !"));
    exception(BX_NP_EXCEPTION, cs_raw & 0xfffc, 0);
  }
}