Example #1
/**
 * Write sectors to an attached SCSI device.
 *
 * @returns status code.
 * @param   bios_dsk    Pointer to disk request packet (in the 
 *                      EBDA).
 */
int scsi_write_sectors(bio_dsk_t __far *bios_dsk)
{
    uint8_t             rc;
    cdb_rw10            cdb;
    uint16_t            count;
    uint16_t            io_base;
    uint8_t             target_id;
    uint8_t             device_id;

    device_id = bios_dsk->drqp.dev_id - BX_MAX_ATA_DEVICES;
    if (device_id >= BX_MAX_SCSI_DEVICES)
        BX_PANIC("scsi_write_sectors: device_id out of range %d\n", device_id);

    count    = bios_dsk->drqp.nsect;

    /* Prepare a CDB. */
    cdb.command = SCSI_WRITE_10;
    cdb.lba     = swap_32(bios_dsk->drqp.lba);
    cdb.pad1    = 0;
    cdb.nsect   = swap_16(count);
    cdb.pad2    = 0;

    io_base   = bios_dsk->scsidev[device_id].io_base;
    target_id = bios_dsk->scsidev[device_id].target_id;

    rc = scsi_cmd_data_out(io_base, target_id, (void __far *)&cdb, 10,
                           bios_dsk->drqp.buffer, (count * 512));

    if (!rc)
    {
        bios_dsk->drqp.trsfsectors = count;
        bios_dsk->drqp.trsfbytes   = (count * 512);
    }

    return rc;
}
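A hedged caller sketch follows; the request-packet fields mirror the drqp accesses in scsi_write_sectors() above, and the concrete values are placeholders, not taken from the BIOS source.

/* Illustrative only: issue an 8-sector write through the first SCSI disk slot. */
static int scsi_write_sectors_example(bio_dsk_t __far *bios_dsk, void __far *buf)
{
    bios_dsk->drqp.dev_id = BX_MAX_ATA_DEVICES;     /* first SCSI device (placeholder) */
    bios_dsk->drqp.lba    = 100;                    /* starting LBA (placeholder) */
    bios_dsk->drqp.nsect  = 8;                      /* 8 sectors * 512 bytes */
    bios_dsk->drqp.buffer = buf;                    /* caller-provided transfer buffer */
    return scsi_write_sectors(bios_dsk);            /* 0 on success, nonzero on error */
}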
Example #2
//
// chipmem_read/chipmem_write - access the 64K private RAM.
// The ne2000 memory is accessed through the data port of
// the asic (offset 0) after setting up a remote-DMA transfer.
// Both byte and word accesses are allowed.
// The first 16 bytes contain the MAC address at even locations,
// and there is 16K of buffer memory starting at 16K
//
Bit32u bx_ne2k_c::chipmem_read(Bit32u address, unsigned int io_len)
{
  Bit32u retval = 0;

  if ((io_len == 2) && (address & 0x1)) 
    BX_PANIC(("unaligned chipmem word read"));

  // ROM'd MAC address
  if ((address >=0) && (address <= 31)) {
    retval = BX_NE2K_THIS s.macaddr[address];
    if ((io_len == 2) || (io_len == 4)) {
      retval |= (BX_NE2K_THIS s.macaddr[address + 1] << 8);
      if (io_len == 4) {
        retval |= (BX_NE2K_THIS s.macaddr[address + 2] << 16);
        retval |= (BX_NE2K_THIS s.macaddr[address + 3] << 24);
      }
    }
    return (retval);
  }

  if ((address >= BX_NE2K_MEMSTART) && (address < BX_NE2K_MEMEND)) {
    retval = BX_NE2K_THIS s.mem[address - BX_NE2K_MEMSTART];
    if ((io_len == 2) || (io_len == 4)) {
      retval |= (BX_NE2K_THIS s.mem[address - BX_NE2K_MEMSTART + 1] << 8);
    }
    if (io_len == 4) {
      retval |= (BX_NE2K_THIS s.mem[address - BX_NE2K_MEMSTART + 2] << 16);
      retval |= (BX_NE2K_THIS s.mem[address - BX_NE2K_MEMSTART + 3] << 24);
    }
    return (retval);
  }

  BX_DEBUG(("out-of-bounds chipmem read, %04X", address));

  return (0xff);
}
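A hedged usage sketch follows; it assumes chipmem_read() is reachable from the caller and that ne2k names a bx_ne2k_c instance, neither of which is shown in the source above.

// Illustrative only: word-sized reads through chipmem_read().
static void ne2k_chipmem_read_example(bx_ne2k_c &ne2k)
{
  Bit32u mac_word = ne2k.chipmem_read(0x0000, 2);            // macaddr[0] | (macaddr[1] << 8)
  Bit32u buf_word = ne2k.chipmem_read(BX_NE2K_MEMSTART, 2);  // first word of the 16K packet buffer
  (void)mac_word;
  (void)buf_word;
}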
Example #3
void BIOSCALL nmi_handler_msg(void)
{
    BX_PANIC("NMI Handler called\n");
}
Example #4
static s32 get_next_keymap_line(FILE*  fp, char*  bxsym, char*  modsym,
                                s32*  ascii, char*  hostsym)
{
  char  line[256];
  char  buf[256];
  line[0] = 0;
  while(1)
  {
    lineCount++;
    if(!fgets(line, sizeof(line) - 1, fp))
      return -1;            // EOF
    init_parse_line(line);
    if(get_next_word(bxsym) >= 0)
    {
      modsym[0] = 0;

      char*   p;
      if((p = strchr(bxsym, '+')) != NULL)
      {
        *p = 0;             // truncate bxsym.
        p++;                // move one char beyond the +
        strcpy(modsym, p);  // copy the rest to modsym
      }

      if(get_next_word(buf) < 0)
      {
        BX_PANIC(("keymap line %d: expected 3 columns", lineCount));
        return -1;
      }

      if(buf[0] == '\'' && buf[2] == '\'' && buf[3] == 0)
      {
        *ascii = (u8) buf[1];
      }
      else if(!strcmp(buf, "space"))
      {
        *ascii = ' ';
      }
      else if(!strcmp(buf, "return"))
      {
        *ascii = '\n';
      }
      else if(!strcmp(buf, "tab"))
      {
        *ascii = '\t';
      }
      else if(!strcmp(buf, "backslash"))
      {
        *ascii = '\\';
      }
      else if(!strcmp(buf, "apostrophe"))
      {
        *ascii = '\'';
      }
      else if(!strcmp(buf, "none"))
      {
        *ascii = -1;
      }
      else
      {
        BX_PANIC((
                   "keymap line %d: ascii equivalent is \"%s\" but it must be char constant like 'x', or one of space,tab,return,none",
               lineCount, buf));
      }

      if(get_next_word(hostsym) < 0)
      {
        BX_PANIC(("keymap line %d: expected 3 columns", lineCount));
        return -1;
      }

      return 0;
    }

    // no words on this line, keep reading.
  }
}
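For reference, a hedged illustration of the three-column line format this parser expects; only the column structure and the named ascii equivalents (space, return, tab, backslash, apostrophe, none) come from the code above, and the concrete key symbols are placeholders.

// Illustrative keymap lines (key symbols are placeholders):
//   BX_KEY_A            'a'      HOST_KEY_A       <- bxsym, ascii char constant, hostsym
//   BX_KEY_A+SHIFT      'A'      HOST_KEY_A       <- '+' splits bxsym from modsym
//   BX_KEY_SPACE        space    HOST_KEY_SPACE   <- named ascii equivalent instead of 'x'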
Example #5
void BIOSCALL int13_cdrom(uint16_t EHBX, disk_regs_t r)
{
    uint16_t            ebda_seg = read_word(0x0040,0x000E);
    uint8_t             device, status, locks;
    cdb_atapi           atapicmd;
    uint32_t            lba;
    uint16_t            count, segment, offset, size;
    bio_dsk_t __far     *bios_dsk;
    int13ext_t __far    *i13x;
    dpt_t __far         *dpt;

    bios_dsk = ebda_seg :> &EbdaData->bdisk;

    BX_DEBUG_INT13_CD("%s: AX=%04x BX=%04x CX=%04x DX=%04x ES=%04x\n", __func__, AX, BX, CX, DX, ES);

    SET_DISK_RET_STATUS(0x00);

    /* basic check : device should be 0xE0+ */
    if( (GET_ELDL() < 0xE0) || (GET_ELDL() >= 0xE0 + BX_MAX_STORAGE_DEVICES) ) {
        BX_DEBUG("%s: function %02x, ELDL out of range %02x\n", __func__, GET_AH(), GET_ELDL());
        goto int13_fail;
    }

    // Get the ata channel
    device = bios_dsk->cdidmap[GET_ELDL()-0xE0];

    /* basic check : device has to be valid  */
    if (device >= BX_MAX_STORAGE_DEVICES) {
        BX_DEBUG("%s: function %02x, unmapped device for ELDL=%02x\n", __func__, GET_AH(), GET_ELDL());
        goto int13_fail;
    }

    switch (GET_AH()) {

    // all those functions return SUCCESS
    case 0x00: /* disk controller reset */
    case 0x09: /* initialize drive parameters */
    case 0x0c: /* seek to specified cylinder */
    case 0x0d: /* alternate disk reset */
    case 0x10: /* check drive ready */
    case 0x11: /* recalibrate */
    case 0x14: /* controller internal diagnostic */
    case 0x16: /* detect disk change */
        goto int13_success;
        break;

    // all those functions return disk write-protected
    case 0x03: /* write disk sectors */
    case 0x05: /* format disk track */
    case 0x43: // IBM/MS extended write
        SET_AH(0x03);
        goto int13_fail_noah;
        break;

    case 0x01: /* read disk status */
        status = read_byte(0x0040, 0x0074);
        SET_AH(status);
        SET_DISK_RET_STATUS(0);

        /* set CF if error status read */
        if (status)
            goto int13_fail_nostatus;
        else
            goto int13_success_noah;
        break;

    case 0x15: /* read disk drive size */
        SET_AH(0x02);
        goto int13_fail_noah;
        break;

    case 0x41: // IBM/MS installation check
        BX = 0xaa55;    // install check
        SET_AH(0x30);   // EDD 2.1
        CX = 0x0007;    // ext disk access, removable and edd
        goto int13_success_noah;
        break;

    case 0x42: // IBM/MS extended read
    case 0x44: // IBM/MS verify sectors
    case 0x47: // IBM/MS extended seek

        /* Load the I13X struct pointer. */
        i13x = MK_FP(DS, SI);

        count   = i13x->count;
        segment = i13x->segment;
        offset  = i13x->offset;

        // Can't use 64 bits lba
        lba = i13x->lba2;
        if (lba != 0L) {
            BX_PANIC("%s: function %02x. Can't use 64bits lba\n", __func__, GET_AH());
            goto int13_fail;
        }

        // Get 32 bits lba
        lba = i13x->lba1;

        // If verify or seek
        if (( GET_AH() == 0x44 ) || ( GET_AH() == 0x47 ))
            goto int13_success;

        BX_DEBUG_INT13_CD("%s: read %u sectors @ LBA %lu to %04X:%04X\n",
                          __func__, count, lba, segment, offset);

        _fmemset(&atapicmd, 0, sizeof(atapicmd));
        atapicmd.command = 0x28;    // READ 10 command
        atapicmd.lba     = swap_32(lba);
        atapicmd.nsect   = swap_16(count);

        bios_dsk->drqp.nsect   = count;
        bios_dsk->drqp.sect_sz = 2048;

        status = pktacc[bios_dsk->devices[device].type](device, 12, (char __far *)&atapicmd, 0, count*2048L, ATA_DATA_IN, MK_FP(segment,offset));

        count = (uint16_t)(bios_dsk->drqp.trsfbytes >> 11);
        i13x->count = count;

        if (status != 0) {
            BX_INFO("%s: function %02x, status %02x !\n", __func__, GET_AH(), status);
            SET_AH(0x0c);
            goto int13_fail_noah;
        }

        goto int13_success;
        break;

    case 0x45: // IBM/MS lock/unlock drive
        if (GET_AL() > 2)
            goto int13_fail;

        locks = bios_dsk->devices[device].lock;

        switch (GET_AL()) {
        case 0 :  // lock
            if (locks == 0xff) {
                SET_AH(0xb4);
                SET_AL(1);
                goto int13_fail_noah;
            }
            bios_dsk->devices[device].lock = ++locks;
            SET_AL(1);
            break;
        case 1 :  // unlock
            if (locks == 0x00) {
                SET_AH(0xb0);
                SET_AL(0);
                goto int13_fail_noah;
            }
            bios_dsk->devices[device].lock = --locks;
            SET_AL(locks==0?0:1);
            break;
        case 2 :  // status
            SET_AL(locks==0?0:1);
            break;
        }
        goto int13_success;
        break;

    case 0x46: // IBM/MS eject media
        locks = bios_dsk->devices[device].lock;

        if (locks != 0) {
            SET_AH(0xb1); // media locked
            goto int13_fail_noah;
        }
        // FIXME should handle 0x31 no media in device
        // FIXME should handle 0xb5 valid request failed

#if 0 //@todo: implement!
        // Call removable media eject
        ASM_START
        push bp
        mov  bp, sp

        mov ah, #0x52
        int #0x15
        mov _int13_cdrom.status + 2[bp], ah
        jnc int13_cdrom_rme_end
        mov _int13_cdrom.status, #1
int13_cdrom_rme_end:
        pop bp
        ASM_END
#endif

        if (status != 0) {
            SET_AH(0xb1); // media locked
            goto int13_fail_noah;
        }

        goto int13_success;
        break;

    //@todo: Part of this should be merged with analogous code in disk.c
    case 0x48: // IBM/MS get drive parameters
        dpt = DS :> (dpt_t *)SI;
        size = dpt->size;

        // Buffer is too small
        if (size < 0x1a)
            goto int13_fail;

        // EDD 1.x
        if (size >= 0x1a) {
            uint16_t   blksize;

            blksize = bios_dsk->devices[device].blksize;

            dpt->size      = 0x1a;
            dpt->infos     = 0x74;  /* Removable, media change, lockable, max values */
            dpt->cylinders = 0xffffffff;
            dpt->heads     = 0xffffffff;
            dpt->spt       = 0xffffffff;
            dpt->blksize   = blksize;
            dpt->sector_count1 = 0xffffffff;  // FIXME should be Bit64
            dpt->sector_count2 = 0xffffffff;
        }

        // EDD 2.x
        if(size >= 0x1e) {
            uint8_t     channel, irq, mode, checksum, i;
            uint16_t    iobase1, iobase2, options;

            dpt->size = 0x1e;
            dpt->dpte_segment = ebda_seg;
            dpt->dpte_offset  = (uint16_t)&EbdaData->bdisk.dpte;

            // Fill in dpte
            channel = device / 2;
            iobase1 = bios_dsk->channels[channel].iobase1;
            iobase2 = bios_dsk->channels[channel].iobase2;
            irq     = bios_dsk->channels[channel].irq;
            mode    = bios_dsk->devices[device].mode;

            // FIXME atapi device
            options  = (1<<4); // lba translation
            options |= (1<<5); // removable device
            options |= (1<<6); // atapi device
#if VBOX_BIOS_CPU >= 80386
            options |= (mode == ATA_MODE_PIO32 ? 1 : 0) << 7;
#endif

            bios_dsk->dpte.iobase1  = iobase1;
            bios_dsk->dpte.iobase2  = iobase2;
            bios_dsk->dpte.prefix   = (0xe | (device % 2))<<4;
            bios_dsk->dpte.unused   = 0xcb;
            bios_dsk->dpte.irq      = irq;
            bios_dsk->dpte.blkcount = 1 ;
            bios_dsk->dpte.dma      = 0;
            bios_dsk->dpte.pio      = 0;
            bios_dsk->dpte.options  = options;
            bios_dsk->dpte.reserved = 0;
            bios_dsk->dpte.revision = 0x11;

            checksum = 0;
            for (i = 0; i < 15; ++i)
                checksum += read_byte(ebda_seg, (uint16_t)&EbdaData->bdisk.dpte + i);
            checksum = -checksum;
            bios_dsk->dpte.checksum = checksum;
        }

        // EDD 3.x
        if(size >= 0x42) {
            uint8_t     channel, iface, checksum, i;
            uint16_t    iobase1;

            channel = device / 2;
            iface   = bios_dsk->channels[channel].iface;
            iobase1 = bios_dsk->channels[channel].iobase1;

            dpt->size       = 0x42;
            dpt->key        = 0xbedd;
            dpt->dpi_length = 0x24;
            dpt->reserved1  = 0;
            dpt->reserved2  = 0;

            if (iface == ATA_IFACE_ISA) {
                dpt->host_bus[0] = 'I';
                dpt->host_bus[1] = 'S';
                dpt->host_bus[2] = 'A';
                dpt->host_bus[3] = ' ';
            }
            else {
                // FIXME PCI
            }
            dpt->iface_type[0] = 'A';
            dpt->iface_type[1] = 'T';
            dpt->iface_type[2] = 'A';
            dpt->iface_type[3] = ' ';
            dpt->iface_type[4] = ' ';
            dpt->iface_type[5] = ' ';
            dpt->iface_type[6] = ' ';
            dpt->iface_type[7] = ' ';

            if (iface == ATA_IFACE_ISA) {
                ((uint16_t __far *)dpt->iface_path)[0] = iobase1;
                ((uint16_t __far *)dpt->iface_path)[1] = 0;
                ((uint32_t __far *)dpt->iface_path)[1] = 0;
            }
            else {
                // FIXME PCI
            }
            ((uint16_t __far *)dpt->device_path)[0] = device & 1;
            ((uint16_t __far *)dpt->device_path)[1] = 0;
            ((uint32_t __far *)dpt->device_path)[1] = 0;

            checksum = 0;
            for (i = 30; i < 64; ++i)
                checksum += ((uint8_t __far *)dpt)[i];
            checksum = -checksum;
            dpt->checksum = checksum;
        }

        goto int13_success;
        break;

    case 0x49: // IBM/MS extended media change
        // always send changed ??
        SET_AH(06);
        goto int13_fail_nostatus;
        break;

    case 0x4e: // IBM/MS set hardware configuration
        // DMA, prefetch, PIO maximum not supported
        switch (GET_AL()) {
        case 0x01:
        case 0x03:
        case 0x04:
        case 0x06:
            goto int13_success;
            break;
        default :
            goto int13_fail;
        }
        break;

    // all those functions return unimplemented
    case 0x02: /* read sectors */
    case 0x04: /* verify sectors */
    case 0x08: /* read disk drive parameters */
    case 0x0a: /* read disk sectors with ECC */
    case 0x0b: /* write disk sectors with ECC */
    case 0x18: /* set media type for format */
    case 0x50: // ? - send packet command
    default:
        BX_INFO("%s: unsupported AH=%02x\n", __func__, GET_AH());
        goto int13_fail;
        break;
    }

int13_fail:
    SET_AH(0x01); // defaults to invalid function in AH or invalid parameter
int13_fail_noah:
    SET_DISK_RET_STATUS(GET_AH());
int13_fail_nostatus:
    SET_CF();     // error occurred
    return;

int13_success:
    SET_AH(0x00); // no error
int13_success_noah:
    SET_DISK_RET_STATUS(0x00);
    CLEAR_CF();   // no error
    return;
}
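A hedged sketch of the disk address packet that the extended functions above dereference at DS:SI; the field names mirror the int13ext_t accesses, and the offsets follow the usual EDD packet layout (an assumption, not taken from this source).

/* Illustrative layout only; not the BIOS's own int13ext_t definition. */
typedef struct {
    uint8_t     size;       /* 0x00: packet size in bytes */
    uint8_t     reserved;   /* 0x01 */
    uint16_t    count;      /* 0x02: sectors to transfer; updated with sectors actually done */
    uint16_t    offset;     /* 0x04: transfer buffer offset */
    uint16_t    segment;    /* 0x06: transfer buffer segment */
    uint32_t    lba1;       /* 0x08: starting LBA, low 32 bits */
    uint32_t    lba2;       /* 0x0c: starting LBA, high 32 bits (must be zero here) */
} int13ext_sketch_t;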
Example #6
void BIOSCALL int13_harddisk_ext(disk_regs_t r)
{
    uint32_t            lba;
    uint16_t            ebda_seg = read_word(0x0040,0x000E);
    uint16_t            segment, offset;
    uint16_t            npc, nph, npspt;
    uint16_t            size, count;
    uint8_t             device, status;
    uint8_t             type;
    bio_dsk_t __far     *bios_dsk;
    int13ext_t __far    *i13_ext;
    dpt_t __far         *dpt;

    bios_dsk = read_word(0x0040,0x000E) :> &EbdaData->bdisk;

    BX_DEBUG_INT13_HD("%s: AX=%04x BX=%04x CX=%04x DX=%04x ES=%04x\n", __func__, AX, BX, CX, DX, ES);
    
    write_byte(0x0040, 0x008e, 0);  // clear completion flag
    
    // basic check : device has to be defined
    if ( (GET_ELDL() < 0x80) || (GET_ELDL() >= 0x80 + BX_MAX_STORAGE_DEVICES) ) {
        BX_DEBUG("%s: function %02x, ELDL out of range %02x\n", __func__, GET_AH(), GET_ELDL());
        goto int13x_fail;
    }
    
    // Get the ata channel
    device = bios_dsk->hdidmap[GET_ELDL()-0x80];
    
    // basic check : device has to be valid
    if (device >= BX_MAX_STORAGE_DEVICES) {
        BX_DEBUG("%s: function %02x, unmapped device for ELDL=%02x\n", __func__, GET_AH(), GET_ELDL());
        goto int13x_fail;
    }

    switch (GET_AH()) {
    case 0x41: // IBM/MS installation check
        BX=0xaa55;     // install check
        SET_AH(0x30);  // EDD 3.0
        CX=0x0007;     // ext disk access and edd, removable supported
        goto int13x_success_noah;
        break;

    case 0x42: // IBM/MS extended read
    case 0x43: // IBM/MS extended write
    case 0x44: // IBM/MS verify
    case 0x47: // IBM/MS extended seek

        /* Get a pointer to the extended structure. */
        i13_ext = DS :> (int13ext_t *)SI;

        count   = i13_ext->count;
        segment = i13_ext->segment;
        offset  = i13_ext->offset;

        BX_DEBUG_INT13_HD("%s: %d sectors from lba %lu @ %04x:%04x\n", __func__,
                          count, i13_ext->lba1, segment, offset);

        // Can't use 64 bits lba
        lba = i13_ext->lba2;
        if (lba != 0L) {
            BX_PANIC("%s: function %02x. Can't use 64bits lba\n", __func__, GET_AH());
            goto int13x_fail;
        }
        
        // Get 32 bits lba and check
        lba = i13_ext->lba1;

        type = bios_dsk->devices[device].type;
        if (lba >= bios_dsk->devices[device].sectors) {
              BX_INFO("%s: function %02x. LBA out of range\n", __func__, GET_AH());
              goto int13x_fail;
        }

        /* Don't bother with seek or verify. */
        if (( GET_AH() == 0x44 ) || ( GET_AH() == 0x47 ))
            goto int13x_success;

        /* Clear the count of transferred sectors/bytes. */
        bios_dsk->drqp.trsfsectors = 0;
        bios_dsk->drqp.trsfbytes   = 0;

        /* Pass request information to low level disk code. */
        bios_dsk->drqp.lba     = lba;
        bios_dsk->drqp.buffer  = MK_FP(segment, offset);
        bios_dsk->drqp.nsect   = count;
        bios_dsk->drqp.sect_sz = 512;   //@todo: device specific?
        bios_dsk->drqp.sector  = 0;     /* Indicate LBA. */
        bios_dsk->drqp.dev_id  = device;
        
        /* Execute the read or write command. */
        status = dskacc[type].a[GET_AH() - 0x42](bios_dsk);
        count  = bios_dsk->drqp.trsfsectors;
        i13_ext->count = count;
        
        if (status != 0) {
            BX_INFO("%s: function %02x, error %02x !\n", __func__, GET_AH(), status);
            SET_AH(0x0c);
            goto int13x_fail_noah;
        }
        
        goto int13x_success;
        break;

    case 0x45: // IBM/MS lock/unlock drive
    case 0x49: // IBM/MS extended media change
        goto int13x_success;   // Always success for HD
        break;

    case 0x46: // IBM/MS eject media
        SET_AH(0xb2);          // Volume Not Removable
        goto int13x_fail_noah; // Always fail for HD
        break;

    case 0x48: // IBM/MS get drive parameters
        dpt = DS :> (dpt_t *)SI;
        size = dpt->size;

        /* Check if buffer is large enough. */
        if (size < 0x1a)
            goto int13x_fail;
        
        /* Fill in EDD 1.x table. */
        if (size >= 0x1a) {
            uint16_t   blksize;

            npc     = bios_dsk->devices[device].pchs.cylinders;
            nph     = bios_dsk->devices[device].pchs.heads;
            npspt   = bios_dsk->devices[device].pchs.spt;
            lba     = bios_dsk->devices[device].sectors;
            blksize = bios_dsk->devices[device].blksize;

            dpt->size      = 0x1a;
            dpt->infos     = 0x02;  // geometry is valid
            dpt->cylinders = npc;
            dpt->heads     = nph;
            dpt->spt       = npspt;
            dpt->blksize   = blksize;
            dpt->sector_count1 = lba;   // FIXME should be Bit64
            dpt->sector_count2 = 0;
        }

        /* Fill in EDD 2.x table. */
        if (size >= 0x1e) {
            uint8_t     channel, irq, mode, checksum, i, translation;
            uint16_t    iobase1, iobase2, options;
            
            dpt->size = 0x1e;
            dpt->dpte_segment = ebda_seg;
            dpt->dpte_offset  = (uint16_t)&EbdaData->bdisk.dpte;
            
            // Fill in dpte
            channel = device / 2;
            iobase1 = bios_dsk->channels[channel].iobase1;
            iobase2 = bios_dsk->channels[channel].iobase2;
            irq     = bios_dsk->channels[channel].irq;
            mode    = bios_dsk->devices[device].mode;
            translation = bios_dsk->devices[device].translation;
            
            options  = (translation == GEO_TRANSLATION_NONE ? 0 : 1 << 3);  // chs translation
            options |= (1 << 4);    // lba translation
            options |= (mode == ATA_MODE_PIO32 ? 1 : 0) << 7;
            options |= (translation == GEO_TRANSLATION_LBA ? 1 : 0) << 9;
            options |= (translation == GEO_TRANSLATION_RECHS ? 3 : 0) << 9;
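            /* Illustrative example (not from the original source): with LBA
             * translation and PIO32 mode the resulting word is
             * (1 << 3) | (1 << 4) | (1 << 7) | (1 << 9) = 0x0298. */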
            
            bios_dsk->dpte.iobase1  = iobase1;
            bios_dsk->dpte.iobase2  = iobase2;
            bios_dsk->dpte.prefix   = (0xe | (device % 2)) << 4;
            bios_dsk->dpte.unused   = 0xcb;
            bios_dsk->dpte.irq      = irq;
            bios_dsk->dpte.blkcount = 1;
            bios_dsk->dpte.dma      = 0;
            bios_dsk->dpte.pio      = 0;
            bios_dsk->dpte.options  = options;
            bios_dsk->dpte.reserved = 0;
            bios_dsk->dpte.revision = 0x11;
            
            checksum = 0;
            for (i = 0; i < 15; ++i)
                checksum += read_byte(ebda_seg, (uint16_t)&EbdaData->bdisk.dpte + i);
            checksum = -checksum;
            bios_dsk->dpte.checksum = checksum;
        }

        /* Fill in EDD 3.x table. */
        if(size >= 0x42) {
            uint8_t     channel, iface, checksum, i;
            uint16_t    iobase1;

            channel = device / 2;
            iface   = bios_dsk->channels[channel].iface;
            iobase1 = bios_dsk->channels[channel].iobase1;
            
            dpt->size       = 0x42;
            dpt->key        = 0xbedd;
            dpt->dpi_length = 0x24;
            dpt->reserved1  = 0;
            dpt->reserved2  = 0;
            
            if (iface == ATA_IFACE_ISA) {
                dpt->host_bus[0] = 'I';
                dpt->host_bus[1] = 'S';
                dpt->host_bus[2] = 'A';
                dpt->host_bus[3] = ' ';
            }
            else {
                // FIXME PCI
            }
            dpt->iface_type[0] = 'A';
            dpt->iface_type[1] = 'T';
            dpt->iface_type[2] = 'A';
            dpt->iface_type[3] = ' ';
            dpt->iface_type[4] = ' ';
            dpt->iface_type[5] = ' ';
            dpt->iface_type[6] = ' ';
            dpt->iface_type[7] = ' ';
            
            if (iface == ATA_IFACE_ISA) {
                ((uint16_t __far *)dpt->iface_path)[0] = iobase1;
                ((uint16_t __far *)dpt->iface_path)[1] = 0;
                ((uint32_t __far *)dpt->iface_path)[1] = 0;
            }
            else {
                // FIXME PCI
            }
            ((uint16_t __far *)dpt->device_path)[0] = device & 1; // device % 2; @todo: correct?
            ((uint16_t __far *)dpt->device_path)[1] = 0;
            ((uint32_t __far *)dpt->device_path)[1] = 0;
            
            checksum = 0;
            for (i = 30; i < 64; i++)
                checksum += read_byte(DS, SI + i);
            checksum = -checksum;
            dpt->checksum = checksum;
        }

        goto int13x_success;
        break;

    case 0x4e: // IBM/MS set hardware configuration
        // DMA, prefetch, PIO maximum not supported
        switch (GET_AL()) {
        case 0x01:
        case 0x03:
        case 0x04:
        case 0x06:
            goto int13x_success;
            break;
        default :
            goto int13x_fail;
        }
        break;

    case 0x50: // IBM/MS send packet command
    default:
        BX_INFO("%s: function %02xh unsupported, returns fail\n", __func__, GET_AH());
        goto int13x_fail;
        break;
    }

int13x_fail:
    SET_AH(0x01); // defaults to invalid function in AH or invalid parameter
int13x_fail_noah:
    SET_DISK_RET_STATUS(GET_AH());
    SET_CF();     // error occurred
    return;

int13x_success:
    SET_AH(0x00); // no error
int13x_success_noah:
    SET_DISK_RET_STATUS(0x00);
    CLEAR_CF();   // no error
    return;
}
Example #7
void BX_CPU_C::interrupt(Bit8u vector, unsigned type, bx_bool push_error, Bit16u error_code)
{
#if BX_DEBUGGER
  BX_CPU_THIS_PTR show_flag |= Flag_intsig;
#if BX_DEBUG_LINUX
  if (bx_dbg.linux_syscall) {
    if (vector == 0x80) bx_dbg_linux_syscall(BX_CPU_ID);
  }
#endif
  bx_dbg_interrupt(BX_CPU_ID, vector, error_code);
#endif

  BX_INSTR_INTERRUPT(BX_CPU_ID, vector);

  invalidate_prefetch_q();

  bx_bool soft_int = 0;
  switch(type) {
    case BX_SOFTWARE_INTERRUPT:
    case BX_SOFTWARE_EXCEPTION:
      soft_int = 1;
      break;
    case BX_PRIVILEGED_SOFTWARE_INTERRUPT:
    case BX_EXTERNAL_INTERRUPT:
    case BX_NMI:
    case BX_HARDWARE_EXCEPTION:
      break;

    default:
      BX_PANIC(("interrupt(): unknown exception type %d", type));
  }

  BX_DEBUG(("interrupt(): vector = %02x, TYPE = %u, EXT = %u",
      vector, type, (unsigned) BX_CPU_THIS_PTR EXT));

  // Discard any traps and inhibits for new context; traps will
  // resume upon return.
  BX_CPU_THIS_PTR debug_trap = 0;
  BX_CPU_THIS_PTR inhibit_mask = 0;

#if BX_SUPPORT_VMX
  BX_CPU_THIS_PTR in_event = 1;
#endif

#if BX_SUPPORT_X86_64
  if (long_mode()) {
    long_mode_int(vector, soft_int, push_error, error_code);
  }
  else
#endif
  {
    RSP_SPECULATIVE;

    if(real_mode()) {
       real_mode_int(vector, push_error, error_code);
    }
    else {
       protected_mode_int(vector, soft_int, push_error, error_code);
    }

    RSP_COMMIT;
  }

#if BX_X86_DEBUGGER
  BX_CPU_THIS_PTR in_repeat = 0;
#endif

#if BX_SUPPORT_VMX
  BX_CPU_THIS_PTR in_event = 0;
#endif
}
Example #8
/**
 * Enumerate attached devices.
 *
 * @returns nothing.
 * @param   io_base    The I/O base port of the controller.
 */
void scsi_enumerate_attached_devices(uint16_t io_base)
{
    int                 i;
    uint8_t             buffer[0x0200];
    bio_dsk_t __far     *bios_dsk;

    bios_dsk = read_word(0x0040, 0x000E) :> &EbdaData->bdisk;

    /* Go through target devices. */
    for (i = 0; i < VBSCSI_MAX_DEVICES; i++)
    {
        uint8_t     rc;
        uint8_t     aCDB[10];

        aCDB[0] = SCSI_INQUIRY;
        aCDB[1] = 0;
        aCDB[2] = 0;
        aCDB[3] = 0;
        aCDB[4] = 5; /* Allocation length. */
        aCDB[5] = 0;

        rc = scsi_cmd_data_in(io_base, i, aCDB, 6, buffer, 5);
        if (rc != 0)
            BX_PANIC("scsi_enumerate_attached_devices: SCSI_INQUIRY failed\n");

        /* Check if there is a disk attached. */
        if (   ((buffer[0] & 0xe0) == 0)
                && ((buffer[0] & 0x1f) == 0x00))
        {
            VBSCSI_DEBUG("scsi_enumerate_attached_devices: Disk detected at %d\n", i);

            /* We add the disk only if the maximum is not reached yet. */
            if (bios_dsk->scsi_hdcount < BX_MAX_SCSI_DEVICES)
            {
                uint32_t    sectors, sector_size, cylinders;
                uint16_t    heads, sectors_per_track;
                uint8_t     hdcount, hdcount_scsi, hd_index;

                /* Issue a read capacity command now. */
                _fmemset(aCDB, 0, sizeof(aCDB));
                aCDB[0] = SCSI_READ_CAPACITY;

                rc = scsi_cmd_data_in(io_base, i, aCDB, 10, buffer, 8);
                if (rc != 0)
                    BX_PANIC("scsi_enumerate_attached_devices: SCSI_READ_CAPACITY failed\n");

                /* Build sector number and size from the buffer. */
                //@todo: byte swapping for dword sized items should be farmed out...
                sectors =   ((uint32_t)buffer[0] << 24)
                            | ((uint32_t)buffer[1] << 16)
                            | ((uint32_t)buffer[2] << 8)
                            | ((uint32_t)buffer[3]);

                sector_size =   ((uint32_t)buffer[4] << 24)
                                | ((uint32_t)buffer[5] << 16)
                                | ((uint32_t)buffer[6] << 8)
                                | ((uint32_t)buffer[7]);

                /* We only support the disk if sector size is 512 bytes. */
                if (sector_size != 512)
                {
                    /* Leave a log entry. */
                    BX_INFO("Disk %d has an unsupported sector size of %u\n", i, sector_size);
                    continue;
                }

                /* We need to calculate the geometry for the disk. From
                 * the BusLogic driver in the Linux kernel.
                 */
                if (sectors >= (uint32_t)4 * 1024 * 1024)
                {
                    heads = 255;
                    sectors_per_track = 63;
                }
                else if (sectors >= (uint32_t)2 * 1024 * 1024)
                {
                    heads = 128;
                    sectors_per_track = 32;
                }
                else
                {
                    heads = 64;
                    sectors_per_track = 32;
                }
                cylinders = (uint32_t)(sectors / (heads * sectors_per_track));
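                /* Illustrative example (not from the original source): a disk
                 * reporting 16,777,216 sectors (8 GiB) takes the first branch,
                 * so heads = 255, spt = 63 and
                 * cylinders = 16777216 / (255 * 63) = 1044 (truncated). */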
                hdcount_scsi = bios_dsk->scsi_hdcount;

                /* Calculate index into the generic disk table. */
                hd_index = hdcount_scsi + BX_MAX_ATA_DEVICES;

                bios_dsk->scsidev[hdcount_scsi].io_base   = io_base;
                bios_dsk->scsidev[hdcount_scsi].target_id = i;
                bios_dsk->devices[hd_index].type        = DSK_TYPE_SCSI;
                bios_dsk->devices[hd_index].device      = DSK_DEVICE_HD;
                bios_dsk->devices[hd_index].removable   = 0;
                bios_dsk->devices[hd_index].lock        = 0;
                bios_dsk->devices[hd_index].blksize     = sector_size;
                bios_dsk->devices[hd_index].translation = GEO_TRANSLATION_LBA;

                /* Write LCHS values. */
                bios_dsk->devices[hd_index].lchs.heads = heads;
                bios_dsk->devices[hd_index].lchs.spt   = sectors_per_track;
                if (cylinders > 1024)
                    bios_dsk->devices[hd_index].lchs.cylinders = 1024;
                else
                    bios_dsk->devices[hd_index].lchs.cylinders = (uint16_t)cylinders;

                /* Write PCHS values. */
                bios_dsk->devices[hd_index].pchs.heads = heads;
                bios_dsk->devices[hd_index].pchs.spt   = sectors_per_track;
                if (cylinders > 1024)
                    bios_dsk->devices[hd_index].pchs.cylinders = 1024;
                else
                    bios_dsk->devices[hd_index].pchs.cylinders = (uint16_t)cylinders;

                bios_dsk->devices[hd_index].sectors = sectors;

                /* Store the id of the disk in the ata hdidmap. */
                hdcount = bios_dsk->hdcount;
                bios_dsk->hdidmap[hdcount] = hdcount_scsi + BX_MAX_ATA_DEVICES;
                hdcount++;
                bios_dsk->hdcount = hdcount;

                /* Update hdcount in the BDA. */
                hdcount = read_byte(0x40, 0x75);
                hdcount++;
                write_byte(0x40, 0x75, hdcount);

                hdcount_scsi++;
                bios_dsk->scsi_hdcount = hdcount_scsi;
            }
            else
            {
                /* We reached the maximum of SCSI disks we can boot from. We can quit detecting. */
                break;
            }
        }
        else
            VBSCSI_DEBUG("scsi_enumerate_attached_devices: No disk detected at %d\n", i);
    }
}
Example #9
// the constructor
bx_tap_pktmover_c::bx_tap_pktmover_c(const char *netif,
				       const char *macaddr,
				       eth_rx_handler_t rxh,
				       void *rxarg,
				       char *script)
{
  int flags;
  char filename[BX_PATHNAME_LEN];
  if (strncmp (netif, "tap", 3) != 0) {
    BX_PANIC (("eth_tap: interface name (%s) must be tap0..tap15", netif));
  }
  sprintf (filename, "/dev/%s", netif);

#if defined(__linux__)
  // check if the TAP device is running, and turn on ARP.  This is based
  // on code from the Mac-On-Linux project. http://www.maconlinux.org/
  int sock = socket(AF_INET, SOCK_DGRAM, 0);
  if (sock < 0) {
    BX_PANIC (("socket creation: %s", strerror(errno)));
    return;
  }
  struct ifreq ifr;
  memset(&ifr, 0, sizeof(ifr));
  strncpy(ifr.ifr_name, netif, sizeof(ifr.ifr_name));
  if(ioctl(sock, SIOCGIFFLAGS, &ifr) < 0) {
    BX_PANIC (("SIOCGIFFLAGS on %s: %s", netif, strerror(errno)));
    close(sock);
    return;
  }
  if (!(ifr.ifr_flags & IFF_RUNNING)) {
    BX_PANIC (("%s device is not running", netif));
    close(sock);
    return;
  }
  if ((ifr.ifr_flags & IFF_NOARP)){
    BX_INFO(("turn on ARP for %s device", netif));
    ifr.ifr_flags &= ~IFF_NOARP;
    if (ioctl(sock, SIOCSIFFLAGS, &ifr) < 0) {
      BX_PANIC (("SIOCSIFFLAGS: %s", strerror(errno)));
      close(sock);
      return;
    }
  }
  close(sock);
#endif

  fd = open (filename, O_RDWR);
  if (fd < 0) {
    BX_PANIC(("open failed on %s: %s", netif, strerror(errno)));
    return;
  }

  /* set O_NONBLOCK flag so that we can poll with read() */
  if ((flags = fcntl(fd, F_GETFL)) < 0) {
    BX_PANIC(("getflags on tap device: %s", strerror(errno)));
  }
  flags |= O_NONBLOCK;
  if (fcntl(fd, F_SETFL, flags) < 0) {
    BX_PANIC(("set tap device flags: %s", strerror(errno)));
  }

  BX_INFO(("eth_tap: opened %s device", netif));

  /* Execute the configuration script */
  char intname[IFNAMSIZ];
  strcpy(intname,netif);
  if((script != NULL) && (strcmp(script, "") != 0) && (strcmp(script, "none") != 0))
  {
    if (execute_script(script, intname) < 0)
      BX_ERROR(("execute script '%s' on %s failed", script, intname));
  }

  // Start the rx poll
  this->rx_timer_index =
    bx_pc_system.register_timer(this, this->rx_timer_handler, 1000,
				1, 1, "eth_tap"); // continuous, active
  this->rxh   = rxh;
  this->rxarg = rxarg;
  memcpy(&guest_macaddr[0], macaddr, 6);
#if BX_ETH_TAP_LOGGING
  // eventually Bryce wants txlog to dump in pcap format so that
  // tcpdump -r FILE can read it and interpret packets.
  txlog = fopen ("ne2k-tx.log", "wb");
  if (!txlog) BX_PANIC (("open ne2k-tx.log failed"));
  txlog_txt = fopen ("ne2k-txdump.txt", "wb");
  if (!txlog_txt) BX_PANIC (("open ne2k-txdump.txt failed"));
  fprintf (txlog_txt, "tap packetmover readable log file\n");
  fprintf (txlog_txt, "net IF = %s\n", netif);
  fprintf (txlog_txt, "MAC address = ");
  for (int i=0; i<6; i++)
    fprintf (txlog_txt, "%02x%s", 0xff & macaddr[i], i<5?":" : "");
  fprintf (txlog_txt, "\n--\n");
  fflush (txlog_txt);

  rxlog = fopen ("ne2k-rx.log", "wb");
  if (!rxlog) BX_PANIC (("open ne2k-rx.log failed"));
  rxlog_txt = fopen ("ne2k-rxdump.txt", "wb");
  if (!rxlog_txt) BX_PANIC (("open ne2k-rxdump.txt failed"));
  fprintf (rxlog_txt, "tap packetmover readable log file\n");
  fprintf (rxlog_txt, "net IF = %s\n", netif);
  fprintf (rxlog_txt, "MAC address = ");
  for (int i=0; i<6; i++)
    fprintf (rxlog_txt, "%02x%s", 0xff & macaddr[i], i<5?":" : "");
  fprintf (rxlog_txt, "\n--\n");
  fflush (rxlog_txt);

#endif
}
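For context, a hedged sketch of the bochsrc line that would select this module; the ethmod/ethdev/script option names reflect common Bochs NE2000 configuration, and the concrete values are placeholders.

# illustrative bochsrc entry (values are placeholders)
ne2k: ioaddr=0x300, irq=9, mac=fe:fd:00:00:00:01, ethmod=tap, ethdev=tap0, script=./tunconfig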
Example #10
void plugin_load(char *name, char *args, plugintype_t type)
{
    plugin_t *plugin;

    plugin = (plugin_t *)malloc (sizeof(plugin_t));
    if (!plugin)
    {
      BX_PANIC(("malloc plugin_t failed"));
    }

    plugin->type = type;
    plugin->name = name;
    plugin->args = args;
    plugin->initialized = 0;

    char plugin_filename[BX_PATHNAME_LEN], buf[BX_PATHNAME_LEN];
    sprintf(buf, PLUGIN_FILENAME_FORMAT, name);
    sprintf(plugin_filename, "%s%s", PLUGIN_PATH, buf);

    // Set context so that any devices that the plugin registers will
    // be able to see which plugin created them.  The registration will
    // be called from either dlopen (global constructors) or plugin_init.
    BX_ASSERT (current_plugin_context == NULL);
    current_plugin_context = plugin;
    plugin->handle = lt_dlopen (plugin_filename);
    BX_INFO (("lt_dlhandle is %p", plugin->handle));
    if (!plugin->handle)
    {
      current_plugin_context = NULL;
      BX_PANIC (("dlopen failed for module '%s': %s", name, lt_dlerror ()));
      free (plugin);
      return;
    }

    sprintf(buf, PLUGIN_INIT_FMT_STRING, name);
    plugin->plugin_init =  
      (int  (*)(struct _plugin_t *, enum plugintype_t, int, char *[])) /* monster typecast */
      lt_dlsym (plugin->handle, buf);
    if (plugin->plugin_init == NULL) {
        pluginlog->panic("could not find plugin_init: %s", lt_dlerror ());
        plugin_abort ();
    }

    sprintf(buf, PLUGIN_FINI_FMT_STRING, name);
    plugin->plugin_fini = (void (*)(void)) lt_dlsym (plugin->handle, buf);
    if (plugin->plugin_fini == NULL) {
        pluginlog->panic("could not find plugin_fini: %s", lt_dlerror ());
        plugin_abort ();
    }
    pluginlog->info("loaded plugin %s",plugin_filename);


    /* Insert plugin at the _end_ of the plugin linked list. */
    plugin->next = NULL;

    if (!plugins)
    {
        /* Empty list, this become the first entry. */
        plugins = plugin;
    }
    else
    {
        /* Non-empty list.  Add to end. */
        plugin_t *temp = plugins;

        while (temp->next)
            temp = temp->next;

        temp->next = plugin;
    }

    plugin_init_one(plugin);

    // check that context didn't change.  This should only happen if we
    // need a reentrant plugin_load.
    BX_ASSERT (current_plugin_context == plugin);
    current_plugin_context = NULL;

    return;
}
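A hedged skeleton of the two entry points plugin_load() resolves; the real symbol names come from PLUGIN_INIT_FMT_STRING / PLUGIN_FINI_FMT_STRING, so the names below are placeholders, and the init signature simply mirrors the cast used above.

/* Hypothetical plugin skeleton; names and return convention are placeholders. */
int example_plugin_init(struct _plugin_t *plugin, enum plugintype_t type,
                        int argc, char *argv[])
{
    /* register the devices / hooks this plugin provides */
    return 0;
}

void example_plugin_fini(void)
{
    /* undo whatever example_plugin_init registered */
}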
Example #11
// the constructor
bx_win32_pktmover_c::bx_win32_pktmover_c(
    const char *netif, const char *macaddr,
    eth_rx_handler_t rxh, void *rxarg, char *script)
{
    // Open Packet Driver Here.
    DWORD dwVersion;
    DWORD dwWindowsMajorVersion;

    BX_INFO(("bx_win32_pktmover_c"));
    rx_Arg     = rxarg;
    rx_handler = rxh;

    hPacket = LoadLibrary("PACKET.DLL");
    memcpy(cMacAddr, macaddr, 6);
    if (hPacket) {
        PacketOpenAdapter     = (LPADAPTER (*)(LPTSTR))                          GetProcAddress(hPacket, "PacketOpenAdapter");
        PacketCloseAdapter    = (VOID      (*)(LPADAPTER))                       GetProcAddress(hPacket, "PacketCloseAdapter");
        PacketSetHwFilter     = (BOOLEAN   (*)(LPADAPTER, ULONG))                GetProcAddress(hPacket, "PacketSetHwFilter");
        PacketSetBpf          = (BOOLEAN   (*)(LPADAPTER, struct bpf_program *)) GetProcAddress(hPacket, "PacketSetBpf");
        PacketGetAdapterNames = (BOOLEAN   (*)(PTSTR, PULONG))                   GetProcAddress(hPacket, "PacketGetAdapterNames");
        PacketSendPacket      = (BOOLEAN   (*)(LPADAPTER, LPPACKET, BOOLEAN))    GetProcAddress(hPacket, "PacketSendPacket");
        PacketReceivePacket   = (BOOLEAN   (*)(LPADAPTER, LPPACKET, BOOLEAN))    GetProcAddress(hPacket, "PacketReceivePacket");
        PacketSetBuff         = (BOOLEAN   (*)(LPADAPTER, int))                  GetProcAddress(hPacket, "PacketSetBuff");
        PacketSetReadTimeout  = (BOOLEAN   (*)(LPADAPTER, int))                  GetProcAddress(hPacket, "PacketSetReadTimeout");
        PacketAllocatePacket  = (LPPACKET  (*)(void))                            GetProcAddress(hPacket, "PacketAllocatePacket");
        PacketInitPacket      = (VOID      (*)(LPPACKET, PVOID, UINT))           GetProcAddress(hPacket, "PacketInitPacket");
        PacketFreePacket      = (VOID      (*)(LPPACKET))                        GetProcAddress(hPacket, "PacketFreePacket");
    } else {
        BX_PANIC(("Could not load WPCap Drivers for ethernet support!"));
    }

    memset(&netdev, 0, sizeof(netdev));
    dwVersion=GetVersion();
    dwWindowsMajorVersion =  (DWORD)(LOBYTE(LOWORD(dwVersion)));
    if (!(dwVersion >= 0x80000000 && dwWindowsMajorVersion >= 4))
    {   // Windows NT/2k
        int nLen = MultiByteToWideChar(CP_ACP, 0, netif, -1, NULL, 0);
        MultiByteToWideChar(CP_ACP, 0, netif, -1, (WCHAR *)netdev, nLen);
        IsNT = TRUE;
    } else { // Win9x
        strcpy(netdev, netif);
    }

    lpAdapter = PacketOpenAdapter(netdev);
    if (!lpAdapter || (lpAdapter->hFile == INVALID_HANDLE_VALUE)) {
        BX_PANIC(("Could not open adapter for ethernet reception"));
        return;
    }
    PacketSetHwFilter(lpAdapter, NDIS_PACKET_TYPE_PROMISCUOUS);

    /* The code below sets a BPF mac address filter
       that seems to really kill performance, for now
       im just using code to filter, and it works
       better
    */

//  memcpy(&this->filter, macfilter, sizeof(macfilter));
//  this->filter[1].k = (macaddr[2] & 0xff) << 24 | (macaddr[3] & 0xff) << 16 | (macaddr[4] & 0xff) << 8  | (macaddr[5] & 0xff);
//  this->filter[3].k = (macaddr[0] & 0xff) << 8 | (macaddr[1] & 0xff);
//  bp.bf_len   = 8;
//  bp.bf_insns = &this->filter[0];
//  if (!PacketSetBpf(lpAdapter, &bp)) {
//    BX_PANIC(("Could not set mac address BPF filter"));
//  }

    PacketSetBuff(lpAdapter, 512000);
    PacketSetReadTimeout(lpAdapter, -1);

    if ((pkSend = PacketAllocatePacket()) == NULL) {
        BX_PANIC(("Could not allocate a send packet"));
    }

    if ((pkRecv = PacketAllocatePacket()) == NULL) {
        BX_PANIC(("Could not allocate a recv packet"));
    }
    rx_timer_index =
        bx_pc_system.register_timer(this, this->rx_timer_handler, 10000, 1, 1, "eth_win32");

#if BX_ETH_WIN32_LOGGING
    pktlog_txt = fopen ("ne2k-pktlog.txt", "wb");
    if (!pktlog_txt) BX_PANIC (("open ne2k-pktlog.txt failed"));
    fprintf (pktlog_txt, "win32 packetmover readable log file\n");
    fprintf (pktlog_txt, "host adapter = %s\n", netif);
    fprintf (pktlog_txt, "guest MAC address = ");
    int i;
    for (i=0; i<6; i++)
        fprintf (pktlog_txt, "%02x%s", 0xff & macaddr[i], i<5?":" : "\n");
    fprintf (pktlog_txt, "--\n");
    fflush (pktlog_txt);
#endif
}
Example #12
void BX_CPU_C::write_virtual_checks(bx_segment_reg_t *seg, bx_address offset, unsigned length)
{
  Bit32u upper_limit;

#if BX_SUPPORT_X86_64
  if (BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64) {
    // Mark cache as being OK type for succeeding reads/writes
    seg->cache.valid |= SegAccessROK | SegAccessWOK | SegAccessROK4G | SegAccessWOK4G;
    return;
  }
#endif

  if (seg->cache.valid==0) {
    BX_DEBUG(("write_virtual_checks(): segment descriptor not valid"));
    exception(int_number(seg), 0, 0);
  }

  if (seg->cache.p == 0) { /* not present */
    BX_ERROR(("write_virtual_checks(): segment not present"));
    exception(int_number(seg), 0, 0);
  }

  switch (seg->cache.type) {
    case 0: case 1:   // read only
    case 4: case 5:   // read only, expand down
    case 8: case 9:   // execute only
    case 10: case 11: // execute/read
    case 12: case 13: // execute only, conforming
    case 14: case 15: // execute/read-only, conforming
      BX_ERROR(("write_virtual_checks(): no write access to seg"));
      exception(int_number(seg), 0, 0);

    case 2: case 3: /* read/write */
      if (offset > (seg->cache.u.segment.limit_scaled - length + 1)
          || (length-1 > seg->cache.u.segment.limit_scaled))
      {
        BX_ERROR(("write_virtual_checks(): write beyond limit, r/w"));
        exception(int_number(seg), 0, 0);
      }
      if (seg->cache.u.segment.limit_scaled >= 7) {
        // Mark cache as being OK type for succeeding reads/writes. The limit
        // checks still need to be done, though they are simpler. We
        // could probably also optimize that out with a flag for the case
        // when the limit is the maximum 32-bit value. The limit should
        // accommodate at least a dword, since we subtract from it in the
        // simple limit check in other functions, and we don't want the
        // value to roll over. Only normal segments (not expand down) are
        // handled this way.
        seg->cache.valid |= SegAccessROK | SegAccessWOK;
 
        if (seg->cache.u.segment.limit_scaled == 0xffffffff)
          seg->cache.valid |= SegAccessROK4G | SegAccessWOK4G;
      }
      break;

    case 6: case 7: /* read/write, expand down */
      if (seg->cache.u.segment.d_b)
        upper_limit = 0xffffffff;
      else
        upper_limit = 0x0000ffff;
      if ((offset <= seg->cache.u.segment.limit_scaled) ||
           (offset > upper_limit) || ((upper_limit - offset) < (length - 1)))
      {
        BX_ERROR(("write_virtual_checks(): write beyond limit, r/w ED"));
        exception(int_number(seg), 0, 0);
      }
      break;

    default:
      BX_PANIC(("write_virtual_checks(): unknown descriptor type=%d", seg->cache.type));
  }
}
Example #13
void BX_CPU_C::execute_virtual_checks(bx_segment_reg_t *seg, bx_address offset, unsigned length)
{
  Bit32u upper_limit;

#if BX_SUPPORT_X86_64
  if (BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64) {
    // Mark cache as being OK type for succeeding reads/writes
    seg->cache.valid |= SegAccessROK | SegAccessWOK | SegAccessROK4G | SegAccessWOK4G;
    return;
  }
#endif

  if (seg->cache.valid==0) {
    BX_DEBUG(("execute_virtual_checks(): segment descriptor not valid"));
    exception(int_number(seg), 0, 0);
  }

  if (seg->cache.p == 0) { /* not present */
    BX_ERROR(("execute_virtual_checks(): segment not present"));
    exception(int_number(seg), 0, 0);
  }

  switch (seg->cache.type) {
    case 0: case 1: /* read only */
    case 2: case 3: /* read/write */
    case 10: case 11: /* execute/read */
    case 14: case 15: /* execute/read-only, conforming */
      if (offset > (seg->cache.u.segment.limit_scaled - length + 1)
          || (length-1 > seg->cache.u.segment.limit_scaled))
      {
        BX_ERROR(("execute_virtual_checks(): read beyond limit"));
        exception(int_number(seg), 0, 0);
      }
      if (seg->cache.u.segment.limit_scaled >= 7) {
        // Mark cache as being OK type for succeeding reads. See notes for
        // write checks; similar code.
        seg->cache.valid |= SegAccessROK;
        if (seg->cache.u.segment.limit_scaled == 0xffffffff)
          seg->cache.valid |= SegAccessROK4G;
      }
      break;

    case 8: case 9: /* execute only */
    case 12: case 13: /* execute only, conforming */
      if (offset > (seg->cache.u.segment.limit_scaled - length + 1)
          || (length-1 > seg->cache.u.segment.limit_scaled))
      {
        BX_ERROR(("execute_virtual_checks(): read beyond limit execute only"));
        exception(int_number(seg), 0, 0);
      }
      break;
 
    case 4: case 5: /* read only, expand down */
    case 6: case 7: /* read/write, expand down */
      if (seg->cache.u.segment.d_b)
        upper_limit = 0xffffffff;
      else
        upper_limit = 0x0000ffff;
      if ((offset <= seg->cache.u.segment.limit_scaled) ||
           (offset > upper_limit) || ((upper_limit - offset) < (length - 1)))
      {
        BX_ERROR(("execute_virtual_checks(): read beyond limit ED"));
        exception(int_number(seg), 0, 0);
      }
      break;

    default:
      BX_PANIC(("execute_virtual_checks(): unknown descriptor type=%d", seg->cache.type));
  }
}
Example #14
void BIOSCALL int13_eltorito(disk_regs_t r)
{
    // @TODO: a macro or a function for getting the EBDA segment
    uint16_t        ebda_seg=read_word(0x0040,0x000E);
    cdemu_t __far   *cdemu;

    cdemu = ebda_seg :> &EbdaData->cdemu;

    
    BX_DEBUG_INT13_ET("%s: AX=%04x BX=%04x CX=%04x DX=%04x ES=%04x\n", __func__, AX, BX, CX, DX, ES);
    // BX_DEBUG_INT13_ET("%s: SS=%04x DS=%04x ES=%04x DI=%04x SI=%04x\n", __func__, get_SS(), DS, ES, DI, SI);
    
    switch (GET_AH()) {

    // FIXME ElTorito Various. Should be implemented
    case 0x4a: // ElTorito - Initiate disk emu
    case 0x4c: // ElTorito - Initiate disk emu and boot
    case 0x4d: // ElTorito - Return Boot catalog
        BX_PANIC("%s: call with AX=%04x. Please report\n", __func__, AX);
        goto int13_fail;
        break;

    case 0x4b: // ElTorito - Terminate disk emu
        // FIXME ElTorito Hardcoded
        //@todo: maybe our cdemu struct should match El Torito to allow memcpy()?
        write_byte(DS,SI+0x00,0x13);
        write_byte(DS,SI+0x01,cdemu->media);
        write_byte(DS,SI+0x02,cdemu->emulated_drive);
        write_byte(DS,SI+0x03,cdemu->controller_index);
        write_dword(DS,SI+0x04,cdemu->ilba);
        write_word(DS,SI+0x08,cdemu->device_spec);
        write_word(DS,SI+0x0a,cdemu->buffer_segment);
        write_word(DS,SI+0x0c,cdemu->load_segment);
        write_word(DS,SI+0x0e,cdemu->sector_count);
        write_byte(DS,SI+0x10,cdemu->vdevice.cylinders);
        write_byte(DS,SI+0x11,cdemu->vdevice.spt);
        write_byte(DS,SI+0x12,cdemu->vdevice.heads);
        
        // If we have to terminate emulation
        if(GET_AL() == 0x00) {
            // FIXME ElTorito Various. Should be handled accordingly to spec
            cdemu->active = 0;  // bye bye
        }

        goto int13_success;
        break;

    default:
          BX_INFO("%s: unsupported AH=%02x\n", __func__, GET_AH());
          goto int13_fail;
          break;
    }

int13_fail:
    SET_AH(0x01); // defaults to invalid function in AH or invalid parameter
    SET_DISK_RET_STATUS(GET_AH());
    SET_CF();     // error occurred
    return;

int13_success:
    SET_AH(0x00); // no error
    SET_DISK_RET_STATUS(0x00);
    CLEAR_CF();   // no error
    return;
}
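A hedged sketch of the El Torito "terminate emulation" packet written at DS:SI above; the offsets mirror the write_byte/write_word/write_dword calls, while the struct itself is illustrative and not part of the BIOS source.

/* Illustrative layout only. */
typedef struct {
    uint8_t     size;               /* 0x00: packet size (0x13 is written above) */
    uint8_t     media;              /* 0x01: media type */
    uint8_t     emulated_drive;     /* 0x02: emulated drive number */
    uint8_t     controller_index;   /* 0x03: controller index */
    uint32_t    ilba;               /* 0x04: image start LBA */
    uint16_t    device_spec;        /* 0x08: device specification */
    uint16_t    buffer_segment;     /* 0x0a: user buffer segment */
    uint16_t    load_segment;       /* 0x0c: load segment */
    uint16_t    sector_count;       /* 0x0e: sector count */
    uint8_t     cylinders;          /* 0x10: cylinder count (as written above) */
    uint8_t     spt;                /* 0x11: sectors per track */
    uint8_t     heads;              /* 0x12: head count */
} eltorito_terminate_sketch_t;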
Example #15
void bx_pit_c::write(Bit32u address, Bit32u dvalue, unsigned io_len)
{
  Bit8u   value;
  Bit64u my_time_usec = bx_virt_timer.time_usec();
  Bit64u time_passed = my_time_usec-BX_PIT_THIS s.last_usec;
  Bit32u time_passed32 = (Bit32u)time_passed;

  if(time_passed32) {
    periodic(time_passed32);
  }
  BX_PIT_THIS s.last_usec = BX_PIT_THIS s.last_usec + time_passed;

  value = (Bit8u) dvalue;

  BX_DEBUG(("write to port 0x%04x, value = 0x%02x", address, value));

  switch (address) {
    case 0x40: /* timer 0: write count register */
      BX_PIT_THIS s.timer.write(0, value);
      break;

    case 0x41: /* timer 1: write count register */
      BX_PIT_THIS s.timer.write(1, value);
      break;

    case 0x42: /* timer 2: write count register */
      BX_PIT_THIS s.timer.write(2, value);
      break;

    case 0x43: /* timer 0-2 mode control */
      BX_PIT_THIS s.timer.write(3, value);
      break;

    case 0x61:
      BX_PIT_THIS s.speaker_data_on = (value >> 1) & 0x01;
      if (BX_PIT_THIS s.speaker_data_on) {
        DEV_speaker_beep_on((float)(1193180.0 / BX_PIT_THIS get_timer(2)));
      } else {
        DEV_speaker_beep_off();
      }
      /* ??? only on AT+ */
      BX_PIT_THIS s.timer.set_GATE(2, value & 0x01);
      break;

    default:
      BX_PANIC(("unsupported io write to port 0x%04x = 0x%02x", address, value));
  }

  if (time_passed || (BX_PIT_THIS s.last_next_event_time != BX_PIT_THIS s.timer.get_next_event_time())) {
    BX_DEBUG(("RESETting timer"));
    bx_virt_timer.deactivate_timer(BX_PIT_THIS s.timer_handle[0]);
    BX_DEBUG(("deactivated timer"));
    if(BX_PIT_THIS s.timer.get_next_event_time()) {
      bx_virt_timer.activate_timer(BX_PIT_THIS s.timer_handle[0],
                                   (Bit32u)BX_MAX(1,TICKS_TO_USEC(BX_PIT_THIS s.timer.get_next_event_time())),
                                   0);
      BX_DEBUG(("activated timer"));
    }
    BX_PIT_THIS s.last_next_event_time = BX_PIT_THIS s.timer.get_next_event_time();
  }
  BX_DEBUG(("s.last_usec="FMT_LL"d", BX_PIT_THIS s.last_usec));
  BX_DEBUG(("s.timer_id=%d", BX_PIT_THIS s.timer_handle[0]));
  BX_DEBUG(("s.timer.get_next_event_time=%x", BX_PIT_THIS s.timer.get_next_event_time()));
  BX_DEBUG(("s.last_next_event_time=%d", BX_PIT_THIS s.last_next_event_time));

}
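A hedged sketch of the port-write sequence this handler decodes, using the classic 8254 "counter 0, mode 3" programming; the calls are made directly on write() for illustration and assume the method is reachable from the caller.

// Illustrative only: program counter 0 for mode 3 (square wave), binary count,
// LSB then MSB, with a reload value of 0x1234.
static void pit_program_example(bx_pit_c &pit)
{
  pit.write(0x43, 0x36, 1);  // control word: counter 0, access LSB then MSB, mode 3
  pit.write(0x40, 0x34, 1);  // reload value, low byte
  pit.write(0x40, 0x12, 1);  // reload value, high byte
}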
Example #16
void plugin_load(char *name, char *args, plugintype_t type)
{
  plugin_t *plugin, *temp;

  if (plugins != NULL) {
    temp = plugins;

    while (temp != NULL) {
      if (!strcmp(name, temp->name)) {
        BX_PANIC(("plugin '%s' already loaded", name));
        return;
      }
      temp = temp->next;
    }
  }

  plugin = (plugin_t *)malloc (sizeof(plugin_t));
  if (!plugin)
  {
    BX_PANIC(("malloc plugin_t failed"));
  }

  plugin->type = type;
  plugin->name = name;
  plugin->args = args;
  plugin->initialized = 0;

  char plugin_filename[BX_PATHNAME_LEN], buf[BX_PATHNAME_LEN];
  sprintf(buf, PLUGIN_FILENAME_FORMAT, name);
  sprintf(plugin_filename, "%s%s", PLUGIN_PATH, buf);

  // Set context so that any devices that the plugin registers will
  // be able to see which plugin created them.  The registration will
  // be called from either dlopen (global constructors) or plugin_init.
  BX_ASSERT(current_plugin_context == NULL);
  current_plugin_context = plugin;
#if defined(_MSC_VER)
  plugin->handle = LoadLibrary(plugin_filename);
  BX_INFO(("DLL handle is %p", plugin->handle));
  if (!plugin->handle)
  {
    current_plugin_context = NULL;
    BX_PANIC(("LoadLibrary failed for module '%s': error=%d", name, GetLastError()));
    free(plugin);
    return;
  }
#else
  plugin->handle = lt_dlopen (plugin_filename);
  BX_INFO(("lt_dlhandle is %p", plugin->handle));
  if (!plugin->handle)
  {
    current_plugin_context = NULL;
    BX_PANIC(("dlopen failed for module '%s': %s", name, lt_dlerror ()));
    free(plugin);
    return;
  }
#endif

  if (type != PLUGTYPE_USER) {
    sprintf(buf, PLUGIN_INIT_FMT_STRING, name);
  } else {
    sprintf(buf, PLUGIN_INIT_FMT_STRING, "user");
  }
#if defined(_MSC_VER)
  plugin->plugin_init = (plugin_init_t) GetProcAddress(plugin->handle, buf);
  if (plugin->plugin_init == NULL) {
    pluginlog->panic("could not find plugin_init: error=%d", GetLastError());
    plugin_abort ();
  }
#else
  plugin->plugin_init = (plugin_init_t) lt_dlsym (plugin->handle, buf);
  if (plugin->plugin_init == NULL) {
    pluginlog->panic("could not find plugin_init: %s", lt_dlerror ());
    plugin_abort ();
  }
#endif

  if (type != PLUGTYPE_USER) {
    sprintf(buf, PLUGIN_FINI_FMT_STRING, name);
  } else {
    sprintf(buf, PLUGIN_FINI_FMT_STRING, "user");
  }
#if defined(_MSC_VER)
  plugin->plugin_fini = (plugin_fini_t) GetProcAddress(plugin->handle, buf);
  if (plugin->plugin_fini == NULL) {
    pluginlog->panic("could not find plugin_fini: error=%d", GetLastError());
    plugin_abort ();
  }
#else
  plugin->plugin_fini = (plugin_fini_t) lt_dlsym (plugin->handle, buf);
  if (plugin->plugin_fini == NULL) {
    pluginlog->panic("could not find plugin_fini: %s", lt_dlerror ());
    plugin_abort();
  }
#endif
  pluginlog->info("loaded plugin %s",plugin_filename);

  /* Insert plugin at the _end_ of the plugin linked list. */
  plugin->next = NULL;

  if (!plugins)
  {
    /* Empty list, this become the first entry. */
    plugins = plugin;
  }
  else
  {
    /* Non-empty list.  Add to end. */
    temp = plugins;

    while (temp->next)
      temp = temp->next;

    temp->next = plugin;
  }

  plugin_init_one(plugin);

  // check that context didn't change.  This should only happen if we
  // need a reentrant plugin_load.
  BX_ASSERT(current_plugin_context == plugin);
  current_plugin_context = NULL;
}
Example #17
// vector:     0..255: vector in IDT
// error_code: if exception generates and error, push this error code
// trap:       override exception class to TRAP
void BX_CPU_C::exception(unsigned vector, Bit16u error_code, bx_bool trap)
{
  unsigned exception_type = 0, exception_class = BX_EXCEPTION_CLASS_FAULT;
  bx_bool push_error = 0;

  invalidate_prefetch_q();
  BX_INSTR_EXCEPTION(BX_CPU_ID, vector);

#if BX_DEBUGGER
  bx_dbg_exception(BX_CPU_ID, vector, error_code);
#endif

  BX_DEBUG(("exception(0x%02x): error_code=%04x", vector, error_code));

  // if not initial error, restore previous register values from
  // previous attempt to handle exception
  if (BX_CPU_THIS_PTR errorno) {
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS] = BX_CPU_THIS_PTR save_cs;
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS] = BX_CPU_THIS_PTR save_ss;
    RIP = BX_CPU_THIS_PTR save_eip;
    RSP = BX_CPU_THIS_PTR save_esp;
  }

  if (BX_CPU_THIS_PTR errorno > 0) {
    if (BX_CPU_THIS_PTR errorno > 2 || BX_CPU_THIS_PTR curr_exception == BX_ET_DOUBLE_FAULT) {
      debug(BX_CPU_THIS_PTR prev_rip); // print debug information to the log
#if BX_DEBUGGER
      // trap into the debugger (similar to what is done when a PANIC occurs)
      bx_debug_break();
#endif
      if (SIM->get_param_bool(BXPN_RESET_ON_TRIPLE_FAULT)->get()) {
        BX_ERROR(("exception(): 3rd (%d) exception with no resolution, shutdown status is %02xh, resetting", vector, DEV_cmos_get_reg(0x0f)));
        bx_pc_system.Reset(BX_RESET_SOFTWARE);
      }
      else {
        BX_PANIC(("exception(): 3rd (%d) exception with no resolution", vector));
        BX_ERROR(("WARNING: Any simulation after this point is completely bogus !"));
        shutdown();
      }
      longjmp(BX_CPU_THIS_PTR jmp_buf_env, 1); // go back to main decode loop
    }
  }

  // note: fault-class exceptions _except_ #DB set RF in
  //       eflags image.

  switch (vector) {
    case BX_DE_EXCEPTION: // DIV by 0
      push_error = 0;
      exception_class = BX_EXCEPTION_CLASS_FAULT;
      exception_type  = BX_ET_CONTRIBUTORY;
      break;
    case BX_DB_EXCEPTION: // debug exceptions
      push_error = 0;
      // Instruction fetch breakpoint  - FAULT
      // Data read or write breakpoint - TRAP
      // I/O read or write breakpoint  - TRAP
      // General detect condition      - FAULT
      // Single-step                   - TRAP
      // Task-switch                   - TRAP
      exception_class = BX_EXCEPTION_CLASS_FAULT;
      exception_type  = BX_ET_BENIGN;
      break;
    case 2:               // NMI
      push_error = 0;
      exception_type  = BX_ET_BENIGN;
      break;
    case BX_BP_EXCEPTION: // breakpoint
      push_error = 0;
      exception_class = BX_EXCEPTION_CLASS_TRAP;
      exception_type  = BX_ET_BENIGN;
      break;
    case BX_OF_EXCEPTION: // overflow
      push_error = 0;
      exception_class = BX_EXCEPTION_CLASS_TRAP;
      exception_type  = BX_ET_BENIGN;
      break;
    case BX_BR_EXCEPTION: // bounds check
      push_error = 0;
      exception_class = BX_EXCEPTION_CLASS_FAULT;
      exception_type  = BX_ET_BENIGN;
      break;
    case BX_UD_EXCEPTION: // invalid opcode
      push_error = 0;
      exception_class = BX_EXCEPTION_CLASS_FAULT;
      exception_type  = BX_ET_BENIGN;
      break;
    case BX_NM_EXCEPTION: // device not available
      push_error = 0;
      exception_class = BX_EXCEPTION_CLASS_FAULT;
      exception_type  = BX_ET_BENIGN;
      break;
    case BX_DF_EXCEPTION: // double fault
      push_error = 1;
      error_code = 0;
      exception_class = BX_EXCEPTION_CLASS_ABORT;
      exception_type  = BX_ET_DOUBLE_FAULT;
      break;
    case 9:               // coprocessor segment overrun (286,386 only)
      push_error = 0;
      exception_class = BX_EXCEPTION_CLASS_ABORT;
      exception_type  = BX_ET_BENIGN;
      BX_PANIC(("exception(9): unfinished"));
      break;
    case BX_TS_EXCEPTION: // invalid TSS
      push_error = 1;
      exception_class = BX_EXCEPTION_CLASS_FAULT;
      exception_type  = BX_ET_CONTRIBUTORY;
      break;
    case BX_NP_EXCEPTION: // segment not present
      push_error = 1;
      exception_class = BX_EXCEPTION_CLASS_FAULT;
      exception_type  = BX_ET_CONTRIBUTORY;
      break;
    case BX_SS_EXCEPTION: // stack fault
      push_error = 1;
      exception_class = BX_EXCEPTION_CLASS_FAULT;
      exception_type  = BX_ET_CONTRIBUTORY;
      break;
    case BX_GP_EXCEPTION: // general protection
      push_error = 1;
      exception_class = BX_EXCEPTION_CLASS_FAULT;
      exception_type  = BX_ET_CONTRIBUTORY;
      break;
    case BX_PF_EXCEPTION: // page fault
      push_error = 1;
      exception_class = BX_EXCEPTION_CLASS_FAULT;
      exception_type  = BX_ET_PAGE_FAULT;
      break;
    case 15:              // reserved
      BX_PANIC(("exception(15): reserved"));
      push_error = 0;
      exception_type = 0;
      break;
    case BX_MF_EXCEPTION: // floating-point error
      push_error = 0;
      exception_class = BX_EXCEPTION_CLASS_FAULT;
      exception_type  = BX_ET_BENIGN;
      break;
#if BX_CPU_LEVEL >= 4
    case BX_AC_EXCEPTION: // alignment check
      push_error = 1;
      exception_class = BX_EXCEPTION_CLASS_FAULT;
      exception_type  = BX_ET_BENIGN;
      break;
#endif
#if BX_CPU_LEVEL >= 5
    case BX_MC_EXCEPTION: // machine check
      BX_PANIC(("exception(): machine-check, vector 18 not implemented"));
      push_error = 0;
      exception_class = BX_EXCEPTION_CLASS_ABORT;
      exception_type  = BX_ET_BENIGN;
      break;
#if BX_SUPPORT_SSE
    case BX_XM_EXCEPTION: // SIMD Floating-Point exception
      push_error = 0;
      exception_class = BX_EXCEPTION_CLASS_FAULT;
      exception_type  = BX_ET_BENIGN;
      break;
#endif
#endif
    default:
      BX_PANIC(("exception(%u): bad vector", (unsigned) vector));
      exception_type = BX_ET_BENIGN;
      push_error = 0;    // keep compiler happy for now
      break;
  }

  if (trap) {
    exception_class = BX_EXCEPTION_CLASS_TRAP;
  }
  else {
    if (exception_class == BX_EXCEPTION_CLASS_FAULT)
    {
      // restore RIP/RSP to value before error occurred
      RIP = BX_CPU_THIS_PTR prev_rip;
      if (BX_CPU_THIS_PTR speculative_rsp)
        RSP = BX_CPU_THIS_PTR prev_rsp;

      if (vector != BX_DB_EXCEPTION) BX_CPU_THIS_PTR assert_RF();
    }
  }

  // clear GD flag in the DR7 prior entering debug exception handler
  if (vector == BX_DB_EXCEPTION)
    BX_CPU_THIS_PTR dr7 &= ~0x00002000;

  if (exception_type != BX_ET_PAGE_FAULT) {
    // Page faults have different format
    error_code = (error_code & 0xfffe) | BX_CPU_THIS_PTR EXT;
  }
  else {
    // FIXME: special format error returned for page faults ?
  }
  BX_CPU_THIS_PTR EXT = 1;

  /* if we've already had 1st exception, see if 2nd causes a
   * Double Fault instead.  Otherwise, just record 1st exception
   */
  if (BX_CPU_THIS_PTR errorno > 0) {
    if (is_exception_OK[BX_CPU_THIS_PTR curr_exception][exception_type]) {
      BX_CPU_THIS_PTR curr_exception = exception_type;
    }
    else {
      exception(BX_DF_EXCEPTION, 0, 0);
    }
  }
  else {
    BX_CPU_THIS_PTR curr_exception = exception_type;
  }

  BX_CPU_THIS_PTR errorno++;

  if (real_mode()) {
    // not INT, no error code pushed
    BX_CPU_THIS_PTR interrupt(vector, 0, 0, 0);
    BX_CPU_THIS_PTR errorno = 0; // error resolved
    longjmp(BX_CPU_THIS_PTR jmp_buf_env, 1); // go back to main decode loop
  }
  else {
    BX_CPU_THIS_PTR interrupt(vector, 0, push_error, error_code);
    BX_CPU_THIS_PTR errorno = 0; // error resolved
    longjmp(BX_CPU_THIS_PTR jmp_buf_env, 1); // go back to main decode loop
  }
}
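The is_exception_OK table consulted above encodes the benign/contributory/page-fault pairing rule: a second exception of an incompatible class escalates to #DF. A small sketch of that rule, following the usual architectural table rather than the emulator's actual array:

#include <stdio.h>

enum { ET_BENIGN, ET_CONTRIBUTORY, ET_PAGE_FAULT, ET_COUNT };

/* second_ok[first][second] == 1 means the second exception is handled
 * serially; 0 means the pair escalates to a double fault (#DF). */
static const int second_ok[ET_COUNT][ET_COUNT] = {
  /* second:                 benign  contributory  page fault */
  /* first benign       */ {   1,        1,            1 },
  /* first contributory */ {   1,        0,            1 },
  /* first page fault   */ {   1,        0,            0 },
};

int main(void)
{
  printf("#GP while handling #GP -> %s\n",
         second_ok[ET_CONTRIBUTORY][ET_CONTRIBUTORY] ? "handled serially" : "double fault");
  printf("#PF while handling #PF -> %s\n",
         second_ok[ET_PAGE_FAULT][ET_PAGE_FAULT] ? "handled serially" : "double fault");
  return 0;
}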
Example #18
0
void bx_pcidev_c::init(void)
{
  // called once when bochs initializes
  BX_PCIDEV_THIS pcidev_fd = -1;
  int fd;
  fd = open("/dev/pcidev", O_RDWR);
  if (fd == -1) {
    switch(errno) {
      case ENODEV:
        BX_PANIC(("The pcidev kernel module is not loaded!"));
        break;
      default:
        BX_PANIC(("open /dev/pcidev: %s", strerror(errno)));
        break;
    }
    return;
  }
  BX_PCIDEV_THIS pcidev_fd = fd;
  struct pcidev_find_struct find;
  unsigned short vendor = SIM->get_param_num(BXPN_PCIDEV_VENDOR)->get();
  unsigned short device = SIM->get_param_num(BXPN_PCIDEV_DEVICE)->get();
  find.deviceID = device;
  find.vendorID = vendor;
  if (ioctl(fd, PCIDEV_IOCTL_FIND, &find) == -1) {
    switch (errno) {
    case ENOENT:
      BX_PANIC(("PCI device not found on host system."));
      break;
    case EBUSY:
      BX_PANIC(("PCI device already used by another kernel module."));
      break;
    default:
      perror("ioctl");
      break;
    }
    close(fd);
    BX_PCIDEV_THIS pcidev_fd = -1;
    return;
  }
  BX_INFO(("vendor: %04x; device: %04x @ host %04x:%04x.%d", vendor, device,
    (unsigned)find.bus, (unsigned)find.device, (unsigned)find.func));

  BX_PCIDEV_THIS devfunc = 0x00;
  DEV_register_pci_handlers(this, &BX_PCIDEV_THIS devfunc, BX_PLUGIN_PCIDEV,
                            pcidev_name);

  BX_PCIDEV_THIS irq = 0;
  struct pcidev_io_struct io;
  io.address = 0x3d;
  int ret = ioctl(fd, PCIDEV_IOCTL_READ_CONFIG_BYTE, &io);
  if (ret != -1) {
    BX_PCIDEV_THIS intpin = io.value;
  } else {
    BX_PCIDEV_THIS intpin = 0;
  }

  for (int idx = 0; idx < PCIDEV_COUNT_RESOURCES; idx++) {
    BX_PCIDEV_THIS regions[idx].start = 0; // emulated device not yet initialized
    if (!find.resources[idx].start)
      continue;
    BX_INFO(("PCI resource @ %x-%x (%s)", (unsigned)find.resources[idx].start,
             (unsigned)find.resources[idx].end,
             (find.resources[idx].flags & PCIDEV_RESOURCE_IO ? "I/O" : "Mem")));
    BX_PCIDEV_THIS regions[idx].size = find.resources[idx].end - find.resources[idx].start + 1;
    BX_PCIDEV_THIS regions[idx].host_start = find.resources[idx].start;
    struct pcidev_io_struct io;
    io.address = PCI_BASE_ADDRESS_0 + idx * 4;
    if (ioctl(fd, PCIDEV_IOCTL_READ_CONFIG_DWORD, &io) == -1)
      BX_ERROR(("Error reading a base address config reg"));
    BX_PCIDEV_THIS regions[idx].config_value = io.value;
    /*
     * We will use &region[idx] as parameter for our I/O or memory
     * handler. So we provide a pcidev pointer to the pcidev object
     * in order for the handler to be able to use its pcidev object
     */
    BX_PCIDEV_THIS regions[idx].pcidev = this;
  }

  struct sigaction sa;
  sa.sa_handler = pcidev_sighandler;
  sigemptyset(&sa.sa_mask);
  sa.sa_flags = 0;
  sigaction(SIGUSR1, &sa, NULL);

  /*
   * The kernel pcidev will fire SIGUSR1 signals when it receives
   * interrupts from the host PCI device.
   */
  ioctl(fd, PCIDEV_IOCTL_INTERRUPT, 1);
}
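Config-space accesses to the host device go through the same /dev/pcidev ioctl interface used above. A minimal standalone sketch of reading the interrupt-pin byte (offset 0x3d); the "pcidev.h" header name is an assumption, while the ioctl number and struct fields are the ones used in the code above.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include "pcidev.h"   /* assumed header exporting PCIDEV_IOCTL_* and struct pcidev_io_struct */

int main(void)
{
  int fd = open("/dev/pcidev", O_RDWR);
  if (fd < 0)
    return 1;

  struct pcidev_io_struct io;
  io.address = 0x3d;   /* PCI interrupt-pin register, as read in init() above */
  if (ioctl(fd, PCIDEV_IOCTL_READ_CONFIG_BYTE, &io) != -1)
    printf("interrupt pin: %ld\n", (long)io.value);

  close(fd);
  return 0;
}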
Example #19
0
void BX_CPU_C::protected_mode_int(Bit8u vector, unsigned soft_int, bx_bool push_error, Bit16u error_code)
{
  bx_descriptor_t gate_descriptor, cs_descriptor;
  bx_selector_t cs_selector;

  Bit16u raw_tss_selector;
  bx_selector_t   tss_selector;
  bx_descriptor_t tss_descriptor;

  Bit16u gate_dest_selector;
  Bit32u gate_dest_offset;

  // interrupt vector must be within IDT table limits,
  // else #GP(vector*8 + 2 + EXT)
  if ((vector*8 + 7) > BX_CPU_THIS_PTR idtr.limit) {
    BX_ERROR(("interrupt(): vector must be within IDT table limits, IDT.limit = 0x%x", BX_CPU_THIS_PTR idtr.limit));
    exception(BX_GP_EXCEPTION, vector*8 + 2);
  }

  Bit64u desctmp = system_read_qword(BX_CPU_THIS_PTR idtr.base + vector*8);

  Bit32u dword1 = GET32L(desctmp);
  Bit32u dword2 = GET32H(desctmp);

  parse_descriptor(dword1, dword2, &gate_descriptor);

  if ((gate_descriptor.valid==0) || gate_descriptor.segment) {
    BX_ERROR(("interrupt(): gate descriptor is not valid sys seg (vector=0x%02x)", vector));
    exception(BX_GP_EXCEPTION, vector*8 + 2);
  }

  // descriptor AR byte must indicate interrupt gate, trap gate,
  // or task gate, else #GP(vector*8 + 2 + EXT)
  switch (gate_descriptor.type) {
  case BX_TASK_GATE:
  case BX_286_INTERRUPT_GATE:
  case BX_286_TRAP_GATE:
  case BX_386_INTERRUPT_GATE:
  case BX_386_TRAP_GATE:
    break;
  default:
    BX_ERROR(("interrupt(): gate.type(%u) != {5,6,7,14,15}",
      (unsigned) gate_descriptor.type));
    exception(BX_GP_EXCEPTION, vector*8 + 2);
  }

  // if software interrupt, then gate descriptor DPL must be >= CPL,
  // else #GP(vector * 8 + 2 + EXT)
  if (soft_int && gate_descriptor.dpl < CPL) {
    BX_ERROR(("interrupt(): soft_int && (gate.dpl < CPL)"));
    exception(BX_GP_EXCEPTION, vector*8 + 2);
  }

  // Gate must be present, else #NP(vector * 8 + 2 + EXT)
  if (! IS_PRESENT(gate_descriptor)) {
    BX_ERROR(("interrupt(): gate not present"));
    exception(BX_NP_EXCEPTION, vector*8 + 2);
  }

  switch (gate_descriptor.type) {
  case BX_TASK_GATE:
    // examine selector to TSS, given in task gate descriptor
    raw_tss_selector = gate_descriptor.u.taskgate.tss_selector;
    parse_selector(raw_tss_selector, &tss_selector);

    // must specify global in the local/global bit,
    //      else #GP(TSS selector)
    if (tss_selector.ti) {
      BX_ERROR(("interrupt(): tss_selector.ti=1 from gate descriptor - #GP(tss_selector)"));
      exception(BX_GP_EXCEPTION, raw_tss_selector & 0xfffc);
    }

    // index must be within GDT limits, else #TS(TSS selector)
    fetch_raw_descriptor(&tss_selector, &dword1, &dword2, BX_GP_EXCEPTION);

    parse_descriptor(dword1, dword2, &tss_descriptor);

    // AR byte must specify available TSS,
    //   else #GP(TSS selector)
    if (tss_descriptor.valid==0 || tss_descriptor.segment) {
      BX_ERROR(("interrupt(): TSS selector points to invalid or bad TSS - #GP(tss_selector)"));
      exception(BX_GP_EXCEPTION, raw_tss_selector & 0xfffc);
    }

    if (tss_descriptor.type!=BX_SYS_SEGMENT_AVAIL_286_TSS &&
        tss_descriptor.type!=BX_SYS_SEGMENT_AVAIL_386_TSS)
    {
      BX_ERROR(("interrupt(): TSS selector points to bad TSS - #GP(tss_selector)"));
      exception(BX_GP_EXCEPTION, raw_tss_selector & 0xfffc);
    }

    // TSS must be present, else #NP(TSS selector)
    if (! IS_PRESENT(tss_descriptor)) {
      BX_ERROR(("interrupt(): TSS descriptor.p == 0"));
      exception(BX_NP_EXCEPTION, raw_tss_selector & 0xfffc);
    }

    // switch tasks with nesting to TSS
    task_switch(0, &tss_selector, &tss_descriptor,
                    BX_TASK_FROM_INT, dword1, dword2);

    RSP_SPECULATIVE;

    // if interrupt was caused by fault with error code
    //   stack limits must allow push of 2 more bytes, else #SS(0)
    // push error code onto stack

    if (push_error) {
      if (tss_descriptor.type >= 9) // TSS386
        push_32(error_code);
      else
        push_16(error_code);
    }

    // instruction pointer must be in CS limit, else #GP(0)
    if (EIP > BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled) {
      BX_ERROR(("interrupt(): EIP > CS.limit"));
      exception(BX_GP_EXCEPTION, 0);
    }

    RSP_COMMIT;

    return;

  case BX_286_INTERRUPT_GATE:
  case BX_286_TRAP_GATE:
  case BX_386_INTERRUPT_GATE:
  case BX_386_TRAP_GATE:
    gate_dest_selector = gate_descriptor.u.gate.dest_selector;
    gate_dest_offset   = gate_descriptor.u.gate.dest_offset;

    // examine CS selector and descriptor given in gate descriptor
    // selector must be non-null else #GP(EXT)
    if ((gate_dest_selector & 0xfffc) == 0) {
      BX_ERROR(("int_trap_gate(): selector null"));
      exception(BX_GP_EXCEPTION, 0);
    }

    parse_selector(gate_dest_selector, &cs_selector);

    // selector must be within its descriptor table limits
    // else #GP(selector+EXT)
    fetch_raw_descriptor(&cs_selector, &dword1, &dword2, BX_GP_EXCEPTION);
    parse_descriptor(dword1, dword2, &cs_descriptor);

    // descriptor AR byte must indicate code seg
    // and code segment descriptor DPL<=CPL, else #GP(selector+EXT)
    if (cs_descriptor.valid==0 || cs_descriptor.segment==0 ||
        IS_DATA_SEGMENT(cs_descriptor.type) ||
        cs_descriptor.dpl > CPL)
    {
      BX_ERROR(("interrupt(): not accessible or not code segment cs=0x%04x", cs_selector.value));
      exception(BX_GP_EXCEPTION, cs_selector.value & 0xfffc);
    }

    // segment must be present, else #NP(selector + EXT)
    if (! IS_PRESENT(cs_descriptor)) {
      BX_ERROR(("interrupt(): segment not present"));
      exception(BX_NP_EXCEPTION, cs_selector.value & 0xfffc);
    }

    // if code segment is non-conforming and DPL < CPL then
    // INTERRUPT TO INNER PRIVILEGE
    if(IS_CODE_SEGMENT_NON_CONFORMING(cs_descriptor.type) && cs_descriptor.dpl < CPL)
    {
      Bit16u old_SS, old_CS, SS_for_cpl_x;
      Bit32u ESP_for_cpl_x, old_EIP, old_ESP;
      bx_descriptor_t ss_descriptor;
      bx_selector_t   ss_selector;
      int is_v8086_mode = v8086_mode();

      BX_DEBUG(("interrupt(): INTERRUPT TO INNER PRIVILEGE"));

      // check selector and descriptor for new stack in current TSS
      get_SS_ESP_from_TSS(cs_descriptor.dpl,
                              &SS_for_cpl_x, &ESP_for_cpl_x);

      if (is_v8086_mode && cs_descriptor.dpl != 0) {
        // if code segment DPL != 0 then #GP(new code segment selector)
        BX_ERROR(("interrupt(): code segment DPL(%d) != 0 in v8086 mode", cs_descriptor.dpl));
        exception(BX_GP_EXCEPTION, cs_selector.value & 0xfffc);
      }

      // Selector must be non-null else #TS(EXT)
      if ((SS_for_cpl_x & 0xfffc) == 0) {
        BX_ERROR(("interrupt(): SS selector null"));
        exception(BX_TS_EXCEPTION, 0); /* TS(ext) */
      }

      // selector index must be within its descriptor table limits
      // else #TS(SS selector + EXT)
      parse_selector(SS_for_cpl_x, &ss_selector);
      // fetch 2 dwords of descriptor; call handles out of limits checks
      fetch_raw_descriptor(&ss_selector, &dword1, &dword2, BX_TS_EXCEPTION);
      parse_descriptor(dword1, dword2, &ss_descriptor);

      // selector rpl must = dpl of code segment,
      // else #TS(SS selector + ext)
      if (ss_selector.rpl != cs_descriptor.dpl) {
        BX_ERROR(("interrupt(): SS.rpl != CS.dpl"));
        exception(BX_TS_EXCEPTION, SS_for_cpl_x & 0xfffc);
      }

      // stack seg DPL must = DPL of code segment,
      // else #TS(SS selector + ext)
      if (ss_descriptor.dpl != cs_descriptor.dpl) {
        BX_ERROR(("interrupt(): SS.dpl != CS.dpl"));
        exception(BX_TS_EXCEPTION, SS_for_cpl_x & 0xfffc);
      }

      // descriptor must indicate writable data segment,
      // else #TS(SS selector + EXT)
      if (ss_descriptor.valid==0 || ss_descriptor.segment==0 ||
           IS_CODE_SEGMENT(ss_descriptor.type) ||
          !IS_DATA_SEGMENT_WRITEABLE(ss_descriptor.type))
      {
        BX_ERROR(("interrupt(): SS is not writable data segment"));
        exception(BX_TS_EXCEPTION, SS_for_cpl_x & 0xfffc);
      }

      // seg must be present, else #SS(SS selector + ext)
      if (! IS_PRESENT(ss_descriptor)) {
        BX_ERROR(("interrupt(): SS not present"));
        exception(BX_SS_EXCEPTION, SS_for_cpl_x & 0xfffc);
      }

      // IP must be within CS segment boundaries, else #GP(0)
      if (gate_dest_offset > cs_descriptor.u.segment.limit_scaled) {
        BX_ERROR(("interrupt(): gate EIP > CS.limit"));
        exception(BX_GP_EXCEPTION, 0);
      }

      old_ESP = ESP;
      old_SS  = BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].selector.value;
      old_EIP = EIP;
      old_CS  = BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value;

      // Prepare new stack segment
      bx_segment_reg_t new_stack;
      new_stack.selector = ss_selector;
      new_stack.cache = ss_descriptor;
      new_stack.selector.rpl = cs_descriptor.dpl;
      // add cpl to the selector value
      new_stack.selector.value = (0xfffc & new_stack.selector.value) |
        new_stack.selector.rpl;

      if (ss_descriptor.u.segment.d_b) {
        Bit32u temp_ESP = ESP_for_cpl_x;

        if (is_v8086_mode)
        {
          if (gate_descriptor.type>=14) { // 386 int/trap gate
            write_new_stack_dword_32(&new_stack, temp_ESP-4,  cs_descriptor.dpl,
                BX_CPU_THIS_PTR sregs[BX_SEG_REG_GS].selector.value);
            write_new_stack_dword_32(&new_stack, temp_ESP-8,  cs_descriptor.dpl,
                BX_CPU_THIS_PTR sregs[BX_SEG_REG_FS].selector.value);
            write_new_stack_dword_32(&new_stack, temp_ESP-12, cs_descriptor.dpl,
                BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].selector.value);
            write_new_stack_dword_32(&new_stack, temp_ESP-16, cs_descriptor.dpl,
                BX_CPU_THIS_PTR sregs[BX_SEG_REG_ES].selector.value);
            temp_ESP -= 16;
          }
          else {
            write_new_stack_word_32(&new_stack, temp_ESP-2, cs_descriptor.dpl,
                BX_CPU_THIS_PTR sregs[BX_SEG_REG_GS].selector.value);
            write_new_stack_word_32(&new_stack, temp_ESP-4, cs_descriptor.dpl,
                BX_CPU_THIS_PTR sregs[BX_SEG_REG_FS].selector.value);
            write_new_stack_word_32(&new_stack, temp_ESP-6, cs_descriptor.dpl,
                BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].selector.value);
            write_new_stack_word_32(&new_stack, temp_ESP-8, cs_descriptor.dpl,
                BX_CPU_THIS_PTR sregs[BX_SEG_REG_ES].selector.value);
            temp_ESP -= 8;
          }
        }

        if (gate_descriptor.type>=14) { // 386 int/trap gate
          // push long pointer to old stack onto new stack
          write_new_stack_dword_32(&new_stack, temp_ESP-4,  cs_descriptor.dpl, old_SS);
          write_new_stack_dword_32(&new_stack, temp_ESP-8,  cs_descriptor.dpl, old_ESP);
          write_new_stack_dword_32(&new_stack, temp_ESP-12, cs_descriptor.dpl, read_eflags());
          write_new_stack_dword_32(&new_stack, temp_ESP-16, cs_descriptor.dpl, old_CS);
          write_new_stack_dword_32(&new_stack, temp_ESP-20, cs_descriptor.dpl, old_EIP);
          temp_ESP -= 20;

          if (push_error) {
            temp_ESP -= 4;
            write_new_stack_dword_32(&new_stack, temp_ESP, cs_descriptor.dpl, error_code);
          }
        }
        else {                          // 286 int/trap gate
          // push long pointer to old stack onto new stack
          write_new_stack_word_32(&new_stack, temp_ESP-2,  cs_descriptor.dpl, old_SS);
          write_new_stack_word_32(&new_stack, temp_ESP-4,  cs_descriptor.dpl, (Bit16u) old_ESP);
          write_new_stack_word_32(&new_stack, temp_ESP-6,  cs_descriptor.dpl, (Bit16u) read_eflags());
          write_new_stack_word_32(&new_stack, temp_ESP-8,  cs_descriptor.dpl, old_CS);
          write_new_stack_word_32(&new_stack, temp_ESP-10, cs_descriptor.dpl, (Bit16u) old_EIP);
          temp_ESP -= 10;

          if (push_error) {
            temp_ESP -= 2;
            write_new_stack_word_32(&new_stack, temp_ESP, cs_descriptor.dpl, error_code);
          }
        }

        ESP = temp_ESP;
      }
      else {
        Bit16u temp_SP = (Bit16u) ESP_for_cpl_x;

        if (is_v8086_mode)
        {
          if (gate_descriptor.type>=14) { // 386 int/trap gate
            write_new_stack_dword_32(&new_stack, (Bit16u)(temp_SP-4),  cs_descriptor.dpl,
                BX_CPU_THIS_PTR sregs[BX_SEG_REG_GS].selector.value);
            write_new_stack_dword_32(&new_stack, (Bit16u)(temp_SP-8),  cs_descriptor.dpl,
                BX_CPU_THIS_PTR sregs[BX_SEG_REG_FS].selector.value);
            write_new_stack_dword_32(&new_stack, (Bit16u)(temp_SP-12), cs_descriptor.dpl,
                BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].selector.value);
            write_new_stack_dword_32(&new_stack, (Bit16u)(temp_SP-16), cs_descriptor.dpl,
                BX_CPU_THIS_PTR sregs[BX_SEG_REG_ES].selector.value);
            temp_SP -= 16;
          }
          else {
            write_new_stack_word_32(&new_stack, (Bit16u)(temp_SP-2), cs_descriptor.dpl,
                BX_CPU_THIS_PTR sregs[BX_SEG_REG_GS].selector.value);
            write_new_stack_word_32(&new_stack, (Bit16u)(temp_SP-4), cs_descriptor.dpl,
                BX_CPU_THIS_PTR sregs[BX_SEG_REG_FS].selector.value);
            write_new_stack_word_32(&new_stack, (Bit16u)(temp_SP-6), cs_descriptor.dpl,
                BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].selector.value);
            write_new_stack_word_32(&new_stack, (Bit16u)(temp_SP-8), cs_descriptor.dpl,
                BX_CPU_THIS_PTR sregs[BX_SEG_REG_ES].selector.value);
            temp_SP -= 8;
          }
        }

        if (gate_descriptor.type>=14) { // 386 int/trap gate
          // push long pointer to old stack onto new stack
          write_new_stack_dword_32(&new_stack, (Bit16u)(temp_SP-4),  cs_descriptor.dpl, old_SS);
          write_new_stack_dword_32(&new_stack, (Bit16u)(temp_SP-8),  cs_descriptor.dpl, old_ESP);
          write_new_stack_dword_32(&new_stack, (Bit16u)(temp_SP-12), cs_descriptor.dpl, read_eflags());
          write_new_stack_dword_32(&new_stack, (Bit16u)(temp_SP-16), cs_descriptor.dpl, old_CS);
          write_new_stack_dword_32(&new_stack, (Bit16u)(temp_SP-20), cs_descriptor.dpl, old_EIP);
          temp_SP -= 20;

          if (push_error) {
            temp_SP -= 4;
            write_new_stack_dword_32(&new_stack, temp_SP, cs_descriptor.dpl, error_code);
          }
        }
        else {                          // 286 int/trap gate
          // push long pointer to old stack onto new stack
          write_new_stack_word_32(&new_stack, (Bit16u)(temp_SP-2),  cs_descriptor.dpl, old_SS);
          write_new_stack_word_32(&new_stack, (Bit16u)(temp_SP-4),  cs_descriptor.dpl, (Bit16u) old_ESP);
          write_new_stack_word_32(&new_stack, (Bit16u)(temp_SP-6),  cs_descriptor.dpl, (Bit16u) read_eflags());
          write_new_stack_word_32(&new_stack, (Bit16u)(temp_SP-8),  cs_descriptor.dpl, old_CS);
          write_new_stack_word_32(&new_stack, (Bit16u)(temp_SP-10), cs_descriptor.dpl, (Bit16u) old_EIP);
          temp_SP -= 10;

          if (push_error) {
            temp_SP -= 2;
            write_new_stack_word_32(&new_stack, temp_SP, cs_descriptor.dpl, error_code);
          }
        }

        SP = temp_SP;
      }

      // load new CS:eIP values from gate
      // set CPL to new code segment DPL
      // set RPL of CS to CPL
      load_cs(&cs_selector, &cs_descriptor, cs_descriptor.dpl);

      // load new SS:eSP values from TSS
      load_ss(&ss_selector, &ss_descriptor, cs_descriptor.dpl);

      if (is_v8086_mode)
      {
        BX_CPU_THIS_PTR sregs[BX_SEG_REG_GS].cache.valid = 0;
        BX_CPU_THIS_PTR sregs[BX_SEG_REG_GS].selector.value = 0;
        BX_CPU_THIS_PTR sregs[BX_SEG_REG_FS].cache.valid = 0;
        BX_CPU_THIS_PTR sregs[BX_SEG_REG_FS].selector.value = 0;
        BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].cache.valid = 0;
        BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].selector.value = 0;
        BX_CPU_THIS_PTR sregs[BX_SEG_REG_ES].cache.valid = 0;
        BX_CPU_THIS_PTR sregs[BX_SEG_REG_ES].selector.value = 0;
      }
    }
    else
    {
      BX_DEBUG(("interrupt(): INTERRUPT TO SAME PRIVILEGE"));

      if (v8086_mode() && (IS_CODE_SEGMENT_CONFORMING(cs_descriptor.type) || cs_descriptor.dpl != 0)) {
        // if code segment DPL != 0 then #GP(new code segment selector)
        BX_ERROR(("interrupt(): code segment conforming or DPL(%d) != 0 in v8086 mode", cs_descriptor.dpl));
        exception(BX_GP_EXCEPTION, cs_selector.value & 0xfffc);
      }

      // EIP must be in CS limit else #GP(0)
      if (gate_dest_offset > cs_descriptor.u.segment.limit_scaled) {
        BX_ERROR(("interrupt(): IP > CS descriptor limit"));
        exception(BX_GP_EXCEPTION, 0);
      }

      // push flags onto stack
      // push current CS selector onto stack
      // push return offset onto stack
      if (gate_descriptor.type >= 14) { // 386 gate
        push_32(read_eflags());
        push_32(BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value);
        push_32(EIP);
        if (push_error)
          push_32(error_code);
      }
      else { // 286 gate
        push_16((Bit16u) read_eflags());
        push_16(BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value);
        push_16(IP);
        if (push_error)
          push_16(error_code);
      }

      // load CS:IP from gate
      // load CS descriptor
      // set the RPL field of CS to CPL
      load_cs(&cs_selector, &cs_descriptor, CPL);
    }

    EIP = gate_dest_offset;

    // if interrupt gate then set IF to 0
    if (!(gate_descriptor.type & 1)) // even is int-gate
      BX_CPU_THIS_PTR clear_IF();
    BX_CPU_THIS_PTR clear_TF();
    BX_CPU_THIS_PTR clear_NT();
    BX_CPU_THIS_PTR clear_VM();
    BX_CPU_THIS_PTR clear_RF();
    return;

  default:
    BX_PANIC(("bad descriptor type in interrupt()!"));
    break;
  }
}
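For reference, a sketch of the frame the inner-privilege path above builds on the new stack for a 386 interrupt/trap gate; the struct and field names are purely illustrative.

#include <stdint.h>

/* Illustrative only: values pushed by the inner-privilege 386 gate path,
 * listed from highest address (pushed first) to lowest (new ESP).  When the
 * interrupted context was v8086, GS/FS/DS/ES are pushed before this frame;
 * an error code, when present, is pushed after old_eip. */
struct int_gate_frame32 {
  uint32_t old_ss;
  uint32_t old_esp;
  uint32_t eflags;
  uint32_t old_cs;
  uint32_t old_eip;   /* new ESP points here (or at the error code below it) */
};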
Example #20
0
void
bx_ne2k_c::write_cr(Bit32u value)
{
  BX_DEBUG ("wrote 0x%02x to CR", value);

  // Validate remote-DMA
  if ((value & 0x38) == 0x00) {
    BX_DEBUG("CR write - invalid rDMA value 0");
    value |= 0x20; /* dma_cmd == 4 is a safe default */
    //value = 0x22; /* dma_cmd == 4 is a safe default */
  }

  // Check for s/w reset
  if (value & 0x01) {
    BX_NE2K_THIS s.ISR.reset = 1;
    BX_NE2K_THIS s.CR.stop   = 1;
  } else {
    BX_NE2K_THIS s.CR.stop = 0;
  }

  BX_NE2K_THIS s.CR.rdma_cmd = (value & 0x38) >> 3;
  
  // If start command issued, the RST bit in the ISR
  // must be cleared
  if ((value & 0x02) && !BX_NE2K_THIS s.CR.start) {
    BX_NE2K_THIS s.ISR.reset = 0;
  }

  BX_NE2K_THIS s.CR.start = ((value & 0x02) == 0x02);
  BX_NE2K_THIS s.CR.pgsel = (value & 0xc0) >> 6;

  // Check for send-packet command
  if (BX_NE2K_THIS s.CR.rdma_cmd == 3) {
    // Set up DMA read from receive ring
    BX_NE2K_THIS s.remote_start = BX_NE2K_THIS s.remote_dma =
        BX_NE2K_THIS s.bound_ptr * 256;
    BX_NE2K_THIS s.remote_bytes = *((Bit16u*) &
        BX_NE2K_THIS s.mem[BX_NE2K_THIS s.bound_ptr * 256 + 2 - BX_NE2K_MEMSTART]);
    BX_INFO("Sending buffer 0x%x length %d",
            BX_NE2K_THIS s.remote_start,
            BX_NE2K_THIS s.remote_bytes);
  }

  // Check for start-tx
  if ((value & 0x04) && BX_NE2K_THIS s.TCR.loop_cntl) {
    // loopback mode
    if (BX_NE2K_THIS s.TCR.loop_cntl != 1) {
      BX_INFO("Loop mode %d not supported.", BX_NE2K_THIS s.TCR.loop_cntl);
    } else {
      rx_frame(&BX_NE2K_THIS s.mem[BX_NE2K_THIS s.tx_page_start*256 - BX_NE2K_MEMSTART],
               BX_NE2K_THIS s.tx_bytes);

      // do a TX interrupt
      // Generate an interrupt if not masked and not one in progress
      if (BX_NE2K_THIS s.IMR.tx_inte && !BX_NE2K_THIS s.ISR.pkt_tx) {
        //LOG_MSG("tx complete interrupt");
        PIC_ActivateIRQ(s.base_irq);
      }
      BX_NE2K_THIS s.ISR.pkt_tx = 1;
    }
  } else if (value & 0x04) {
    // start-tx and no loopback
    if (BX_NE2K_THIS s.CR.stop || !BX_NE2K_THIS s.CR.start)
      BX_PANIC(("CR write - tx start, dev in reset"));

    if (BX_NE2K_THIS s.tx_bytes == 0)
      BX_PANIC(("CR write - tx start, tx bytes == 0"));

#ifdef notdef
    // XXX debug stuff
    printf("packet tx (%d bytes):\t", BX_NE2K_THIS s.tx_bytes);
    for (int i = 0; i < BX_NE2K_THIS s.tx_bytes; i++) {
      printf("%02x ", BX_NE2K_THIS s.mem[BX_NE2K_THIS s.tx_page_start*256 -
                                         BX_NE2K_MEMSTART + i]);
      if (i && (((i+1) % 16) == 0))
        printf("\t");
    }
    printf("\n");
#endif

    // Send the packet to the system driver
    /* TODO: Transmit packet */
    //BX_NE2K_THIS ethdev->sendpkt(& BX_NE2K_THIS s.mem[BX_NE2K_THIS s.tx_page_start*256 - BX_NE2K_MEMSTART], BX_NE2K_THIS s.tx_bytes);
    NE2000_PrivelegeEscalate();
    pcap_sendpacket(adhandle, &s.mem[s.tx_page_start*256 - BX_NE2K_MEMSTART], s.tx_bytes);
    NE2000_PrivelegeDrop();

    // A transmit must not already be in flight at this point
    if (BX_NE2K_THIS s.tx_timer_active) {
      BX_PANIC(("CR write, tx timer still active"));
      PIC_RemoveEvents(NE2000_TX_Event);
    }
    //LOG_MSG("send packet command");
    //s.tx_timer_index = (64 + 96 + 4*8 + BX_NE2K_THIS s.tx_bytes*8)/10;
    s.tx_timer_active = 1;
    PIC_AddEvent(NE2000_TX_Event, (float)((64 + 96 + 4*8 + BX_NE2K_THIS s.tx_bytes*8)/10000.0), 0);
    // Schedule a timer to trigger a tx-complete interrupt.
    // The number of microseconds is the bit-time / 10.
    // The bit-time is the preamble+sfd (64 bits), the
    // inter-frame gap (96 bits), the CRC (4 bytes), and
    // the number of bits in the frame (s.tx_bytes * 8).

    /* TODO: Code transmit timer */
    /*
    bx_pc_system.activate_timer(BX_NE2K_THIS s.tx_timer_index,
                                (64 + 96 + 4*8 + BX_NE2K_THIS s.tx_bytes*8)/10,
                                0); // not continuous
    */
  } // end transmit-start branch

  // Linux probes for an interrupt by setting up a remote-DMA read
  // of 0 bytes with remote-DMA completion interrupts enabled.
  // Detect this here
  if (BX_NE2K_THIS s.CR.rdma_cmd == 0x01 &&
      BX_NE2K_THIS s.CR.start &&
      BX_NE2K_THIS s.remote_bytes == 0) {
    BX_NE2K_THIS s.ISR.rdma_done = 1;
    if (BX_NE2K_THIS s.IMR.rdma_inte) {
		PIC_ActivateIRQ(s.base_irq);
      //DEV_pic_raise_irq(BX_NE2K_THIS s.base_irq);
    }
  }
}
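The delay passed to PIC_AddEvent above follows the bit-time formula spelled out in the comments: preamble+SFD (64 bits), inter-frame gap (96 bits), CRC (32 bits) and the frame payload, divided by 10 to get microseconds at 10 Mbit/s. A tiny standalone sketch with an assumed minimum-size frame:

#include <stdio.h>

int main(void)
{
  unsigned tx_bytes = 64;   /* illustrative: minimum Ethernet frame */
  unsigned bit_time = 64 + 96 + 4*8 + tx_bytes*8;
  printf("tx-complete interrupt after ~%u us\n", bit_time / 10);   /* 704/10 -> 70 us */
  return 0;
}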
Example #21
0
// vector:     0..255: vector in IDT
// error_code: if the exception generates an error, push this error code
// trap:       override exception class to TRAP
void BX_CPU_C::exception(unsigned vector, Bit16u error_code)
{
  BX_INSTR_EXCEPTION(BX_CPU_ID, vector, error_code);

#if BX_DEBUGGER
  bx_dbg_exception(BX_CPU_ID, vector, error_code);
#endif

  BX_DEBUG(("exception(0x%02x): error_code=%04x", vector, error_code));

  unsigned exception_type = 0;
  unsigned exception_class = BX_EXCEPTION_CLASS_FAULT;
  bx_bool push_error = 0;

  if (vector < BX_CPU_HANDLED_EXCEPTIONS) {
     push_error = exceptions_info[vector].push_error;
     exception_class = exceptions_info[vector].exception_class;
     exception_type = exceptions_info[vector].exception_type;
  }
  else {
     BX_PANIC(("exception(%u): bad vector", vector));
  }

  if (vector != BX_PF_EXCEPTION && vector != BX_DF_EXCEPTION) {
    // Page faults have different format
    error_code = (error_code & 0xfffe) | BX_CPU_THIS_PTR EXT;
  }

#if BX_SUPPORT_VMX
  VMexit_Event(0, BX_HARDWARE_EXCEPTION, vector, error_code, push_error);
#endif

  if (BX_CPU_THIS_PTR errorno > 0) {
    if (BX_CPU_THIS_PTR errorno > 2 || BX_CPU_THIS_PTR curr_exception == BX_ET_DOUBLE_FAULT) {
      // restore RIP/RSP to value before error occurred
      RIP = BX_CPU_THIS_PTR prev_rip;
      if (BX_CPU_THIS_PTR speculative_rsp)
        RSP = BX_CPU_THIS_PTR prev_rsp;

      debug(BX_CPU_THIS_PTR prev_rip); // print debug information to the log
#if BX_SUPPORT_VMX
      VMexit_TripleFault();
#endif
#if BX_DEBUGGER
      // trap into the debugger (similar to what is done when a PANIC occurs)
      bx_debug_break();
#endif
      if (SIM->get_param_bool(BXPN_RESET_ON_TRIPLE_FAULT)->get()) {
        BX_ERROR(("exception(): 3rd (%d) exception with no resolution, shutdown status is %02xh, resetting", vector, DEV_cmos_get_reg(0x0f)));
        bx_pc_system.Reset(BX_RESET_HARDWARE);
      }
      else {
        BX_PANIC(("exception(): 3rd (%d) exception with no resolution", vector));
        BX_ERROR(("WARNING: Any simulation after this point is completely bogus !"));
        shutdown();
      }
      longjmp(BX_CPU_THIS_PTR jmp_buf_env, 1); // go back to main decode loop
    }
  }

  // note: fault-class exceptions _except_ #DB set RF in
  //       eflags image.
  if (exception_class == BX_EXCEPTION_CLASS_FAULT)
  {
    // restore RIP/RSP to value before error occurred
    RIP = BX_CPU_THIS_PTR prev_rip;
    if (BX_CPU_THIS_PTR speculative_rsp)
      RSP = BX_CPU_THIS_PTR prev_rsp;

    if (vector != BX_DB_EXCEPTION) BX_CPU_THIS_PTR assert_RF();
  }

  if (vector == BX_DB_EXCEPTION) {
    // Commit debug events to DR6
#if BX_CPU_LEVEL <= 4
    // On 386/486 bit12 is settable
    BX_CPU_THIS_PTR dr6.val32 = (BX_CPU_THIS_PTR dr6.val32 & 0xffff0ff0) |
                          (BX_CPU_THIS_PTR debug_trap & 0x0000f00f);
#else
    // On Pentium+, bit12 is always zero
    BX_CPU_THIS_PTR dr6.val32 = (BX_CPU_THIS_PTR dr6.val32 & 0xffff0ff0) |
                          (BX_CPU_THIS_PTR debug_trap & 0x0000e00f);
#endif
    // clear GD flag in the DR7 prior entering debug exception handler
    BX_CPU_THIS_PTR dr7.set_GD(0);
  }

  BX_CPU_THIS_PTR EXT = 1;

  /* if we've already had 1st exception, see if 2nd causes a
   * Double Fault instead.  Otherwise, just record 1st exception
   */
  if (BX_CPU_THIS_PTR errorno > 0 && exception_type != BX_ET_DOUBLE_FAULT) {
    if (! is_exception_OK[BX_CPU_THIS_PTR curr_exception][exception_type]) {
      exception(BX_DF_EXCEPTION, 0);
    }
  }

  BX_CPU_THIS_PTR curr_exception = exception_type;
  BX_CPU_THIS_PTR errorno++;

  if (real_mode()) {
    push_error = 0; // not INT, no error code pushed
    error_code = 0;
  }

  interrupt(vector, BX_HARDWARE_EXCEPTION, push_error, error_code);
  BX_CPU_THIS_PTR errorno = 0; // error resolved
  longjmp(BX_CPU_THIS_PTR jmp_buf_env, 1); // go back to main decode loop
}
Example #22
0
/* Get CPU version information. */
Bit32u BX_CPU_C::get_cpu_version_information(void)
{
  Bit32u family = 0, model = 0, stepping = 0;
  Bit32u extended_model = 0;
  Bit32u extended_family = 0;

#if BX_CPU_LEVEL > 3

  /* ****** */
  /*  i486  */
  /* ****** */

#if BX_CPU_LEVEL == 4
  family = 4;

#if BX_SUPPORT_FPU
  model = 1;            // 486dx
  stepping = 3;
#else
  model = 2;            // 486sx
  stepping = 3;
#endif

  /* **************** */
  /*  i586 (Pentium)  */
  /* **************** */

#elif BX_CPU_LEVEL == 5	
  family   = 5;
#if BX_SUPPORT_MMX
  model    = 4;         // Pentium MMX
#else
  model    = 1;         // Pentium 60/66
#endif
  stepping = 3;

  /* ****** */
  /*  i686  */
  /* ****** */

#elif BX_CPU_LEVEL == 6

#if BX_SUPPORT_SSE >= 2 // Pentium 4 processor
/*
     The model, family, and processor type for the first
     processor in the Intel Pentium 4 family is as follows:
		* Model-0000B
		* Family-1111B
		* Processor Type-00B (OEM)
                * Stepping-0B
*/
  model    = 0;
  family   = 0xf;
  stepping = 0;

#if BX_SUPPORT_X86_64
  model    = 2;         // Hammer returns what?
#endif

#else	                // Pentium Pro/Pentium II/Pentium III processor
  family   = 6;
  model    = 8;
  stepping = 3;
#endif

#else
  BX_PANIC(("CPUID family ID not implemented for CPU LEVEL > 6"));
#endif

#endif  // BX_CPU_LEVEL > 3

  return (extended_family << 20) |
         (extended_model << 16) |
         (family << 8) |
         (model<<4) | stepping;
}
Example #23
0
  Boolean
bx_cpu_c::can_push(bx_descriptor_t *descriptor, Bit32u esp, Bit32u bytes)
{
  if ( real_mode() ) { /* code not needed ??? */
    BX_PANIC(("can_push(): called in real mode"));
    return(0); /* never gets here */
    }

  // small stack compares against 16-bit SP
  if (!descriptor->u.segment.d_b)
    esp &= 0x0000ffff;


  if (descriptor->valid==0) {
    BX_PANIC(("can_push(): SS invalidated."));
    return(0);
    }

  if (descriptor->p==0) {
    BX_PANIC(("can_push(): not present"));
    return(0);
    }


  if (descriptor->u.segment.c_ed) { /* expand down segment */
    Bit32u expand_down_limit;

    if (descriptor->u.segment.d_b)
      expand_down_limit = 0xffffffff;
    else
      expand_down_limit = 0x0000ffff;

    if (esp==0) {
      BX_PANIC(("can_push(): esp=0, wraparound?"));
      return(0);
      }

    if (esp < bytes) {
      BX_PANIC(("can_push(): expand-down: esp < N"));
      return(0);
      }
    if ( (esp - bytes) <= descriptor->u.segment.limit_scaled ) {
      BX_PANIC(("can_push(): expand-down: esp-N < limit"));
      return(0);
      }
    if ( esp > expand_down_limit ) {
      BX_PANIC(("can_push(): esp > expand-down-limit"));
      return(0);
      }
    return(1);
    }
  else { /* normal (expand-up) segment */
    if (descriptor->u.segment.limit_scaled==0) {
      BX_PANIC(("can_push(): found limit of 0"));
      return(0);
      }

    // Look at case where esp==0.  Possibly, it's an intentional wraparound
    // If so, limit must be the maximum for the given stack size
    if (esp==0) {
      if (descriptor->u.segment.d_b && (descriptor->u.segment.limit_scaled==0xffffffff))
        return(1);
      if ((descriptor->u.segment.d_b==0) && (descriptor->u.segment.limit_scaled>=0xffff))
        return(1);
      BX_PANIC(("can_push(): esp=0, normal, wraparound? limit=%08x",
        descriptor->u.segment.limit_scaled));
      return(0);
      }

    if (esp < bytes) {
      BX_INFO(("can_push(): expand-up: esp < N"));
      return(0);
      }
    if ((esp-1) > descriptor->u.segment.limit_scaled) {
      BX_INFO(("can_push(): expand-up: SP > limit"));
      return(0);
      }
    /* all checks pass */
    return(1);
    }
}
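The expand-up branch above boils down to two comparisons: the stack pointer must cover the push size and must stay inside the scaled limit. A minimal sketch with made-up numbers:

#include <stdio.h>

int main(void)
{
  unsigned esp = 0x1000, bytes = 4, limit_scaled = 0xffff;   /* illustrative values */
  int ok = (esp >= bytes) && ((esp - 1) <= limit_scaled);
  printf("push of %u bytes at esp=0x%x: %s\n", bytes, esp, ok ? "ok" : "fault");
  return 0;
}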
Example #24
0
void BX_CPP_AttrRegparmN(1) BX_CPU_C::PFPNACC_PqQq(bxInstruction_c *i)
{
  BX_PANIC(("PFPNACC_PqQq: 3DNow! instruction still not implemented"));
}
Example #25
0
/*
 * This function will panic if errors occur when attempting to open an image
 * file. Now if only I could use exceptions to handle the errors in an elegant
 * fashion...
 */
int vmware3_image_t::open(const char * pathname)
{
    COW_Header header;
    int file;
    int flags = O_RDWR;
#ifdef O_BINARY
    flags |= O_BINARY;
#endif

    // Set so close doesn't segfault, in case something goes wrong
    images = NULL;

    /* Open the virtual disk */
    file = ::open(pathname, flags);

    if(file < 0)
        return -1;

    /* Read the header */
    if(read_header(file, header) < 0)
        BX_PANIC(("unable to read vmware3 COW Disk header from file '%s'", pathname));

    /* Make sure it's a valid header */
    if(!is_valid_header(header))
        BX_PANIC(("invalid vmware3 COW Disk image"));

    ::close(file);

    tlb_size  = header.tlb_size_sectors * 512;
    slb_count = (1 << FL_SHIFT) / tlb_size;

    // we must have at least one chain
    unsigned count = header.number_of_chains;
    if (count < 1) count = 1;

    images = new COW_Image [count];

    off_t offset = 0;
    for (unsigned i = 0; i < count; ++i)
    {
        char * filename = generate_cow_name(pathname, i);
        current = &images[i];

        current->fd = ::open(filename, flags);
        if(current->fd < 0)
            BX_PANIC(("unable to open vmware3 COW Disk file '%s'", filename));

        if(read_header(current->fd, current->header) < 0)
            BX_PANIC(("unable to read header or invalid header in vmware3 COW Disk file '%s'", filename));

        if(!is_valid_header(current->header))
            BX_PANIC(("invalid vmware3 COW Disk file '%s'", filename));

        current->flb = new unsigned [current->header.flb_count];
        if(current->flb == 0)
            BX_PANIC(("cannot allocate %d bytes for flb in vmware3 COW Disk '%s'", current->header.flb_count * 4, filename));

        current->slb = new unsigned * [current->header.flb_count];
        if(current->slb == 0)
            BX_PANIC(("cannot allocate %d bytes for slb in vmware3 COW Disk '%s'", current->header.flb_count * 4, filename));

        unsigned j;
        for(j = 0; j < current->header.flb_count; ++j)
        {
            current->slb[j] = new unsigned [slb_count];
            if(current->slb[j] == 0)
                BX_PANIC(("cannot allocate %d bytes for slb[] in vmware3 COW Disk '%s'", slb_count * 4, filename));
        }

        current->tlb = new Bit8u [tlb_size];
        if(current->tlb == 0)
            BX_PANIC(("cannot allocate %d bytes for tlb in vmware3 COW Disk '%s'", tlb_size, filename));

        if(::lseek(current->fd, current->header.flb_offset_sectors * 512, SEEK_SET) < 0)
            BX_PANIC(("unable to seek vmware3 COW Disk file '%s'", filename));

        if(read_ints(current->fd, current->flb, current->header.flb_count) < 0)
            BX_PANIC(("unable to read flb from vmware3 COW Disk file '%s'", filename));

        for(j = 0; j < current->header.flb_count; ++j)
            if(current->flb[j] != 0)
            {
                if(::lseek(current->fd, current->flb[j] * 512, SEEK_SET) < 0)
                    BX_PANIC(("unable to seek vmware3 COW Disk file '%s'", filename));
                if(read_ints(current->fd, current->slb[j], slb_count) < 0)
                    BX_PANIC(("unable to read slb from vmware3 COW Disk file '%s'", filename));
            }

        current->min_offset = offset;
        offset += current->header.total_sectors * 512;
        current->max_offset = offset;

        current->offset = INVALID_OFFSET;
        current->synced = true;
        delete[] filename;
    }
    current = &images[0];
    requested_offset = 0;
    if (header.total_sectors_in_disk!=0) {
        cylinders = header.cylinders_in_disk;
        heads = header.heads_in_disk;
        spt = header.sectors_in_disk;
        hd_size = header.total_sectors_in_disk * 512;
    } else {
        cylinders = header.cylinders;
        heads = header.heads;
        spt = header.sectors;
        hd_size = header.total_sectors * 512;
    }

    return 1;
}
Example #26
0
void BX_CPU_C::call_protected(bxInstruction_c *i, Bit16u cs_raw, bx_address disp)
{
  bx_selector_t cs_selector;
  Bit32u dword1, dword2;
  bx_descriptor_t cs_descriptor;

  /* new cs selector must not be null, else #GP(0) */
  if ((cs_raw & 0xfffc) == 0) {
    BX_ERROR(("call_protected: CS selector null"));
    exception(BX_GP_EXCEPTION, 0, 0);
  }

  parse_selector(cs_raw, &cs_selector);
  // check new CS selector index within its descriptor limits,
  // else #GP(new CS selector)
  fetch_raw_descriptor(&cs_selector, &dword1, &dword2, BX_GP_EXCEPTION);
  parse_descriptor(dword1, dword2, &cs_descriptor);

  // examine AR byte of selected descriptor for various legal values
  if (cs_descriptor.valid==0) {
    BX_ERROR(("call_protected: invalid CS descriptor"));
    exception(BX_GP_EXCEPTION, cs_raw & 0xfffc, 0);
  }

  if (cs_descriptor.segment)   // normal segment
  {
    check_cs(&cs_descriptor, cs_raw, BX_SELECTOR_RPL(cs_raw), CPL);

#if BX_SUPPORT_X86_64
    if (i->os64L()) {
      // push return address onto stack (CS padded to 64bits)
      push_64((Bit64u) BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value);
      push_64(RIP);
    }
    else
#endif
    if (i->os32L()) {
      // push return address onto stack (CS padded to 32bits)
      push_32((Bit32u) BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value);
      push_32(EIP);
    }
    else {
      // push return address onto stack
      push_16(BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value);
      push_16(IP);
    }

    // load code segment descriptor into CS cache
    // load CS with new code segment selector
    // set RPL of CS to CPL
    branch_far64(&cs_selector, &cs_descriptor, disp, CPL);

    return;
  }
  else { // gate & special segment
    bx_descriptor_t  gate_descriptor = cs_descriptor;
    bx_selector_t    gate_selector = cs_selector;
    Bit32u new_EIP;
    Bit16u dest_selector;
    Bit16u          raw_tss_selector;
    bx_selector_t   tss_selector;
    bx_descriptor_t tss_descriptor;
    Bit32u temp_eIP;

    // descriptor DPL must be >= CPL else #GP(gate selector)
    if (gate_descriptor.dpl < CPL) {
      BX_ERROR(("call_protected: descriptor.dpl < CPL"));
      exception(BX_GP_EXCEPTION, cs_raw & 0xfffc, 0);
    }

    // descriptor DPL must be >= gate selector RPL else #GP(gate selector)
    if (gate_descriptor.dpl < gate_selector.rpl) {
      BX_ERROR(("call_protected: descriptor.dpl < selector.rpl"));
      exception(BX_GP_EXCEPTION, cs_raw & 0xfffc, 0);
    }

#if BX_SUPPORT_X86_64
    if (long_mode()) {
      // call gate type is higher priority than non-present bit check
      if (gate_descriptor.type != BX_386_CALL_GATE) {
        BX_ERROR(("call_protected: gate type %u unsupported in long mode", (unsigned) gate_descriptor.type));
        exception(BX_GP_EXCEPTION, cs_raw & 0xfffc, 0);
      }
    }
    else
#endif
    {
      switch (gate_descriptor.type) {
        case BX_SYS_SEGMENT_AVAIL_286_TSS:
        case BX_SYS_SEGMENT_AVAIL_386_TSS:
        case BX_TASK_GATE:
        case BX_286_CALL_GATE:
        case BX_386_CALL_GATE:
          break;
        default:
          BX_ERROR(("call_protected(): gate.type(%u) unsupported", (unsigned) gate_descriptor.type));
          exception(BX_GP_EXCEPTION, cs_raw & 0xfffc, 0);
      }
    }

    // gate descriptor must be present else #NP(gate selector)
    if (! IS_PRESENT(gate_descriptor)) {
      BX_ERROR(("call_protected: gate not present"));
      exception(BX_NP_EXCEPTION, cs_raw & 0xfffc, 0);
    }

#if BX_SUPPORT_X86_64
    if (long_mode()) {
      call_gate64(&gate_selector);
      return;
    }
#endif

    switch (gate_descriptor.type) {
      case BX_SYS_SEGMENT_AVAIL_286_TSS:
      case BX_SYS_SEGMENT_AVAIL_386_TSS:

        if (gate_descriptor.type==BX_SYS_SEGMENT_AVAIL_286_TSS)
          BX_DEBUG(("call_protected: 16bit available TSS"));
        else
          BX_DEBUG(("call_protected: 32bit available TSS"));

        // SWITCH_TASKS _without_ nesting to TSS
        task_switch(&gate_selector, &gate_descriptor,
          BX_TASK_FROM_CALL_OR_INT, dword1, dword2);

        // EIP must be in code seg limit, else #GP(0)
        if (EIP > BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled) {
          BX_ERROR(("call_protected: EIP not within CS limits"));
          exception(BX_GP_EXCEPTION, 0, 0);
        }
        return;

      case BX_TASK_GATE:
        // examine selector to TSS, given in Task Gate descriptor
        // must specify global in the local/global bit else #TS(TSS selector)
        raw_tss_selector = gate_descriptor.u.taskgate.tss_selector;
        parse_selector(raw_tss_selector, &tss_selector);

        if (tss_selector.ti) {
          BX_ERROR(("call_protected: tss_selector.ti=1"));
          exception(BX_GP_EXCEPTION, raw_tss_selector & 0xfffc, 0);
        }

        // index must be within GDT limits else #TS(TSS selector)
        fetch_raw_descriptor(&tss_selector, &dword1, &dword2, BX_GP_EXCEPTION);

        parse_descriptor(dword1, dword2, &tss_descriptor);

        // descriptor AR byte must specify available TSS
        //   else #GP(TSS selector)
        if (tss_descriptor.valid==0 || tss_descriptor.segment) {
          BX_ERROR(("call_protected: TSS selector points to bad TSS"));
          exception(BX_GP_EXCEPTION, raw_tss_selector & 0xfffc, 0);
        }
        if (tss_descriptor.type!=BX_SYS_SEGMENT_AVAIL_286_TSS &&
            tss_descriptor.type!=BX_SYS_SEGMENT_AVAIL_386_TSS)
        {
          BX_ERROR(("call_protected: TSS selector points to bad TSS"));
          exception(BX_GP_EXCEPTION, raw_tss_selector & 0xfffc, 0);
        }

        // task state segment must be present, else #NP(tss selector)
        if (! IS_PRESENT(tss_descriptor)) {
          BX_ERROR(("call_protected: task descriptor.p == 0"));
          exception(BX_NP_EXCEPTION, raw_tss_selector & 0xfffc, 0);
        }

        // SWITCH_TASKS without nesting to TSS
        task_switch(&tss_selector, &tss_descriptor,
                    BX_TASK_FROM_CALL_OR_INT, dword1, dword2);

        // EIP must be within code segment limit, else #TS(0)
        if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.d_b)
          temp_eIP = EIP;
        else
          temp_eIP =  IP;

        if (temp_eIP > BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled)
        {
          BX_ERROR(("call_protected: EIP > CS.limit"));
          exception(BX_GP_EXCEPTION, 0, 0);
        }
        return;

      case BX_286_CALL_GATE:
      case BX_386_CALL_GATE:
        // examine code segment selector in call gate descriptor
        BX_DEBUG(("call_protected: call gate"));
        dest_selector = gate_descriptor.u.gate.dest_selector;
        new_EIP       = gate_descriptor.u.gate.dest_offset;

        // selector must not be null else #GP(0)
        if ((dest_selector & 0xfffc) == 0) {
          BX_ERROR(("call_protected: selector in gate null"));
          exception(BX_GP_EXCEPTION, 0, 0);
        }

        parse_selector(dest_selector, &cs_selector);
        // selector must be within its descriptor table limits,
        //   else #GP(code segment selector)
        fetch_raw_descriptor(&cs_selector, &dword1, &dword2, BX_GP_EXCEPTION);
        parse_descriptor(dword1, dword2, &cs_descriptor);

        // AR byte of selected descriptor must indicate code segment,
        //   else #GP(code segment selector)
        // DPL of selected descriptor must be <= CPL,
        // else #GP(code segment selector)
        if (cs_descriptor.valid==0 || cs_descriptor.segment==0 ||
            IS_DATA_SEGMENT(cs_descriptor.type) ||
            cs_descriptor.dpl > CPL)
        {
          BX_ERROR(("call_protected: selected descriptor is not code"));
          exception(BX_GP_EXCEPTION, dest_selector & 0xfffc, 0);
        }

        // code segment must be present else #NP(selector)
        if (! IS_PRESENT(cs_descriptor)) {
          BX_ERROR(("call_protected: code segment not present !"));
          exception(BX_NP_EXCEPTION, dest_selector & 0xfffc, 0);
        }

        // CALL GATE TO MORE PRIVILEGE
        // if non-conforming code segment and DPL < CPL then
        if (IS_CODE_SEGMENT_NON_CONFORMING(cs_descriptor.type) && (cs_descriptor.dpl < CPL))
        {
          Bit16u SS_for_cpl_x;
          Bit32u ESP_for_cpl_x;
          bx_selector_t   ss_selector;
          bx_descriptor_t ss_descriptor;
          Bit16u   return_SS, return_CS;
          Bit32u   return_ESP, return_EIP;
          Bit16u   parameter_word[32];
          Bit32u   parameter_dword[32];

          BX_DEBUG(("CALL GATE TO MORE PRIVILEGE LEVEL"));

          // get new SS selector for new privilege level from TSS
          get_SS_ESP_from_TSS(cs_descriptor.dpl, &SS_for_cpl_x, &ESP_for_cpl_x);

          // check selector & descriptor for new SS:
          // selector must not be null, else #TS(0)
          if ((SS_for_cpl_x & 0xfffc) == 0) {
            BX_ERROR(("call_protected: new SS null"));
            exception(BX_TS_EXCEPTION, 0, 0);
          }

          // selector index must be within its descriptor table limits,
          //   else #TS(SS selector)
          parse_selector(SS_for_cpl_x, &ss_selector);
          fetch_raw_descriptor(&ss_selector, &dword1, &dword2, BX_TS_EXCEPTION);
          parse_descriptor(dword1, dword2, &ss_descriptor);

          // selector's RPL must equal DPL of code segment,
          //   else #TS(SS selector)
          if (ss_selector.rpl != cs_descriptor.dpl) {
            BX_ERROR(("call_protected: SS selector.rpl != CS descr.dpl"));
            exception(BX_TS_EXCEPTION, SS_for_cpl_x & 0xfffc, 0);
          }

          // stack segment DPL must equal DPL of code segment,
          //   else #TS(SS selector)
          if (ss_descriptor.dpl != cs_descriptor.dpl) {
            BX_ERROR(("call_protected: SS descr.rpl != CS descr.dpl"));
            exception(BX_TS_EXCEPTION, SS_for_cpl_x & 0xfffc, 0);
          }

          // descriptor must indicate writable data segment,
          //   else #TS(SS selector)
          if (ss_descriptor.valid==0 || ss_descriptor.segment==0 ||
               IS_CODE_SEGMENT(ss_descriptor.type) ||
              !IS_DATA_SEGMENT_WRITEABLE(ss_descriptor.type))
          {
            BX_ERROR(("call_protected: ss descriptor is not writable data seg"));
            exception(BX_TS_EXCEPTION, SS_for_cpl_x & 0xfffc, 0);
          }

          // segment must be present, else #SS(SS selector)
          if (! IS_PRESENT(ss_descriptor)) {
            BX_ERROR(("call_protected: ss descriptor not present"));
            exception(BX_SS_EXCEPTION, SS_for_cpl_x & 0xfffc, 0);
          }

          // get word count from call gate, mask to 5 bits
          unsigned param_count = gate_descriptor.u.gate.param_count & 0x1f;

          // save return SS:eSP to be pushed on new stack
          return_SS = BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].selector.value;
          if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.d_b)
            return_ESP = ESP;
          else
            return_ESP =  SP;

          // save return CS:eIP to be pushed on new stack
          return_CS = BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value;
          if (cs_descriptor.u.segment.d_b)
            return_EIP = EIP;
          else
            return_EIP = IP;

          if (gate_descriptor.type==BX_286_CALL_GATE) {
            for (unsigned i=0; i<param_count; i++) {
              parameter_word[i] = read_virtual_word(BX_SEG_REG_SS, return_ESP + i*2);
            }
          }
          else {
            for (unsigned i=0; i<param_count; i++) {
              parameter_dword[i] = read_virtual_dword(BX_SEG_REG_SS, return_ESP + i*4);
            }
          }

          // Prepare new stack segment
          bx_segment_reg_t new_stack;
          new_stack.selector = ss_selector;
          new_stack.cache = ss_descriptor;
          new_stack.selector.rpl = cs_descriptor.dpl;
          // add cpl to the selector value
          new_stack.selector.value = (0xfffc & new_stack.selector.value) |
            new_stack.selector.rpl;

          /* load new SS:SP value from TSS */
          if (ss_descriptor.u.segment.d_b) {
            Bit32u temp_ESP = ESP_for_cpl_x;

            // push pointer of old stack onto new stack
            if (gate_descriptor.type==BX_386_CALL_GATE) {
              write_new_stack_dword_32(&new_stack, temp_ESP-4, cs_descriptor.dpl, return_SS);
              write_new_stack_dword_32(&new_stack, temp_ESP-8, cs_descriptor.dpl, return_ESP);
              temp_ESP -= 8;

              for (unsigned i=param_count; i>0; i--) {
                temp_ESP -= 4;
                write_new_stack_dword_32(&new_stack, temp_ESP, cs_descriptor.dpl, parameter_dword[i-1]);
              }
              // push return address onto new stack
              write_new_stack_dword_32(&new_stack, temp_ESP-4, cs_descriptor.dpl, return_CS);
              write_new_stack_dword_32(&new_stack, temp_ESP-8, cs_descriptor.dpl, return_EIP);
              temp_ESP -= 8;
            }
            else {
              write_new_stack_word_32(&new_stack, temp_ESP-2, cs_descriptor.dpl, return_SS);
              write_new_stack_word_32(&new_stack, temp_ESP-4, cs_descriptor.dpl, (Bit16u) return_ESP);
              temp_ESP -= 4;

              for (unsigned i=param_count; i>0; i--) {
                temp_ESP -= 2;
                write_new_stack_word_32(&new_stack, temp_ESP, cs_descriptor.dpl, parameter_word[i-1]);
              }
              // push return address onto new stack
              write_new_stack_word_32(&new_stack, temp_ESP-2, cs_descriptor.dpl, return_CS);
              write_new_stack_word_32(&new_stack, temp_ESP-4, cs_descriptor.dpl, (Bit16u) return_EIP);
              temp_ESP -= 4;
            }

            ESP = temp_ESP;
          }
          else {
            Bit16u temp_SP = (Bit16u) ESP_for_cpl_x;

            // push pointer of old stack onto new stack
            if (gate_descriptor.type==BX_386_CALL_GATE) {
              write_new_stack_dword_32(&new_stack, (Bit16u)(temp_SP-4), cs_descriptor.dpl, return_SS);
              write_new_stack_dword_32(&new_stack, (Bit16u)(temp_SP-8), cs_descriptor.dpl, return_ESP);
              temp_SP -= 8;

              for (unsigned i=param_count; i>0; i--) {
                temp_SP -= 4;
                write_new_stack_dword_32(&new_stack, temp_SP, cs_descriptor.dpl, parameter_dword[i-1]);
              }
              // push return address onto new stack
              write_new_stack_dword_32(&new_stack, (Bit16u)(temp_SP-4), cs_descriptor.dpl, return_CS);
              write_new_stack_dword_32(&new_stack, (Bit16u)(temp_SP-8), cs_descriptor.dpl, return_EIP);
              temp_SP -= 8;
            }
            else {
              write_new_stack_word_32(&new_stack, (Bit16u)(temp_SP-2), cs_descriptor.dpl, return_SS);
              write_new_stack_word_32(&new_stack, (Bit16u)(temp_SP-4), cs_descriptor.dpl, (Bit16u) return_ESP);
              temp_SP -= 4;

              for (unsigned i=param_count; i>0; i--) {
                temp_SP -= 2;
                write_new_stack_word_32(&new_stack, temp_SP, cs_descriptor.dpl, parameter_word[i-1]);
              }
              // push return address onto new stack
              write_new_stack_word_32(&new_stack, (Bit16u)(temp_SP-2), cs_descriptor.dpl, return_CS);
              write_new_stack_word_32(&new_stack, (Bit16u)(temp_SP-4), cs_descriptor.dpl, (Bit16u) return_EIP);
              temp_SP -= 4;
            }

            SP = temp_SP;
          }

          // new eIP must be in code segment limit else #GP(0)
          if (new_EIP > cs_descriptor.u.segment.limit_scaled) {
            BX_ERROR(("call_protected: EIP not within CS limits"));
            exception(BX_GP_EXCEPTION, 0, 0);
          }

          /* load SS descriptor */
          load_ss(&ss_selector, &ss_descriptor, cs_descriptor.dpl);

          /* load new CS:IP value from gate */
          /* load CS descriptor */
          /* set CPL to stack segment DPL */
          /* set RPL of CS to CPL */
          load_cs(&cs_selector, &cs_descriptor, cs_descriptor.dpl);
          EIP = new_EIP;
        }
        else   // CALL GATE TO SAME PRIVILEGE
        {
          BX_DEBUG(("CALL GATE TO SAME PRIVILEGE"));

          if (gate_descriptor.type == BX_386_CALL_GATE) {
            // call gate 32bit, push return address onto stack
            push_32(BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value);
            push_32(EIP);
          }
          else {
            // call gate 16bit, push return address onto stack
            push_16(BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value);
            push_16(IP);
          }

          // load CS:EIP from gate
          // load code segment descriptor into CS register
          // set RPL of CS to CPL
          branch_far32(&cs_selector, &cs_descriptor, new_EIP, CPL);
        }
        return;

      default: // can't get here
        BX_PANIC(("call_protected: gate type %u unsupported", (unsigned) cs_descriptor.type));
        exception(BX_GP_EXCEPTION, cs_raw & 0xfffc, 0);
    }
  }
}
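The inner-privilege path above patches the new SS selector so that its RPL matches the privilege level being entered. A minimal standalone sketch of that bit manipulation (not Bochs code; the selector value and target level are hypothetical):

#include <stdint.h>
#include <stdio.h>

/* A selector's low two bits hold the RPL; bit 2 is the table indicator. */
static uint16_t set_selector_rpl(uint16_t selector, unsigned rpl)
{
    return (uint16_t)((selector & 0xfffc) | (rpl & 0x3));
}

int main(void)
{
    uint16_t ss = 0x0023;                        /* GDT index 4, RPL 3 */
    printf("%#06x\n", set_selector_rpl(ss, 0));  /* prints 0x0020 */
    return 0;
}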
Example #27
0
/**
 * Loads the specified keymap file. The stringToSymbol callback converts host
 * key names from the keymap into the GUI client's key constants.
 **/
void bx_keymap_c::loadKeymap(u32 stringToSymbol (const char*),
                             const char *filename)
{
  FILE*   keymapFile;
  char    baseSym[256];
  char    modSym[256];
  char    hostSym[256];
  s32     ascii = 0;
  u32     baseKey;
  u32     modKey;
  u32     hostKey;

  if((keymapFile = fopen(filename, "r")) == NULL)
  {
    BX_PANIC(("Can not open keymap file '%s'.", filename));
    return;
  }

  BX_INFO(("Loading keymap from '%s'", filename));
  init_parse();

  // Read keymap file one line at a time
  while(1)
  {
    if(get_next_keymap_line(keymapFile, baseSym, modSym, &ascii, hostSym) < 0)
    {
      break;
    }

    // convert X_KEY_* symbols to values
    baseKey = convertStringToBXKey(baseSym);
    modKey = convertStringToBXKey(modSym);
    hostKey = 0;
    if(stringToSymbol != NULL)
      hostKey = stringToSymbol(hostSym);

    BX_DEBUG(("baseSym='%s' (%d), modSym='%s' (%d), ascii=%d, hostSym='%s' (%d)",
              baseSym, baseKey, modSym, modKey, ascii, hostSym, hostKey));

    // Check if data is valid
    if(baseKey == BX_KEYMAP_UNKNOWN)
    {
      BX_PANIC(("line %d: unknown BX_KEY constant '%s'", lineCount, baseSym));
      continue;
    }

    if(hostKey == BX_KEYMAP_UNKNOWN)
    {
      BX_PANIC((
                 "line %d: unknown host key name '%s' (wrong keymap ?)", lineCount,
                 hostSym));
      continue;
    }

    CHECK_REALLOCATION(keymapTable,
                       realloc(keymapTable, (keymapCount + 1) * sizeof(BXKeyEntry)),
                       BXKeyEntry);

    if(keymapTable == NULL)
      BX_PANIC(("Can not allocate memory for keymap table."));

    keymapTable[keymapCount].baseKey = baseKey;
    keymapTable[keymapCount].modKey = modKey;
    keymapTable[keymapCount].ascii = ascii;
    keymapTable[keymapCount].hostKey = hostKey;

    keymapCount++;
  }

  BX_INFO(("Loaded %d symbols", keymapCount));

  fclose(keymapFile);
}
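The CHECK_REALLOCATION step above grows the keymap table by one entry for every line parsed. A minimal sketch of that grow-by-one pattern, assuming a simplified stand-in for BXKeyEntry (the struct and names below are illustrative, not the Bochs definitions):

#include <stdio.h>
#include <stdlib.h>

typedef struct { unsigned baseKey, modKey, hostKey; int ascii; } entry_t;

int main(void)
{
    entry_t *table = NULL;
    size_t   count = 0;

    for (int i = 0; i < 4; i++) {
        /* realloc(NULL, n) acts like malloc; keep the old pointer on failure */
        entry_t *grown = realloc(table, (count + 1) * sizeof(entry_t));
        if (grown == NULL) { free(table); return 1; }
        table = grown;
        table[count].baseKey = (unsigned)i;
        table[count].modKey  = 0;
        table[count].hostKey = 0;
        table[count].ascii   = 'a' + i;
        count++;
    }
    printf("entries: %zu\n", count);
    free(table);
    return 0;
}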
Example #28
0
void logfunctions::ask(int level, const char *prefix, const char *fmt, va_list ap)
{
  // Guard against reentry on ask() function.  The danger is that some
  // function that's called within ask() could trigger another
  // BX_PANIC that could call ask() again, leading to infinite
  // recursion and infinite asks.
  static char in_ask_already = 0;
  char buf1[1024];
  if (in_ask_already) {
    fprintf(stderr, "logfunctions::ask() should not reenter!!\n");
    return;
  }
  in_ask_already = 1;
  vsnprintf(buf1, sizeof(buf1), fmt, ap);
  // FIXME: facility set to 0 because it's unknown.

  // update vga screen.  This is useful because sometimes useful messages
  // are printed on the screen just before a panic.  It's also potentially
  // dangerous if this function calls ask again...  That's why I added
  // the reentry check above.
  if (SIM->get_init_done()) DEV_vga_refresh();

#if !BX_EXTERNAL_DEBUGGER
  // ensure the text screen is showing
  SIM->set_display_mode(DISP_MODE_CONFIG);
  int val = SIM->log_msg(prefix, level, buf1);
  switch(val)
  {
    case BX_LOG_ASK_CHOICE_CONTINUE:
      break;
    case BX_LOG_ASK_CHOICE_CONTINUE_ALWAYS:
      // user said continue, and don't "ask" for this facility again.
      setonoff(level, ACT_REPORT);
      break;
    case BX_LOG_ASK_CHOICE_DIE:
    case BX_LOG_NOTIFY_FAILED:
      bx_user_quit = (val==BX_LOG_ASK_CHOICE_DIE)?1:0;
      in_ask_already = 0;  // because fatal will longjmp out
      fatal(prefix, buf1, ap, 1);
      // should never get here
      BX_PANIC(("in ask(), fatal() should never return!"));
      break;
    case BX_LOG_ASK_CHOICE_DUMP_CORE:
      fprintf(stderr, "User chose to dump core...\n");
#if BX_HAVE_ABORT
      abort();
#else
      // do something highly illegal that should kill the process.
      // Hey, this is fun!
      {
        char *crashptr = (char *)0; char c = *crashptr;
      }
      fprintf(stderr, "Sorry, I couldn't find your abort() function.  Exiting.");
      exit(0);
#endif
#if BX_DEBUGGER
    case BX_LOG_ASK_CHOICE_ENTER_DEBUG:
      // user chose debugger.  To "drop into the debugger" we just set the
      // interrupt_requested bit and continue execution.  Before the next
      // instruction, it should notice the user interrupt and return to
      // the debugger.
      bx_debug_break();
      break;
#elif BX_GDBSTUB
    case BX_LOG_ASK_CHOICE_ENTER_DEBUG:
      bx_gdbstub_break();
      break;
#endif
    default:
      // this happens if panics happen before the callback is initialized
      // in gui/control.cc.
      fprintf(stderr, "WARNING: log_msg returned unexpected value %d\n", val);
  }
#else
  // external debugger ask code goes here
#endif
  // return to simulation mode
  SIM->set_display_mode(DISP_MODE_SIM);
  in_ask_already = 0;
}
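The in_ask_already flag above is a simple reentry guard: if anything called from ask() panics again, the second call bails out instead of recursing. A minimal sketch of the same pattern (function and message names are illustrative):

#include <stdio.h>

static void ask_like(const char *msg)
{
    static int in_already = 0;
    if (in_already) {
        fprintf(stderr, "ask_like() should not reenter\n");
        return;
    }
    in_already = 1;
    fprintf(stderr, "asking user about: %s\n", msg);
    /* ... work done here might itself trigger ask_like() and hit the guard ... */
    in_already = 0;
}

int main(void)
{
    ask_like("panic!");
    return 0;
}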
Example #29
0
void BIOSCALL int18_panic_msg(void)
{
    BX_PANIC("INT18: BOOT FAILURE\n");
}
Example #30
0
void bx_generic_cpuid_t::init_cpu_extensions_bitmask(void)
{
  Bit32u features_bitmask = 0;

#if BX_SUPPORT_APIC
  static unsigned apic_enabled = SIM->get_param_enum(BXPN_CPUID_APIC)->get();
  // determine APIC support at run time; each setting also enables the levels below it
  switch (apic_enabled) {
    case BX_CPUID_SUPPORT_X2APIC:
      features_bitmask |= BX_CPU_X2APIC;
    case BX_CPUID_SUPPORT_XAPIC:
      features_bitmask |= BX_CPU_XAPIC;
    case BX_CPUID_SUPPORT_LEGACY_APIC:
    default:
      break;
  };

  // XAPIC may be configured together with an i586 CPU; only x2APIC needs a P6-class core
  if (apic_enabled >= BX_CPUID_SUPPORT_X2APIC && BX_CPU_LEVEL < 6) {
    BX_PANIC(("PANIC: X2APIC require CPU_LEVEL >= 6 !"));
    return;
  }
#endif

#if BX_CPU_LEVEL >= 5
  features_bitmask |= BX_CPU_VME;
  features_bitmask |= BX_CPU_DEBUG_EXTENSIONS;
  features_bitmask |= BX_CPU_PSE;

#if BX_CPU_LEVEL >= 6
  features_bitmask |= BX_CPU_PAE;
  features_bitmask |= BX_CPU_PGE;
#if BX_PHY_ADDRESS_LONG
  features_bitmask |= BX_CPU_PSE36;
#endif
  features_bitmask |= BX_CPU_MTRR;
  features_bitmask |= BX_CPU_PAT;

  static bx_bool smep_enabled = SIM->get_param_bool(BXPN_CPUID_SMEP)->get();
  if (smep_enabled)
    features_bitmask |= BX_CPU_SMEP;

#if BX_SUPPORT_X86_64
  static bx_bool x86_64_enabled = SIM->get_param_bool(BXPN_CPUID_X86_64)->get();
  if (x86_64_enabled) {
    features_bitmask |= BX_CPU_LONG_MODE | BX_CPU_FFXSR | BX_CPU_NX;

    static bx_bool pcid_enabled = SIM->get_param_bool(BXPN_CPUID_PCID)->get();
    if (pcid_enabled)
      features_bitmask |= BX_CPU_PCID;

    static bx_bool xlarge_pages = SIM->get_param_bool(BXPN_CPUID_1G_PAGES)->get();
    if (xlarge_pages)
      features_bitmask |= BX_CPU_1G_PAGES;
  }
#endif

#endif // CPU_LEVEL >= 6

#endif // CPU_LEVEL >= 5

  this->cpu_extensions_bitmask = features_bitmask;
}
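The APIC switch above relies on intentional fall-through, so each higher setting also turns on the feature bits of the levels below it. A minimal sketch of that accumulation pattern (the enum values are illustrative, not the Bochs constants):

#include <stdio.h>

enum { LEVEL_LEGACY = 0, LEVEL_XAPIC = 1, LEVEL_X2APIC = 2 };
enum { FEAT_XAPIC = 1u << 0, FEAT_X2APIC = 1u << 1 };

static unsigned apic_feature_bits(int level)
{
    unsigned bits = 0;
    switch (level) {
      case LEVEL_X2APIC: bits |= FEAT_X2APIC;  /* fall through */
      case LEVEL_XAPIC:  bits |= FEAT_XAPIC;   /* fall through */
      case LEVEL_LEGACY:
      default:           break;
    }
    return bits;
}

int main(void)
{
    printf("x2apic setting -> %#x\n", apic_feature_bits(LEVEL_X2APIC)); /* 0x3 */
    return 0;
}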