Example #1
int gic_init(void)
{
	mmio_write32(GICC_BASE + GICC_CTLR, GICC_CTLR_GRPEN1);
	mmio_write32(GICC_BASE + GICC_PMR, GICC_PMR_DEFAULT);

	return 0;
}
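For context, a minimal hedged sketch of how an interrupt could be acknowledged and completed through the same GIC CPU interface follows; GICC_IAR and GICC_EOIR are standard GICv2 register names, but they and their offsets are assumptions here, not taken from the example above.

static u32 gic_ack_irq(void)
{
	/* read the Interrupt Acknowledge Register to get the pending IRQ ID */
	return mmio_read32(GICC_BASE + GICC_IAR);
}

static void gic_eoi_irq(u32 irq_id)
{
	/* signal End Of Interrupt for the ID returned by gic_ack_irq() */
	mmio_write32(GICC_BASE + GICC_EOIR, irq_id);
}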
Example #2
File: e1000.c Project: scyphus/aos
/*
 * Setup TX descriptor
 */
int
e1000_setup_tx_desc(struct e1000_device *dev)
{
    struct e1000_tx_desc *txdesc;
    int i;

    dev->tx_tail = 0;
    dev->tx_bufsz = 128;

    /* ToDo: 16 bytes for alignment */
    dev->tx_base = (u64)kmalloc(dev->tx_bufsz
                                   * sizeof(struct e1000_tx_desc) + 16);
    for ( i = 0; i < dev->tx_bufsz; i++ ) {
        txdesc = (struct e1000_tx_desc *)(dev->tx_base
                                          + i * sizeof(struct e1000_tx_desc));
        txdesc->address = (u64)kmalloc(8192 + 16);
        txdesc->cmd = 0;
        txdesc->sta = 0;
        txdesc->cso = 0;
        txdesc->css = 0;
        txdesc->special = 0;
    }
    mmio_write32(dev->mmio, E1000_REG_TDBAH, dev->tx_base >> 32);
    mmio_write32(dev->mmio, E1000_REG_TDBAL, dev->tx_base & 0xffffffff);
    mmio_write32(dev->mmio, E1000_REG_TDLEN,
                 dev->tx_bufsz * sizeof(struct e1000_tx_desc));
    mmio_write32(dev->mmio, E1000_REG_TDH, 0);
    mmio_write32(dev->mmio, E1000_REG_TDT, 0);
    mmio_write32(dev->mmio, E1000_REG_TCTL,
                 E1000_TCTL_EN | E1000_TCTL_PSP | E1000_TCTL_MULR);

    return 0;
}
Example #3
static void send_xapic_ipi(u32 apic_id, u32 icr_lo)
{
	while (read_xapic(APIC_REG_ICR) & APIC_ICR_DS_PENDING)
		cpu_relax();
	mmio_write32(xapic_page + XAPIC_REG(APIC_REG_ICR_HI),
		     apic_id << XAPIC_DEST_SHIFT);
	mmio_write32(xapic_page + XAPIC_REG(APIC_REG_ICR), icr_lo);
}
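read_xapic() is not shown in this listing; a minimal sketch of the matching read helper, assuming the same xapic_page/XAPIC_REG mapping used by write_xapic in Example #29, could look like this.

static u32 read_xapic(unsigned int reg)
{
	/* assumed counterpart of write_xapic: read a memory-mapped xAPIC register */
	return mmio_read32(xapic_page + XAPIC_REG(reg));
}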
Example #4
static void ioapic_reg_write(struct phys_ioapic *ioapic, unsigned int reg,
			     u32 value)
{
	spin_lock(&ioapic->lock);

	mmio_write32(ioapic->reg_base + IOAPIC_REG_INDEX, reg);
	mmio_write32(ioapic->reg_base + IOAPIC_REG_DATA, value);

	spin_unlock(&ioapic->lock);
}
Example #5
int vtd_cell_init(struct cell *cell)
{
	struct jailhouse_cell_desc *config = cell->config;
	const struct jailhouse_memory *mem =
		jailhouse_cell_mem_regions(config);
	const struct jailhouse_pci_device *dev =
		jailhouse_cell_pci_devices(cell->config);
	void *reg_base = dmar_reg_base;
	int n, err;

	// HACK for QEMU
	if (dmar_units == 0)
		return 0;

	if (cell->id >= dmar_num_did)
		return -ERANGE;

	cell->vtd.pg_structs.root_paging = vtd_paging;
	cell->vtd.pg_structs.root_table = page_alloc(&mem_pool, 1);
	if (!cell->vtd.pg_structs.root_table)
		return -ENOMEM;

	for (n = 0; n < config->num_memory_regions; n++, mem++) {
		err = vtd_map_memory_region(cell, mem);
		if (err)
			/* FIXME: release vtd.pg_structs.root_table */
			return err;
	}

	for (n = 0; n < config->num_pci_devices; n++)
		if (!vtd_add_device_to_cell(cell, &dev[n]))
			/* FIXME: release vtd.pg_structs.root_table,
			 * revert device additions*/
			return -ENOMEM;

	if (!(mmio_read32(reg_base + VTD_GSTS_REG) & VTD_GSTS_TES))
		for (n = 0; n < dmar_units; n++, reg_base += PAGE_SIZE) {
			mmio_write64(reg_base + VTD_RTADDR_REG,
				     page_map_hvirt2phys(root_entry_table));
			mmio_write32(reg_base + VTD_GCMD_REG, VTD_GCMD_SRTP);
			while (!(mmio_read32(reg_base + VTD_GSTS_REG) &
				 VTD_GSTS_SRTP))
				cpu_relax();

			vtd_flush_dmar_caches(reg_base, VTD_CCMD_CIRG_GLOBAL,
					      VTD_IOTLB_IIRG_GLOBAL);

			mmio_write32(reg_base + VTD_GCMD_REG, VTD_GCMD_TE);
			while (!(mmio_read32(reg_base + VTD_GSTS_REG) &
				 VTD_GSTS_TES))
				cpu_relax();
		}

	return 0;
}
Example #6
bool vmxnet3::check_version()
{
  uint32_t maj_ver = mmio_read32(this->iobase + 0x0);
  uint32_t min_ver = mmio_read32(this->iobase + 0x8);
  INFO("vmxnet3", "Version %d.%d", maj_ver, min_ver);

  // select version we support
  mmio_write32(this->iobase + 0x0, 0x1);
  mmio_write32(this->iobase + 0x8, 0x1);
  return true;
}
Example #7
void ioapic_pin_set_vector(unsigned int pin,
			   enum ioapic_trigger_mode trigger_mode,
			   unsigned int vector)
{
	mmio_write32(IOAPIC_BASE + IOAPIC_REG_INDEX,
		     IOAPIC_REDIR_TBL_START + pin * 2 + 1);
	mmio_write32(IOAPIC_BASE + IOAPIC_REG_DATA, cpu_id() << (56 - 32));

	mmio_write32(IOAPIC_BASE + IOAPIC_REG_INDEX,
		     IOAPIC_REDIR_TBL_START + pin * 2);
	mmio_write32(IOAPIC_BASE + IOAPIC_REG_DATA, trigger_mode | vector);
}
Example #8
File: e1000.c Project: scyphus/aos
int
e1000_recvpkt(u8 *pkt, u32 len, struct netdev *netdev)
{
    u32 rdh;
    struct e1000_device *dev;
    int rx_que;
    struct e1000_rx_desc *rxdesc;
    int ret;

    dev = (struct e1000_device *)netdev->vendor;
    rdh = mmio_read32(dev->mmio, E1000_REG_RDH);

    rx_que = (dev->rx_bufsz - dev->rx_tail + rdh) % dev->rx_bufsz;
    if ( rx_que > 0 ) {
        /* Check the head of RX ring buffer */
        rxdesc = (struct e1000_rx_desc *)
            (dev->rx_base + (dev->rx_tail % dev->rx_bufsz)
             * sizeof(struct e1000_rx_desc));
        ret = len < rxdesc->length ? len : rxdesc->length;
        kmemcpy(pkt, (void *)rxdesc->address, ret);

        mmio_write32(dev->mmio, E1000_REG_RDT, dev->rx_tail);
        dev->rx_tail = (dev->rx_tail + 1) % dev->rx_bufsz;

        return ret;
    }

    return -1;
}
Example #9
void bootvid_putch(char c)
{
	/* Wait for the transmitter to be idle */
	while (!(mmio_read32(AUX_MU_LSR_REG) & 0x20));
	/* Write the data */
	mmio_write32(AUX_MU_IO_REG, (unsigned int)c);
}
Example #10
void serial_putchar(sio_fd_t fd, uint32_t c)
{
  uint32_t *uart_tx = fd + UART_TX;
  while(!serial_ready(fd))
    ; /* Wait for empty transmit */
  mmio_write32(uart_tx, c);
}
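serial_ready() is not part of this listing; a plausible sketch for a 16550-style UART follows, where the UART_LSR offset name and the 0x20 (THRE) bit are assumptions rather than definitions from this project.

static int serial_ready(sio_fd_t fd)
{
  /* assumption: UART_LSR is the Line Status Register and bit 5 (0x20) means
     the transmit holding register is empty */
  return mmio_read32(fd + UART_LSR) & 0x20;
}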
Example #11
File: e1000.c Project: scyphus/aos
int
e1000_sendpkt(const u8 *pkt, u32 len, struct netdev *netdev)
{
    u32 tdh;
    struct e1000_device *dev;
    int tx_avl;
    struct e1000_tx_desc *txdesc;

    dev = (struct e1000_device *)netdev->vendor;
    tdh = mmio_read32(dev->mmio, E1000_REG_TDH);

    tx_avl = dev->tx_bufsz - ((dev->tx_bufsz - tdh + dev->tx_tail)
                              % dev->tx_bufsz);

    if ( tx_avl > 0 ) {
        /* Check the head of TX ring buffer */
        txdesc = (struct e1000_tx_desc *)
            (dev->tx_base + (dev->tx_tail % dev->tx_bufsz)
             * sizeof(struct e1000_tx_desc));
        kmemcpy((void *)txdesc->address, pkt, len);
        txdesc->length = len;
        txdesc->sta = 0;
        txdesc->css = 0;
        txdesc->cso = 0;
        txdesc->special = 0;
        txdesc->cmd = (1<<3) | (1<<1) | 1;

        dev->tx_tail = (dev->tx_tail + 1) % dev->tx_bufsz;
        mmio_write32(dev->mmio, E1000_REG_TDT, dev->tx_tail);

        return len;
    }

    return -1;
}
Example #12
static void packet_reception_done(void)
{
	unsigned int idx = rx_idx;

	rx_ring[idx].dd = 0;
	rx_idx = (rx_idx + 1) % RX_DESCRIPTORS;
	mmio_write32(mmiobar + E1000_REG_RDT, idx);
}
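The matching packet_received() helper used in Example #28 is not listed; a minimal sketch, assuming the descriptor's dd (descriptor done) bit marks a completed reception and addr points at the frame buffer, might be:

static struct eth_header *packet_received(void)
{
	/* no descriptor completed yet */
	if (!rx_ring[rx_idx].dd)
		return NULL;
	/* hand back the buffer that the current RX descriptor points to */
	return (struct eth_header *)(unsigned long)rx_ring[rx_idx].addr;
}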
Example #13
void uart_putc(unsigned int ch)
{
	while(1) {
		if (mmio_read32(AUX_MU_LSR_REG) & 0x20)
			break;
	}
	mmio_write32(AUX_MU_IO_REG, ch);
}
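A hedged receive-side counterpart, assuming bit 0 of AUX_MU_LSR_REG signals "data ready" on this mini UART, could be:

unsigned int uart_getc(void)
{
	/* assumption: LSR bit 0 set means the receive FIFO holds at least one byte */
	while (!(mmio_read32(AUX_MU_LSR_REG) & 0x01))
		;
	return mmio_read32(AUX_MU_IO_REG);
}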
Example #14
static void vtd_init_fault_nmi(void)
{
	void *reg_base = dmar_reg_base;
	struct per_cpu *cpu_data;
	unsigned int apic_id;
	int i;

	/* Assume that at least one bit is set somewhere as
	* we don't support configurations when Linux is left with no CPUs */
	for (i = 0; root_cell.cpu_set->bitmap[i] == 0; i++)
		/* Empty loop */;
	cpu_data = per_cpu(ffsl(root_cell.cpu_set->bitmap[i]));
	apic_id = cpu_data->apic_id;

	/* Save this value globally to avoid multiple reporting
	 * of the same case from different CPUs*/
	fault_reporting_cpu_id = cpu_data->cpu_id;

	for (i = 0; i < dmar_units; i++, reg_base += PAGE_SIZE) {
		/* Mask events*/
		mmio_write32_field(reg_base+VTD_FECTL_REG, VTD_FECTL_IM_MASK,
				   VTD_FECTL_IM_SET);

		/* We use xAPIC mode. Hence, TRGM and LEVEL aren't required.
		 Set Delivery Mode to NMI */
		mmio_write32(reg_base + VTD_FEDATA_REG, APIC_MSI_DATA_DM_NMI);

		/* The vector information is ignored in the case of NMI,
		* hence there's no need to set that field.
		* Redirection mode is set to use physical address by default */
		mmio_write32(reg_base + VTD_FEADDR_REG,
			((apic_id << APIC_MSI_ADDR_DESTID_SHIFT) &
			 APIC_MSI_ADDR_DESTID_MASK) | APIC_MSI_ADDR_FIXED_VAL);

		/* APIC ID can exceed 8-bit value for x2APIC mode */
		if (using_x2apic)
			mmio_write32(reg_base + VTD_FEUADDR_REG, apic_id);

		/* Unmask events */
		mmio_write32_field(reg_base+VTD_FECTL_REG, VTD_FECTL_IM_MASK,
				   VTD_FECTL_IM_CLEAR);
	}
}
Example #15
void vtd_shutdown(void)
{
	void *reg_base = dmar_reg_base;
	unsigned int n;

	for (n = 0; n < dmar_units; n++, reg_base += PAGE_SIZE) {
		mmio_write32(reg_base + VTD_GCMD_REG, 0);
		while (mmio_read32(reg_base + VTD_GSTS_REG) & VTD_GSTS_TES)
			cpu_relax();
	}
}
Example #16
void pci_msix_set_vector(u16 bdf, unsigned int vector, u32 index)
{
	int cap = pci_find_cap(bdf, PCI_CAP_MSIX);
	unsigned int bar;
	u64 msix_table = 0;
	u32 addr;
	u16 ctrl;
	u32 table;

	if (cap < 0)
		return;
	ctrl = pci_read_config(bdf, cap + 2, 2);
	/* bounds check */
	if (index > (ctrl & 0x3ff))
		return;
	table = pci_read_config(bdf, cap + 4, 4);
	bar = (table & 7) * 4 + PCI_CFG_BAR;
	addr = pci_read_config(bdf, bar, 4);

	if ((addr & 6) == PCI_BAR_64BIT) {
		msix_table = pci_read_config(bdf, bar + 4, 4);
		msix_table <<= 32;
	}
	msix_table |= addr & ~0xf;
	msix_table += table & ~7;

	/* enable and mask */
	ctrl |= (MSIX_CTRL_ENABLE | MSIX_CTRL_FMASK);
	pci_write_config(bdf, cap + 2, ctrl, 2);

	msix_table += 16 * index;
	mmio_write32((u32 *)msix_table, 0xfee00000 | cpu_id() << 12);
	mmio_write32((u32 *)(msix_table + 4), 0);
	mmio_write32((u32 *)(msix_table + 8), vector);
	mmio_write32((u32 *)(msix_table + 12), 0);

	/* enable and unmask */
	ctrl &= ~MSIX_CTRL_FMASK;
	pci_write_config(bdf, cap + 2, ctrl, 2);
}
Example #17
static u32 ioapic_reg_read(struct phys_ioapic *ioapic, unsigned int reg)
{
	u32 value;

	spin_lock(&ioapic->lock);

	mmio_write32(ioapic->reg_base + IOAPIC_REG_INDEX, reg);
	value = mmio_read32(ioapic->reg_base + IOAPIC_REG_DATA);

	spin_unlock(&ioapic->lock);

	return value;
}
Example #18
/**
 * Write to PCI config space.
 * @param bdf		16-bit bus/device/function ID of target.
 * @param address	Config space access address.
 * @param value		Value to be written.
 * @param size		Access size (1, 2 or 4 bytes).
 *
 * @see pci_read_config
 */
void pci_write_config(u16 bdf, u16 address, u32 value, unsigned int size)
{
	void *mmcfg_addr = pci_get_device_mmcfg_base(bdf) + address;

	if (!pci_space || PCI_BUS(bdf) > end_bus)
		return arch_pci_write_config(bdf, address, value, size);

	if (size == 1)
		mmio_write8(mmcfg_addr, value);
	else if (size == 2)
		mmio_write16(mmcfg_addr, value);
	else
		mmio_write32(mmcfg_addr, value);
}
Example #19
static void send_packet(void *buffer, unsigned int size)
{
	unsigned int idx = tx_idx;

	memset(&tx_ring[idx], 0, sizeof(struct e1000_txd));
	tx_ring[idx].addr = (unsigned long)buffer;
	tx_ring[idx].len = size;
	tx_ring[idx].rs = 1;
	tx_ring[idx].ifcs = 1;
	tx_ring[idx].eop = 1;

	tx_idx = (tx_idx + 1) % TX_DESCRIPTORS;
	mmio_write32(mmiobar + E1000_REG_TDT, tx_idx);

	while (!tx_ring[idx].dd)
		cpu_relax();
}
Example #20
static int lua_mmio_write32(lua_State *L) {
    mmio_t *mmio;
    uint32_t value;
    uintptr_t offset;
    int ret;

    mmio = luaL_checkudata(L, 1, "periphery.MMIO");
    lua_mmio_checktype(L, 2, LUA_TNUMBER);
    lua_mmio_checktype(L, 3, LUA_TNUMBER);

    offset = lua_tounsigned(L, 2);
    value = lua_tounsigned(L, 3);

    if ((ret = mmio_write32(mmio, offset, value)) < 0)
        return lua_mmio_error(L, ret, mmio_errno(mmio), "Error: %s", mmio_errmsg(mmio));

    return 0;
}
Example #21
sio_fd_t serial_open(void)
{
	unsigned divisor = DIV_ROUND_CLOSEST(UART_CLK, 16 * UART_BAUDRATE);
	sio_fd_t uart_base = (void *)UART7_BASE;

	mmio_write32(UART_CLOCK_REG,
		     mmio_read32(UART_CLOCK_REG) | (1 << UART_GATE_NR));

	mmio_write32(uart_base + UART_LCR, UART_LCR_8N1);
	mmio_write32(uart_base + UART_IER, 0); /* IRQ off */
	mmio_write32(uart_base + UART_FCR, 7); /* FIFO reset and enable */
	mmio_write32(uart_base + UART_MCR, 7); /* DTR + RTS on */
	/* Set Divisor Latch Access Bit */
	mmio_write32(uart_base + UART_LCR,
		     UART_LCR_DLAB | mmio_read32(uart_base + UART_LCR));
	/* Program baudrate */
	mmio_write32(uart_base + UART_DLL, divisor & 0xff);        /* Divisor Latch Low Register */
	mmio_write32(uart_base + UART_DLM, (divisor >> 8) & 0xff); /* Divisor Latch High Register */
	mmio_write32(uart_base + UART_LCR,
		     ~UART_LCR_DLAB & mmio_read32(uart_base + UART_LCR));

	return uart_base;
}
Example #22
File: e1000.c Project: scyphus/aos
/*
 * Setup RX descriptor
 */
int
e1000_setup_rx_desc(struct e1000_device *dev)
{
    struct e1000_rx_desc *rxdesc;
    int i;

    dev->rx_tail = 0;
    dev->rx_bufsz = 128;

    /* Cache */
    dev->rx_head_cache = 0;
    dev->tx_head_cache = 0;

    /* ToDo: 16 bytes for alignment */
    dev->rx_mem_base = kmalloc(dev->rx_bufsz * sizeof(struct e1000_rx_desc) + 16);
    if ( 0 == dev->rx_mem_base ) {
        kfree(dev);
        return -1;
    }
    dev->rx_base = ((u64)dev->rx_mem_base + 0xf) & ~(u64)0xf;
    for ( i = 0; i < dev->rx_bufsz; i++ ) {
        rxdesc = (struct e1000_rx_desc *)(dev->rx_base
                                          + i * sizeof(struct e1000_rx_desc));
        rxdesc->address = (u64)kmalloc(8192 + 16);
        /* FIXME: Memory check */
        rxdesc->checksum = 0;
        rxdesc->status = 0;
        rxdesc->errors = 0;
        rxdesc->special = 0;
    }
    mmio_write32(dev->mmio, E1000_REG_RDBAH, dev->rx_base >> 32);
    mmio_write32(dev->mmio, E1000_REG_RDBAL, dev->rx_base & 0xffffffff);
    mmio_write32(dev->mmio, E1000_REG_RDLEN,
                 dev->rx_bufsz * sizeof(struct e1000_rx_desc));
    mmio_write32(dev->mmio, E1000_REG_RDH, 0);
    /* RDT must be larger than 0 for the initial value to receive the first
       packet but I don't know why */
    mmio_write32(dev->mmio, E1000_REG_RDT, dev->rx_bufsz - 1);
    mmio_write32(dev->mmio, E1000_REG_RCTL,
                 E1000_RCTL_SBP | E1000_RCTL_UPE
                 | E1000_RCTL_MPE | E1000_RCTL_LPE | E1000_RCTL_BAM
                 | E1000_RCTL_BSIZE_8192 | E1000_RCTL_SECRC);
    /* Enable */
    mmio_write32(dev->mmio, E1000_REG_RCTL,
                 mmio_read32(dev->mmio, E1000_REG_RCTL) | E1000_RCTL_EN);

    return 0;
}
Example #23
void mmio_perform_access(void *base, struct mmio_access *mmio)
{
	void *addr = base + mmio->address;

	if (mmio->is_write)
		switch (mmio->size) {
		case 1:
			mmio_write8(addr, mmio->value);
			break;
		case 2:
			mmio_write16(addr, mmio->value);
			break;
		case 4:
			mmio_write32(addr, mmio->value);
			break;
#if BITS_PER_LONG == 64
		case 8:
			mmio_write64(addr, mmio->value);
			break;
#endif
		}
	else
		switch (mmio->size) {
		case 1:
			mmio->value = mmio_read8(addr);
			break;
		case 2:
			mmio->value = mmio_read16(addr);
			break;
		case 4:
			mmio->value = mmio_read32(addr);
			break;
#if BITS_PER_LONG == 64
		case 8:
			mmio->value = mmio_read64(addr);
			break;
#endif
		}
}
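A hypothetical caller fills a struct mmio_access and dispatches it; mapped_base, the 0x10 offset, and the value below are made-up illustration values, not taken from any project in this listing.

static void example_write32(void *mapped_base)
{
	struct mmio_access access = {
		.address = 0x10,	/* offset of some register inside the mapped region */
		.size = 4,
		.is_write = 1,
		.value = 0x1,
	};

	mmio_perform_access(mapped_base, &access);
}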
Example #24
void intc_priv_setup_handler(int irq, void (*entry)(void), void **pdata,
		unsigned int level, void *data)
{
	unsigned long	handler_offset;
	uint32_t	intpr;

	/* Do not attempt to initialize the same irq twice */
	assert(*pdata == NULL);

	/* Level must be a number between 0 and 3 inclusive */
	assert(!(level & ~INTLEV_MASK));

	handler_offset = (unsigned long)entry - (unsigned long)_evba;

	/* The low-level handler must not be placed too far from EVBA */
	assert(!(handler_offset & ~HANDLER_OFFSET_MASK));

	intpr = handler_offset;
	intpr |= level << INTLEV_SHIFT;
	mmio_write32((void *)(INTC_BASE + 4 * irq), intpr);
	*pdata = data;
}
Example #25
static void uart_pl011_write(struct uart_chip *chip, char c)
{
	mmio_write32(chip->base + UARTDR, c);
}
Example #26
File: e1000.c Project: scyphus/aos
struct e1000_device *
e1000_init_hw(struct pci_device *pcidev)
{
    struct e1000_device *dev;
    u16 m16;
    u32 m32;
    int i;

    dev = kmalloc(sizeof(struct e1000_device));

    /* Assert */
    if ( 0x8086 != pcidev->vendor_id ) {
        kfree(dev);
        return NULL;
    }

    /* Read MMIO */
    dev->mmio = pci_read_mmio(pcidev->bus, pcidev->slot, pcidev->func);
    if ( 0 == dev->mmio ) {
        kfree(dev);
        return NULL;
    }

    /* Initialize */
    mmio_write32(dev->mmio, E1000_REG_IMC, 0xffffffff);
    mmio_write32(dev->mmio, E1000_REG_CTRL,
                 mmio_read32(dev->mmio, E1000_REG_CTRL)
                 | E1000_CTRL_RST);
    arch_busy_usleep(100);
    mmio_write32(dev->mmio, E1000_REG_CTRL,
                 mmio_read32(dev->mmio, E1000_REG_CTRL)
                 | E1000_CTRL_SLU);
    mmio_write32(dev->mmio, E1000_REG_CTRL_EXT,
                 mmio_read32(dev->mmio, E1000_REG_CTRL_EXT)
                 & ~E1000_CTRL_EXT_LINK_MODE_MASK);
#if 0
    mmio_write32(dev->mmio, E1000_REG_TXDCTL,
                 E1000_TXDCTL_GRAN_DESC
                 | (128 << E1000_TXDCTL_HTHRESH_SHIFT)
                 | (8 << E1000_TXDCTL_PTHRESH_SHIFT));
#endif

    switch ( pcidev->device_id ) {
    case E1000_PRO1000MT:
    case E1000_82545EM:
        /* Read MAC address */
        m16 = e1000_eeprom_read_8254x(dev->mmio, 0);
        dev->macaddr[0] = m16 & 0xff;
        dev->macaddr[1] = (m16 >> 8) & 0xff;
        m16 = e1000_eeprom_read_8254x(dev->mmio, 1);
        dev->macaddr[2] = m16 & 0xff;
        dev->macaddr[3] = (m16 >> 8) & 0xff;
        m16 = e1000_eeprom_read_8254x(dev->mmio, 2);
        dev->macaddr[4] = m16 & 0xff;
        dev->macaddr[5] = (m16 >> 8) & 0xff;
        break;

    case E1000_82541PI:
    case E1000_82573L:
        /* Read MAC address */
        m16 = e1000_eeprom_read(dev->mmio, 0);
        dev->macaddr[0] = m16 & 0xff;
        dev->macaddr[1] = (m16 >> 8) & 0xff;
        m16 = e1000_eeprom_read(dev->mmio, 1);
        dev->macaddr[2] = m16 & 0xff;
        dev->macaddr[3] = (m16 >> 8) & 0xff;
        m16 = e1000_eeprom_read(dev->mmio, 2);
        dev->macaddr[4] = m16 & 0xff;
        dev->macaddr[5] = (m16 >> 8) & 0xff;
        break;

    case E1000_82567LM:
    case E1000_82577LM:
    case E1000_82579LM:
        /* Read MAC address */
        m32 = mmio_read32(dev->mmio, E1000_REG_RAL);
        dev->macaddr[0] = m32 & 0xff;
        dev->macaddr[1] = (m32 >> 8) & 0xff;
        dev->macaddr[2] = (m32 >> 16) & 0xff;
        dev->macaddr[3] = (m32 >> 24) & 0xff;
        m32 = mmio_read32(dev->mmio, E1000_REG_RAH);
        dev->macaddr[4] = m32 & 0xff;
        dev->macaddr[5] = (m32 >> 8) & 0xff;
        break;
    }

    /* Link up */
    mmio_write32(dev->mmio, E1000_REG_CTRL,
                 mmio_read32(dev->mmio, E1000_REG_CTRL)
                 | E1000_CTRL_SLU | E1000_CTRL_VME);

    /* Multicast array table */
    for ( i = 0; i < 128; i++ ) {
        mmio_write32(dev->mmio, E1000_REG_MTA + i * 4, 0);
    }

    /* Start TX/RX */
    e1000_setup_rx_desc(dev);
    e1000_setup_tx_desc(dev);

    /* Store the parent device information */
    dev->pci_device = pcidev;

    /* Enable interrupt (REG_IMS <- 0x1F6DC, then read REG_ICR ) */
    mmio_write32(dev->mmio, E1000_REG_IMS, 0x908e);
    (void)mmio_read32(dev->mmio, E1000_REG_ICR);
    /* Register IRQ handler */
    register_irq_handler((((pcidev->intr_pin -1) + pcidev->slot) % 4) + 0x10,
                         &e1000_irq_handler, dev);
#if 0
    kprintf("PCI: %x %x %x %x %x\r\n", pcidev->intr_pin, pcidev->intr_line,
            (((pcidev->intr_pin -1) + pcidev->slot) % 4) + 1,
            mmio_read32(dev->mmio, E1000_REG_IMS),
            mmio_read32(dev->mmio, E1000_REG_ICR));
#endif
    /* http://msdn.microsoft.com/en-us/library/windows/hardware/ff538017(v=vs.85).aspx */
    //mmio_write32(dev->mmio, E1000_REG_ICS, 0x908e);

    return dev;
}
Example #27
File: pci.c Project: berte/jailhouse
static struct apic_irq_message
pci_translate_msi_vector(struct pci_device *device, unsigned int vector,
			 unsigned int legacy_vectors, union x86_msi_vector msi)
{
	struct apic_irq_message irq_msg = { .valid = 0 };
	unsigned int idx;

	if (iommu_cell_emulates_ir(device->cell)) {
		if (!msi.remap.remapped)
			return irq_msg;

		idx = msi.remap.int_index | (msi.remap.int_index15 << 15);
		if (msi.remap.shv)
			idx += msi.remap.subhandle;
		return iommu_get_remapped_root_int(device->info->iommu,
						   device->info->bdf,
						   vector, idx);
	}

	irq_msg.vector = msi.native.vector;
	if (legacy_vectors > 1) {
		irq_msg.vector &= ~(legacy_vectors - 1);
		irq_msg.vector |= vector;
	}
	irq_msg.delivery_mode = msi.native.delivery_mode;
	irq_msg.level_triggered = 0;
	irq_msg.dest_logical = msi.native.dest_logical;
	irq_msg.redir_hint = msi.native.redir_hint;
	irq_msg.valid = 1;
	irq_msg.destination = msi.native.destination;

	return irq_msg;
}

void arch_pci_suppress_msi(struct pci_device *device,
			   const struct jailhouse_pci_capability *cap)
{
	unsigned int n, vectors = pci_enabled_msi_vectors(device);
	const struct jailhouse_pci_device *info = device->info;
	struct apic_irq_message irq_msg;
	union x86_msi_vector msi = {
		.native.dest_logical = 1,
		.native.redir_hint = 1,
		.native.address = MSI_ADDRESS_VALUE,
	};

	if (!(pci_read_config(info->bdf, PCI_CFG_COMMAND, 2) & PCI_CMD_MASTER))
		return;

	/*
	 * Disable delivery by setting no destination CPU bit in logical
	 * addressing mode.
	 */
	if (info->msi_64bits)
		pci_write_config(info->bdf, cap->start + 8, 0, 4);
	pci_write_config(info->bdf, cap->start + 4, (u32)msi.raw.address, 4);

	/*
	 * Inject MSI vectors to avoid losing events while suppressed.
	 * Linux can handle rare spurious interrupts.
	 */
	msi = pci_get_x86_msi_vector(device);
	for (n = 0; n < vectors; n++) {
		irq_msg = pci_translate_msi_vector(device, n, vectors, msi);
		apic_send_irq(irq_msg);
	}
}

static u32 pci_get_x86_msi_remap_address(unsigned int index)
{
	union x86_msi_vector msi = {
		.remap.int_index15 = index >> 15,
		.remap.shv = 1,
		.remap.remapped = 1,
		.remap.int_index = index,
		.remap.address = MSI_ADDRESS_VALUE,
	};

	return (u32)msi.raw.address;
}

int arch_pci_update_msi(struct pci_device *device,
			const struct jailhouse_pci_capability *cap)
{
	unsigned int n, vectors = pci_enabled_msi_vectors(device);
	union x86_msi_vector msi = pci_get_x86_msi_vector(device);
	const struct jailhouse_pci_device *info = device->info;
	struct apic_irq_message irq_msg;
	u16 bdf = info->bdf;
	int result = 0;

	if (vectors == 0)
		return 0;

	for (n = 0; n < vectors; n++) {
		irq_msg = pci_translate_msi_vector(device, n, vectors, msi);
		result = iommu_map_interrupt(device->cell, bdf, n, irq_msg);
		// HACK for QEMU
		if (result == -ENOSYS) {
			for (n = 1; n < (info->msi_64bits ? 4 : 3); n++)
				pci_write_config(bdf, cap->start + n * 4,
					device->msi_registers.raw[n], 4);
			return 0;
		}
		if (result < 0)
			return result;
	}

	/* set result to the base index again */
	result -= vectors - 1;

	pci_write_config(bdf, cap->start + (info->msi_64bits ? 12 : 8), 0, 2);

	if (info->msi_64bits)
		pci_write_config(bdf, cap->start + 8, 0, 4);
	pci_write_config(bdf, cap->start + 4,
			 pci_get_x86_msi_remap_address(result), 4);

	return 0;
}

int arch_pci_update_msix_vector(struct pci_device *device, unsigned int index)
{
	union x86_msi_vector msi = {
		.raw.address = device->msix_vectors[index].field.address,
		.raw.data = device->msix_vectors[index].field.data,
	};
	struct apic_irq_message irq_msg;
	int result;

	if (!device->msix_registers.field.enable)
		return 0;

	irq_msg = pci_translate_msi_vector(device, index, 0, msi);
	result = iommu_map_interrupt(device->cell, device->info->bdf, index,
				   irq_msg);
	// HACK for QEMU
	if (result == -ENOSYS) {
		mmio_write64(&device->msix_table[index].field.address,
			     device->msix_vectors[index].field.address);
		mmio_write32(&device->msix_table[index].field.data,
			     device->msix_vectors[index].field.data);
		return 0;
	}
	if (result < 0)
		return result;

	mmio_write64(&device->msix_table[index].field.address,
		     pci_get_x86_msi_remap_address(result));
	mmio_write32(&device->msix_table[index].field.data, 0);

	return 0;
}
Example #28
void inmate_main(void)
{
	enum { ROLE_UNDEFINED, ROLE_CONTROLLER, ROLE_TARGET } role;
	unsigned long min = -1, max = 0, rtt;
	struct eth_header *rx_packet;
	unsigned long long start;
	bool first_round = true;
	unsigned int n;
	u32 eerd, val;
	u8 mac[6];
	u64 bar;
	int bdf;

	printk_uart_base = UART_BASE;

	bdf = pci_find_device(PCI_ID_ANY, PCI_ID_ANY, 0);
	if (bdf < 0) {
		printk("No device found!\n");
		return;
	}
	printk("Found %04x:%04x at %02x:%02x.%x\n",
	       pci_read_config(bdf, PCI_CFG_VENDOR_ID, 2),
	       pci_read_config(bdf, PCI_CFG_DEVICE_ID, 2),
	       bdf >> 8, (bdf >> 3) & 0x1f, bdf & 0x3);

	bar = pci_read_config(bdf, PCI_CFG_BAR, 4);
	if ((bar & 0x6) == 0x4)
		bar |= (u64)pci_read_config(bdf, PCI_CFG_BAR + 4, 4) << 32;
	mmiobar = (void *)(bar & ~0xfUL);
	map_range(mmiobar, 128 * 1024, MAP_UNCACHED);
	printk("MMIO register BAR at %p\n", mmiobar);

	pci_write_config(bdf, PCI_CFG_COMMAND,
			 PCI_CMD_MEM | PCI_CMD_MASTER, 2);

	mmio_write32(mmiobar + E1000_REG_CTRL, E1000_CTRL_RST);
	delay_us(20000);

	val = mmio_read32(mmiobar + E1000_REG_CTRL);
	val &= ~(E1000_CTRL_LRST | E1000_CTRL_FRCSPD);
	val |= E1000_CTRL_ASDE | E1000_CTRL_SLU;
	mmio_write32(mmiobar + E1000_REG_CTRL, val);
	printk("Reset done, waiting for link...");

	while (!(mmio_read32(mmiobar + E1000_REG_STATUS) & E1000_STATUS_LU))
		cpu_relax();
	printk(" ok\n");

	if (mmio_read32(mmiobar + E1000_REG_RAH) & E1000_RAH_AV) {
		*(u32 *)mac = mmio_read32(mmiobar + E1000_REG_RAL);
		*(u16 *)&mac[4] = mmio_read32(mmiobar + E1000_REG_RAH);
	} else {
		for (n = 0; n < 3; n++) {
			mmio_write32(mmiobar + E1000_REG_EERD,
				     E1000_EERD_START |
				     (n << E1000_EERD_ADDR_SHIFT));
			do {
				eerd = mmio_read32(mmiobar + E1000_REG_EERD);
				cpu_relax();
			} while (!(eerd & E1000_EERD_DONE));
			mac[n * 2] = (u8)(eerd >> E1000_EERD_DATA_SHIFT);
			mac[n * 2 + 1] =
				(u8)(eerd >> (E1000_EERD_DATA_SHIFT + 8));
		}
	}

	printk("MAC: %02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);

	mmio_write32(mmiobar + E1000_REG_RAL, *(u32 *)mac);
	mmio_write32(mmiobar + E1000_REG_RAH, *(u16 *)&mac[4] | E1000_RAH_AV);

	for (n = 0; n < RX_DESCRIPTORS; n++)
		rx_ring[n].addr = (unsigned long)&buffer[n * RX_BUFFER_SIZE];
	mmio_write32(mmiobar + E1000_REG_RDBAL, (unsigned long)&rx_ring);
	mmio_write32(mmiobar + E1000_REG_RDBAH, 0);
	mmio_write32(mmiobar + E1000_REG_RDLEN, sizeof(rx_ring));
	mmio_write32(mmiobar + E1000_REG_RDH, 0);
	mmio_write32(mmiobar + E1000_REG_RDT, RX_DESCRIPTORS - 1);

	val = mmio_read32(mmiobar + E1000_REG_RCTL);
	val |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_BSIZE_2048 |
		E1000_RCTL_SECRC;
	mmio_write32(mmiobar + E1000_REG_RCTL, val);

	mmio_write32(mmiobar + E1000_REG_TDBAL, (unsigned long)&tx_ring);
	mmio_write32(mmiobar + E1000_REG_TDBAH, 0);
	mmio_write32(mmiobar + E1000_REG_TDLEN, sizeof(tx_ring));
	mmio_write32(mmiobar + E1000_REG_TDH, 0);
	mmio_write32(mmiobar + E1000_REG_TDT, 0);

	val = mmio_read32(mmiobar + E1000_REG_TCTL);
	val |= E1000_TCTL_EN | E1000_TCTL_PSP | E1000_TCTL_CT_DEF |
		E1000_TCTL_COLD_DEF;
	mmio_write32(mmiobar + E1000_REG_TCTL, val);
	mmio_write32(mmiobar + E1000_REG_TIPG,
		     E1000_TIPG_IPGT_DEF | E1000_TIPG_IPGR1_DEF |
		     E1000_TIPG_IPGR2_DEF);

	role = ROLE_UNDEFINED;

	memcpy(tx_packet.src, mac, sizeof(tx_packet.src));
	memset(tx_packet.dst, 0xff, sizeof(tx_packet.dst));
	tx_packet.type = FRAME_TYPE_ANNOUNCE;
	send_packet(&tx_packet, sizeof(tx_packet));

	start = pm_timer_read();
	while (pm_timer_read() - start < NS_PER_MSEC &&
	       role == ROLE_UNDEFINED) {
		rx_packet = packet_received();
		if (!rx_packet)
			continue;

		if (rx_packet->type == FRAME_TYPE_TARGET_ROLE) {
			role = ROLE_TARGET;
			memcpy(tx_packet.dst, rx_packet->src,
			       sizeof(tx_packet.dst));
		}
		packet_reception_done();
	}

	if (role == ROLE_UNDEFINED) {
		role = ROLE_CONTROLLER;
		printk("Waiting for peer\n");
		while (1) {
			rx_packet = packet_received();
			if (!rx_packet)
				continue;

			if (rx_packet->type == FRAME_TYPE_ANNOUNCE) {
				memcpy(tx_packet.dst, rx_packet->src,
				       sizeof(tx_packet.dst));
				packet_reception_done();

				tx_packet.type = FRAME_TYPE_TARGET_ROLE;
				send_packet(&tx_packet, sizeof(tx_packet));
				break;
			} else {
				packet_reception_done();
			}
		}
	}

	mmio_write32(mmiobar + E1000_REG_RCTL,
		     mmio_read32(mmiobar + E1000_REG_RCTL) & ~E1000_RCTL_BAM);

	if (role == ROLE_CONTROLLER) {
		printk("Running as controller\n");
		tx_packet.type = FRAME_TYPE_PING;
		while (1) {
			start = pm_timer_read();
			send_packet(&tx_packet, sizeof(tx_packet));

			do
				rx_packet = packet_received();
			while (!rx_packet ||
			       rx_packet->type != FRAME_TYPE_PONG);
			packet_reception_done();

			if (!first_round) {
				rtt = pm_timer_read() - start;
				if (rtt < min)
					min = rtt;
				if (rtt > max)
					max = rtt;
				printk("Received pong, RTT: %6ld ns, "
				       "min: %6ld ns, max: %6ld ns\n",
				       rtt, min, max);
			}
			first_round = false;
			delay_us(100000);
		}
	} else {
		printk("Running as target\n");
		tx_packet.type = FRAME_TYPE_PONG;
		while (1) {
			rx_packet = packet_received();
			if (!rx_packet || rx_packet->type != FRAME_TYPE_PING)
				continue;
			packet_reception_done();
			send_packet(&tx_packet, sizeof(tx_packet));
		}
	}
}
Example #29
static void write_xapic(unsigned int reg, u32 val)
{
	mmio_write32(xapic_page + XAPIC_REG(reg), val);
}
Example #30
void serial_irq_rx_enable(sio_fd_t fd)
{
  void *uart_ier = fd + UART_IER;
  mmio_write32(uart_ier, 5 | mmio_read32(uart_ier)); /* ERBFI + ELSI */
}
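A hedged counterpart that disables the same two interrupt sources, reusing the register offset and bit values from the example above, could be:

void serial_irq_rx_disable(sio_fd_t fd)
{
  void *uart_ier = fd + UART_IER;
  mmio_write32(uart_ier, mmio_read32(uart_ier) & ~5u); /* clear ERBFI + ELSI */
}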