Example 1
/*
 * Look for an ACPI System Resource Affinity Table ("SRAT")
 */
static void
parse_srat(void *dummy)
{
	int error;

	if (resource_disabled("srat", 0))
		return;

	srat_physaddr = acpi_find_table(ACPI_SIG_SRAT);
	if (srat_physaddr == 0)
		return;

	/*
	 * Make a pass over the table to populate the cpus[] and
	 * mem_info[] tables.
	 */
	srat = acpi_map_table(srat_physaddr, ACPI_SIG_SRAT);
	error = 0;
	srat_walk_table(srat_parse_entry, &error);
	acpi_unmap_table(srat);
	srat = NULL;
	if (error || check_domains() != 0 || check_phys_avail() != 0) {
		srat_physaddr = 0;
		return;
	}

	renumber_domains();

	/* Point vm_phys at our memory affinity table. */
	mem_affinity = mem_info;
}
Example 2
/*
 * Look for an ACPI System Resource Affinity Table ("SRAT")
 */
static int
parse_srat(void)
{
	int error;

	if (resource_disabled("srat", 0))
		return (-1);

	srat_physaddr = acpi_find_table(ACPI_SIG_SRAT);
	if (srat_physaddr == 0)
		return (-1);

	/*
	 * Make a pass over the table to populate the cpus[] and
	 * mem_info[] tables.
	 */
	srat = acpi_map_table(srat_physaddr, ACPI_SIG_SRAT);
	error = 0;
	srat_walk_table(srat_parse_entry, &error);
	acpi_unmap_table(srat);
	srat = NULL;
	if (error || check_domains() != 0 || check_phys_avail() != 0 ||
	    renumber_domains() != 0) {
		srat_physaddr = 0;
		return (-1);
	}

#ifdef VM_NUMA_ALLOC
	/* Point vm_phys at our memory affinity table. */
	vm_ndomains = ndomain;
	mem_affinity = mem_info;
#endif

	return (0);
}
Example 3
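/*
 * Count the GICv3 redistributor regions described by the MADT.
 */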
static int
gic_v3_acpi_count_regions(device_t dev)
{
	struct gic_v3_softc *sc;
	ACPI_TABLE_MADT *madt;
	vm_paddr_t physaddr;

	sc = device_get_softc(dev);

	physaddr = acpi_find_table(ACPI_SIG_MADT);
	if (physaddr == 0)
		return (ENXIO);

	madt = acpi_map_table(physaddr, ACPI_SIG_MADT);
	if (madt == NULL) {
		device_printf(dev, "Unable to map the MADT\n");
		return (ENXIO);
	}

	acpi_walk_subtables(madt + 1, (char *)madt + madt->Header.Length,
	    madt_count_redistrib, sc);
	acpi_unmap_table(madt);

	return (sc->gic_redists.nregions > 0 ? 0 : ENXIO);
}
Example 4
/*
 * Look for an ACPI System Locality Distance Information Table ("SLIT")
 */
static int
parse_slit(void)
{

	if (resource_disabled("slit", 0)) {
		return (-1);
	}

	slit_physaddr = acpi_find_table(ACPI_SIG_SLIT);
	if (slit_physaddr == 0) {
		return (-1);
	}

	/*
	 * Make a pass over the table to parse the locality
	 * (distance) information it contains.
	 */
	slit = acpi_map_table(slit_physaddr, ACPI_SIG_SLIT);
	slit_parse_table(slit);
	acpi_unmap_table(slit);
	slit = NULL;

#ifdef VM_NUMA_ALLOC
	/* Tell the VM about it! */
	mem_locality = vm_locality_table;
#endif
	return (0);
}
Example 5
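/*
 * Walk the MADT looking for GICv2m MSI frame entries and add them as
 * children of the GIC device.
 */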
static bool
arm_gic_add_children(device_t dev)
{
	struct arm_gic_softc *sc = device_get_softc(dev);
	ACPI_TABLE_MADT *madt;
	vm_paddr_t physaddr;

	/* This should return a valid address as it did in gic_acpi_identify */
	physaddr = acpi_find_table(ACPI_SIG_MADT);
	if (physaddr == 0)
		return (false);

	madt = acpi_map_table(physaddr, ACPI_SIG_MADT);
	if (madt == NULL) {
		device_printf(dev, "gic: Unable to map the MADT\n");
		return (false);
	}

	acpi_walk_subtables(madt + 1, (char *)madt + madt->Header.Length,
	    madt_gicv2m_handler, sc);

	acpi_unmap_table(madt);

	return (true);
}
Example 6
/*
 * Look for an ACPI Multiple APIC Description Table ("APIC")
 */
static int
madt_probe(void)
{

	madt_physaddr = acpi_find_table(ACPI_SIG_MADT);
	if (madt_physaddr == 0)
		return (ENXIO);
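	/*
	 * A negative return value reports a successful probe and is used
	 * as a priority when the APIC enumerators are compared.
	 */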
	return (-50);
}
Example 7
static void
gic_v3_acpi_identify(driver_t *driver, device_t parent)
{
	struct madt_table_data madt_data;
	ACPI_TABLE_MADT *madt;
	vm_paddr_t physaddr;
	device_t dev;

	physaddr = acpi_find_table(ACPI_SIG_MADT);
	if (physaddr == 0)
		return;

	madt = acpi_map_table(physaddr, ACPI_SIG_MADT);
	if (madt == NULL) {
		device_printf(parent, "gic: Unable to map the MADT\n");
		return;
	}

	madt_data.parent = parent;
	madt_data.dist = NULL;
	madt_data.count = 0;

	acpi_walk_subtables(madt + 1, (char *)madt + madt->Header.Length,
	    madt_handler, &madt_data);
	if (madt_data.dist == NULL) {
		device_printf(parent,
		    "No gic interrupt or distributor table\n");
		goto out;
	}
	/* This is for the wrong GIC version */
	if (madt_data.dist->Version != ACPI_MADT_GIC_VERSION_V3)
		goto out;

	dev = BUS_ADD_CHILD(parent, BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE,
	    "gic", -1);
	if (dev == NULL) {
		device_printf(parent, "add gic child failed\n");
		goto out;
	}

	/* Add the MADT data */
	BUS_SET_RESOURCE(parent, dev, SYS_RES_MEMORY, 0,
	    madt_data.dist->BaseAddress, 128 * 1024);

	madt_data.dev = dev;
	acpi_walk_subtables(madt + 1, (char *)madt + madt->Header.Length,
	    rdist_map, &madt_data);

	acpi_set_private(dev, (void *)(uintptr_t)madt_data.dist->Version);

out:
	acpi_unmap_table(madt);
}
Example 8
/*
 * Look for an ACPI System Resource Affinity Table ("SRAT"),
 * allocate space for cpu information, and initialize globals.
 */
int
acpi_pxm_init(int ncpus, vm_paddr_t maxphys)
{
	unsigned int idx, size;
	vm_paddr_t addr;

	if (resource_disabled("srat", 0))
		return (-1);

	max_cpus = ncpus;
	last_cpu = -1;
	maxphyaddr = maxphys;
	srat_physaddr = acpi_find_table(ACPI_SIG_SRAT);
	if (srat_physaddr == 0)
		return (-1);

	/*
	 * Allocate data structure:
	 *
	 * Find the last physical memory region and steal some memory from
	 * it. This is done because at this point in the boot process
	 * malloc is still not usable.
	 */
	for (idx = 0; phys_avail[idx + 1] != 0; idx += 2);
	KASSERT(idx != 0, ("phys_avail is empty!"));
	idx -= 2;

	size = sizeof(*cpus) * max_cpus;
	addr = trunc_page(phys_avail[idx + 1] - size);
	KASSERT(addr >= phys_avail[idx],
	    ("Not enough memory for SRAT table items"));
	phys_avail[idx + 1] = addr - 1;

	/*
	 * We cannot rely on PHYS_TO_DMAP because this code is also used in
	 * i386, so use pmap_mapbios to map the memory, this will end up using
	 * the default memory attribute (WB), and the DMAP when available.
	 */
	cpus = (struct cpu_info *)pmap_mapbios(addr, size);
	bzero(cpus, size);
	return (0);
}
Example 9
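/*
 * Walk the MADT to add any child devices it describes, then attach them.
 */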
static void
gic_v3_acpi_bus_attach(device_t dev)
{
	ACPI_TABLE_MADT *madt;
	vm_paddr_t physaddr;

	physaddr = acpi_find_table(ACPI_SIG_MADT);
	if (physaddr == 0)
		return;

	madt = acpi_map_table(physaddr, ACPI_SIG_MADT);
	if (madt == NULL) {
		device_printf(dev, "Unable to map the MADT to add children\n");
		return;
	}

	acpi_walk_subtables(madt + 1, (char *)madt + madt->Header.Length,
	    gic_v3_add_children, dev);

	acpi_unmap_table(madt);

	bus_generic_attach(dev);
}
Example 10
void
hpet_init(void)
{
	// Try to find the HPET ACPI table.
	TRACE(("hpet_init: Looking for HPET...\n"));
	acpi_hpet *hpet = (acpi_hpet *)acpi_find_table(ACPI_HPET_SIGNATURE);

	if (hpet == NULL) {
		// No HPET table in the RSDT.
		// Since there are no other methods for finding it,
		// assume we don't have one.
		TRACE(("hpet_init: HPET not found.\n"));
		gKernelArgs.arch_args.hpet_phys = 0;
		gKernelArgs.arch_args.hpet = NULL;
		return;
	}

	TRACE(("hpet_init: found HPET at 0x%" B_PRIx64 ".\n",
		hpet->hpet_address.address));
	gKernelArgs.arch_args.hpet_phys = hpet->hpet_address.address;
	gKernelArgs.arch_args.hpet = (void *)mmu_map_physical_memory(
		gKernelArgs.arch_args.hpet_phys, B_PAGE_SIZE, kDefaultPageFlags);
}
Example 11
/*
 * Look for an ACPI System Locality Distance Information Table ("SLIT")
 */
static int
parse_slit(void)
{

	if (resource_disabled("slit", 0)) {
		return (-1);
	}

	slit_physaddr = acpi_find_table(ACPI_SIG_SLIT);
	if (slit_physaddr == 0) {
		return (-1);
	}

	/*
	 * Make a pass over the table to parse the locality
	 * (distance) information it contains.
	 */
	slit = acpi_map_table(slit_physaddr, ACPI_SIG_SLIT);
	slit_parse_table(slit);
	acpi_unmap_table(slit);
	slit = NULL;

	return (0);
}
Example 12
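/*
 * Parse the ACPI DMAR table and initialize each DMA remapping hardware
 * unit (DRHD) it describes.
 */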
int vtd_init(void)
{
	unsigned long offset, caps, sllps_caps = ~0UL;
	unsigned int pt_levels, num_did, n;
	const struct acpi_dmar_table *dmar;
	const struct acpi_dmar_drhd *drhd;
	void *reg_base = NULL;
	int err;

	dmar = (struct acpi_dmar_table *)acpi_find_table("DMAR", NULL);
	if (!dmar) {
		/* return -ENODEV; */
		printk("WARNING: No VT-d support found!\n");
		return 0;
	}

	if (sizeof(struct acpi_dmar_table) +
	    sizeof(struct acpi_dmar_drhd) > dmar->header.length)
		return -EIO;

	drhd = (struct acpi_dmar_drhd *)dmar->remap_structs;
	if (drhd->header.type != ACPI_DMAR_DRHD)
		return -EIO;

	offset = (void *)dmar->remap_structs - (void *)dmar;
	do {
		if (drhd->header.length < sizeof(struct acpi_dmar_drhd) ||
		    offset + drhd->header.length > dmar->header.length)
			return -EIO;

		/* TODO: support multiple segments */
		if (drhd->segment != 0)
			return -EIO;

		printk("Found DMAR @%p\n", drhd->register_base_addr);

		reg_base = page_alloc(&remap_pool, 1);
		if (!reg_base)
			return -ENOMEM;

		if (dmar_units == 0)
			dmar_reg_base = reg_base;
		else if (reg_base != dmar_reg_base + dmar_units * PAGE_SIZE)
			return -ENOMEM;

		err = page_map_create(&hv_paging_structs,
				      drhd->register_base_addr, PAGE_SIZE,
				      (unsigned long)reg_base,
				      PAGE_DEFAULT_FLAGS | PAGE_FLAG_UNCACHED,
				      PAGE_MAP_NON_COHERENT);
		if (err)
			return err;

		caps = mmio_read64(reg_base + VTD_CAP_REG);
		if (caps & VTD_CAP_SAGAW39)
			pt_levels = 3;
		else if (caps & VTD_CAP_SAGAW48)
			pt_levels = 4;
		else
			return -EIO;
		sllps_caps &= caps;

		if (dmar_pt_levels > 0 && dmar_pt_levels != pt_levels)
			return -EIO;
		dmar_pt_levels = pt_levels;

		if (caps & VTD_CAP_CM)
			return -EIO;

		/* We only support IOTLB registers within the first page. */
		if (vtd_iotlb_reg_base(reg_base) >= reg_base + PAGE_SIZE)
			return -EIO;

		if (mmio_read32(reg_base + VTD_GSTS_REG) & VTD_GSTS_TES)
			return -EBUSY;

		num_did = 1 << (4 + (caps & VTD_CAP_NUM_DID_MASK) * 2);
		if (num_did < dmar_num_did)
			dmar_num_did = num_did;

		dmar_units++;

		offset += drhd->header.length;
		drhd = (struct acpi_dmar_drhd *)
			(((void *)drhd) + drhd->header.length);

		err = vtd_init_fault_reporting(reg_base);
		if (err)
			return err;
	} while (offset < dmar->header.length &&
		 drhd->header.type == ACPI_DMAR_DRHD);

	vtd_init_fault_nmi();

	/*
	 * Derive vtd_paging from very similar x86_64_paging,
	 * replicating 0..3 for 4 levels and 1..3 for 3 levels.
	 */
	memcpy(vtd_paging, &x86_64_paging[4 - dmar_pt_levels],
	       sizeof(struct paging) * dmar_pt_levels);
	for (n = 0; n < dmar_pt_levels; n++)
		vtd_paging[n].set_next_pt = vtd_set_next_pt;
	if (!(sllps_caps & VTD_CAP_SLLPS1G))
		vtd_paging[dmar_pt_levels - 3].page_size = 0;
	if (!(sllps_caps & VTD_CAP_SLLPS2M))
		vtd_paging[dmar_pt_levels - 2].page_size = 0;

	return 0;
}
Example 13
/*
 * Initialize the local APIC on the BSP.
 */
static int
madt_setup_local(void)
{
	ACPI_TABLE_DMAR *dmartbl;
	vm_paddr_t dmartbl_physaddr;
	const char *reason;
	char *hw_vendor;
	u_int p[4];
	int user_x2apic;
	bool bios_x2apic;

	madt = pmap_mapbios(madt_physaddr, madt_length);
	if ((cpu_feature2 & CPUID2_X2APIC) != 0) {
		reason = NULL;

		/*
		 * Automatically detect several configurations where
		 * x2APIC mode is known to cause troubles.  User can
		 * override the setting with hw.x2apic_enable tunable.
		 */
		dmartbl_physaddr = acpi_find_table(ACPI_SIG_DMAR);
		if (dmartbl_physaddr != 0) {
			dmartbl = acpi_map_table(dmartbl_physaddr,
			    ACPI_SIG_DMAR);
			if ((dmartbl->Flags & ACPI_DMAR_X2APIC_OPT_OUT) != 0)
				reason = "by DMAR table";
			acpi_unmap_table(dmartbl);
		}
		if (vm_guest == VM_GUEST_VMWARE) {
			vmware_hvcall(VMW_HVCMD_GETVCPU_INFO, p);
			if ((p[0] & VMW_VCPUINFO_VCPU_RESERVED) != 0 ||
			    (p[0] & VMW_VCPUINFO_LEGACY_X2APIC) == 0)
				reason =
				    "inside VMWare without intr redirection";
		} else if (vm_guest == VM_GUEST_XEN) {
			reason = "due to running under XEN";
		} else if (vm_guest == VM_GUEST_NO &&
		    CPUID_TO_FAMILY(cpu_id) == 0x6 &&
		    CPUID_TO_MODEL(cpu_id) == 0x2a) {
			hw_vendor = kern_getenv("smbios.planar.maker");
			/*
			 * It seems that some Lenovo and ASUS
			 * SandyBridge-based notebook BIOSes have a
			 * bug which prevents booting AP in x2APIC
			 * mode.  Since the only way to detect mobile
			 * CPU is to check northbridge pci id, which
			 * cannot be done that early, disable x2APIC
			 * for all Lenovo and ASUS SandyBridge
			 * machines.
			 */
			if (hw_vendor != NULL) {
				if (!strcmp(hw_vendor, "LENOVO") ||
				    !strcmp(hw_vendor,
				    "ASUSTeK Computer Inc.")) {
					reason =
				    "for a suspected SandyBridge BIOS bug";
				}
				freeenv(hw_vendor);
			}
		}
		bios_x2apic = lapic_is_x2apic();
		if (reason != NULL && bios_x2apic) {
			if (bootverbose)
				printf("x2APIC should be disabled %s but "
				    "already enabled by BIOS; enabling.\n",
				     reason);
			reason = NULL;
		}
		if (reason == NULL)
			x2apic_mode = 1;
		else if (bootverbose)
			printf("x2APIC available but disabled %s\n", reason);
		user_x2apic = x2apic_mode;
		TUNABLE_INT_FETCH("hw.x2apic_enable", &user_x2apic);
		if (user_x2apic != x2apic_mode) {
			if (bios_x2apic && !user_x2apic)
				printf("x2APIC disabled by tunable and "
				    "enabled by BIOS; ignoring tunable.");
			else
				x2apic_mode = user_x2apic;
		}
	}

	lapic_init(madt->Address);
	printf("ACPI APIC Table: <%.*s %.*s>\n",
	    (int)sizeof(madt->Header.OemId), madt->Header.OemId,
	    (int)sizeof(madt->Header.OemTableId), madt->Header.OemTableId);

	/*
	 * We ignore 64-bit local APIC override entries.  Should we
	 * perhaps emit a warning here if we find one?
	 */
	return (0);
}
Example 14
int vtd_init(void)
{
	const struct acpi_dmar_table *dmar;
	const struct acpi_dmar_drhd *drhd;
	unsigned int pt_levels, num_did;
	void *reg_base = NULL;
	unsigned long offset;
	unsigned long caps;
	int err;

	dmar = (struct acpi_dmar_table *)acpi_find_table("DMAR", NULL);
	if (!dmar) {
		/* return -ENODEV; */
		printk("WARNING: No VT-d support found!\n");
		return 0;
	}

	if (sizeof(struct acpi_dmar_table) +
	    sizeof(struct acpi_dmar_drhd) > dmar->header.length)
		return -EIO;

	drhd = (struct acpi_dmar_drhd *)dmar->remap_structs;
	if (drhd->header.type != ACPI_DMAR_DRHD)
		return -EIO;

	offset = (void *)dmar->remap_structs - (void *)dmar;
	do {
		if (drhd->header.length < sizeof(struct acpi_dmar_drhd) ||
		    offset + drhd->header.length > dmar->header.length)
			return -EIO;

		/* TODO: support multiple segments */
		if (drhd->segment != 0)
			return -EIO;

		printk("Found DMAR @%p\n", drhd->register_base_addr);

		reg_base = page_alloc(&remap_pool, 1);
		if (!reg_base)
			return -ENOMEM;

		if (dmar_units == 0)
			dmar_reg_base = reg_base;
		else if (reg_base != dmar_reg_base + dmar_units * PAGE_SIZE)
			return -ENOMEM;

		err = page_map_create(hv_page_table, drhd->register_base_addr,
				      PAGE_SIZE, (unsigned long)reg_base,
				      PAGE_DEFAULT_FLAGS | PAGE_FLAG_UNCACHED,
				      PAGE_DEFAULT_FLAGS, PAGE_DIR_LEVELS,
				      PAGE_MAP_NON_COHERENT);
		if (err)
			return err;

		caps = mmio_read64(reg_base + VTD_CAP_REG);
		if (caps & VTD_CAP_SAGAW39)
			pt_levels = 3;
		else if (caps & VTD_CAP_SAGAW48)
			pt_levels = 4;
		else
			return -EIO;

		if (dmar_pt_levels > 0 && dmar_pt_levels != pt_levels)
			return -EIO;
		dmar_pt_levels = pt_levels;

		if (caps & VTD_CAP_CM)
			return -EIO;

		/* We only support IOTLB registers within the first page. */
		if (vtd_iotlb_reg_base(reg_base) >= reg_base + PAGE_SIZE)
			return -EIO;

		if (mmio_read32(reg_base + VTD_GSTS_REG) & VTD_GSTS_TES)
			return -EBUSY;

		num_did = 1 << (4 + (caps & VTD_CAP_NUM_DID_MASK) * 2);
		if (num_did < dmar_num_did)
			dmar_num_did = num_did;

		dmar_units++;

		offset += drhd->header.length;
		drhd = (struct acpi_dmar_drhd *)
			(((void *)drhd) + drhd->header.length);
	} while (offset < dmar->header.length &&
		 drhd->header.type == ACPI_DMAR_DRHD);

	return 0;
}
Example 15
static void
gic_acpi_identify(driver_t *driver, device_t parent)
{
	struct madt_table_data madt_data;
	ACPI_MADT_GENERIC_INTERRUPT *intr;
	ACPI_TABLE_MADT *madt;
	vm_paddr_t physaddr;
	device_t dev;
	int i;

	physaddr = acpi_find_table(ACPI_SIG_MADT);
	if (physaddr == 0)
		return;

	madt = acpi_map_table(physaddr, ACPI_SIG_MADT);
	if (madt == NULL) {
		device_printf(parent, "gic: Unable to map the MADT\n");
		return;
	}

	bzero(&madt_data, sizeof(madt_data));
	madt_data.parent = parent;
	madt_data.dist = NULL;

	acpi_walk_subtables(madt + 1, (char *)madt + madt->Header.Length,
	    madt_handler, &madt_data);

	if (madt_data.dist == NULL) {
		device_printf(parent, "gic: No distributor table found\n");
		goto out;
	}

	/* Check the version of the GIC we have */
	switch (madt_data.dist->Version) {
	case ACPI_MADT_GIC_VERSION_NONE:
	case ACPI_MADT_GIC_VERSION_V1:
	case ACPI_MADT_GIC_VERSION_V2:
		break;
	default:
		goto out;
	}

	intr = NULL;
	for (i = 0; i < MAXCPU; i++) {
		if (madt_data.intr[i] != NULL) {
			if (intr == NULL) {
				intr = madt_data.intr[i];
			} else if (intr->BaseAddress !=
			    madt_data.intr[i]->BaseAddress) {
				device_printf(parent,
"gic: Not all CPU interfaces at the same address, this may fail\n");
			}
		}
	}
	if (intr == NULL) {
		device_printf(parent, "gic: No CPU interfaces found\n");
		goto out;
	}

	dev = BUS_ADD_CHILD(parent, BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE,
	    "gic", -1);
	if (dev == NULL) {
		device_printf(parent, "add gic child failed\n");
		goto out;
	}

	BUS_SET_RESOURCE(parent, dev, SYS_RES_MEMORY, 0,
	    madt_data.dist->BaseAddress, 4 * 1024);
	BUS_SET_RESOURCE(parent, dev, SYS_RES_MEMORY, 1,
	    intr->BaseAddress, 4 * 1024);

	acpi_set_private(dev, (void *)(uintptr_t)madt_data.dist->Version);
out:
	acpi_unmap_table(madt);
}