Example #1
static void __init set_vsmp_pv_ops(void)
{
	void __iomem *address;
	unsigned int cap, ctl, cfg;

	/* set vSMP magic bits to indicate vSMP capable kernel */
	cfg = read_pci_config(0, 0x1f, 0, PCI_BASE_ADDRESS_0);
	address = early_ioremap(cfg, 8);
	cap = readl(address);
	ctl = readl(address + 4);
	printk(KERN_INFO "vSMP CTL: capabilities:0x%08x  control:0x%08x\n",
	       cap, ctl);
	if (cap & ctl & (1 << 4)) {
		/* Setup irq ops and turn on vSMP  IRQ fastpath handling */
		pv_irq_ops.irq_disable = PV_CALLEE_SAVE(vsmp_irq_disable);
		pv_irq_ops.irq_enable  = PV_CALLEE_SAVE(vsmp_irq_enable);
		pv_irq_ops.save_fl  = PV_CALLEE_SAVE(vsmp_save_fl);
		pv_irq_ops.restore_fl  = PV_CALLEE_SAVE(vsmp_restore_fl);
		pv_init_ops.patch = vsmp_patch;

		ctl &= ~(1 << 4);
		writel(ctl, address + 4);
		ctl = readl(address + 4);
		printk(KERN_INFO "vSMP CTL: control set to:0x%08x\n", ctl);
	}

	early_iounmap(address, 8);
}
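The shape of this function — map a small MMIO window, read, modify, write back, unmap with the same size — is the canonical early_ioremap() pattern for code that runs before the regular ioremap() machinery is up. A minimal sketch of just that pattern; DEMO_PHYS and the register offsets are hypothetical, not taken from the example above:

static void __init demo_clear_ctl_bit(void)
{
	/* Hypothetical MMIO base, for illustration only. */
	void __iomem *p = early_ioremap(0xfed00000UL, 8);
	u32 ctl;

	if (!p)
		return;
	ctl = readl(p + 4);			/* control register at offset 4 */
	writel(ctl & ~(1 << 4), p + 4);		/* clear one control bit */
	early_iounmap(p, 8);			/* size must match the map */
}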
Example #2
/*
 * Temporarily use the virtual area starting from FIX_IO_APIC_BASE_END,
 * to map the target physical address. The problem is that set_fixmap()
 * provides a single page, and it is possible that the page is not
 * sufficient.
 * By using this area, we can map up to MAX_IO_APICS pages temporarily,
 * i.e. until the next __va_range() call.
 *
 * Important Safety Note:  The fixed I/O APIC page numbers are *subtracted*
 * from the fixed base.  That's why we start at FIX_IO_APIC_BASE_END and
 * count idx down while incrementing the phys address.
 */
char *__init __acpi_map_table(unsigned long phys, unsigned long size)
{

	if (!phys || !size)
		return NULL;

	return early_ioremap(phys, size);
}
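__acpi_map_table() is typically called twice per table: once to map just the header so the true length can be read, then again for the full table. A hedged sketch of that calling convention; table_phys is an assumed input (e.g. an RSDT/XSDT entry) and error handling is minimal:

static struct acpi_table_header *__init
demo_map_full_table(unsigned long table_phys)
{
	struct acpi_table_header *hdr;
	u32 len;

	hdr = (struct acpi_table_header *)__acpi_map_table(table_phys,
							   sizeof(*hdr));
	if (!hdr)
		return NULL;
	len = hdr->length;	/* the real size lives in the header */
	return (struct acpi_table_header *)__acpi_map_table(table_phys, len);
}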
Example #3
/*
 * sfi_un/map_memory call early_ioremap()/early_iounmap(), which are
 * __init functions; that would normally trigger a section-mismatch
 * warning, so use __ref to silence it.
 */
static void __iomem * __ref sfi_map_memory(u64 phys, u32 size)
{
	if (!phys || !size)
		return NULL;

	if (sfi_use_ioremap)
		return ioremap_cache(phys, size);
	else
		return early_ioremap(phys, size);
}
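The "sfi_un/map_memory" in the comment refers to a matching unmap helper that mirrors the branch above. A plausible reconstruction (not part of the quoted excerpt):

/* Reconstructed counterpart: undo sfi_map_memory() with the matching
 * unmap primitive for whichever path created the mapping. */
static void __ref sfi_unmap_memory(void __iomem *virt, u32 size)
{
	if (!virt || !size)
		return;

	if (sfi_use_ioremap)
		iounmap(virt);
	else
		early_iounmap(virt, size);
}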
Example #4
static void __iomem * __init xdbc_map_pci_mmio(u32 bus, u32 dev, u32 func)
{
	u64 val64, sz64, mask64;
	void __iomem *base;
	u32 val, sz;
	u8 byte;

	val = read_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0);
	write_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0, ~0);
	sz = read_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0);
	write_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0, val);

	if (val == 0xffffffff || sz == 0xffffffff) {
		pr_notice("invalid mmio bar\n");
		return NULL;
	}

	val64	= val & PCI_BASE_ADDRESS_MEM_MASK;
	sz64	= sz & PCI_BASE_ADDRESS_MEM_MASK;
	mask64	= PCI_BASE_ADDRESS_MEM_MASK;

	if ((val & PCI_BASE_ADDRESS_MEM_TYPE_MASK) == PCI_BASE_ADDRESS_MEM_TYPE_64) {
		val = read_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0 + 4);
		write_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0 + 4, ~0);
		sz = read_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0 + 4);
		write_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0 + 4, val);

		val64	|= (u64)val << 32;
		sz64	|= (u64)sz << 32;
		mask64	|= ~0ULL << 32;
	}

	sz64 &= mask64;

	if (!sz64) {
		pr_notice("invalid mmio address\n");
		return NULL;
	}

	sz64 = 1ULL << __ffs64(sz64);

	/* Check if the mem space is enabled: */
	byte = read_pci_config_byte(bus, dev, func, PCI_COMMAND);
	if (!(byte & PCI_COMMAND_MEMORY)) {
		byte |= PCI_COMMAND_MEMORY;
		write_pci_config_byte(bus, dev, func, PCI_COMMAND, byte);
	}

	xdbc.xhci_start = val64;
	xdbc.xhci_length = sz64;
	base = early_ioremap(val64, sz64);

	return base;
}
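The write-~0/read-back sequence is the standard PCI BAR sizing probe: the device keeps only the address bits it implements, so after masking off the flag bits the BAR size is the lowest set bit of the read-back value — hence 1ULL << __ffs64(sz64) above. A self-contained userspace sketch of just the decode step (the input values are made up):

#include <stdint.h>
#include <stdio.h>

/* Decode a BAR size from the value read back after writing all-ones.
 * addr_mask strips the BAR flag bits (PCI_BASE_ADDRESS_MEM_MASK in the
 * kernel code above). */
static uint64_t bar_size(uint64_t readback, uint64_t addr_mask)
{
	uint64_t sz = readback & addr_mask;

	if (!sz)
		return 0;
	return 1ULL << __builtin_ctzll(sz);	/* lowest set bit == size */
}

int main(void)
{
	/* A 64 KiB memory BAR reads back 0xffff0000 after the probe. */
	printf("size: %llu\n",
	       (unsigned long long)bar_size(0xffff0000ULL, 0xfffffff0ULL));
	return 0;
}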
Example #5
/*
 * sfi_un/map_memory call early_ioremap()/early_iounmap(), which are
 * __init functions; that would normally trigger a section-mismatch
 * warning, so use __ref to silence it.
 */
static void __iomem * __ref sfi_map_memory(u64 phys, u32 size)
{
	pr_emerg("Entering sfi_map_memory, phys = %llx, size = %d\n", phys, size);

	if (!phys || !size)
		return NULL;

	if (sfi_use_ioremap)
		return ioremap_cache(phys, size);
	else
		return early_ioremap(phys, size);
}
Example #6
int __init
efi_setup_pcdp_console(char *cmdline)
{
	struct pcdp *pcdp;
	struct pcdp_uart *uart;
	struct pcdp_device *dev, *end;
	int i, serial = 0;
	int rc = -ENODEV;

	if (efi.hcdp == EFI_INVALID_TABLE_ADDR)
		return -ENODEV;

	pcdp = early_ioremap(efi.hcdp, 4096);
	if (!pcdp)
		return -ENODEV;	/* early_ioremap() can fail */
	printk(KERN_INFO "PCDP: v%d at 0x%lx\n", pcdp->rev, efi.hcdp);

	if (strstr(cmdline, "console=hcdp")) {
		if (pcdp->rev < 3)
			serial = 1;
	} else if (strstr(cmdline, "console=")) {
		printk(KERN_INFO "Explicit \"console=\"; ignoring PCDP\n");
		goto out;
	}

	if (pcdp->rev < 3 && efi_uart_console_only())
		serial = 1;

	for (i = 0, uart = pcdp->uart; i < pcdp->num_uarts; i++, uart++) {
		if (uart->flags & PCDP_UART_PRIMARY_CONSOLE || serial) {
			if (uart->type == PCDP_CONSOLE_UART) {
				rc = setup_serial_console(uart);
				goto out;
			}
		}
	}

	end = (struct pcdp_device *) ((u8 *) pcdp + pcdp->length);
	for (dev = (struct pcdp_device *) (pcdp->uart + pcdp->num_uarts);
	     dev < end;
	     dev = (struct pcdp_device *) ((u8 *) dev + dev->length)) {
		if (dev->flags & PCDP_PRIMARY_CONSOLE) {
			if (dev->type == PCDP_CONSOLE_VGA) {
				rc = setup_vga_console(dev);
				goto out;
			}
		}
	}

out:
	early_iounmap(pcdp, 4096);
	return rc;
}
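The second loop walks a packed array of variable-length records: each pcdp_device carries its own length, and the cursor advances by that many bytes until it reaches the end of the table. A generic sketch of the idiom; the field names are illustrative, not the real pcdp_device layout:

/* Generic variable-length-record walk, as used for the PCDP device
 * list above. The zero-length guard prevents an infinite loop on a
 * corrupt table. */
struct rec {
	u8 type;
	u8 length;	/* total record length, header included */
	u8 data[];
};

static int __init count_records(struct rec *first, void *end)
{
	struct rec *r;
	int n = 0;

	for (r = first; (void *)r < end && r->length;
	     r = (struct rec *)((u8 *)r + r->length))
		n++;
	return n;
}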
Example #7
static void __init reserve_setup_data(void)
{
	struct setup_data *data;
	unsigned long pa_data;
	char buf[32];

	if (boot_params.hdr.version < 0x0209)
		return;
	pa_data = boot_params.hdr.setup_data;
	while (pa_data) {
		data = early_ioremap(pa_data, sizeof(*data));
		sprintf(buf, "setup data %x", data->type);
		reserve_early(pa_data, pa_data+sizeof(*data)+data->len, buf);
		pa_data = data->next;
		early_iounmap(data, sizeof(*data));
	}
}
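The walk relies on the setup_data node layout from the x86 boot protocol: each header names the physical address of the next node, so the loop can map and reserve one node at a time. For reference:

/* x86 boot protocol setup_data node, as assumed by the loop above. */
struct setup_data {
	__u64 next;	/* physical address of the next node, 0 ends the list */
	__u32 type;	/* e.g. SETUP_E820_EXT */
	__u32 len;	/* payload length; header + len is reserved above */
	__u8 data[];	/* payload follows the header */
};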
Example #8
/*
 * Because of the size limitation of struct boot_params, only the first
 * 128 E820 memory entries are passed to the kernel via
 * boot_params.e820_map; the rest are passed via the SETUP_E820_EXT node
 * of the struct setup_data linked list, which is parsed here.
 */
void __init parse_e820_ext(struct setup_data *sdata, unsigned long pa_data)
{
	u32 map_len;
	int entries;
	struct e820entry *extmap;

	entries = sdata->len / sizeof(struct e820entry);
	map_len = sdata->len + sizeof(struct setup_data);
	if (map_len > PAGE_SIZE)
		sdata = early_ioremap(pa_data, map_len);
	extmap = (struct e820entry *)(sdata->data);
	__append_e820_map(extmap, entries);
	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
	if (map_len > PAGE_SIZE)
		early_iounmap(sdata, map_len);
	printk(KERN_INFO "extended physical RAM map:\n");
	e820_print_map("extended");
}
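The entry count falls out of the packed e820 entry layout — 8 + 8 + 4 = 20 bytes per entry — so sdata->len is expected to be an exact multiple of it:

/* Packed legacy e820 entry: 20 bytes, the divisor in the entries
 * calculation above. */
struct e820entry {
	__u64 addr;	/* start of memory segment */
	__u64 size;	/* size of memory segment */
	__u32 type;	/* type of memory segment */
} __attribute__((packed));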
Example #9
static __meminit void *alloc_low_page(unsigned long *phys)
{ 
	unsigned long pfn = table_end++;
	void *adr;

	if (after_bootmem) {
		adr = (void *)get_zeroed_page(GFP_ATOMIC);
		*phys = __pa(adr);
		return adr;
	}

	if (pfn >= end_pfn) 
		panic("alloc_low_page: ran out of memory"); 

	adr = early_ioremap(pfn * PAGE_SIZE, PAGE_SIZE);
	memset(adr, 0, PAGE_SIZE);
	*phys  = pfn * PAGE_SIZE;
	return adr;
}
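Each early mapping handed out here has to be released before the fixmap slots backing early_ioremap() run out. The counterpart helper from the same era's init_64.c plausibly looked like this (a reconstruction, not part of the quoted excerpt):

/* Release a page from alloc_low_page(). Before bootmem is up the page
 * came from early_ioremap(); afterwards it came from get_zeroed_page()
 * and is freed through the normal allocator instead. */
static __meminit void unmap_low_page(void *adr)
{
	if (after_bootmem)
		return;

	early_iounmap(adr, PAGE_SIZE);
}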
Example #10
void __init setup_olpc_ofw_pgd(void)
{
	pgd_t *base, *ofw_pde;

	if (!olpc_ofw_cif)
		return;

	/* fetch OFW's PDE */
	base = early_ioremap(olpc_ofw_pgd, sizeof(olpc_ofw_pgd) * PTRS_PER_PGD);
	if (!base) {
		printk(KERN_ERR "failed to remap OFW's pgd - disabling OFW!\n");
		olpc_ofw_cif = NULL;
		return;
	}
	ofw_pde = &base[OLPC_OFW_PDE_NR];

	/* install OFW's PDE permanently into the kernel's pgtable */
	set_pgd(&swapper_pg_dir[OLPC_OFW_PDE_NR], *ofw_pde);
	/* implicit optimization barrier here due to uninline function return */

	early_iounmap(base, sizeof(olpc_ofw_pgd) * PTRS_PER_PGD);
}
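One point worth spelling out: assuming olpc_ofw_pgd is a 32-bit physical-address variable, sizeof(olpc_ofw_pgd) * PTRS_PER_PGD works out to 4 * 1024 = 4096 bytes on non-PAE x86-32, i.e. the mapping covers exactly one full page directory, which is why a single PDE can then be picked out of it by index.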
Example #12
void __init trap_init(void)
{
	int i;

#ifdef CONFIG_EISA
	void __iomem *p = early_ioremap(0x0FFFD9, 4);

	if (readl(p) == 'E' + ('I'<<8) + ('S'<<16) + ('A'<<24))
		EISA_bus = 1;
	early_iounmap(p, 4);
#endif

	set_intr_gate(0, &divide_error);
	set_intr_gate_ist(1, &debug, DEBUG_STACK);
	set_intr_gate_ist(2, &nmi, NMI_STACK);
	/* int3 can be called from all */
	set_system_intr_gate_ist(3, &int3, DEBUG_STACK);
	/* int4 can be called from all */
	set_system_intr_gate(4, &overflow);
	set_intr_gate(5, &bounds);
	set_intr_gate(6, &invalid_op);
	set_intr_gate(7, &device_not_available);
#ifdef CONFIG_X86_32
	set_task_gate(8, GDT_ENTRY_DOUBLEFAULT_TSS);
#else
	set_intr_gate_ist(8, &double_fault, DOUBLEFAULT_STACK);
#endif
	set_intr_gate(9, &coprocessor_segment_overrun);
	set_intr_gate(10, &invalid_TSS);
	set_intr_gate(11, &segment_not_present);
	set_intr_gate_ist(12, &stack_segment, STACKFAULT_STACK);
	set_intr_gate(13, &general_protection);
	set_intr_gate(14, &page_fault);
	set_intr_gate(15, &spurious_interrupt_bug);
	set_intr_gate(16, &coprocessor_error);
	set_intr_gate(17, &alignment_check);
#ifdef CONFIG_X86_MCE
	set_intr_gate_ist(18, &machine_check, MCE_STACK);
#endif
	set_intr_gate(19, &simd_coprocessor_error);

#ifdef CONFIG_IA32_EMULATION
	set_system_intr_gate(IA32_SYSCALL_VECTOR, ia32_syscall);
#endif

#ifdef CONFIG_X86_32
	if (cpu_has_fxsr) {
		printk(KERN_INFO "Enabling fast FPU save and restore... ");
		set_in_cr4(X86_CR4_OSFXSR);
		printk("done.\n");
	}
	if (cpu_has_xmm) {
		printk(KERN_INFO
			"Enabling unmasked SIMD FPU exception support... ");
		set_in_cr4(X86_CR4_OSXMMEXCPT);
		printk("done.\n");
	}

	set_system_trap_gate(SYSCALL_VECTOR, &system_call);
#endif

	/* Reserve all the builtin and the syscall vector: */
	for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++)
		set_bit(i, used_vectors);

#ifdef CONFIG_X86_64
	set_bit(IA32_SYSCALL_VECTOR, used_vectors);
#else
	set_bit(SYSCALL_VECTOR, used_vectors);
#endif
	/*
	 * Should be a barrier for any external CPU state:
	 */
	cpu_init();

#ifdef CONFIG_X86_32
	trap_init_hook();
#endif
}
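The EISA probe compares a little-endian 32-bit load against 'E' + ('I' << 8) + ('S' << 16) + ('A' << 24), which is exactly the bytes "EISA" in memory order on a little-endian machine. A tiny userspace check of that equivalence:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	const char sig[4] = { 'E', 'I', 'S', 'A' };
	uint32_t v;

	/* Reproduce what readl() sees when "EISA" sits in memory. */
	memcpy(&v, sig, 4);
	printf("match: %d\n",
	       v == ('E' + ('I' << 8) + ('S' << 16) + ('A' << 24)));
	return 0;
}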
Example #13
void __init acpi_initrd_override(void *data, size_t size)
{
	int sig, no, table_nr = 0, total_offset = 0;
	long offset = 0;
	struct acpi_table_header *table;
	char cpio_path[32] = "kernel/firmware/acpi/";
	struct cpio_data file;

	if (data == NULL || size == 0)
		return;

	for (no = 0; no < ACPI_OVERRIDE_TABLES; no++) {
		file = find_cpio_data(cpio_path, data, size, &offset);
		if (!file.data)
			break;

		data += offset;
		size -= offset;

		if (file.size < sizeof(struct acpi_table_header)) {
			pr_err("ACPI OVERRIDE: Table smaller than ACPI header [%s%s]\n",
				cpio_path, file.name);
			continue;
		}

		table = file.data;

		for (sig = 0; table_sigs[sig]; sig++)
			if (!memcmp(table->signature, table_sigs[sig], 4))
				break;

		if (!table_sigs[sig]) {
			pr_err("ACPI OVERRIDE: Unknown signature [%s%s]\n",
				cpio_path, file.name);
			continue;
		}
		if (file.size != table->length) {
			pr_err("ACPI OVERRIDE: File length does not match table length [%s%s]\n",
				cpio_path, file.name);
			continue;
		}
		if (acpi_table_checksum(file.data, table->length)) {
			pr_err("ACPI OVERRIDE: Bad table checksum [%s%s]\n",
				cpio_path, file.name);
			continue;
		}

		pr_info("%4.4s ACPI table found in initrd [%s%s][0x%x]\n",
			table->signature, cpio_path, file.name, table->length);

		all_tables_size += table->length;
		acpi_initrd_files[table_nr].data = file.data;
		acpi_initrd_files[table_nr].size = file.size;
		table_nr++;
	}
	if (table_nr == 0)
		return;

	acpi_tables_addr =
		memblock_find_in_range(0, max_low_pfn_mapped << PAGE_SHIFT,
				       all_tables_size, PAGE_SIZE);
	if (!acpi_tables_addr) {
		WARN_ON(1);
		return;
	}
	/*
	 * Calling e820_add_region alone does not work: the tables end up
	 * invalid later because the memory gets reused.
	 * memblock_reserve works as expected and the tables won't get
	 * modified, but it's not enough on x86 by itself, because ioremap
	 * (used by acpi_os_map_memory) will complain later that the pages
	 * to be mapped are not marked "reserved".
	 * Reserving via both memblock_reserve and e820_add_region (through
	 * arch_reserve_mem_area) works fine.
	 */
	memblock_reserve(acpi_tables_addr, all_tables_size);
	arch_reserve_mem_area(acpi_tables_addr, all_tables_size);

	/*
	 * early_ioremap() can only map 256 KiB at a time. If we mapped all
	 * the tables at once we could hit that limit, so map and copy one
	 * chunk at a time, the same way relocate_initrd() does.
	 */
	for (no = 0; no < table_nr; no++) {
		unsigned char *src_p = acpi_initrd_files[no].data;
		phys_addr_t size = acpi_initrd_files[no].size;
		phys_addr_t dest_addr = acpi_tables_addr + total_offset;
		phys_addr_t slop, clen;
		char *dest_p;

		total_offset += size;

		while (size) {
			slop = dest_addr & ~PAGE_MASK;
			clen = size;
			if (clen > MAP_CHUNK_SIZE - slop)
				clen = MAP_CHUNK_SIZE - slop;
			dest_p = early_ioremap(dest_addr & PAGE_MASK,
						 clen + slop);
			memcpy(dest_p + slop, src_p, clen);
			early_iounmap(dest_p, clen + slop);
			src_p += clen;
			dest_addr += clen;
			size -= clen;
		}
	}
}
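MAP_CHUNK_SIZE is not shown in this excerpt; it is derived from the fixmap budget that backs early_ioremap(). A reconstruction consistent with the 256 KiB figure in the comment (64 slots of 4 KiB pages):

/* Reconstruction: one early_ioremap() cannot exceed the number of
 * fixmap slots reserved for it, i.e. NR_FIX_BTMAPS pages. */
#define MAP_CHUNK_SIZE	(NR_FIX_BTMAPS << PAGE_SHIFT)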
Example #14
void __init acpi_initrd_override(void *data, size_t size)
{
	int sig, no, table_nr = 0, total_offset = 0;
	long offset = 0;
	struct acpi_table_header *table;
	char cpio_path[32] = "kernel/firmware/acpi/";
	struct cpio_data file;
	struct cpio_data early_initrd_files[ACPI_OVERRIDE_TABLES];
	char *p;

	if (data == NULL || size == 0)
		return;

	for (no = 0; no < ACPI_OVERRIDE_TABLES; no++) {
		file = find_cpio_data(cpio_path, data, size, &offset);
		if (!file.data)
			break;

		data += offset;
		size -= offset;

		if (file.size < sizeof(struct acpi_table_header))
			INVALID_TABLE("Table smaller than ACPI header",
				      cpio_path, file.name);

		table = file.data;

		for (sig = 0; table_sigs[sig]; sig++)
			if (!memcmp(table->signature, table_sigs[sig], 4))
				break;

		if (!table_sigs[sig])
			INVALID_TABLE("Unknown signature",
				      cpio_path, file.name);
		if (file.size != table->length)
			INVALID_TABLE("File length does not match table length",
				      cpio_path, file.name);
		if (acpi_table_checksum(file.data, table->length))
			INVALID_TABLE("Bad table checksum",
				      cpio_path, file.name);

		pr_info("%4.4s ACPI table found in initrd [%s%s][0x%x]\n",
			table->signature, cpio_path, file.name, table->length);

		all_tables_size += table->length;
		early_initrd_files[table_nr].data = file.data;
		early_initrd_files[table_nr].size = file.size;
		table_nr++;
	}
	if (table_nr == 0)
		return;

	acpi_tables_addr =
		memblock_find_in_range(0, max_low_pfn_mapped << PAGE_SHIFT,
				       all_tables_size, PAGE_SIZE);
	if (!acpi_tables_addr) {
		WARN_ON(1);
		return;
	}
	/*
	 * Calling e820_add_region alone does not work: the tables end up
	 * invalid later because the memory gets reused.
	 * memblock_reserve works as expected and the tables won't get
	 * modified, but it's not enough on x86 by itself, because ioremap
	 * (used by acpi_os_map_memory) will complain later that the pages
	 * to be mapped are not marked "reserved".
	 * Reserving via both memblock_reserve and e820_add_region (through
	 * arch_reserve_mem_area) works fine.
	 */
	memblock_reserve(acpi_tables_addr, all_tables_size);	/* (base, size), not (base, end) */
	arch_reserve_mem_area(acpi_tables_addr, all_tables_size);

	p = early_ioremap(acpi_tables_addr, all_tables_size);

	for (no = 0; no < table_nr; no++) {
		memcpy(p + total_offset, early_initrd_files[no].data,
		       early_initrd_files[no].size);
		total_offset += early_initrd_files[no].size;
	}
	early_iounmap(p, all_tables_size);
}
Example #15
int __init get_memcfg_from_srat(void)
{
	struct acpi_table_header *header = NULL;
	struct acpi_table_rsdp *rsdp = NULL;
	struct acpi_table_rsdt *rsdt = NULL;
	acpi_native_uint rsdp_address = 0;
	struct acpi_static_rsdt saved_rsdt;
	int tables = 0;
	int i = 0;

	rsdp_address = acpi_os_get_root_pointer();
	if (!rsdp_address) {
		printk("%s: System description tables not found\n",
		       __func__);
		goto out_err;
	}

	printk("%s: assigning address to rsdp\n", __func__);
	rsdp = (struct acpi_table_rsdp *)(u32)rsdp_address;
	if (!rsdp) {
		printk("%s: Didn't find ACPI root!\n", __func__);
		goto out_err;
	}

	printk(KERN_INFO "%.8s v%d [%.6s]\n", rsdp->signature, rsdp->revision,
	       rsdp->oem_id);

	if (strncmp(rsdp->signature, ACPI_SIG_RSDP, strlen(ACPI_SIG_RSDP))) {
		printk(KERN_WARNING "%s: RSDP table signature incorrect\n",
		       __func__);
		goto out_err;
	}

	rsdt = (struct acpi_table_rsdt *)
	    early_ioremap(rsdp->rsdt_physical_address,
			  sizeof(struct acpi_table_rsdt));

	if (!rsdt) {
		printk(KERN_WARNING
		       "%s: ACPI: Invalid root system description tables (RSDT)\n",
		       __func__);
		goto out_err;
	}

	header = &rsdt->header;

	if (strncmp(header->signature, ACPI_SIG_RSDT, strlen(ACPI_SIG_RSDT))) {
		printk(KERN_WARNING "ACPI: RSDT signature incorrect\n");
		goto out_err;
	}

	/*
	 * The number of tables is computed by taking the size of the
	 * table-pointer array (total RSDT length minus the header size)
	 * divided by the size of each entry (4-byte table pointers).
	 */
	tables = (header->length - sizeof(struct acpi_table_header)) / 4;

	if (!tables)
		goto out_err;

	memcpy(&saved_rsdt, rsdt, sizeof(saved_rsdt));

	if (saved_rsdt.table.header.length > sizeof(saved_rsdt)) {
		printk(KERN_WARNING "ACPI: Too big length in RSDT: %d\n",
		       saved_rsdt.table.header.length);
		goto out_err;
	}

	printk("Begin SRAT table scan....\n");

	for (i = 0; i < tables; i++) {
		/* Map in header, then map in full table length. */
		header = (struct acpi_table_header *)
		    early_ioremap(saved_rsdt.table.table_offset_entry[i],
				  sizeof(struct acpi_table_header));
		if (!header)
			break;
		header = (struct acpi_table_header *)
		    early_ioremap(saved_rsdt.table.table_offset_entry[i],
				  header->length);
		if (!header)
			break;

		if (strncmp((char *)&header->signature, ACPI_SIG_SRAT, 4))
			continue;

		/* we've found the SRAT table; no need to look at any more tables */
		return acpi20_parse_srat((struct acpi_table_srat *)header);
	}
out_err:
	remove_all_active_ranges();
	printk("failed to get NUMA memory information from SRAT table\n");
	return 0;
}
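As a worked example of the tables computation: an RSDT whose header->length is 36 + 4 * 8 = 68 bytes (the 36-byte ACPI table header plus eight 4-byte pointers) yields (68 - 36) / 4 = 8 entries to scan.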