static int setup_kdump_elfcorehdr(void)
{
	size_t elfcorebuf_sz;
	char *elfcorebuf;

	if (!OLDMEM_BASE || is_kdump_kernel())
		return -EINVAL;
	s390_elf_corehdr_create(&elfcorebuf, &elfcorebuf_sz);
	elfcorehdr_addr = (unsigned long long) elfcorebuf;
	elfcorehdr_size = elfcorebuf_sz;
	return 0;
}
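For reference, each example in this listing pivots on the same helper: is_kdump_kernel() from include/linux/crash_dump.h reports whether an ELF core header address has been recorded for this boot, i.e. whether we are running as the crash-capture (kdump) kernel. The s390 function above fills in elfcorehdr_addr and elfcorehdr_size, the globals that /proc/vmcore later consumes and that is_kdump_kernel() tests. A paraphrased sketch of the helper follows; the exact definition varies slightly across kernel versions:

/* Paraphrased from include/linux/crash_dump.h; details differ between versions. */
#define ELFCORE_ADDR_MAX	(-1ULL)			/* "no ELF core header present" */
extern unsigned long long elfcorehdr_addr;		/* set via elfcorehdr= or by arch code */

static inline bool is_kdump_kernel(void)
{
	return elfcorehdr_addr != ELFCORE_ADDR_MAX;
}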
Example #2
/* IPoIB mlx5 netdev profile */
static void mlx5i_build_nic_params(struct mlx5_core_dev *mdev,
				   struct mlx5e_params *params)
{
	/* Override RQ params as IPoIB supports only LINKED LIST RQ for now */
	mlx5e_set_rq_type_params(mdev, params, MLX5_WQ_TYPE_LINKED_LIST);

	/* RQ size in ipoib by default is 512 */
	params->log_rq_size = is_kdump_kernel() ?
		MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
		MLX5I_PARAMS_DEFAULT_LOG_RQ_SIZE;

	params->lro_en = false;
}
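The ternary above is the typical driver-side use of is_kdump_kernel(): a crash-capture kernel boots with very little memory, so drivers fall back to their smallest workable ring sizes. A minimal, hedged sketch of the same pattern; the mydrv_* names are made up for illustration and are not real mlx5 identifiers:

#include <linux/types.h>
#include <linux/crash_dump.h>	/* is_kdump_kernel() */

#define MYDRV_LOG_RQ_SIZE_DEFAULT	9	/* 512 entries on a normal boot */
#define MYDRV_LOG_RQ_SIZE_MINIMUM	6	/* 64 entries, to fit the kdump kernel */

static u8 mydrv_pick_log_rq_size(void)
{
	return is_kdump_kernel() ? MYDRV_LOG_RQ_SIZE_MINIMUM
				 : MYDRV_LOG_RQ_SIZE_DEFAULT;
}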
Example #3
static int iommu_load_old_irte(struct intel_iommu *iommu)
{
	struct irte *old_ir_table;
	phys_addr_t irt_phys;
	unsigned int i;
	size_t size;
	u64 irta;

	if (!is_kdump_kernel()) {
		pr_warn("IRQ remapping was enabled on %s but we are not in kdump mode\n",
			iommu->name);
		clear_ir_pre_enabled(iommu);
		iommu_disable_irq_remapping(iommu);
		return -EINVAL;
	}

	/* Check whether the old ir-table has the same size as ours */
	irta = dmar_readq(iommu->reg + DMAR_IRTA_REG);
	if ((irta & INTR_REMAP_TABLE_REG_SIZE_MASK)
	     != INTR_REMAP_TABLE_REG_SIZE)
		return -EINVAL;

	irt_phys = irta & VTD_PAGE_MASK;
	size     = INTR_REMAP_TABLE_ENTRIES*sizeof(struct irte);

	/* Map the old IR table */
	old_ir_table = memremap(irt_phys, size, MEMREMAP_WB);
	if (!old_ir_table)
		return -ENOMEM;

	/* Copy data over */
	memcpy(iommu->ir_table->base, old_ir_table, size);

	__iommu_flush_cache(iommu, iommu->ir_table->base, size);

	/*
	 * Now check the table for used entries and mark those as
	 * allocated in the bitmap
	 */
	for (i = 0; i < INTR_REMAP_TABLE_ENTRIES; i++) {
		if (iommu->ir_table->base[i].present)
			bitmap_set(iommu->ir_table->bitmap, i, 1);
	}

	memunmap(old_ir_table);

	return 0;
}
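iommu_load_old_irte() is one instance of a generic kdump idiom: memremap() the physical table the crashed kernel left programmed into the hardware, copy it into this kernel's own allocation, then drop the temporary mapping. A hedged, generic sketch of that shape; copy_old_table() and its arguments are illustrative, not a kernel API:

#include <linux/errno.h>
#include <linux/io.h>		/* memremap(), memunmap() */
#include <linux/string.h>	/* memcpy() */
#include <linux/types.h>

static int copy_old_table(phys_addr_t old_phys, void *new_virt, size_t size)
{
	/* Map the old kernel's table with ordinary cacheable (write-back) semantics. */
	void *old = memremap(old_phys, size, MEMREMAP_WB);

	if (!old)
		return -ENOMEM;
	memcpy(new_virt, old, size);
	memunmap(old);
	return 0;
}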
Example #4
File: ipoib.c Project: metux/linux
/* IPoIB mlx5 netdev profile */
static void mlx5i_build_nic_params(struct mlx5_core_dev *mdev,
				   struct mlx5e_params *params)
{
	/* Override RQ params as IPoIB supports only LINKED LIST RQ for now */
	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ, false);
	mlx5e_set_rq_type(mdev, params);
	mlx5e_init_rq_type_params(mdev, params);

	/* RQ size in ipoib by default is 512 */
	params->log_rq_mtu_frames = is_kdump_kernel() ?
		MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
		MLX5I_PARAMS_DEFAULT_LOG_RQ_SIZE;

	params->lro_en = false;
	params->hard_mtu = MLX5_IB_GRH_BYTES + MLX5_IPOIB_HARD_LEN;
}
Example #5
static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu)
{
	if (ipl_info.type != IPL_TYPE_FCP_DUMP && !OLDMEM_BASE)
		return;
	if (is_kdump_kernel())
		return;
	if (cpu >= NR_CPUS) {
		pr_warning("CPU %i exceeds the maximum %i and is excluded from "
			   "the dump\n", cpu, NR_CPUS - 1);
		return;
	}
	zfcpdump_save_areas[cpu] = kmalloc(sizeof(struct save_area), GFP_KERNEL);
	if (!zfcpdump_save_areas[cpu])
		return;
	while (raw_sigp(phy_cpu, sigp_stop_and_store_status) == sigp_busy)
		cpu_relax();
	memcpy_real(zfcpdump_save_areas[cpu],
		    (void *)(unsigned long) store_prefix() + SAVE_AREA_BASE,
		    sizeof(struct save_area));
}
Example #6
static void iommu_table_setparms(struct pci_controller *phb,
				 struct device_node *dn,
				 struct iommu_table *tbl)
{
	struct device_node *node;
	const unsigned long *basep;
	const u32 *sizep;

	node = phb->dn;

	basep = of_get_property(node, "linux,tce-base", NULL);
	sizep = of_get_property(node, "linux,tce-size", NULL);
	if (basep == NULL || sizep == NULL) {
		printk(KERN_ERR "PCI_DMA: iommu_table_setparms: %s has "
				"missing tce entries !\n", dn->full_name);
		return;
	}

	tbl->it_base = (unsigned long)__va(*basep);

	/*
	 * A kdump kernel inherits the TCE table the crashed kernel programmed;
	 * devices that were not reset may still be doing DMA through it, so
	 * only a regular boot clears the table here.
	 */
	if (!is_kdump_kernel())
		memset((void *)tbl->it_base, 0, *sizep);

	tbl->it_busno = phb->bus->number;

	/* Units of tce entries */
	tbl->it_offset = phb->dma_window_base_cur >> IOMMU_PAGE_SHIFT;

	/* Test if we are going over 2GB of DMA space */
	if (phb->dma_window_base_cur + phb->dma_window_size > 0x80000000ul) {
		udbg_printf("PCI_DMA: Unexpected number of IOAs under this PHB.\n");
		panic("PCI_DMA: Unexpected number of IOAs under this PHB.\n");
	}

	phb->dma_window_base_cur += phb->dma_window_size;

	/* Set the tce table size - measured in entries */
	tbl->it_size = phb->dma_window_size >> IOMMU_PAGE_SHIFT;

	tbl->it_index = 0;
	tbl->it_blocksize = 16;
	tbl->it_type = TCE_PCI;
}
Example #7
static int intel_setup_irq_remapping(struct intel_iommu *iommu)
{
	struct ir_table *ir_table;
	struct fwnode_handle *fn;
	unsigned long *bitmap;
	struct page *pages;

	if (iommu->ir_table)
		return 0;

	ir_table = kzalloc(sizeof(struct ir_table), GFP_KERNEL);
	if (!ir_table)
		return -ENOMEM;

	pages = alloc_pages_node(iommu->node, GFP_KERNEL | __GFP_ZERO,
				 INTR_REMAP_PAGE_ORDER);
	if (!pages) {
		pr_err("IR%d: failed to allocate pages of order %d\n",
		       iommu->seq_id, INTR_REMAP_PAGE_ORDER);
		goto out_free_table;
	}

	bitmap = kcalloc(BITS_TO_LONGS(INTR_REMAP_TABLE_ENTRIES),
			 sizeof(long), GFP_ATOMIC);
	if (bitmap == NULL) {
		pr_err("IR%d: failed to allocate bitmap\n", iommu->seq_id);
		goto out_free_pages;
	}

	fn = irq_domain_alloc_named_id_fwnode("INTEL-IR", iommu->seq_id);
	if (!fn)
		goto out_free_bitmap;

	iommu->ir_domain =
		irq_domain_create_hierarchy(arch_get_ir_parent_domain(),
					    0, INTR_REMAP_TABLE_ENTRIES,
					    fn, &intel_ir_domain_ops,
					    iommu);
	irq_domain_free_fwnode(fn);
	if (!iommu->ir_domain) {
		pr_err("IR%d: failed to allocate irqdomain\n", iommu->seq_id);
		goto out_free_bitmap;
	}
	iommu->ir_msi_domain =
		arch_create_remap_msi_irq_domain(iommu->ir_domain,
						 "INTEL-IR-MSI",
						 iommu->seq_id);

	ir_table->base = page_address(pages);
	ir_table->bitmap = bitmap;
	iommu->ir_table = ir_table;

	/*
	 * If the queued invalidation is already initialized,
	 * shouldn't disable it.
	 */
	if (!iommu->qi) {
		/*
		 * Clear previous faults.
		 */
		dmar_fault(-1, iommu);
		dmar_disable_qi(iommu);

		if (dmar_enable_qi(iommu)) {
			pr_err("Failed to enable queued invalidation\n");
			goto out_free_bitmap;
		}
	}

	init_ir_status(iommu);

	if (ir_pre_enabled(iommu)) {
		if (!is_kdump_kernel()) {
			pr_warn("IRQ remapping was enabled on %s but we are not in kdump mode\n",
				iommu->name);
			clear_ir_pre_enabled(iommu);
			iommu_disable_irq_remapping(iommu);
		} else if (iommu_load_old_irte(iommu))
			pr_err("Failed to copy IR table for %s from previous kernel\n",
			       iommu->name);
		else
			pr_info("Copied IR table for %s from previous kernel\n",
				iommu->name);
	}

	iommu_set_irq_remapping(iommu, eim_mode);

	return 0;

out_free_bitmap:
	kfree(bitmap);
out_free_pages:
	__free_pages(pages, INTR_REMAP_PAGE_ORDER);
out_free_table:
	kfree(ir_table);

	iommu->ir_table  = NULL;

	return -ENOMEM;
}
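The tail of this function also shows the usual kernel unwind idiom: each successfully acquired resource gets a label, and a later failure jumps to the label that releases everything acquired so far, in reverse order. A condensed, hedged sketch of the same shape; the three buffers are purely illustrative:

#include <linux/slab.h>

static int setup_three_buffers(void **pa, void **pb, void **pc)
{
	void *a, *b, *c;

	a = kzalloc(64, GFP_KERNEL);
	if (!a)
		return -ENOMEM;
	b = kzalloc(64, GFP_KERNEL);
	if (!b)
		goto out_free_a;
	c = kzalloc(64, GFP_KERNEL);
	if (!c)
		goto out_free_b;

	*pa = a;
	*pb = b;
	*pc = c;
	return 0;

out_free_b:
	kfree(b);
out_free_a:
	kfree(a);
	return -ENOMEM;
}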
Example #8
File: setup.c Project: 08opt/linux
static void __init
setup_memory(void)
{
	unsigned long bootmap_size;
	unsigned long start_pfn, end_pfn;
	int i;

	/*
	 * partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	start_pfn = PFN_UP(__pa(&_end));
	end_pfn = max_pfn = PFN_DOWN(memory_end);

#ifdef CONFIG_BLK_DEV_INITRD
	/*
	 * Move the initrd in case the bitmap of the bootmem allocator
	 * would overwrite it.
	 */

	if (INITRD_START && INITRD_SIZE) {
		unsigned long bmap_size;
		unsigned long start;

		bmap_size = bootmem_bootmap_pages(end_pfn - start_pfn + 1);
		bmap_size = PFN_PHYS(bmap_size);

		if (PFN_PHYS(start_pfn) + bmap_size > INITRD_START) {
			start = PFN_PHYS(start_pfn) + bmap_size + PAGE_SIZE;

#ifdef CONFIG_CRASH_DUMP
			if (OLDMEM_BASE) {
				/* Move initrd behind kdump oldmem */
				if (start + INITRD_SIZE > OLDMEM_BASE &&
				    start < OLDMEM_BASE + OLDMEM_SIZE)
					start = OLDMEM_BASE + OLDMEM_SIZE;
			}
#endif
			if (start + INITRD_SIZE > memory_end) {
				pr_err("initrd extends beyond end of "
				       "memory (0x%08lx > 0x%08lx) "
				       "disabling initrd\n",
				       start + INITRD_SIZE, memory_end);
				INITRD_START = INITRD_SIZE = 0;
			} else {
				pr_info("Moving initrd (0x%08lx -> "
					"0x%08lx, size: %ld)\n",
					INITRD_START, start, INITRD_SIZE);
				memmove((void *) start, (void *) INITRD_START,
					INITRD_SIZE);
				INITRD_START = start;
			}
		}
	}
#endif

	/*
	 * Initialize the boot-time allocator
	 */
	bootmap_size = init_bootmem(start_pfn, end_pfn);

	/*
	 * Register RAM areas with the bootmem allocator.
	 */

	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
		unsigned long start_chunk, end_chunk, pfn;

		if (memory_chunk[i].type != CHUNK_READ_WRITE &&
		    memory_chunk[i].type != CHUNK_CRASHK)
			continue;
		start_chunk = PFN_DOWN(memory_chunk[i].addr);
		end_chunk = start_chunk + PFN_DOWN(memory_chunk[i].size);
		end_chunk = min(end_chunk, end_pfn);
		if (start_chunk >= end_chunk)
			continue;
		memblock_add_node(PFN_PHYS(start_chunk),
				  PFN_PHYS(end_chunk - start_chunk), 0);
		pfn = max(start_chunk, start_pfn);
		for (; pfn < end_chunk; pfn++)
			page_set_storage_key(PFN_PHYS(pfn),
					     PAGE_DEFAULT_KEY, 0);
	}

	psw_set_key(PAGE_DEFAULT_KEY);

	free_bootmem_with_active_regions(0, max_pfn);

	/*
	 * Reserve memory used for lowcore/command line/kernel image.
	 */
	reserve_bootmem(0, (unsigned long)_ehead, BOOTMEM_DEFAULT);
	reserve_bootmem((unsigned long)_stext,
			PFN_PHYS(start_pfn) - (unsigned long)_stext,
			BOOTMEM_DEFAULT);
	/*
	 * Reserve the bootmem bitmap itself as well. We do this in two
	 * steps (first step was init_bootmem()) because this catches
	 * the (very unlikely) case of us accidentally initializing the
	 * bootmem allocator with an invalid RAM area.
	 */
	reserve_bootmem(start_pfn << PAGE_SHIFT, bootmap_size,
			BOOTMEM_DEFAULT);

#ifdef CONFIG_CRASH_DUMP
	if (crashk_res.start)
		reserve_bootmem(crashk_res.start,
				crashk_res.end - crashk_res.start + 1,
				BOOTMEM_DEFAULT);
	if (is_kdump_kernel())
		reserve_bootmem(elfcorehdr_addr - OLDMEM_BASE,
				PAGE_ALIGN(elfcorehdr_size), BOOTMEM_DEFAULT);
#endif
#ifdef CONFIG_BLK_DEV_INITRD
	if (INITRD_START && INITRD_SIZE) {
		if (INITRD_START + INITRD_SIZE <= memory_end) {
			reserve_bootmem(INITRD_START, INITRD_SIZE,
					BOOTMEM_DEFAULT);
			initrd_start = INITRD_START;
			initrd_end = initrd_start + INITRD_SIZE;
		} else {
			pr_err("initrd extends beyond end of "
			       "memory (0x%08lx > 0x%08lx) "
			       "disabling initrd\n",
			       initrd_start + INITRD_SIZE, memory_end);
			initrd_start = initrd_end = 0;
		}
	}
#endif
}
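The CONFIG_CRASH_DUMP hunk above keeps the relocated initrd out of the kdump oldmem window using a plain interval-overlap test. A hedged, self-contained restatement of that check; the function names are placeholders, not kernel APIs:

#include <linux/types.h>

/* True when [a, a + a_len) and [b, b + b_len) intersect. */
static bool ranges_overlap(unsigned long a, unsigned long a_len,
			   unsigned long b, unsigned long b_len)
{
	return a < b + b_len && b < a + a_len;
}

/* If dst..dst+len would overlap the reserved range, place it just past that range. */
static unsigned long bump_past_reserved(unsigned long dst, unsigned long len,
					unsigned long res_base, unsigned long res_len)
{
	return ranges_overlap(dst, len, res_base, res_len) ? res_base + res_len : dst;
}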
Example #9
File: bus.c Project: 710leo/LVS
static void acpi_bus_osc_support(void)
{
	u32 capbuf[2];
	struct acpi_osc_context context = {
		.uuid_str = sb_uuid_str,
		.rev = 1,
		.cap.length = 8,
		.cap.pointer = capbuf,
	};
	acpi_handle handle;

	capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE;
	capbuf[OSC_SUPPORT_TYPE] = OSC_SB_PR3_SUPPORT; /* _PR3 is in use */
#if defined(CONFIG_ACPI_PROCESSOR_AGGREGATOR) ||\
			defined(CONFIG_ACPI_PROCESSOR_AGGREGATOR_MODULE)
	capbuf[OSC_SUPPORT_TYPE] |= OSC_SB_PAD_SUPPORT;
#endif

#if defined(CONFIG_ACPI_PROCESSOR) || defined(CONFIG_ACPI_PROCESSOR_MODULE)
	capbuf[OSC_SUPPORT_TYPE] |= OSC_SB_PPC_OST_SUPPORT;
#endif
	if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle)))
		return;

	if (is_uv_system() && is_kdump_kernel()) {
		/*
		 * There is no need to parse the OS Capabilities table
		 * in the crash kernel. And it should not be done, as
		 * that parsing includes destructive writes to io ports to
		 * initialize UV system controller interrupts.
		 */
		return;
	}

	if (ACPI_SUCCESS(acpi_run_osc(handle, &context)))
		kfree(context.ret.pointer);
	/* Do we need to check the returned capabilities? Apparently not. */
}

/* --------------------------------------------------------------------------
                                Event Management
   -------------------------------------------------------------------------- */

#ifdef CONFIG_ACPI_PROC_EVENT
static DEFINE_SPINLOCK(acpi_bus_event_lock);

LIST_HEAD(acpi_bus_event_list);
DECLARE_WAIT_QUEUE_HEAD(acpi_bus_event_queue);

extern int event_is_open;

int acpi_bus_generate_proc_event4(const char *device_class, const char *bus_id, u8 type, int data)
{
	struct acpi_bus_event *event;
	unsigned long flags = 0;

	/* drop event on the floor if no one's listening */
	if (!event_is_open)
		return 0;

	event = kmalloc(sizeof(struct acpi_bus_event), GFP_ATOMIC);
	if (!event)
		return -ENOMEM;

	strcpy(event->device_class, device_class);
	strcpy(event->bus_id, bus_id);
	event->type = type;
	event->data = data;

	spin_lock_irqsave(&acpi_bus_event_lock, flags);
	list_add_tail(&event->node, &acpi_bus_event_list);
	spin_unlock_irqrestore(&acpi_bus_event_lock, flags);

	wake_up_interruptible(&acpi_bus_event_queue);

	return 0;

}

EXPORT_SYMBOL_GPL(acpi_bus_generate_proc_event4);

int acpi_bus_generate_proc_event(struct acpi_device *device, u8 type, int data)
{
	if (!device)
		return -EINVAL;
	return acpi_bus_generate_proc_event4(device->pnp.device_class,
					     device->pnp.bus_id, type, data);
}

EXPORT_SYMBOL(acpi_bus_generate_proc_event);

int acpi_bus_receive_event(struct acpi_bus_event *event)
{
	unsigned long flags = 0;
	struct acpi_bus_event *entry = NULL;

	DECLARE_WAITQUEUE(wait, current);


	if (!event)
		return -EINVAL;

	if (list_empty(&acpi_bus_event_list)) {

		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&acpi_bus_event_queue, &wait);

		if (list_empty(&acpi_bus_event_list))
			schedule();

		remove_wait_queue(&acpi_bus_event_queue, &wait);
		set_current_state(TASK_RUNNING);

		if (signal_pending(current))
			return -ERESTARTSYS;
	}

	spin_lock_irqsave(&acpi_bus_event_lock, flags);
	if (!list_empty(&acpi_bus_event_list)) {
		entry = list_entry(acpi_bus_event_list.next,
				   struct acpi_bus_event, node);
		list_del(&entry->node);
	}
	spin_unlock_irqrestore(&acpi_bus_event_lock, flags);

	if (!entry)
		return -ENODEV;

	memcpy(event, entry, sizeof(struct acpi_bus_event));
	kfree(entry);

	return 0;
}
Example #10
static void __init smp_detect_cpus(void)
{
	unsigned int cpu, c_cpus, s_cpus;
	struct sclp_cpu_info *info;
	u16 boot_cpu_addr, cpu_addr;

	c_cpus = 1;
	s_cpus = 0;
	boot_cpu_addr = __cpu_logical_map[0];
	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		panic("smp_detect_cpus failed to allocate memory\n");
#ifdef CONFIG_CRASH_DUMP
	if (OLDMEM_BASE && !is_kdump_kernel()) {
		struct save_area *save_area;

		save_area = kmalloc(sizeof(*save_area), GFP_KERNEL);
		if (!save_area)
			panic("could not allocate memory for save area\n");
		copy_oldmem_page(1, (void *) save_area, sizeof(*save_area),
				 0x200, 0);
		zfcpdump_save_areas[0] = save_area;
	}
#endif
	/* Use sigp detection algorithm if sclp doesn't work. */
	if (sclp_get_cpu_info(info)) {
		smp_use_sigp_detection = 1;
		for (cpu = 0; cpu <= MAX_CPU_ADDRESS; cpu++) {
			if (cpu == boot_cpu_addr)
				continue;
			if (!raw_cpu_stopped(cpu))
				continue;
			smp_get_save_area(c_cpus, cpu);
			c_cpus++;
		}
		goto out;
	}

	if (info->has_cpu_type) {
		for (cpu = 0; cpu < info->combined; cpu++) {
			if (info->cpu[cpu].address == boot_cpu_addr) {
				smp_cpu_type = info->cpu[cpu].type;
				break;
			}
		}
	}

	for (cpu = 0; cpu < info->combined; cpu++) {
		if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type)
			continue;
		cpu_addr = info->cpu[cpu].address;
		if (cpu_addr == boot_cpu_addr)
			continue;
		if (!raw_cpu_stopped(cpu_addr)) {
			s_cpus++;
			continue;
		}
		smp_get_save_area(c_cpus, cpu_addr);
		c_cpus++;
	}
out:
	kfree(info);
	pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus);
	get_online_cpus();
	__smp_rescan_cpus();
	put_online_cpus();
}