Example #1
int __ref msi_bitmap_alloc(struct msi_bitmap *bmp, unsigned int irq_count,
		     struct device_node *of_node)
{
	int size;

	if (!irq_count)
		return -EINVAL;

	size = BITS_TO_LONGS(irq_count) * sizeof(long);
	pr_debug("msi_bitmap: allocator bitmap size is 0x%x bytes\n", size);

	bmp->bitmap_from_slab = slab_is_available();
	if (bmp->bitmap_from_slab)
		bmp->bitmap = kzalloc(size, GFP_KERNEL);
	else {
		bmp->bitmap = memblock_virt_alloc(size, 0);
		/* the bitmap won't be freed from memblock allocator */
		kmemleak_not_leak(bmp->bitmap);
	}

	if (!bmp->bitmap) {
		pr_debug("msi_bitmap: ENOMEM allocating allocator bitmap!\n");
		return -ENOMEM;
	}

	/* We zalloc'ed the bitmap, so all irqs are free by default */
	spin_lock_init(&bmp->lock);
	bmp->of_node = of_node_get(of_node);
	bmp->irq_count = irq_count;

	return 0;
}
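
The bitmap_from_slab flag recorded above exists for the teardown path: memory obtained from memblock is never given back, so only the kzalloc() case may be freed. A minimal sketch of a matching teardown, assuming the same struct msi_bitmap fields as in the allocator:

void msi_bitmap_free(struct msi_bitmap *bmp)
{
	/* Memory from memblock is never returned; only a slab-backed
	 * bitmap can be freed.
	 */
	if (bmp->bitmap_from_slab)
		kfree(bmp->bitmap);
	of_node_put(bmp->of_node);
	bmp->bitmap = NULL;
}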
Example #2
void __init pnv_pci_init_p5ioc2_hub(struct device_node *np)
{
	struct device_node *phbn;
	const __be64 *prop64;
	u64 hub_id;
	void *tce_mem;
	uint64_t tce_per_phb;
	int64_t rc;
	int phb_count = 0;

	pr_info("Probing p5ioc2 IO-Hub %s\n", np->full_name);

	prop64 = of_get_property(np, "ibm,opal-hubid", NULL);
	if (!prop64) {
		pr_err(" Missing \"ibm,opal-hubid\" property !\n");
		return;
	}
	hub_id = be64_to_cpup(prop64);
	pr_info(" HUB-ID : 0x%016llx\n", hub_id);

	/* Count child PHBs and calculate TCE space per PHB */
	for_each_child_of_node(np, phbn) {
		if (of_device_is_compatible(phbn, "ibm,p5ioc2-pcix") ||
		    of_device_is_compatible(phbn, "ibm,p5ioc2-pciex"))
			phb_count++;
	}

	if (phb_count <= 0) {
		pr_info(" No PHBs for Hub %s\n", np->full_name);
		return;
	}

	tce_per_phb = __rounddown_pow_of_two(P5IOC2_TCE_MEMORY / phb_count);
	pr_info(" Allocating %lld MB of TCE memory per PHB\n",
		tce_per_phb >> 20);

	/* Currently allocate 16M of TCE memory for every Hub
	 *
	 * XXX TODO: Make it chip local if possible
	 */
	tce_mem = memblock_virt_alloc(P5IOC2_TCE_MEMORY, P5IOC2_TCE_MEMORY);
	pr_debug(" TCE    : 0x%016lx..0x%016lx\n",
		__pa(tce_mem), __pa(tce_mem) + P5IOC2_TCE_MEMORY - 1);
	rc = opal_pci_set_hub_tce_memory(hub_id, __pa(tce_mem),
					P5IOC2_TCE_MEMORY);
	if (rc != OPAL_SUCCESS) {
		pr_err(" Failed to allocate TCE memory, OPAL error %lld\n", rc);
		return;
	}

	/* Initialize PHBs */
	for_each_child_of_node(np, phbn) {
		if (of_device_is_compatible(phbn, "ibm,p5ioc2-pcix") ||
		    of_device_is_compatible(phbn, "ibm,p5ioc2-pciex")) {
			pnv_pci_init_p5ioc2_phb(phbn, hub_id,
					tce_mem, tce_per_phb);
			tce_mem += tce_per_phb;
		}
	}
}
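
For a sense of the arithmetic: with the 16 MB of TCE memory mentioned in the comment and, say, three PHB children, 16 MB / 3 is roughly 5.3 MB, which __rounddown_pow_of_two() clamps to a 4 MB tce_per_phb; the single 16 MB-aligned allocation is then handed out in 4 MB slices by the tce_mem += tce_per_phb advance in the second loop.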
Example #3
int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
{
	void *v_overflow_buffer;
	unsigned long i, bytes;

	bytes = nslabs << IO_TLB_SHIFT;

	io_tlb_nslabs = nslabs;
	io_tlb_start = __pa(tlb);
	io_tlb_end = io_tlb_start + bytes;

	/*
	 * Get the overflow emergency buffer
	 */
	v_overflow_buffer = memblock_virt_alloc_low_nopanic(
						PAGE_ALIGN(io_tlb_overflow),
						PAGE_SIZE);
	if (!v_overflow_buffer)
		return -ENOMEM;

	io_tlb_overflow_buffer = __pa(v_overflow_buffer);

	/*
	 * Allocate and initialize the free list array.  This array is used
	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
	 * between io_tlb_start and io_tlb_end.
	 */
	io_tlb_list = memblock_virt_alloc(
				PAGE_ALIGN(io_tlb_nslabs * sizeof(int)),
				PAGE_SIZE);
	io_tlb_orig_addr = memblock_virt_alloc(
				PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)),
				PAGE_SIZE);
	for (i = 0; i < io_tlb_nslabs; i++) {
		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
		io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
	}
	io_tlb_index = 0;

	if (verbose)
		swiotlb_print_info();

	swiotlb_set_max_segment(io_tlb_nslabs << IO_TLB_SHIFT);
	return 0;
}
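
To make the free-list initialization concrete: assuming the usual IO_TLB_SEGSIZE of 128, OFFSET(i, IO_TLB_SEGSIZE) is simply i modulo 128, so io_tlb_list starts out as 128, 127, ..., 2, 1 and then repeats for every segment; each entry records how many contiguous free slots remain before the next segment boundary, which is what lets the allocator find a run of up to IO_TLB_SEGSIZE slots in a single scan.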
Example #4
static void __init prealloc(struct ps3_prealloc *p)
{
	if (!p->size)
		return;

	p->address = memblock_virt_alloc(p->size, p->align);

	printk(KERN_INFO "%s: %lu bytes at %p\n", p->name, p->size,
	       p->address);
}
Example #5
void * __ref zalloc_maybe_bootmem(size_t size, gfp_t mask)
{
	void *p;

	if (slab_is_available())
		p = kzalloc(size, mask);
	else
		p = memblock_virt_alloc(size, 0);
	return p;
}
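
A hypothetical caller, just to show why the helper is useful: the same call site works both before and after the slab allocator comes up, so early platform code does not need two allocation paths. struct my_pdata and setup_my_pdata() are made-up names for the sketch:

struct my_pdata {
	u32 flags;
	void *priv;
};

static struct my_pdata *pdata;

static void __init setup_my_pdata(void)
{
	/* Zeroed allocation: memblock early in boot, kzalloc() later. */
	pdata = zalloc_maybe_bootmem(sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		panic("%s: unable to allocate pdata\n", __func__);
	pdata->flags = 1;	/* hypothetical default */
}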
Example #6
/**
 * omap2_clk_legacy_provider_init - initialize a legacy clock provider
 * @index: index for the clock provider
 * @mem: iomem pointer for the clock provider memory area
 *
 * Initializes a legacy clock provider memory mapping.
 */
void __init omap2_clk_legacy_provider_init(int index, void __iomem *mem)
{
	struct clk_iomap *io;

	ti_clk_ll_ops = &omap_clk_ll_ops;

	io = memblock_virt_alloc(sizeof(*io), 0);

	io->mem = mem;

	clk_memmaps[index] = io;
}
Example #7
static void __init setup_resources(void)
{
    struct resource *res, *std_res, *sub_res;
    struct memblock_region *reg;
    int j;

    code_resource.start = (unsigned long) &_text;
    code_resource.end = (unsigned long) &_etext - 1;
    data_resource.start = (unsigned long) &_etext;
    data_resource.end = (unsigned long) &_edata - 1;
    bss_resource.start = (unsigned long) &__bss_start;
    bss_resource.end = (unsigned long) &__bss_stop - 1;

    for_each_memblock(memory, reg) {
        res = memblock_virt_alloc(sizeof(*res), 8);
        res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;

        res->name = "System RAM";
        res->start = reg->base;
        res->end = reg->base + reg->size - 1;
        request_resource(&iomem_resource, res);

        for (j = 0; j < ARRAY_SIZE(standard_resources); j++) {
            std_res = standard_resources[j];
            if (std_res->start < res->start ||
                    std_res->start > res->end)
                continue;
            if (std_res->end > res->end) {
                sub_res = memblock_virt_alloc(sizeof(*sub_res), 8);
                *sub_res = *std_res;
                sub_res->end = res->end;
                std_res->start = res->end + 1;
                request_resource(res, sub_res);
            } else {
                request_resource(res, std_res);
            }
        }
    }
}
Example #8
/*
 * Initialize the context management stuff.
 */
void __init mmu_context_init(void)
{
	/* Mark init_mm as being active on all possible CPUs since
	 * we'll get called with prev == init_mm the first time
	 * we schedule on a given CPU
	 */
	init_mm.context.active = NR_CPUS;

	/*
	 *   The MPC8xx has only 16 contexts.  We rotate through them on each
	 * task switch.  A better way would be to keep track of tasks that
	 * own contexts, and implement an LRU usage.  That way very active
	 * tasks don't always have to pay the TLB reload overhead.  The
	 * kernel pages are mapped shared, so the kernel can run on behalf
	 * of any task that makes a kernel entry.  Shared does not mean they
	 * are not protected, just that the ASID comparison is not performed.
	 *      -- Dan
	 *
	 * The IBM4xx has 256 contexts, so we can just rotate through these
	 * as a way of "switching" contexts.  If the TID of the TLB is zero,
	 * the PID/TID comparison is disabled, so we can use a TID of zero
	 * to represent all kernel pages as shared among all contexts.
	 * 	-- Dan
	 *
	 * The IBM 47x core supports 16-bit PIDs, thus 65535 contexts. We
	 * should normally never have to steal though the facility is
	 * present if needed.
	 *      -- BenH
	 */
	if (mmu_has_feature(MMU_FTR_TYPE_8xx)) {
		first_context = 0;
		last_context = 15;
		no_selective_tlbil = true;
	} else if (mmu_has_feature(MMU_FTR_TYPE_47x)) {
		first_context = 1;
		last_context = 65535;
		no_selective_tlbil = false;
	} else {
		first_context = 1;
		last_context = 255;
		no_selective_tlbil = false;
	}

#ifdef DEBUG_CLAMP_LAST_CONTEXT
	last_context = DEBUG_CLAMP_LAST_CONTEXT;
#endif
	/*
	 * Allocate the maps used by context management
	 */
	context_map = memblock_virt_alloc(CTX_MAP_SIZE, 0);
	context_mm = memblock_virt_alloc(sizeof(void *) * (last_context + 1), 0);
#ifndef CONFIG_SMP
	stale_map[0] = memblock_virt_alloc(CTX_MAP_SIZE, 0);
#else
	stale_map[boot_cpuid] = memblock_virt_alloc(CTX_MAP_SIZE, 0);

	register_cpu_notifier(&mmu_context_cpu_nb);
#endif

	printk(KERN_INFO
	       "MMU: Allocated %zu bytes of context maps for %d contexts\n",
	       2 * CTX_MAP_SIZE + (sizeof(void *) * (last_context + 1)),
	       last_context - first_context + 1);

	/*
	 * Some processors have too few contexts to reserve one for
	 * init_mm, and require using context 0 for a normal task.
	 * Other processors reserve the use of context zero for the kernel.
	 * This code assumes first_context < 32.
	 */
	context_map[0] = (1 << first_context) - 1;
	next_context = first_context;
	nr_free_contexts = last_context - first_context + 1;
}
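
As a concrete check of the last few lines: with first_context = 1 (the 47x and default cases above), (1 << 1) - 1 is 0x1, so bit 0 of context_map is set and context 0 stays reserved for the kernel; with first_context = 0 (the 8xx case), the expression is 0 and all 16 hardware contexts remain available to tasks.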
Example #9
void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
{
	return memblock_virt_alloc(size, align);
}
Example #10
static void __init pnv_pci_init_p5ioc2_phb(struct device_node *np, u64 hub_id,
					   void *tce_mem, u64 tce_size)
{
	struct pnv_phb *phb;
	const __be64 *prop64;
	u64 phb_id;
	int64_t rc;
	static int primary = 1;
	struct iommu_table_group *table_group;
	struct iommu_table *tbl;

	pr_info(" Initializing p5ioc2 PHB %s\n", np->full_name);

	prop64 = of_get_property(np, "ibm,opal-phbid", NULL);
	if (!prop64) {
		pr_err("  Missing \"ibm,opal-phbid\" property !\n");
		return;
	}
	phb_id = be64_to_cpup(prop64);
	pr_devel("  PHB-ID  : 0x%016llx\n", phb_id);
	pr_devel("  TCE AT  : 0x%016lx\n", __pa(tce_mem));
	pr_devel("  TCE SZ  : 0x%016llx\n", tce_size);

	rc = opal_pci_set_phb_tce_memory(phb_id, __pa(tce_mem), tce_size);
	if (rc != OPAL_SUCCESS) {
		pr_err("  Failed to set TCE memory, OPAL error %lld\n", rc);
		return;
	}

	phb = memblock_virt_alloc(sizeof(struct pnv_phb), 0);
	phb->hose = pcibios_alloc_controller(np);
	if (!phb->hose) {
		pr_err("  Failed to allocate PCI controller\n");
		return;
	}

	spin_lock_init(&phb->lock);
	phb->hose->first_busno = 0;
	phb->hose->last_busno = 0xff;
	phb->hose->private_data = phb;
	phb->hose->controller_ops = pnv_pci_p5ioc2_controller_ops;
	phb->hub_id = hub_id;
	phb->opal_id = phb_id;
	phb->type = PNV_PHB_P5IOC2;
	phb->model = PNV_PHB_MODEL_P5IOC2;

	phb->regs = of_iomap(np, 0);

	if (phb->regs == NULL)
		pr_err("  Failed to map registers !\n");
	else {
		pr_devel("  P_BUID     = 0x%08x\n", in_be32(phb->regs + 0x100));
		pr_devel("  P_IOSZ     = 0x%08x\n", in_be32(phb->regs + 0x1b0));
		pr_devel("  P_IO_ST    = 0x%08x\n", in_be32(phb->regs + 0x1e0));
		pr_devel("  P_MEM1_H   = 0x%08x\n", in_be32(phb->regs + 0x1a0));
		pr_devel("  P_MEM1_L   = 0x%08x\n", in_be32(phb->regs + 0x190));
		pr_devel("  P_MSZ1_L   = 0x%08x\n", in_be32(phb->regs + 0x1c0));
		pr_devel("  P_MEM_ST   = 0x%08x\n", in_be32(phb->regs + 0x1d0));
		pr_devel("  P_MEM2_H   = 0x%08x\n", in_be32(phb->regs + 0x2c0));
		pr_devel("  P_MEM2_L   = 0x%08x\n", in_be32(phb->regs + 0x2b0));
		pr_devel("  P_MSZ2_H   = 0x%08x\n", in_be32(phb->regs + 0x2d0));
		pr_devel("  P_MSZ2_L   = 0x%08x\n", in_be32(phb->regs + 0x2e0));
	}

	/* Interpret the "ranges" property */
	/* This also maps the I/O region and sets isa_io/mem_base */
	pci_process_bridge_OF_ranges(phb->hose, np, primary);
	primary = 0;

	phb->hose->ops = &pnv_pci_ops;

	/* Setup MSI support */
	pnv_pci_init_p5ioc2_msis(phb);

	/* Setup TCEs */
	phb->dma_dev_setup = pnv_pci_p5ioc2_dma_dev_setup;
	pnv_pci_setup_iommu_table(&phb->p5ioc2.iommu_table,
				  tce_mem, tce_size, 0,
				  IOMMU_PAGE_SHIFT_4K);
	/*
	 * We do not allocate iommu_table as we do not support
	 * hotplug or SRIOV on P5IOC2 and therefore iommu_free_table()
	 * should not be called for phb->p5ioc2.table_group.tables[0] ever.
	 */
	tbl = phb->p5ioc2.table_group.tables[0] = &phb->p5ioc2.iommu_table;
	table_group = &phb->p5ioc2.table_group;
	table_group->tce32_start = tbl->it_offset << tbl->it_page_shift;
	table_group->tce32_size = tbl->it_size << tbl->it_page_shift;
}
Example #11
static void __init setup_lowcore(void)
{
    struct lowcore *lc;

    /*
     * Setup lowcore for boot cpu
     */
    BUILD_BUG_ON(sizeof(struct lowcore) != LC_PAGES * 4096);
    lc = memblock_virt_alloc_low(sizeof(*lc), sizeof(*lc));
    lc->restart_psw.mask = PSW_KERNEL_BITS;
    lc->restart_psw.addr = (unsigned long) restart_int_handler;
    lc->external_new_psw.mask = PSW_KERNEL_BITS |
                                PSW_MASK_DAT | PSW_MASK_MCHECK;
    lc->external_new_psw.addr = (unsigned long) ext_int_handler;
    lc->svc_new_psw.mask = PSW_KERNEL_BITS |
                           PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
    lc->svc_new_psw.addr = (unsigned long) system_call;
    lc->program_new_psw.mask = PSW_KERNEL_BITS |
                               PSW_MASK_DAT | PSW_MASK_MCHECK;
    lc->program_new_psw.addr = (unsigned long) pgm_check_handler;
    lc->mcck_new_psw.mask = PSW_KERNEL_BITS;
    lc->mcck_new_psw.addr = (unsigned long) mcck_int_handler;
    lc->io_new_psw.mask = PSW_KERNEL_BITS |
                          PSW_MASK_DAT | PSW_MASK_MCHECK;
    lc->io_new_psw.addr = (unsigned long) io_int_handler;
    lc->clock_comparator = -1ULL;
    lc->kernel_stack = ((unsigned long) &init_thread_union)
                       + THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
    lc->async_stack = (unsigned long)
                      memblock_virt_alloc(ASYNC_SIZE, ASYNC_SIZE)
                      + ASYNC_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
    lc->panic_stack = (unsigned long)
                      memblock_virt_alloc(PAGE_SIZE, PAGE_SIZE)
                      + PAGE_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
    lc->current_task = (unsigned long)&init_task;
    lc->lpp = LPP_MAGIC;
    lc->machine_flags = S390_lowcore.machine_flags;
    lc->preempt_count = S390_lowcore.preempt_count;
    lc->stfl_fac_list = S390_lowcore.stfl_fac_list;
    memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
           MAX_FACILITY_BIT/8);
    if (MACHINE_HAS_VX)
        lc->vector_save_area_addr =
            (unsigned long) &lc->vector_save_area;
    lc->vdso_per_cpu_data = (unsigned long) &lc->paste[0];
    lc->sync_enter_timer = S390_lowcore.sync_enter_timer;
    lc->async_enter_timer = S390_lowcore.async_enter_timer;
    lc->exit_timer = S390_lowcore.exit_timer;
    lc->user_timer = S390_lowcore.user_timer;
    lc->system_timer = S390_lowcore.system_timer;
    lc->steal_timer = S390_lowcore.steal_timer;
    lc->last_update_timer = S390_lowcore.last_update_timer;
    lc->last_update_clock = S390_lowcore.last_update_clock;

    restart_stack = memblock_virt_alloc(ASYNC_SIZE, ASYNC_SIZE);
    restart_stack += ASYNC_SIZE;

    /*
     * Set up PSW restart to call ipl.c:do_restart(). Copy the relevant
     * restart data to the absolute zero lowcore. This is necessary if
     * PSW restart is done on an offline CPU that has lowcore zero.
     */
    lc->restart_stack = (unsigned long) restart_stack;
    lc->restart_fn = (unsigned long) do_restart;
    lc->restart_data = 0;
    lc->restart_source = -1UL;

    /* Setup absolute zero lowcore */
    mem_assign_absolute(S390_lowcore.restart_stack, lc->restart_stack);
    mem_assign_absolute(S390_lowcore.restart_fn, lc->restart_fn);
    mem_assign_absolute(S390_lowcore.restart_data, lc->restart_data);
    mem_assign_absolute(S390_lowcore.restart_source, lc->restart_source);
    mem_assign_absolute(S390_lowcore.restart_psw, lc->restart_psw);

#ifdef CONFIG_SMP
    lc->spinlock_lockval = arch_spin_lockval(0);
#endif

    set_prefix((u32)(unsigned long) lc);
    lowcore_ptr[0] = lc;
}
Example #12
/**
 * alloc_bootmem_cpumask_var - allocate a struct cpumask from the bootmem arena.
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
 * a nop (in <linux/cpumask.h>).
 * Either returns an allocated (zero-filled) cpumask, or causes the
 * system to panic.
 */
void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask)
{
	*mask = memblock_virt_alloc(cpumask_size(), 0);
}
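
A hypothetical early-boot user, to show the intended call pattern (the command-line option and handler below are made up for the sketch): when CONFIG_CPUMASK_OFFSTACK=y the mask storage comes from the memblock arena; otherwise the call is a no-op and the cpumask lives inside the cpumask_var_t itself.

static cpumask_var_t boot_isolated_cpus;	/* hypothetical mask */

static int __init my_isolcpus_setup(char *str)
{
	/* Zero-filled allocation from the bootmem arena (or a no-op). */
	alloc_bootmem_cpumask_var(&boot_isolated_cpus);
	if (cpulist_parse(str, boot_isolated_cpus) < 0)
		pr_warn("my_isolcpus: invalid CPU list '%s'\n", str);
	return 1;
}
__setup("my_isolcpus=", my_isolcpus_setup);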