Example #1
0
void __init wii_memory_fixups(void)
{
	struct memblock_property *p = memblock.memory.region;

	BUG_ON(memblock.memory.cnt != 2);
	BUG_ON(!page_aligned(p[0].base) || !page_aligned(p[1].base));

	p[0].size = _ALIGN_DOWN(p[0].size, PAGE_SIZE);
	p[1].size = _ALIGN_DOWN(p[1].size, PAGE_SIZE);

	wii_hole_start = p[0].base + p[0].size;
	wii_hole_size = p[1].base - wii_hole_start;

	pr_info("MEM1: <%08llx %08llx>\n", p[0].base, p[0].size);
	pr_info("HOLE: <%08lx %08lx>\n", wii_hole_start, wii_hole_size);
	pr_info("MEM2: <%08llx %08llx>\n", p[1].base, p[1].size);

	p[0].size += wii_hole_size + p[1].size;

	memblock.memory.cnt = 1;
	memblock_analyze();

	/* reserve the hole */
	memblock_reserve(wii_hole_start, wii_hole_size);

	/* allow ioremapping the address space in the hole */
	__allow_ioremap_reserved = 1;
}
Example #2
0
void add_usable_mem_rgns(unsigned long long base, unsigned long long size)
{
	unsigned int i;
	unsigned long long end = base + size;
	unsigned long long ustart, uend;

	base = _ALIGN_DOWN(base, getpagesize());
	end = _ALIGN_UP(end, getpagesize());

	for (i = 0; i < usablemem_rgns.size; i++) {
		ustart = usablemem_rgns.ranges[i].start;
		uend = usablemem_rgns.ranges[i].end;
		if (base < uend && end > ustart) {
			if ((base >= ustart) && (end <= uend))
				return;
			if (base < ustart && end > uend) {
				usablemem_rgns.ranges[i].start = base;
				usablemem_rgns.ranges[i].end = end;
				return;
			} else if (base < ustart) {
				usablemem_rgns.ranges[i].start = base;
				return;
			} else if (end > uend) {
				usablemem_rgns.ranges[i].end = end;
				return;
			}
		}
	}
	usablemem_rgns.ranges[usablemem_rgns.size].start = base;
	usablemem_rgns.ranges[usablemem_rgns.size++].end = end;

	dbgprintf("usable memory rgns size:%u base:%llx size:%llx\n",
		usablemem_rgns.size, base, size);
}
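Note: every example in this set leans on the same power-of-two alignment helpers. For reference, a minimal sketch of the classic definitions (this matches the style of the old powerpc <asm/page.h> macros; exact definitions vary by tree, and userspace copies such as kexec-tools carry their own equivalents):

/* Round addr down/up to a multiple of size; size must be a power of two.
 * typeof() keeps the mask as wide as addr so 64-bit values are not truncated.
 */
#define _ALIGN_DOWN(addr, size)	((addr) & ~((typeof(addr))(size) - 1))
#define _ALIGN_UP(addr, size)	_ALIGN_DOWN((addr) + ((size) - 1), (size))
#define _ALIGN(addr, size)	_ALIGN_UP(addr, size)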
Example #3
0
/* The purgatory code needs this info to patch the EFI memmap.
 */
static void add_loaded_segments_info(struct mem_ehdr *ehdr)
{
	unsigned i = 0;
	while (i < ehdr->e_phnum) {
		struct mem_phdr *phdr;
		phdr = &ehdr->e_phdr[i];
		if (phdr->p_type != PT_LOAD) {
			i++;
			continue;
		}

		loaded_segments[loaded_segments_num].start =
			_ALIGN_DOWN(phdr->p_paddr, ELF_PAGE_SIZE);
		loaded_segments[loaded_segments_num].end =
			loaded_segments[loaded_segments_num].start;

		/* Consolidate consecutive PT_LOAD segments into one.
		 * The end address of the last PT_LOAD segment, calculated by
		 * adding p_memsz to p_paddr and rounding up to ELF_PAGE_SIZE,
		 * becomes the end address of this loaded_segments entry.
		 */
		while (i < ehdr->e_phnum) {
			phdr = &ehdr->e_phdr[i];
			if (phdr->p_type != PT_LOAD)
				break;
			loaded_segments[loaded_segments_num].end =
				_ALIGN(phdr->p_paddr + phdr->p_memsz,
				       ELF_PAGE_SIZE);
			i++;
		}
		loaded_segments_num++;
	}
}
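Note: to see the effect of the two roundings above with concrete numbers, here is a self-contained sketch (the ELF_PAGE_SIZE value and the standalone macro copies are assumptions for illustration only):

#include <stdio.h>

#define ELF_PAGE_SIZE 0x10000UL	/* assumed 64K ELF page; per-arch in reality */
#define _ALIGN_DOWN(a, s)	((a) & ~((s) - 1))
#define _ALIGN(a, s)		_ALIGN_DOWN((a) + (s) - 1, (s))

int main(void)
{
	unsigned long p_paddr = 0x100234UL, p_memsz = 0x2345UL;

	/* start rounds down and end rounds up, so the consolidated
	 * entry always covers whole ELF pages */
	printf("start %#lx end %#lx\n",
	       _ALIGN_DOWN(p_paddr, ELF_PAGE_SIZE),
	       _ALIGN(p_paddr + p_memsz, ELF_PAGE_SIZE));
	/* prints: start 0x100000 end 0x110000 */
	return 0;
}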
Example #4
0
void __ref vmemmap_free(unsigned long start, unsigned long end,
		struct vmem_altmap *altmap)
{
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
	unsigned long page_order = get_order(page_size);
	unsigned long alt_start = ~0, alt_end = ~0;
	unsigned long base_pfn;

	start = _ALIGN_DOWN(start, page_size);
	if (altmap) {
		alt_start = altmap->base_pfn;
		alt_end = altmap->base_pfn + altmap->reserve +
			  altmap->free + altmap->alloc + altmap->align;
	}

	pr_debug("vmemmap_free %lx...%lx\n", start, end);

	for (; start < end; start += page_size) {
		unsigned long nr_pages, addr;
		struct page *section_base;
		struct page *page;

		/*
		 * The section has already been marked invalid, so if
		 * vmemmap_populated() returns true some other section is
		 * still using this page; skip it.
		 */
		if (vmemmap_populated(start, page_size))
			continue;

		addr = vmemmap_list_free(start);
		if (!addr)
			continue;

		page = pfn_to_page(addr >> PAGE_SHIFT);
		section_base = pfn_to_page(vmemmap_section_start(start));
		nr_pages = 1 << page_order;
		base_pfn = PHYS_PFN(addr);

		if (base_pfn >= alt_start && base_pfn < alt_end) {
			vmem_altmap_free(altmap, nr_pages);
		} else if (PageReserved(page)) {
			/* allocated from bootmem */
			if (page_size < PAGE_SIZE) {
				/*
				 * this shouldn't happen, but if it is
				 * the case, leave the memory there
				 */
				WARN_ON_ONCE(1);
			} else {
				while (nr_pages--)
					free_reserved_page(page++);
			}
		} else {
			free_pages((unsigned long)(__va(addr)), page_order);
		}

		vmemmap_remove_mapping(start, page_size);
	}
}
Example #5
0
void TestCommitDecommit(RPageMove& pagemove, RChunk& aChunk)
	{
	test.Printf(_L("Attempt to move a page while it is being committed and decommited\n"));
	RThread thread;
	TRequestStatus s;
	test_KErrNone(thread.Create(_L("CommitDecommit"), &CommitDecommit, KDefaultStackSize, NULL, (TAny*)&aChunk));
	thread.Logon(s);
	thread.SetPriority(EPriorityMore);
	thread.Resume();

	TUint8* firstpage=(TUint8*)_ALIGN_DOWN((TLinAddr)aChunk.Base(), PageSize);
	for (TInt i=0; i < Repitions; i++)
		{
		TInt r = pagemove.TryMovingUserPage(firstpage, ETrue);
		// Allow all valid return codes as we are only testing that this doesn't
		// crash the kernel; the page could be committed, paged out or decommitted
		// at any one time.
		test_Value(r, r <= KErrNone);
		}

	thread.Kill(KErrNone);
	User::WaitForRequest(s);
	test_Equal(EExitKill,thread.ExitType());
	test_KErrNone(thread.ExitReason());
	thread.Close();
	}
Example #6
0
void flush_tlb_pmd_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr)
{
	pte_t *pte;
	pte_t *start_pte;
	unsigned long flags;

	addr = _ALIGN_DOWN(addr, PMD_SIZE);
	/* Note: Normally, we should only ever use a batch within a
	 * PTE locked section. This violates the rule, but will work
	 * since we don't actually modify the PTEs, we just flush the
	 * hash while leaving the PTEs intact (including their reference
	 * to being hashed). This is not the most performance oriented
	 * way to do things but is fine for our needs here.
	 */
	local_irq_save(flags);
	arch_enter_lazy_mmu_mode();
	start_pte = pte_offset_map(pmd, addr);
	for (pte = start_pte; pte < start_pte + PTRS_PER_PTE; pte++) {
		unsigned long pteval = pte_val(*pte);
		if (pteval & _PAGE_HASHPTE)
			hpte_need_flush(mm, addr, pte, pteval, 0);
		addr += PAGE_SIZE;
	}
	arch_leave_lazy_mmu_mode();
	local_irq_restore(flags);
}
Example #7
0
static int ps3_mm_region_create(struct mem_region *r, unsigned long size)
{
	int result;
	u64 muid;

	r->size = _ALIGN_DOWN(size, 1 << PAGE_SHIFT_16M);

	DBG("%s:%d requested  %lxh\n", __func__, __LINE__, size);
	DBG("%s:%d actual     %llxh\n", __func__, __LINE__, r->size);
	DBG("%s:%d difference %llxh (%lluMB)\n", __func__, __LINE__,
		size - r->size, (size - r->size) / 1024 / 1024);

	if (r->size == 0) {
		DBG("%s:%d: size == 0\n", __func__, __LINE__);
		result = -1;
		goto zero_region;
	}

	result = lv1_allocate_memory(r->size, PAGE_SHIFT_16M, 0,
		ALLOCATE_MEMORY_TRY_ALT_UNIT, &r->base, &muid);

	if (result || r->base < map.rm.size) {
		DBG("%s:%d: lv1_allocate_memory failed: %s\n",
			__func__, __LINE__, ps3_result(result));
		goto zero_region;
	}

	r->destroy = 1;
	r->offset = r->base - map.rm.size;
	return result;

zero_region:
	r->size = r->base = r->offset = 0;
	return result;
}
Example #8
0
void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
			      unsigned long end)
{
	unsigned long flags;

	start = _ALIGN_DOWN(start, PAGE_SIZE);
	end = _ALIGN_UP(end, PAGE_SIZE);

	BUG_ON(!mm->pgd);

	/* Note: Normally, we should only ever use a batch within a
	 * PTE locked section. This violates the rule, but will work
	 * since we don't actually modify the PTEs, we just flush the
	 * hash while leaving the PTEs intact (including their reference
	 * to being hashed). This is not the most performance oriented
	 * way to do things but is fine for our needs here.
	 */
	local_irq_save(flags);
	arch_enter_lazy_mmu_mode();
	for (; start < end; start += PAGE_SIZE) {
		pte_t *ptep = find_linux_pte(mm->pgd, start);
		unsigned long pte;

		if (ptep == NULL)
			continue;
		pte = pte_val(*ptep);
		if (!(pte & _PAGE_HASHPTE))
			continue;
		hpte_need_flush(mm, start, ptep, pte, 0);
	}
	arch_leave_lazy_mmu_mode();
	local_irq_restore(flags);
}
Example #9
0
unsigned long __init __lmb_alloc_base(unsigned long size, unsigned long align,
				    unsigned long max_addr)
{
	long i, j;
	unsigned long base = 0;

	BUG_ON(0 == size);

#ifdef CONFIG_PPC32
	/* On 32-bit, make sure we allocate lowmem */
	if (max_addr == LMB_ALLOC_ANYWHERE)
		max_addr = __max_low_memory;
#endif
	for (i = lmb.memory.cnt-1; i >= 0; i--) {
		unsigned long lmbbase = lmb.memory.region[i].base;
		unsigned long lmbsize = lmb.memory.region[i].size;

		if (max_addr == LMB_ALLOC_ANYWHERE)
			base = _ALIGN_DOWN(lmbbase + lmbsize - size, align);
		else if (lmbbase < max_addr) {
			base = min(lmbbase + lmbsize, max_addr);
			base = _ALIGN_DOWN(base - size, align);
		} else
			continue;

		while ((lmbbase <= base) &&
		       ((j = lmb_overlaps_region(&lmb.reserved, base, size)) >= 0) )
			base = _ALIGN_DOWN(lmb.reserved.region[j].base - size,
					   align);

		if ((base != 0) && (lmbbase <= base))
			break;
	}

	if (i < 0)
		return 0;

	lmb_add_region(&lmb.reserved, base, size);

	return base;
}
Example #10
0
void __init wii_memory_fixups(void)
{
	struct memblock_region *p = memblock.memory.regions;

	/*
	 * This is part of a workaround to allow the use of two
	 * discontinuous RAM ranges on the Wii, even if this is
	 * currently unsupported on 32-bit PowerPC Linux.
	 *
	 * We coalesce the two memory ranges of the Wii into a
	 * single range, then create a reservation for the "hole"
	 * between both ranges.
	 */

	BUG_ON(memblock.memory.cnt != 2);
	BUG_ON(!page_aligned(p[0].base) || !page_aligned(p[1].base));

	p[0].size = _ALIGN_DOWN(p[0].size, PAGE_SIZE);
	p[1].size = _ALIGN_DOWN(p[1].size, PAGE_SIZE);

	wii_hole_start = p[0].base + p[0].size;
	wii_hole_size = p[1].base - wii_hole_start;

	pr_info("MEM1: <%08llx %08llx>\n",
		(unsigned long long) p[0].base, (unsigned long long) p[0].size);
	pr_info("HOLE: <%08lx %08lx>\n", wii_hole_start, wii_hole_size);
	pr_info("MEM2: <%08llx %08llx>\n",
		(unsigned long long) p[1].base, (unsigned long long) p[1].size);

	p[0].size += wii_hole_size + p[1].size;

	memblock.memory.cnt = 1;
	memblock_analyze();

	/* reserve the hole */
	memblock_reserve(wii_hole_start, wii_hole_size);

	/* allow ioremapping the address space in the hole */
	__allow_ioremap_reserved = 1;
}
Example #11
0
u64
lmb_alloc_base(u64 size, u64 align, u64 max_addr)
{
	long i, j;
	u64 base = 0;
	u64 offset = reloc_offset();
	struct lmb *_lmb = PTRRELOC(&lmb);
	struct lmb_region *_mem = &(_lmb->memory);
	struct lmb_region *_rsv = &(_lmb->reserved);

	for (i=_mem->cnt-1; i >= 0; i--) {
		u64 lmbbase = _mem->region[i].base;
		u64 lmbsize = _mem->region[i].size;

		if ( max_addr == LMB_ALLOC_ANYWHERE )
			base = _ALIGN_DOWN(lmbbase+lmbsize-size, align);
		else if ( lmbbase < max_addr )
			base = _ALIGN_DOWN(min(lmbbase+lmbsize,max_addr)-size, align);
		else
			continue;

		while ( (lmbbase <= base) &&
			((j = lmb_overlaps_region(_rsv,base,size)) >= 0) ) {
			base = _ALIGN_DOWN(_rsv->region[j].base-size, align);
		}

		if ( (base != 0) && (lmbbase <= base) )
			break;
	}

	if ( i < 0 )
		return 0;

	lmb_add_region(_rsv, base, size);

	return base;
}
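Note: both lmb allocators above implement the same top-down strategy: start from the highest usable address in each memory region and, whenever the candidate range overlaps a reserved region, drop the base to just below that reservation. A toy single-region, single-reservation trace (all values invented for illustration):

#include <stdio.h>

#define _ALIGN_DOWN(a, s)	((a) & ~((s) - 1))

/* one invented reservation covering the top 16K of a 64K region */
static unsigned long rsv_base = 0xc000, rsv_size = 0x4000;

static int overlaps(unsigned long base, unsigned long size)
{
	return base < rsv_base + rsv_size && rsv_base < base + size;
}

int main(void)
{
	unsigned long lmbbase = 0, lmbsize = 0x10000;
	unsigned long size = 0x2000, align = 0x1000;
	unsigned long base = _ALIGN_DOWN(lmbbase + lmbsize - size, align);

	/* step below each overlapping reservation, as the while loops do */
	while (lmbbase <= base && overlaps(base, size))
		base = _ALIGN_DOWN(rsv_base - size, align);

	printf("allocated at %#lx\n", base);	/* prints: allocated at 0xa000 */
	return 0;
}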
Example #12
0
void add_usable_mem_rgns(unsigned long long base, unsigned long long size)
{
	unsigned int i;
	unsigned long long end = base + size;
	unsigned long long ustart, uend;

	base = _ALIGN_DOWN(base, getpagesize());
	end = _ALIGN_UP(end, getpagesize());

	for (i=0; i < usablemem_rgns.size; i++) {
		ustart = usablemem_rgns.ranges[i].start;
		uend = usablemem_rgns.ranges[i].end;
		if (base < uend && end > ustart) {
			if ((base >= ustart) && (end <= uend))
				return;
			if (base < ustart && end > uend) {
				usablemem_rgns.ranges[i].start = base;
				usablemem_rgns.ranges[i].end = end;
#ifdef DEBUG
				fprintf(stderr, "usable memory rgn %u: new base:%llx new size:%llx\n",
					i, base, size);
#endif
				return;
			} else if (base < ustart) {
				usablemem_rgns.ranges[i].start = base;
#ifdef DEBUG
				fprintf(stderr, "usable memory rgn %u: new base:%llx new size:%llx",
					i, base, usablemem_rgns.ranges[i].end - base);
#endif
				return;
			} else if (end > uend){
				usablemem_rgns.ranges[i].end = end;
#ifdef DEBUG
				fprintf(stderr, "usable memory rgn %u: new end:%llx, new size:%llx",
					i, end, end - usablemem_rgns.ranges[i].start);
#endif
				return;
			}
		}
	}
	usablemem_rgns.ranges[usablemem_rgns.size].start = base;
	usablemem_rgns.ranges[usablemem_rgns.size++].end = end;

	dbgprintf("usable memory rgns size:%u base:%llx size:%llx\n",
		usablemem_rgns.size, base, size);
}
Example #13
0
void __ref vmemmap_free(unsigned long start, unsigned long end)
{
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;

	start = _ALIGN_DOWN(start, page_size);

	pr_debug("vmemmap_free %lx...%lx\n", start, end);

	for (; start < end; start += page_size) {
		unsigned long addr;

		/*
		 * The section has already been marked invalid, so if
		 * vmemmap_populated() returns true some other section is
		 * still using this page; skip it.
		 */
		if (vmemmap_populated(start, page_size))
			continue;

		addr = vmemmap_list_free(start);
		if (addr) {
			struct page *page = pfn_to_page(addr >> PAGE_SHIFT);

			if (PageReserved(page)) {
				/* allocated from bootmem */
				if (page_size < PAGE_SIZE) {
					/*
					 * this shouldn't happen, but if it is
					 * the case, leave the memory there
					 */
					WARN_ON_ONCE(1);
				} else {
					unsigned int nr_pages =
						1 << get_order(page_size);
					while (nr_pages--)
						free_reserved_page(page++);
				}
			} else
				free_pages((unsigned long)(__va(addr)),
							get_order(page_size));

			vmemmap_remove_mapping(start, page_size);
		}
	}
}
Example #14
0
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
		struct vmem_altmap *altmap)
{
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;

	/* Align to the page size of the linear mapping. */
	start = _ALIGN_DOWN(start, page_size);

	pr_debug("vmemmap_populate %lx..%lx, node %d\n", start, end, node);

	for (; start < end; start += page_size) {
		void *p = NULL;
		int rc;

		if (vmemmap_populated(start, page_size))
			continue;

		/*
		 * Allocate from the altmap first if we have one. This may
		 * fail due to alignment issues when using 16MB hugepages, so
		 * fall back to system memory if the altmap allocation fails.
		 */
		if (altmap)
			p = altmap_alloc_block_buf(page_size, altmap);
		if (!p)
			p = vmemmap_alloc_block_buf(page_size, node);
		if (!p)
			return -ENOMEM;

		vmemmap_list_populate(__pa(p), start, node);

		pr_debug("      * %016lx..%016lx allocated at %p\n",
			 start, start + page_size, p);

		rc = vmemmap_create_mapping(start, page_size, __pa(p));
		if (rc < 0) {
			pr_warn("%s: Unable to create vmemmap mapping: %d\n",
				__func__, rc);
			return -EFAULT;
		}
	}

	return 0;
}
Example #15
0
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;

	/* Align to the page size of the linear mapping. */
	start = _ALIGN_DOWN(start, page_size);

	pr_debug("vmemmap_populate %lx..%lx, node %d\n", start, end, node);

	for (; start < end; start += page_size) {
		struct vmem_altmap *altmap;
		void *p;
		int rc;

		if (vmemmap_populated(start, page_size))
			continue;

		/* altmap lookups only work at section boundaries */
		altmap = to_vmem_altmap(SECTION_ALIGN_DOWN(start));

		p = __vmemmap_alloc_block_buf(page_size, node, altmap);
		if (!p)
			return -ENOMEM;

		vmemmap_list_populate(__pa(p), start, node);

		pr_debug("      * %016lx..%016lx allocated at %p\n",
			 start, start + page_size, p);

		rc = vmemmap_create_mapping(start, page_size, __pa(p));
		if (rc < 0) {
			pr_warning(
				"vmemmap_populate: Unable to create vmemmap mapping: %d\n",
				rc);
			return -EFAULT;
		}
	}

	return 0;
}
Example #16
0
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;

	/* Align to the page size of the linear mapping. */
	start = _ALIGN_DOWN(start, page_size);

	pr_debug("vmemmap_populate %lx..%lx, node %d\n", start, end, node);

	for (; start < end; start += page_size) {
		void *p;
		int rc;

		if (vmemmap_populated(start, page_size))
			continue;

		p = vmemmap_alloc_block(page_size, node);
		if (!p)
			return -ENOMEM;

		vmemmap_list_populate(__pa(p), start, node);

		pr_debug("      * %016lx..%016lx allocated at %p\n",
			 start, start + page_size, p);

		rc = vmemmap_create_mapping(start, page_size, __pa(p));
		if (rc < 0) {
			pr_warning(
				"vmemmap_populate: Unable to create vmemmap mapping: %d\n",
				rc);
			return -EFAULT;
		}
	}

	return 0;
}
Example #17
0
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/platform_device.h>

#include <asm/irq.h>

#include <stmdisplay.h>
#include <linux/stm/stmcoredisplay.h>

#include <soc/sti5206/sti5206reg.h>
#include <soc/sti5206/sti5206device.h>

static const unsigned long whitelist[] = {
    STi5206_REGISTER_BASE + STi5206_DENC_BASE,
    _ALIGN_DOWN(STi5206_REGISTER_BASE + STi5206_BLITTER_BASE, PAGE_SIZE),
};


static struct stmcore_display_pipeline_data platform_data[] = {
  {
    .owner                    = THIS_MODULE,
    .name                     = "STi5206-main",
    .device                   = 0,
    .vtg_irq                  = evt2irq(0x1540),
    .blitter_irq              = evt2irq(0x15C0),
    .blitter_irq_kernel       = evt2irq(0x15E0),

/* HDMI is connected via an external HDMI TX,
 * which is fed by the SII9024 DVO */
    .hdmi_irq                 = -1,
Example #18
0
static int __init parse_numa_properties(void)
{
	struct device_node *cpu = NULL;
	struct device_node *memory = NULL;
	int depth;
	int max_domain = 0;
	long entries = lmb_end_of_DRAM() >> MEMORY_INCREMENT_SHIFT;
	unsigned long i;

	if (strstr(saved_command_line, "numa=off")) {
		printk(KERN_WARNING "NUMA disabled by user\n");
		return -1;
	}

	numa_memory_lookup_table =
		(char *)abs_to_virt(lmb_alloc(entries * sizeof(char), 1));

	for (i = 0; i < entries ; i++)
		numa_memory_lookup_table[i] = ARRAY_INITIALISER;

	depth = find_min_common_depth();

	printk(KERN_INFO "NUMA associativity depth for CPU/Memory: %d\n", depth);
	if (depth < 0)
		return depth;

	for_each_cpu(i) {
		int numa_domain;

		cpu = find_cpu_node(i);

		if (cpu) {
			numa_domain = of_node_numa_domain(cpu, depth);
			of_node_put(cpu);

			if (numa_domain >= MAX_NUMNODES) {
				/*
				 * POWER4 LPAR uses 0xffff as invalid node,
				 * don't warn in this case.
				 */
				if (numa_domain != 0xffff)
					printk(KERN_ERR "WARNING: cpu %ld "
					       "maps to invalid NUMA node %d\n",
					       i, numa_domain);
				numa_domain = 0;
			}
		} else {
			printk(KERN_ERR "WARNING: no NUMA information for "
			       "cpu %ld\n", i);
			numa_domain = 0;
		}

		node_set_online(numa_domain);

		if (max_domain < numa_domain)
			max_domain = numa_domain;

		map_cpu_to_node(i, numa_domain);
	}

	memory = NULL;
	while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
		unsigned long start;
		unsigned long size;
		int numa_domain;
		int ranges;
		unsigned int *memcell_buf;
		unsigned int len;

		memcell_buf = (unsigned int *)get_property(memory, "reg", &len);
		if (!memcell_buf || len <= 0)
			continue;

		ranges = memory->n_addrs;
new_range:
		/* these are order-sensitive, and modify the buffer pointer */
		start = read_cell_ul(memory, &memcell_buf);
		size = read_cell_ul(memory, &memcell_buf);

		start = _ALIGN_DOWN(start, MEMORY_INCREMENT);
		size = _ALIGN_UP(size, MEMORY_INCREMENT);

		numa_domain = of_node_numa_domain(memory, depth);

		if (numa_domain >= MAX_NUMNODES) {
			if (numa_domain != 0xffff)
				printk(KERN_ERR "WARNING: memory at %lx maps "
				       "to invalid NUMA node %d\n", start,
				       numa_domain);
			numa_domain = 0;
		}

		node_set_online(numa_domain);

		if (max_domain < numa_domain)
			max_domain = numa_domain;

		/* 
		 * For backwards compatibility, OF splits the first node
		 * into two regions (the first being 0-4GB). Check for
		 * this simple case and complain if there is a gap in
		 * memory
		 */
		if (node_data[numa_domain].node_spanned_pages) {
			unsigned long shouldstart =
				node_data[numa_domain].node_start_pfn + 
				node_data[numa_domain].node_spanned_pages;
			if (shouldstart != (start / PAGE_SIZE)) {
				printk(KERN_ERR "Hole in node, disabling "
						"region start %lx length %lx\n",
						start, size);
				continue;
			}
			node_data[numa_domain].node_spanned_pages +=
				size / PAGE_SIZE;
		} else {
			node_data[numa_domain].node_start_pfn =
				start / PAGE_SIZE;
			node_data[numa_domain].node_spanned_pages =
				size / PAGE_SIZE;
		}

		for (i = start ; i < (start+size); i += MEMORY_INCREMENT)
			numa_memory_lookup_table[i >> MEMORY_INCREMENT_SHIFT] =
				numa_domain;

		dbg("memory region %lx to %lx maps to domain %d\n",
		    start, start+size, numa_domain);

		ranges--;
		if (ranges)
			goto new_range;
	}

	numnodes = max_domain + 1;

	return 0;
}
Example #19
0
static int __init parse_numa_properties(void)
{
	struct device_node *cpu;
	struct device_node *memory;
	int *cpu_associativity;
	int *memory_associativity;
	int depth;
	int max_domain = 0;

	cpu = find_type_devices("cpu");
	if (!cpu)
		return -1;

	memory = find_type_devices("memory");
	if (!memory)
		return -1;

	cpu_associativity = (int *)get_property(cpu, "ibm,associativity", NULL);
	if (!cpu_associativity)
		return -1;

	memory_associativity = (int *)get_property(memory, "ibm,associativity",
						   NULL);
	if (!memory_associativity)
		return -1;

	/* find common depth */
	if (cpu_associativity[0] < memory_associativity[0])
		depth = cpu_associativity[0];
	else
		depth = memory_associativity[0];

	for (cpu = find_type_devices("cpu"); cpu; cpu = cpu->next) {
		int *tmp;
		int cpu_nr, numa_domain;

		tmp = (int *)get_property(cpu, "reg", NULL);
		if (!tmp)
			continue;
		cpu_nr = *tmp;

		tmp = (int *)get_property(cpu, "ibm,associativity",
					  NULL);
		if (!tmp)
			continue;
		numa_domain = tmp[depth];

		/* FIXME */
		if (numa_domain == 0xffff) {
			dbg("cpu %d has no numa doman\n", cpu_nr);
			numa_domain = 0;
		}

		if (numa_domain >= MAX_NUMNODES)
			BUG();

		if (max_domain < numa_domain)
			max_domain = numa_domain;

		map_cpu_to_node(cpu_nr, numa_domain);
	}

	for (memory = find_type_devices("memory"); memory;
	     memory = memory->next) {
		int *tmp1, *tmp2;
		unsigned long i;
		unsigned long start = 0;
		unsigned long size = 0;
		int numa_domain;
		int ranges;

		tmp1 = (int *)get_property(memory, "reg", NULL);
		if (!tmp1)
			continue;

		ranges = memory->n_addrs;
new_range:

		i = prom_n_addr_cells(memory);
		while (i--) {
			start = (start << 32) | *tmp1;
			tmp1++;
		}

		i = prom_n_size_cells(memory);
		while (i--) {
			size = (size << 32) | *tmp1;
			tmp1++;
		}

		start = _ALIGN_DOWN(start, MEMORY_INCREMENT);
		size = _ALIGN_UP(size, MEMORY_INCREMENT);

		if ((start + size) > MAX_MEMORY)
			BUG();

		tmp2 = (int *)get_property(memory, "ibm,associativity",
					   NULL);
		if (!tmp2)
			continue;
		numa_domain = tmp2[depth];

		/* FIXME */
		if (numa_domain == 0xffff) {
			dbg("memory has no numa doman\n");
			numa_domain = 0;
		}

		if (numa_domain >= MAX_NUMNODES)
			BUG();

		if (max_domain < numa_domain)
			max_domain = numa_domain;

		/* 
		 * For backwards compatibility, OF splits the first node
		 * into two regions (the first being 0-4GB). Check for
		 * this simple case and complain if there is a gap in
		 * memory
		 */
		if (node_data[numa_domain].node_spanned_pages) {
			unsigned long shouldstart =
				node_data[numa_domain].node_start_pfn + 
				node_data[numa_domain].node_spanned_pages;
			if (shouldstart != (start / PAGE_SIZE)) {
				printk(KERN_ERR "Hole in node, disabling "
						"region start %lx length %lx\n",
						start, size);
				continue;
			}
			node_data[numa_domain].node_spanned_pages += size / PAGE_SIZE;
		} else {
			node_data[numa_domain].node_start_pfn =
				start / PAGE_SIZE;
			node_data[numa_domain].node_spanned_pages = size / PAGE_SIZE;
		}

		for (i = start ; i < (start+size); i += MEMORY_INCREMENT)
			numa_memory_lookup_table[i >> MEMORY_INCREMENT_SHIFT] =
				numa_domain;

		dbg("memory region %lx to %lx maps to domain %d\n",
		    start, start+size, numa_domain);

		ranges--;
		if (ranges)
			goto new_range;
	}

	numnodes = max_domain + 1;

	return 0;
}
Example #20
0
void TestMovingCodeChunk(RPageMove& pagemove, RChunk aChunk, TBool aPagedData)
	{
	TUint8* p = aChunk.Base();

	TUint8* firstpage = (TUint8*)_ALIGN_DOWN((TLinAddr)p, PageSize);
	RThread thread;
	thread.Open(RThread().Id());
	SPinThreadArgs threadArgs;
	threadArgs.iLinAddr = (TLinAddr)p;
	threadArgs.iParentThread = thread;

	test.Printf(_L("Attempt to move pages while they are being executed and modified\n"));
	ThreadDie = EFalse;
	RThread modCodeThread;
	TRequestStatus s;
	test_KErrNone(modCodeThread.Create(_L("User Data thread"), &ModifyCodeThread, KDefaultStackSize, NULL, &threadArgs));
	modCodeThread.Logon(s);
	TRequestStatus threadInitialised;
	modCodeThread.Rendezvous(threadInitialised);
	modCodeThread.Resume();

	_T_PRINTF(_L("wait for child\n"));
	User::WaitForRequest(threadInitialised);
	test_KErrNone(threadInitialised.Int());

	_T_PRINTF(_L("Move code chunk page repeatedly\n"));
	TBool success=EFalse;
	*(volatile TUint8*)p = *p; // Ensure the page of the first entry is paged in for the first move.
	for (TInt i=0; i < Repitions; i++)
		{
		TInt r = pagemove.TryMovingUserPage(firstpage, ETrue);
		if (i == 0)
			{// If this is the first run, allow the modifying thread to run now
			// that we've done one move.
			_T_PRINTF(_L("signal to child\n"));
			RThread::Rendezvous(KErrNone);
			}
		switch (r)
			{
			case KErrInUse:
				break;
			case KErrArgument:
				// The page was paged out, this should only happen for paged data.
				test(aPagedData);
				break;
			default:
				test_KErrNone(r);
				success=ETrue;
				break;
			}
		}
	test(success);

	ThreadDie = ETrue;
	User::WaitForRequest(s);
	test_Equal(EExitKill,modCodeThread.ExitType());
	test_KErrNone(modCodeThread.ExitReason());
	modCodeThread.Close();

	thread.Close();
	}
Example #21
0
int cpm_console_init(void *devp, struct serial_console_data *scdp)
{
	void *vreg[2];
	u32 reg[2];
	int is_smc = 0, is_cpm2 = 0;
	void *parent, *muram;
	void *muram_addr;
	unsigned long muram_offset, muram_size;

	if (dt_is_compatible(devp, "fsl,cpm1-smc-uart")) {
		is_smc = 1;
	} else if (dt_is_compatible(devp, "fsl,cpm2-scc-uart")) {
		is_cpm2 = 1;
	} else if (dt_is_compatible(devp, "fsl,cpm2-smc-uart")) {
		is_cpm2 = 1;
		is_smc = 1;
	}

	if (is_smc) {
		enable_port = smc_enable_port;
		disable_port = smc_disable_port;
	} else {
		enable_port = scc_enable_port;
		disable_port = scc_disable_port;
	}

	if (is_cpm2)
		do_cmd = cpm2_cmd;
	else
		do_cmd = cpm1_cmd;

	if (getprop(devp, "fsl,cpm-command", &cpm_cmd, 4) < 4)
		return -1;

	if (dt_get_virtual_reg(devp, vreg, 2) < 2)
		return -1;

	if (is_smc)
		smc = vreg[0];
	else
		scc = vreg[0];

	param = vreg[1];

	parent = get_parent(devp);
	if (!parent)
		return -1;

	if (dt_get_virtual_reg(parent, &cpcr, 1) < 1)
		return -1;

	muram = finddevice("/soc/cpm/muram/data");
	if (!muram)
		return -1;

	/* For bootwrapper-compatible device trees, we assume that the first
	 * entry has at least 128 bytes, and that #address-cells/#size-cells
	 * is one for both parent and child.
	 */

	if (dt_get_virtual_reg(muram, &muram_addr, 1) < 1)
		return -1;

	if (getprop(muram, "reg", reg, 8) < 8)
		return -1;

	muram_offset = reg[0];
	muram_size = reg[1];

	/* Store the buffer descriptors at the end of the first muram chunk.
	 * For SMC ports on CPM2-based platforms, relocate the parameter RAM
	 * just before the buffer descriptors.
	 */

	cbd_offset = muram_offset + muram_size - 2 * sizeof(struct cpm_bd);

	if (is_cpm2 && is_smc) {
		u16 *smc_base = (u16 *)param;
		u16 pram_offset;

		pram_offset = cbd_offset - 64;
		pram_offset = _ALIGN_DOWN(pram_offset, 64);

		disable_port();
		out_be16(smc_base, pram_offset);
		param = muram_addr - muram_offset + pram_offset;
	}

	cbd_addr = muram_addr - muram_offset + cbd_offset;

	scdp->open = cpm_serial_open;
	scdp->putc = cpm_serial_putc;
	scdp->getc = cpm_serial_getc;
	scdp->tstc = cpm_serial_tstc;

	return 0;
}
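Note: to make the muram layout arithmetic above concrete, a standalone sketch (all values are assumptions for illustration; real offsets and sizes come from the device tree, and sizeof(struct cpm_bd) is taken to be 8 bytes here):

#include <stdio.h>

#define _ALIGN_DOWN(a, s)	((a) & ~((s) - 1))

int main(void)
{
	unsigned long muram_offset = 0x0, muram_size = 0x2000;	/* assumed */
	unsigned long bd_size = 8;	/* assumed sizeof(struct cpm_bd) */
	unsigned long cbd_offset, pram_offset;

	/* two buffer descriptors at the very end of the muram chunk */
	cbd_offset = muram_offset + muram_size - 2 * bd_size;

	/* 64 bytes of SMC parameter RAM just below, 64-byte aligned */
	pram_offset = _ALIGN_DOWN(cbd_offset - 64, 64);

	printf("bds at %#lx, pram at %#lx\n", cbd_offset, pram_offset);
	/* prints: bds at 0x1ff0, pram at 0x1f80 */
	return 0;
}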
Example #22
0
void TestMovingRealtime(RPageMove& aPagemove, TUint8* aArray, TInt aSize, TTestFunction aFunc, TBool aCode, TBool aPaged=EFalse)
	{
	TThreadFunction threadFunc;
	TLinAddr pageAddr;
	RThread thread;
	TUint8* firstpage;
	thread.Open(RThread().Id());
	SPinThreadArgs threadArgs;
	threadArgs.iParentThread = thread;
	if (aCode)
		{
		pageAddr = (TLinAddr)aFunc;
		firstpage = (TUint8*)_ALIGN_DOWN(pageAddr, PageSize);
		threadArgs.iLinAddr = (TLinAddr)firstpage;
		threadFunc = RunCodeThread;
		threadArgs.iTestFunc = aFunc;
		test_Equal(KArbitraryNumber, aFunc());
		}
	else
		{
		pageAddr = (TLinAddr)aArray;
		firstpage = (TUint8*)_ALIGN_DOWN(pageAddr, PageSize);
		threadArgs.iLinAddr = (TLinAddr)aArray;
		threadFunc = ReadWriteByte;
		_T_PRINTF(_L("Fill the array with some data\n"));
		for (TInt i=0; i<aSize; i++) aArray[i] = i*i;
		}

	RMemoryTestLdd ldd;

	TMovingPinStage endStage = EMovingPinStages;
	if (gPinningSupported)
		{
		test_KErrNone(ldd.Open());
		test_KErrNone(ldd.CreateVirtualPinObject());
		test_KErrNone(ldd.CreatePhysicalPinObject());
		}
	else
		endStage = EVirtualPinning;

	for (TUint state = ENoPinning; state < (TUint)endStage; state++)
		{
		switch (state)
			{
			case ENoPinning:
				test.Printf(_L("Attempt to move pages while they are being accessed\n"));
				break;
			case EVirtualPinning:
				test.Printf(_L("Attempt to move pages while they are virtually pinned\n"));
				test_KErrNone(ldd.PinVirtualMemory((TLinAddr)firstpage, PageSize));
				break;
			case EPhysicalPinning:
				test.Printf(_L("Attempt to move pages while they are physically pinned\n"));
				test_KErrNone(ldd.PinPhysicalMemoryRO((TLinAddr)firstpage, PageSize));
				break;
			}
		for (	TUint realtimeState = User::ERealtimeStateOff; 
				realtimeState <= User::ERealtimeStateWarn; 
				realtimeState++)
			{
			ThreadDie = EFalse;
			RThread accessThread;
			TRequestStatus s;
			threadArgs.iRealtimeState = (User::TRealtimeState)realtimeState;
			test_KErrNone(accessThread.Create(_L("Realtime Thread"), threadFunc, KDefaultStackSize, NULL, &threadArgs));
			accessThread.Logon(s);
			TRequestStatus threadInitialised;
			accessThread.Rendezvous(threadInitialised);
			accessThread.Resume();

			_T_PRINTF(_L("wait for child\n"));
			User::WaitForRequest(threadInitialised);
			test_KErrNone(threadInitialised.Int());

			_T_PRINTF(_L("Move page repeatedly\n"));
			TBool success=EFalse, pagedOut=EFalse;
			TUint inuse=0;
			if (aCode)
				{
				test_Equal(KArbitraryNumber, aFunc());
				}
			else
				{
				*(volatile TUint8*)aArray = *aArray;
				}

			for (TInt i=0; i < Repitions; i++)
				{
				TInt r = aPagemove.TryMovingUserPage(firstpage, ETrue);
				if (i == 0)
					{
					_T_PRINTF(_L("signal to child\n"));
					RThread::Rendezvous(KErrNone);
					}
				switch (r)
					{
					case KErrInUse:
						inuse++;
						break;
					case KErrArgument:
						// The page was paged out; this should only happen for paged memory.
						test(aPaged);
						pagedOut = ETrue;
						break;
					default:
						test_KErrNone(r);
						success=ETrue;
						break;
					}
				}
			ThreadDie = ETrue;
			User::WaitForRequest(s);
			test.Printf(_L("inuse %d\n"),inuse);
			switch (state)
				{
				case ENoPinning :
					test(success);
					if (EExitPanic == accessThread.ExitType())
						{
						test(accessThread.ExitCategory()==_L("KERN-EXEC"));
						test_Equal(EIllegalFunctionForRealtimeThread, accessThread.ExitReason());
						test(aPaged && realtimeState == User::ERealtimeStateOn);
						}
					else
						{
						test_Equal(EExitKill,accessThread.ExitType());
						test_KErrNone(accessThread.ExitReason());
						}
					// Ensure the page is paged in before we attempt to move it again with a different realtime state.
					if (aCode)
						{
						test_Equal(KArbitraryNumber, aFunc());
						}
					else
						{
						*(volatile TUint8*)aArray = *aArray;
						}
					break;				
				case EVirtualPinning :
					test(!aCode || !inuse);
					test(success);
					test(!pagedOut);
					test_Equal(EExitKill,accessThread.ExitType());
					test_KErrNone(accessThread.ExitReason());
					break;
				case EPhysicalPinning :
					test(!success);
					break;
				}
			accessThread.Close();
			}
		if (gPinningSupported)
			{
			// Unpin any pinned memory.
			test_KErrNone(ldd.UnpinVirtualMemory());
			test_KErrNone(ldd.UnpinPhysicalMemory());
			}

		_T_PRINTF(_L("Validate page data\n"));
		if (aCode)
			{
			test_Equal(KArbitraryNumber, aFunc());
			}
		else
			{
			for (TInt i=0; i<aSize; i++)
				test_Equal((TUint8)(i*i), aArray[i]);
			}
			
		}
	if (gPinningSupported)
		{
		test_KErrNone(ldd.DestroyVirtualPinObject());
		test_KErrNone(ldd.DestroyPhysicalPinObject());
		ldd.Close();
		}
	thread.Close();
	}
Example #23
0
void TestMovingCode(RPageMove& aPagemove, TTestFunction aFunc, TBool aPaged=EFalse)
	{
	TUint8* firstpage = (TUint8*)_ALIGN_DOWN((TLinAddr)aFunc, PageSize);
	RThread thread;
	thread.Open(RThread().Id());
	SPinThreadArgs threadArgs;
	threadArgs.iLinAddr = (TLinAddr)firstpage;
	threadArgs.iTestFunc = aFunc;
	threadArgs.iParentThread = thread;
	threadArgs.iRealtimeState = User::ERealtimeStateOff;

	TMovingPinStage endStage = EMovingPinStages;
	if (!gPinningSupported)
		endStage = EVirtualPinning;

	for (TUint state = ENoPinning; state < (TUint)endStage; state++)
		{
		TThreadFunction threadFunc = NULL;
		switch (state)
			{
			case ENoPinning:
				test.Printf(_L("Attempt to move pages while they are being executed\n"));
				threadFunc = &RunCodeThread;
				test_Equal(KArbitraryNumber, aFunc()); // Ensure the page is paged in.
				break;
			case EVirtualPinning:
				test.Printf(_L("Attempt to move pages while they are being virtually pinned\n"));
				threadFunc = &VirtualPinPage;
				break;
			case EPhysicalPinning:
				test.Printf(_L("Attempt to move pages while they are being physically pinned\n"));
				threadFunc = &PhysicalPinPage;
				break;
			}
		ThreadDie = EFalse;
		TUint numThreads = (NumberOfCpus > 1) ? NumberOfCpus - 1 : 1;
		RThread* codeRunThread = new RThread[numThreads];
		TRequestStatus* s = new TRequestStatus[numThreads];
		StartThreads(numThreads, codeRunThread, s, threadFunc, threadArgs);

		_T_PRINTF(_L("Move first code page repeatedly\n"));
		test_Equal(KArbitraryNumber, aFunc());	
		TBool inuse=EFalse, success=EFalse;
		for (TInt i=0; i < Repitions; i++)
			{
			TInt r = aPagemove.TryMovingUserPage(firstpage, ETrue);
			if (i == 0)
				{// If this is the first run allow the pinning threads to 
				// unpin the memory now that we've definitely done at least 
				// one page move with the page pinned.
				_T_PRINTF(_L("signal to child\n"));
				RThread::Rendezvous(KErrNone);
				}
			switch (r)
				{
				case KErrInUse:
					inuse=ETrue;
					break;
				case KErrArgument:
					// The page was paged out, this should only happen for paged code.
					test(aPaged);
					break;
				default:
					test_KErrNone(r);
					success=ETrue;
					break;
				}
			}
		// Physical pinning or adding a new pinning while a page is being moved
		// should prevent code pages being moved.
		switch (state)
			{
			case ENoPinning :			
				test(!inuse || aPaged);	// Stealing may get KErrInUse but this should only happen for paged code.
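				// Fall through: test(success) below applies to the unpinned case too.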
			case EVirtualPinning :
				test(success);
				break;
			case EPhysicalPinning :
				break;
			}

		ThreadDie = ETrue;
		EndThreads(numThreads, codeRunThread, s);

		_T_PRINTF(_L("Validate page data\n"));
		test_Equal(KArbitraryNumber, aFunc());		
		}
	thread.Close();
	}
Example #24
0
void TestUserData(RPageMove& pagemove, TUint8* array, TInt size, TBool aPagedData=EFalse)
	{
	_T_PRINTF(_L("Fill the array with some data\n"));
	for (TInt i=0; i<size; i++) array[i] = i*i;

	TUint8* firstpage = (TUint8*)_ALIGN_DOWN((TLinAddr)array, PageSize);
	RThread thread;
	thread.Open(RThread().Id());
	SPinThreadArgs threadArgs;
	threadArgs.iLinAddr = (TLinAddr)array;
	threadArgs.iParentThread = thread;
	threadArgs.iRealtimeState = User::ERealtimeStateOff;

	TMovingPinStage endStage = EMovingPinStages;
	if (!gPinningSupported)
		endStage = EVirtualPinning;

	for (TUint state = ENoPinning; state < (TUint)endStage; state++)
		{
		TThreadFunction threadFunc = NULL;
		switch (state)
			{
			case ENoPinning:
				test.Printf(_L("Attempt to move pages while they are being modified\n"));
				threadFunc = &ReadWriteByte;
				break;
			case EVirtualPinning:
				test.Printf(_L("Attempt to move pages while they are being virtually pinned\n"));
				threadFunc = &VirtualPinPage;
				break;
			case EPhysicalPinning:
				test.Printf(_L("Attempt to move pages while they are being physically pinned\n"));
				threadFunc = &PhysicalPinPage;
				break;
			}
		ThreadDie = EFalse;
		TUint numThreads = (NumberOfCpus > 1) ? NumberOfCpus - 1 : 1;
		RThread* userDataThread = new RThread[numThreads];
		TRequestStatus* s = new TRequestStatus[numThreads];
		StartThreads(numThreads, userDataThread, s, threadFunc, threadArgs);

		_T_PRINTF(_L("Move first array page repeatedly\n"));
		TBool success=EFalse;
		TUint inuse = 0;
		*(volatile TUint8*)array = *array;	// Ensure the page of the first entry is paged in for the first move.
		for (TInt i=0; i < Repitions*2; i++)
			{
			TInt r = pagemove.TryMovingUserPage(firstpage, ETrue);
			if (i == 0)
				{// If this is the first run allow the pinning threads to 
				// unpin the memory now that we've definitely done at least 
				// one page move with the page pinned.
				_T_PRINTF(_L("signal to child\n"));
				RThread::Rendezvous(KErrNone);
				}
			switch (r)
				{
				case KErrInUse:
					inuse++;
					break;
				case KErrArgument:
					// The page was paged out, this should only happen for paged data.
					test(aPagedData);
					break;
				default:
					test_KErrNone(r);
					success=ETrue;
					break;
				}
			}
		// Can't guarantee that for paged data the page and its page tables will
		// be paged in; in most cases they will be at least once.
		// Pinning the page should always return KErrInUse except for virtually
		// pinned non-paged memory, as virtual pinning is a nop for unpaged memory.
		test.Printf(_L("inuse test removed; inuse %d\n"),inuse);
		//test(inuse || aPagedData || state == EVirtualPinning);
		test(success || state == EPhysicalPinning);

		ThreadDie = ETrue;
		EndThreads(numThreads, userDataThread, s);

		_T_PRINTF(_L("Validate page data\n"));
		for (TInt i=0; i<size; i++)
			test_Equal((TUint8)(i*i), array[i]);
		}
	thread.Close();
	}
Example #25
0
int zImage_arm_load(int argc, char **argv, const char *buf, off_t len,
	struct kexec_info *info)
{
	unsigned long base;
	unsigned int atag_offset = 0x1000; /* 4k offset from memory start */
	unsigned int offset = 0x8000;      /* 32k offset from memory start */
	const char *command_line;
	char *modified_cmdline = NULL;
	off_t command_line_len;
	const char *ramdisk;
	char *ramdisk_buf;
	int opt;
	int use_atags;
	char *dtb_buf;
	off_t dtb_length;
	char *dtb_file;
	off_t dtb_offset;
	char *end;

	/* See options.h -- add any more there, too. */
	static const struct option options[] = {
		KEXEC_ARCH_OPTIONS
		{ "command-line",	1, 0, OPT_APPEND },
		{ "append",		1, 0, OPT_APPEND },
		{ "initrd",		1, 0, OPT_RAMDISK },
		{ "ramdisk",		1, 0, OPT_RAMDISK },
		{ "dtb",		1, 0, OPT_DTB },
		{ "atags",		0, 0, OPT_ATAGS },
		{ "image-size",		1, 0, OPT_IMAGE_SIZE },
		{ "atags-file",		1, 0, OPT_ATAGS },
		{ 0, 			0, 0, 0 },
	};
	static const char short_options[] = KEXEC_ARCH_OPT_STR "a:r:";

	/*
	 * Parse the command line arguments
	 */
	command_line = 0;
	command_line_len = 0;
	ramdisk = 0;
	ramdisk_buf = 0;
	initrd_size = 0;
	use_atags = 0;
	dtb_file = NULL;
	while((opt = getopt_long(argc, argv, short_options, options, 0)) != -1) {
		switch(opt) {
		default:
			/* Ignore core options */
			if (opt < OPT_ARCH_MAX) {
				break;
			}
		case OPT_APPEND:
			command_line = optarg;
			break;
		case OPT_RAMDISK:
			ramdisk = optarg;
			break;
		case OPT_DTB:
			dtb_file = optarg;
			break;
		case OPT_ATAGS:
			use_atags = 1;
			break;
		case OPT_IMAGE_SIZE:
			kexec_arm_image_size = strtoul(optarg, &end, 0);
			break;
		case OPT_ATAGS_FILE:
			atags_file = optarg;
			break;
		}
	}

	if (use_atags && dtb_file) {
		fprintf(stderr, "You can only use ATAGs if you don't specify a "
		        "dtb file.\n");
		return -1;
	}

	if (command_line) {
		command_line_len = strlen(command_line) + 1;
		if (command_line_len > COMMAND_LINE_SIZE)
			command_line_len = COMMAND_LINE_SIZE;
	}
	if (ramdisk) {
		ramdisk_buf = slurp_file(ramdisk, &initrd_size);
	}

	/*
	 * If we are loading a dump capture kernel, we need to update kernel
	 * command line and also add some additional segments.
	 */
	if (info->kexec_flags & KEXEC_ON_CRASH) {
		uint64_t start, end;

		modified_cmdline = xmalloc(COMMAND_LINE_SIZE);
		if (!modified_cmdline)
			return -1;

		if (command_line) {
			(void) strncpy(modified_cmdline, command_line,
				       COMMAND_LINE_SIZE);
			modified_cmdline[COMMAND_LINE_SIZE - 1] = '\0';
		}

		if (load_crashdump_segments(info, modified_cmdline) < 0) {
			free(modified_cmdline);
			return -1;
		}

		command_line = modified_cmdline;
		command_line_len = strlen(command_line) + 1;

		/*
		 * We put the dump capture kernel at the start of crashkernel
		 * reserved memory.
		 */
		if (parse_iomem_single("Crash kernel\n", &start, &end)) {
			/*
			 * No crash kernel memory reserved. We cannot do more
			 * but just bail out.
			 */
			return -1;
		}
		base = start;
	} else {
		base = locate_hole(info,len+offset,0,0,ULONG_MAX,INT_MAX);
	}

	if (base == ULONG_MAX)
		return -1;

	printf("Kernel segment stats: %lx (%ld)\n", base, len);

	if (kexec_arm_image_size) {
		/* If the image size was passed as command line argument,
		 * use that value for determining the address for initrd,
		 * atags and dtb images. page-align the given length.*/
		initrd_base = base + _ALIGN(kexec_arm_image_size, getpagesize());
	} else {
		/* Otherwise, assume the maximum kernel compression ratio
		 * is 4, and just to be safe, place ramdisk after that */
		initrd_base = base + _ALIGN(len * 4, getpagesize());
	}

	if (use_atags) {
		/*
		 * use ATAGs from /proc/atags
		 */
		if (atag_arm_load(info, base + atag_offset,
		                  command_line, command_line_len,
		                  ramdisk_buf, initrd_size, initrd_base) == -1)
			return -1;
	} else {
		/*
		 * Read a user-specified DTB file.
		 */
		if (dtb_file) {
			dtb_buf = slurp_file(dtb_file, &dtb_length);

			if (fdt_check_header(dtb_buf) != 0) {
				fprintf(stderr, "Invalid FDT buffer.\n");
				return -1;
			}

			if (command_line) {
				/*
				 * setup_dtb_prop() reports errors itself, so
				 * just return -1 on failure.
				 */
				if (setup_dtb_prop(&dtb_buf, &dtb_length, "/chosen",
						"bootargs", command_line,
						strlen(command_line) + 1))
					return -1;
			}
		} else {
			/*
			 * Extract the DTB from /proc/device-tree.
			 */
			create_flatten_tree(&dtb_buf, &dtb_length, command_line);
		}

		if (base + atag_offset + dtb_length > base + offset) {
			fprintf(stderr, "DTB too large!\n");
			return -1;
		}

		if (ramdisk) {
			add_segment(info, ramdisk_buf, initrd_size,
			            initrd_base, initrd_size);

			unsigned long start, end;
			start = cpu_to_be32((unsigned long)(initrd_base));
			end = cpu_to_be32((unsigned long)(initrd_base + initrd_size));

			if (setup_dtb_prop(&dtb_buf, &dtb_length, "/chosen",
					"linux,initrd-start", &start,
					sizeof(start)))
				return -1;
			if (setup_dtb_prop(&dtb_buf, &dtb_length, "/chosen",
					"linux,initrd-end", &end,
					sizeof(end)))
				return -1;
		}

		/* Stick the dtb at the end of the initrd and page
		 * align it.
		 */
		dtb_offset = initrd_base + initrd_size + getpagesize();
		dtb_offset = _ALIGN_DOWN(dtb_offset, getpagesize());

		add_segment(info, dtb_buf, dtb_length,
		            dtb_offset, dtb_length);
	}

	printf("Kernel segment info: %lx (%d)\n", base+offset, len);
	add_segment(info, buf, len, base + offset, len);

	info->entry = (void*)base + offset;

	return 0;
}
Example #26
0
int zImage_arm_load(int argc, char **argv, const char *buf, off_t len,
	struct kexec_info *info)
{
	unsigned long base;
	unsigned int atag_offset = 0x1000; /* 4k offset from memory start */
	unsigned int offset = 0x8000;      /* 32k offset from memory start */
	unsigned int opt_ramdisk_addr;
	unsigned int opt_atags_addr;
	const char *command_line;
	char *modified_cmdline = NULL;
	off_t command_line_len;
	const char *ramdisk;
	char *ramdisk_buf;
	int opt;
	char *endptr;
	int use_dtb;
	const char *dtb_file;
	char *dtb_buf;
	off_t dtb_length;
	off_t dtb_offset;
	struct arm_mach *mach;

	/* See options.h -- add any more there, too. */
	static const struct option options[] = {
		KEXEC_ARCH_OPTIONS
		{ "command-line",	1, 0, OPT_APPEND },
		{ "append",		1, 0, OPT_APPEND },
		{ "initrd",		1, 0, OPT_RAMDISK },
		{ "ramdisk",		1, 0, OPT_RAMDISK },
		{ "dtb",		2, 0, OPT_DTB },
		{ "rd-addr",		1, 0, OPT_RD_ADDR },
		{ "atags-addr",		1, 0, OPT_ATAGS_ADDR },
		{ "boardname",  1, 0, OPT_BOARDNAME },
		{ 0, 			0, 0, 0 },
	};
	static const char short_options[] = KEXEC_ARCH_OPT_STR "a:r:d::i:g:b:";

	/*
	 * Parse the command line arguments
	 */
	command_line = 0;
	command_line_len = 0;
	ramdisk = 0;
	ramdisk_buf = 0;
	use_dtb = 0;
	dtb_file = NULL;
	opt_ramdisk_addr = 0;
	opt_atags_addr = 0;
	mach = NULL;
	while((opt = getopt_long(argc, argv, short_options, options, 0)) != -1) {
		switch(opt) {
		default:
			/* Ignore core options */
			if (opt < OPT_ARCH_MAX) {
				break;
			}
		case '?':
			usage();
			return -1;
		case OPT_APPEND:
			command_line = optarg;
			break;
		case OPT_RAMDISK:
			ramdisk = optarg;
			break;
		case OPT_DTB:
			use_dtb = 1;
			if(optarg)
				dtb_file = optarg;
			break;
		case OPT_RD_ADDR:
			opt_ramdisk_addr = strtoul(optarg, &endptr, 0);
			if (*endptr) {
				fprintf(stderr,
					"Bad option value in --rd-addr=%s\n",
					optarg);
				usage();
				return -1;
			}
			break;
		case OPT_ATAGS_ADDR:
			opt_atags_addr = strtoul(optarg, &endptr, 0);
			if (*endptr) {
				fprintf(stderr,
					"Bad option value in --atag-addr=%s\n",
					optarg);
				usage();
				return -1;
			}
			break;
		case OPT_BOARDNAME:
			mach = arm_mach_choose(optarg);
			if(!mach)
			{
				fprintf(stderr, "Unknown boardname '%s'!\n", optarg);
				return -1;
			}
			break;
		}
	}
	if (command_line) {
		command_line_len = strlen(command_line) + 1;
		if (command_line_len > COMMAND_LINE_SIZE)
			command_line_len = COMMAND_LINE_SIZE;
	}
	if (ramdisk) {
		ramdisk_buf = slurp_file(ramdisk, &initrd_size);
	}

	/*
	 * If we are loading a dump capture kernel, we need to update kernel
	 * command line and also add some additional segments.
	 */
	if (info->kexec_flags & KEXEC_ON_CRASH) {
		uint64_t start, end;

		modified_cmdline = xmalloc(COMMAND_LINE_SIZE);
		if (!modified_cmdline)
			return -1;

		if (command_line) {
			(void) strncpy(modified_cmdline, command_line,
				       COMMAND_LINE_SIZE);
			modified_cmdline[COMMAND_LINE_SIZE - 1] = '\0';
		}

		if (load_crashdump_segments(info, modified_cmdline) < 0) {
			free(modified_cmdline);
			return -1;
		}

		command_line = modified_cmdline;
		command_line_len = strlen(command_line) + 1;

		/*
		 * We put the dump capture kernel at the start of crashkernel
		 * reserved memory.
		 */
		if (parse_iomem_single("Crash kernel\n", &start, &end)) {
			/*
			 * No crash kernel memory reserved. We cannot do more
			 * but just bail out.
			 */
			return -1;
		}
		base = start;
	} else {
		base = locate_hole(info,len+offset,0,0,ULONG_MAX,INT_MAX);
	}

	if (base == ULONG_MAX)
		return -1;

	/* assume the maximum kernel compression ratio is 4,
	 * and just to be safe, place ramdisk after that
	 */
	if(opt_ramdisk_addr == 0)
		initrd_base = _ALIGN(base + len * 4, getpagesize());
	else
		initrd_base = opt_ramdisk_addr;

	if(!use_dtb)
	{
		if (atag_arm_load(info, base + atag_offset,
				command_line, command_line_len,
				ramdisk_buf, initrd_size, initrd_base) == -1)
			return -1;
	}
	else
	{
		char *dtb_img = NULL;
		off_t dtb_img_len = 0;
		int free_dtb_img = 0;
		int choose_res = 0;

		if(!mach)
		{
			fprintf(stderr, "DTB: --boardname was not specified.\n");
			return -1;
		}

		if(dtb_file)
		{
			if(!load_dtb_image(dtb_file, &dtb_img, &dtb_img_len))
				return -1;

			printf("DTB: Using DTB from file %s\n", dtb_file);
			free_dtb_img = 1;
		}
		else
		{
			if(!get_appended_dtb(buf, len, &dtb_img, &dtb_img_len))
				return -1;

			printf("DTB: Using DTB appended to zImage\n");
		}

		choose_res = (mach->choose_dtb)(dtb_img, dtb_img_len, &dtb_buf, &dtb_length);

		if(free_dtb_img)
			free(dtb_img);

		if(choose_res)
		{
			int ret, off;

			dtb_length = fdt_totalsize(dtb_buf) + DTB_PAD_SIZE;
			dtb_buf = xrealloc(dtb_buf, dtb_length);
			ret = fdt_open_into(dtb_buf, dtb_buf, dtb_length);
			if(ret)
				die("DTB: fdt_open_into failed");

			ret = (mach->add_extra_regs)(dtb_buf);
			if (ret < 0)
			{
				fprintf(stderr, "DTB: error while adding mach-specific extra regs\n");
				return -1;
			}

			if (command_line) {
				const char *node_name = "/chosen";
				const char *prop_name = "bootargs";

				/* check if a /chosen subnode already exists */
				off = fdt_path_offset(dtb_buf, node_name);

				if (off == -FDT_ERR_NOTFOUND)
					off = fdt_add_subnode(dtb_buf, off, node_name);

				if (off < 0) {
					fprintf(stderr, "DTB: Error adding %s node.\n", node_name);
					return -1;
				}

				if (fdt_setprop(dtb_buf, off, prop_name,
						command_line, strlen(command_line) + 1) != 0) {
					fprintf(stderr, "DTB: Error setting %s/%s property.\n",
						node_name, prop_name);
					return -1;
				}
			}

			if(ramdisk)
			{
				const char *node_name = "/chosen";
				uint32_t initrd_start, initrd_end;

				/* check if a /chosen subnode already exists */
				off = fdt_path_offset(dtb_buf, node_name);

				if (off == -FDT_ERR_NOTFOUND)
					off = fdt_add_subnode(dtb_buf, off, node_name);

				if (off < 0) {
					fprintf(stderr, "DTB: Error adding %s node.\n", node_name);
					return -1;
				}

				initrd_start = cpu_to_fdt32(initrd_base);
				initrd_end = cpu_to_fdt32(initrd_base + initrd_size);

				ret = fdt_setprop(dtb_buf, off, "linux,initrd-start", &initrd_start, sizeof(initrd_start));
				if (ret)
					die("DTB: Error setting %s/linux,initrd-start property.\n", node_name);

				ret = fdt_setprop(dtb_buf, off, "linux,initrd-end", &initrd_end, sizeof(initrd_end));
				if (ret)
					die("DTB: Error setting %s/linux,initrd-end property.\n", node_name);
			}

			fdt_pack(dtb_buf);
		}
		else
		{
			/*
			 * Extract the DTB from /proc/device-tree.
			 */
			printf("DTB: Failed to load dtb from zImage or dtb.img, using /proc/device-tree. This is unlikely to work.\n");
			create_flatten_tree(&dtb_buf, &dtb_length, command_line);
		}

		if(ramdisk)
		{
			add_segment(info, ramdisk_buf, initrd_size, initrd_base,
				initrd_size);
		}

		if(opt_atags_addr != 0)
			dtb_offset = opt_atags_addr;
		else
		{
			dtb_offset = initrd_base + initrd_size + getpagesize();
			dtb_offset = _ALIGN_DOWN(dtb_offset, getpagesize());
		}

		printf("DTB: add dtb segment 0x%x\n", (unsigned int)dtb_offset);
		add_segment(info, dtb_buf, dtb_length,
		            dtb_offset, dtb_length);
	}

	add_segment(info, buf, len, base + offset, len);

	info->entry = (void*)base + offset;

	return 0;
}
Example #27
0
#include <STMCommon/stmhdmiregs.h>

/*
 * Note: STi7106 and STi7111 are practically identical from a display point of
 * view, including the system infrastructure (IRQ,PIO,SysCfg) wrapped around it.
 * However, STi7106 uses a new HDMI cell that needs a different implementation
 * of InfoFrame management, so just from a "build" perspective it is easier to
 * give STi7106 its own platform configuration file to keep the two chips
 * separate.
 */
static const unsigned long whitelist[] = {
    STi7111_REGISTER_BASE + STi7111_DENC_BASE,
    STi7111_REGISTER_BASE + STi7111_DENC_BASE+PAGE_SIZE,
    STi7111_REGISTER_BASE + STi7111_DENC_BASE+(PAGE_SIZE*2),
    STi7111_REGISTER_BASE + STi7111_HDMI_BASE,
    _ALIGN_DOWN(STi7111_REGISTER_BASE + STi7111_BLITTER_BASE, PAGE_SIZE),
};



static struct stmcore_display_pipeline_data platform_data[] = {
  {
    .owner                    = THIS_MODULE,
    .name                     = "STi7106-main",
    .device                   = 0,
    .vtg_irq                  = evt2irq(0x1540),
    .blitter_irq              = evt2irq(0x1220),
    .hdmi_irq                 = evt2irq(0x15C0),
#if defined(CONFIG_SH_ST_MB840)
    .hdmi_i2c_adapter_id      = 3,
#else
Example #28
0
void TestPageTableDiscard(RPageMove& pagemove, TUint8* array, TUint size)
	{
	_T_PRINTF(_L("Fill the array with some data\n"));
	for (TUint i=0; i<size; i++) array[i] = i*i;

	TUint8* firstpage = (TUint8*)_ALIGN_DOWN((TLinAddr)array, PageSize);
	RThread thread;
	thread.Open(RThread().Id());
	SPinThreadArgs threadArgs;
	threadArgs.iLinAddr = (TLinAddr)array;
	threadArgs.iParentThread = thread;
	threadArgs.iRealtimeState = User::ERealtimeStateOff;

	TMovingPinStage endStage = EMovingPinStages;
	if (!gPinningSupported)
		endStage = EVirtualPinning;
	
	for (TUint pageTableInfo = 0; pageTableInfo < 2; pageTableInfo++)
		{
		for (TUint state = ENoPinning; state < (TUint)endStage; state++)
			{
			TThreadFunction threadFunc = NULL;
			if (!pageTableInfo)
				{
				switch (state)
					{
					case ENoPinning:
						test.Printf(_L("Attempt to move page tables whilst the pages they map are being modified\n"));
						threadFunc = &ReadWriteByte;
						break;
					case EVirtualPinning:
						test.Printf(_L("Attempt to move page tables whilst the pages they map are being virtually pinned\n"));
						threadFunc = &VirtualPinPage;
						break;
					case EPhysicalPinning:
						test.Printf(_L("Attempt to move page tables whilst the pages they map are being physically pinned\n"));
						threadFunc = &PhysicalPinPage;
						break;
					}
				}
			else
				{
				switch (state)
					{
					case ENoPinning:
						test.Printf(_L("Attempt to move page table infos whilst pages they refer to are being modified\n"));
						threadFunc = &ReadWriteByte;
						break;
					case EVirtualPinning:
						test.Printf(_L("Attempt to move page table infos whilst pages they refer to are being virtually pinned\n"));
						threadFunc = &VirtualPinPage;
						break;
					case EPhysicalPinning:
						test.Printf(_L("Attempt to move page table infos whilst pages they refer to are being physically pinned\n"));
						threadFunc = &PhysicalPinPage;
						break;
					}
				}
			ThreadDie = EFalse;
			TUint numThreads = (NumberOfCpus > 1) ? NumberOfCpus - 1 : 1;
			RThread* threads = new RThread[numThreads];
			TRequestStatus* s = new TRequestStatus[numThreads];
			StartThreads(numThreads, threads, s, threadFunc, threadArgs);

			_T_PRINTF(_L("Move first array page repeatedly\n"));
			TUint inuse = 0;
			for (TInt i=0; i < Repitions; i++)
				{
				TInt r;
				if (!pageTableInfo)
					r = pagemove.TryMovingPageTable(firstpage);
				else
					r = pagemove.TryMovingPageTableInfo(firstpage);					
				if (i == 0)
					{// If this is the first run allow the pinning threads to 
					// unpin the memory now that we've definitely done at least 
					// one page move with the page pinned.
					_T_PRINTF(_L("signal to child\n"));
					RThread::Rendezvous(KErrNone);
					}
				switch (r)
					{
					case KErrInUse:
						inuse++;
						break;
					case KErrNotFound:
						// The page table or page table info page was paged out.
						break;
					default:
						test_KErrNone(r);
						break;
					}
				}
			test.Printf(_L("inuse %d\n"),inuse);
			// A virtually pinned page should always return KErrInUse at least once.
			test(state != EVirtualPinning || inuse);

			ThreadDie = ETrue;
			EndThreads(numThreads, threads, s);

			_T_PRINTF(_L("Validate page data\n"));
			for (TUint i=0; i<size; i++)
				test_Equal((TUint8)(i*i), array[i]);
			}
		}
	thread.Close();
	}