Example #1
File: mmu.c Project: 1314cc/linux
static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
				  phys_addr_t phys, pgprot_t prot,
				  phys_addr_t (*pgtable_alloc)(void))
{
	pmd_t *pmd;
	unsigned long next;

	/*
	 * Check for initial section mappings in the pgd/pud and remove them.
	 */
	if (pud_none(*pud) || pud_sect(*pud)) {
		phys_addr_t pmd_phys;
		BUG_ON(!pgtable_alloc);
		pmd_phys = pgtable_alloc();
		pmd = pmd_set_fixmap(pmd_phys);
		if (pud_sect(*pud)) {
			/*
			 * need to have the 1G of mappings continue to be
			 * present
			 */
			split_pud(pud, pmd);
		}
		__pud_populate(pud, pmd_phys, PUD_TYPE_TABLE);
		flush_tlb_all();
		pmd_clear_fixmap();
	}
	BUG_ON(pud_bad(*pud));

	pmd = pmd_set_fixmap_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		/* try section mapping first */
		if (((addr | next | phys) & ~SECTION_MASK) == 0 &&
		      block_mappings_allowed(pgtable_alloc)) {
			pmd_t old_pmd = *pmd;
			pmd_set_huge(pmd, phys, prot);
			/*
			 * Check for previous table entries created during
			 * boot (__create_page_tables) and flush them.
			 */
			if (!pmd_none(old_pmd)) {
				flush_tlb_all();
				if (pmd_table(old_pmd)) {
					phys_addr_t table = pmd_page_paddr(old_pmd);
					if (!WARN_ON_ONCE(slab_is_available()))
						memblock_free(table, PAGE_SIZE);
				}
			}
		} else {
			alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
				       prot, pgtable_alloc);
		}
		phys += next - addr;
	} while (pmd++, addr = next, addr != end);

	pmd_clear_fixmap();
}
Example #2
static void alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
				  unsigned long addr, unsigned long end,
				  phys_addr_t phys, pgprot_t prot,
				  void *(*alloc)(unsigned long size))
{
	pmd_t *pmd;
	unsigned long next;

	/*
	 * Check for initial section mappings in the pgd/pud and remove them.
	 */
	if (pud_none(*pud) || pud_sect(*pud)) {
		pmd = alloc(PTRS_PER_PMD * sizeof(pmd_t));
		if (pud_sect(*pud)) {
			/*
			 * need to have the 1G of mappings continue to be
			 * present
			 */
			split_pud(pud, pmd);
		}
		pud_populate(mm, pud, pmd);
		flush_tlb_all();
	}
	BUG_ON(pud_bad(*pud));

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		/* try section mapping first */
		if (((addr | next | phys) & ~SECTION_MASK) == 0) {
			pmd_t old_pmd = *pmd;
			set_pmd(pmd, __pmd(phys |
					   pgprot_val(mk_sect_prot(prot))));
			/*
			 * Check for previous table entries created during
			 * boot (__create_page_tables) and flush them.
			 */
			if (!pmd_none(old_pmd)) {
				flush_tlb_all();
				if (pmd_table(old_pmd)) {
					phys_addr_t table = __pa(pte_offset_map(&old_pmd, 0));
					if (!WARN_ON_ONCE(slab_is_available()))
						memblock_free(table, PAGE_SIZE);
				}
			}
		} else {
			alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
				       prot, alloc);
		}
		phys += next - addr;
	} while (pmd++, addr = next, addr != end);
}
Example #3
/*
 * Flush the mmu and reset associated register to default values.
 */
void __init init_mmu(void)
{
#if !(XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY)
	/*
	 * Writing zeros to the instruction and data TLBCFG special
	 * registers ensures that valid values exist in those registers.
	 *
	 * For existing PGSZID<w> fields, zero selects the first element
	 * of the page-size array.  For nonexistent PGSZID<w> fields,
	 * zero is the best value to write.  Also, when changing PGSZID<w>
	 * fields, the corresponding TLB must be flushed.
	 */
	set_itlbcfg_register(0);
	set_dtlbcfg_register(0);
#endif
	flush_tlb_all();

	/* Set rasid register to a known value. */

	set_rasid_register(ASID_INSERT(ASID_USER_FIRST));

	/* Set PTEVADDR special register to the start of the page
	 * table, which is in kernel mappable space (ie. not
	 * statically mapped).  This register's value is undefined on
	 * reset.
	 */
	set_ptevaddr_register(PGTABLE_START);
}
Example #4
void free_initmem(void)
{
	const unsigned long text_delta = MEM_SV_START - PAGE_OFFSET;

	/*
	 * Evict the cache on all cores to avoid incoherence.
	 * We are guaranteed that no one will touch the init pages any more.
	 */
	homecache_evict(&cpu_cacheable_map);

	/* Free the data pages that we won't use again after init. */
	free_init_pages("unused kernel data",
			(unsigned long)__init_begin,
			(unsigned long)__init_end);

	/*
	 * Free the pages mapped from 0xc0000000 that correspond to code
	 * pages from MEM_SV_START that we won't use again after init.
	 */
	free_init_pages("unused kernel text",
			(unsigned long)_sinittext - text_delta,
			(unsigned long)_einittext - text_delta);
	/* Do a global TLB flush so everyone sees the changes. */
	flush_tlb_all();
}
Example #5
static int remap_area_pages(unsigned long address, unsigned long phys_addr,
				 unsigned long size, unsigned long flags)
{
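	/*
	 * Map the physical range [phys_addr, phys_addr + size) at kernel
	 * virtual addresses [address, end), one pgd entry at a time.
	 * phys_addr is biased by -address below so that phys_addr + address
	 * yields the physical address backing each step of the walk.
	 */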
	pgd_t * dir;
	unsigned long end = address + size;

	phys_addr -= address;
	dir = pgd_offset(&init_mm, address);
	flush_cache_all();
	if (address >= end)
		BUG();
	do {
		pmd_t *pmd = pmd_alloc_kernel(dir, address);
		if (!pmd)
			return -ENOMEM;
		if (remap_area_pmd(pmd, address, end - address,
					 phys_addr + address, flags))
			return -ENOMEM;
		set_pgdir(address, *dir);
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));
	flush_tlb_all();
	return 0;
}
Example #6
static void sh7372_enter_core_standby(void)
{
	void __iomem *smfram = (void __iomem *)SMFRAM;

	__raw_writel(0, APARMBAREA); /* translate 4k */
	__raw_writel(__pa(sh7372_cpu_resume), SBAR); /* set reset vector */
	__raw_writel(0x10, SYSTBCR); /* enable core standby */

	__raw_writel(0, smfram + 0x3c); /* clear page table address */

	sh7372_cpu_suspend();
	cpu_init();

	/* if page table address is non-NULL then we have been powered down */
	if (__raw_readl(smfram + 0x3c)) {
		__raw_writel(__raw_readl(smfram + 0x40),
			     __va(__raw_readl(smfram + 0x3c)));

		flush_tlb_all();
		set_cr(__raw_readl(smfram + 0x38));
	}

	__raw_writel(0, SYSTBCR); /* disable core standby */
	__raw_writel(0, SBAR); /* disable reset vector translation */
}
Example #7
// init_mm.page_table_lock must be held before calling!
void pram_writeable(void * vaddr, unsigned long size, int rw)
{
	unsigned long addr = (unsigned long)vaddr & PAGE_MASK;
	unsigned long end = (unsigned long)vaddr + size;
	unsigned long start = addr;

	do {
		pram_page_writeable(addr, rw);
		addr += PAGE_SIZE;
	} while (addr && (addr < end));


	/*
	 * FIXME: can't use flush_tlb_page/range() until these
	 * routines support flushing memory regions owned by
	 * init_mm (so far only PPC versions).
	 */
#if 0
	if (end <= start + PAGE_SIZE)
		flush_tlb_page(find_vma(&init_mm,start), start);
	else
		flush_tlb_range(&init_mm, start, end);
#else
	flush_tlb_all();
#endif
}
Example #8
void __init davinci_map_common_io(void)
{
	iotable_init(davinci_io_desc, ARRAY_SIZE(davinci_io_desc));

	/* We want to check CPU revision early for cpu_is_davinci_xxxx() macros.
	 * IO space mapping must be initialized before we can do that.
	 */
	davinci_check_revision();

	if (cpu_is_davinci_dm644x()) {
		iotable_init(dm644x_io_desc, ARRAY_SIZE(dm644x_io_desc));
	} else if (cpu_is_davinci_dm6467()) {
		davinci_cpu_index = DM6467_CPU_IDX;
		iotable_init(dm646x_io_desc, ARRAY_SIZE(dm646x_io_desc));
	} else if (cpu_is_davinci_dm355()) {
		davinci_cpu_index = DM355_CPU_IDX;
	}

	/* Normally devicemaps_init() would flush caches and tlb after
	 * mdesc->map_io(), but we must also do it here because of the CPU
	 * revision check below.
	 */
	flush_tlb_all();
	flush_cache_all();


	davinci_mux_init();
	davinci_clk_init();
}
Example #9
__initfunc(void ld_mmu_tfp(void))
{
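	/*
	 * Install the TFP-specific cache and TLB handlers, then flush the
	 * caches and TLB once so the new handlers start from a clean state.
	 */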
	flush_cache_all = tfp_flush_cache_all;
	flush_cache_mm = tfp_flush_cache_mm;
	flush_cache_range = tfp_flush_cache_range;
	flush_cache_page = tfp_flush_cache_page;
	flush_cache_sigtramp = tfp_flush_cache_sigtramp;
	flush_page_to_ram = tfp_flush_page_to_ram;

	flush_tlb_all = tfp_flush_tlb_all;
	flush_tlb_mm = tfp_flush_tlb_mm;
	flush_tlb_range = tfp_flush_tlb_range;
	flush_tlb_page = tfp_flush_tlb_page;
	tfp_asid_setup();

	add_wired_entry = tfp_add_wired_entry;

	user_mode = tfp_user_mode;

	load_pgd = tfp_load_pgd;
	pgd_init = tfp_pgd_init;
    
	flush_cache_all();
	flush_tlb_all();
}
Example #10
static int remap_area_pages(unsigned long address, unsigned long phys_addr,
				 unsigned long size, unsigned long flags)
{
	int error;
	pgd_t * dir;
	unsigned long end = address + size;

	phys_addr -= address;
	dir = pgd_offset(&init_mm, address);
	flush_cache_all();
	if (address >= end)
		BUG();
	spin_lock(&init_mm.page_table_lock);
	do {
		pmd_t *pmd;
		pmd = pmd_alloc(&init_mm, dir, address);
		error = -ENOMEM;
		if (!pmd)
			break;
		if (remap_area_pmd(pmd, address, end - address,
				   phys_addr + address, flags))
			break;
		error = 0;
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));
	spin_unlock(&init_mm.page_table_lock);
	flush_tlb_all();
	return error;
}
Example #11
asmlinkage void __cpuinit secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	printk("CPU%u: Booted secondary processor\n", cpu);

	atomic_inc(&mm->mm_count);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));

	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));

	cpu_set_reserved_ttbr0();
	flush_tlb_all();

	preempt_disable();
	trace_hardirqs_off();

	if (cpu_ops[cpu]->cpu_postboot)
		cpu_ops[cpu]->cpu_postboot();

	set_cpu_online(cpu, true);
	complete(&cpu_running);

	smp_store_cpu_info(cpu);

	notify_cpu_starting(cpu);

	local_dbg_enable();
	local_irq_enable();
	local_fiq_enable();

	cpu_startup_entry(CPUHP_ONLINE);
}
Example #12
void kunmap_atomic(void *kvaddr, enum km_type type)
{
#ifdef CONFIG_DEBUG_HIGHMEM
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	unsigned long idx = type + KM_TYPE_NR*smp_processor_id();

	if (vaddr < FIXADDR_START) { // FIXME
		pagefault_enable();
		return;
	}

	BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx));

/* XXX Fix - Anton */
#if 0
	__flush_cache_one(vaddr);
#else
	flush_cache_all();
#endif

	/*
	 * force other mappings to Oops if they try to access
	 * this pte without first remapping it
	 */
	pte_clear(&init_mm, vaddr, kmap_pte-idx);
/* XXX Fix - Anton */
#if 0
	__flush_tlb_one(vaddr);
#else
	flush_tlb_all();
#endif
#endif

	pagefault_enable();
}
Example #13
int ftrace_arch_code_modify_post_process(void)
{
	set_all_modules_text_ro();
	/* Make sure any TLB misses during machine stop are cleared. */
	flush_tlb_all();
	return 0;
}
Example #14
void
flush_tlb_range (struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long size = end - start;
	unsigned long nbits;

	if (mm != current->active_mm) {
		/* this does happen, but perhaps it's not worth optimizing for? */
#ifdef CONFIG_SMP
		flush_tlb_all();
#else
		mm->context = 0;
#endif
		return;
	}

	nbits = ia64_fls(size + 0xfff);
	while (unlikely (((1UL << nbits) & purge.mask) == 0) && (nbits < purge.max_bits))
		++nbits;
	if (nbits > purge.max_bits)
		nbits = purge.max_bits;
	start &= ~((1UL << nbits) - 1);

# ifdef CONFIG_SMP
	platform_global_tlb_purge(start, end, nbits);
# else
	do {
		ia64_ptcl(start, (nbits<<2));
		start += (1UL << nbits);
	} while (start < end);
# endif

	ia64_srlz_i();			/* srlz.i implies srlz.d */
}
Example #15
void *kmap_atomic(struct page *page, enum km_type type)
{
	unsigned long idx;
	unsigned long vaddr;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	debug_kmap_atomic(type);
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);

/* XXX Fix - Anton */
#if 0
	__flush_cache_one(vaddr);
#else
	flush_cache_all();
#endif

#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(!pte_none(*(kmap_pte-idx)));
#endif
	set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));
/* XXX Fix - Anton */
#if 0
	__flush_tlb_one(vaddr);
#else
	flush_tlb_all();
#endif

	return (void*) vaddr;
}
Example #16
__initfunc(void ld_mmu_andes(void))
{
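	/*
	 * Install the Andes-specific cache and TLB handlers; as with the
	 * other MMU variants, finish with a full cache and TLB flush.
	 */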
	flush_cache_all = andes_flush_cache_all;
	flush_cache_mm = andes_flush_cache_mm;
	flush_cache_range = andes_flush_cache_range;
	flush_cache_page = andes_flush_cache_page;
	flush_cache_sigtramp = andes_flush_cache_sigtramp;
	flush_page_to_ram = andes_flush_page_to_ram;

	flush_tlb_all = andes_flush_tlb_all;
	flush_tlb_mm = andes_flush_tlb_mm;
	flush_tlb_range = andes_flush_tlb_range;
	flush_tlb_page = andes_flush_tlb_page;
	andes_asid_setup();
    
	add_wired_entry = andes_add_wired_entry;

	user_mode = andes_user_mode;

	load_pgd = andes_load_pgd;
	pgd_init = andes_pgd_init;

	flush_cache_all();
	flush_tlb_all();
}
Example #17
static void init_low_mapping(void)
{
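	/*
	 * Save the pgd entry covering virtual address 0 and temporarily point
	 * it at the kernel mapping (the PAGE_OFFSET entry), so low virtual
	 * addresses alias the kernel; the TLB flush makes this visible.
	 */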
	pgd_t *slot0 = pgd_offset(current->mm, 0UL);
	low_ptr = *slot0;
	set_pgd(slot0, *pgd_offset(current->mm, PAGE_OFFSET));
	flush_tlb_all();
}
Example #18
static int remap_area_pages(unsigned long address, phys_t phys_addr,
	phys_t size, unsigned long flags)
{
	int error;
	pgd_t * dir;
	unsigned long end = address + size;

	phys_addr -= address;
	dir = pgd_offset(&init_mm, address);
	flush_cache_all();
	BUG_ON(address >= end);
	do {
		pud_t *pud;
		pmd_t *pmd;

		error = -ENOMEM;
		pud = pud_alloc(&init_mm, dir, address);
		if (!pud)
			break;
		pmd = pmd_alloc(&init_mm, pud, address);
		if (!pmd)
			break;
		if (remap_area_pmd(pmd, address, end - address,
					 phys_addr + address, flags))
			break;
		error = 0;
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));
	flush_tlb_all();
	return error;
}
Example #19
File: mmu.c Project: 1314cc/linux
static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
				  unsigned long end, unsigned long pfn,
				  pgprot_t prot,
				  phys_addr_t (*pgtable_alloc)(void))
{
	pte_t *pte;

	if (pmd_none(*pmd) || pmd_sect(*pmd)) {
		phys_addr_t pte_phys;
		BUG_ON(!pgtable_alloc);
		pte_phys = pgtable_alloc();
		pte = pte_set_fixmap(pte_phys);
		if (pmd_sect(*pmd))
			split_pmd(pmd, pte);
		__pmd_populate(pmd, pte_phys, PMD_TYPE_TABLE);
		flush_tlb_all();
		pte_clear_fixmap();
	}
	BUG_ON(pmd_bad(*pmd));

	pte = pte_set_fixmap_offset(pmd, addr);
	do {
		set_pte(pte, pfn_pte(pfn, prot));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);

	pte_clear_fixmap();
}
Example #20
void *kmap_atomic(struct page *page)
{
	unsigned long vaddr;
	long idx, type;

	preempt_disable();
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);

/* XXX Fix - Anton */
#if 0
	__flush_cache_one(vaddr);
#else
	flush_cache_all();
#endif

#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(!pte_none(*(kmap_pte-idx)));
#endif
	set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));
/* XXX Fix - Anton */
#if 0
	__flush_tlb_one(vaddr);
#else
	flush_tlb_all();
#endif

	return (void*) vaddr;
}
Example #21
static int remap_area_pages(unsigned long address, unsigned long phys_addr,
				 unsigned long size, unsigned long flags)
{
	int error;
	pgd_t *pgd;
	unsigned long end = address + size;

	phys_addr -= address;
	pgd = pgd_offset_k(address);
	flush_cache_all();
	if (address >= end)
		BUG();
	do {
		pud_t *pud;
		pud = pud_alloc(&init_mm, pgd, address);
		error = -ENOMEM;
		if (!pud)
			break;
		if (remap_area_pud(pud, address, end - address,
					 phys_addr + address, flags))
			break;
		error = 0;
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		pgd++;
	} while (address && (address < end));
	flush_tlb_all();
	return error;
}
Example #22
/*
 * This is the secondary CPU boot entry.  We're using this CPUs
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage void __cpuinit secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));

	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
	printk("CPU%u: Booted secondary processor\n", cpu);

	/*
	 * TTBR0 is only used for the identity mapping at this stage. Make it
	 * point to zero page to avoid speculatively fetching new entries.
	 */
	cpu_set_reserved_ttbr0();
	flush_tlb_all();

	preempt_disable();
	trace_hardirqs_off();

	if (cpu_ops[cpu]->cpu_postboot)
		cpu_ops[cpu]->cpu_postboot();

	/*
	 * Enable GIC and timers.
	 */

	smp_store_cpu_info(cpu);

	notify_cpu_starting(cpu);

	/*
	 * OK, now it's safe to let the boot CPU continue.  Wait for
	 * the CPU migration code to notice that the CPU is online
	 * before we continue.
	 */
	set_cpu_online(cpu, true);
	complete(&cpu_running);

	local_dbg_enable();
	/*
	 * Setup the percpu timer for this CPU.
	 */
	percpu_timer_setup();

	local_irq_enable();
	local_async_enable();

	/*
	 * OK, it's off to the idle thread for us
	 */
	cpu_startup_entry(CPUHP_ONLINE);
}
Example #23
void flush_tlb_mm(struct mm_struct *mm)
{

	/* Was seeing bugs with the mm struct passed to us. Scrapped most of
	   this function. */
	/* Several architectures do this */
	flush_tlb_all();
}
Example #24
/*
 * This is the secondary CPU boot entry.  We're using this CPUs
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage void __cpuinit secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	printk("CPU%u: Booted secondary processor\n", cpu);

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));

	/*
	 * TTBR0 is only used for the identity mapping at this stage. Make it
	 * point to zero page to avoid speculatively fetching new entries.
	 */
	cpu_set_reserved_ttbr0();
	flush_tlb_all();

	preempt_disable();
	trace_hardirqs_off();

	/*
	 * Let the primary processor know we're out of the
	 * pen, then head off into the C entry point
	 */
	write_pen_release(INVALID_HWID);

	/*
	 * Synchronise with the boot thread.
	 */
	raw_spin_lock(&boot_lock);
	raw_spin_unlock(&boot_lock);

	/*
	 * OK, now it's safe to let the boot CPU continue.  Wait for
	 * the CPU migration code to notice that the CPU is online
	 * before we continue.
	 */
	set_cpu_online(cpu, true);
	complete(&cpu_running);

	/*
	 * Enable GIC and timers.
	 */
	notify_cpu_starting(cpu);

	local_irq_enable();
	local_fiq_enable();

	/*
	 * OK, it's off to the idle thread for us
	 */
	cpu_startup_entry(CPUHP_ONLINE);
}
Example #25
/**
 * cpu_suspend
 *
 * @arg: argument to pass to the finisher function
 */
int cpu_suspend(unsigned long arg)
{
	struct mm_struct *mm = current->active_mm;
	int ret, cpu = smp_processor_id();
	unsigned long flags;

	/*
	 * If cpu_ops have not been registered or suspend
	 * has not been initialized, cpu_suspend call fails early.
	 */
	if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_suspend)
		return -EOPNOTSUPP;

	/*
	 * From this point debug exceptions are disabled to prevent
	 * updates to mdscr register (saved and restored along with
	 * general purpose registers) from kernel debuggers.
	 */
	local_dbg_save(flags);

	/*
	 * mm context saved on the stack, it will be restored when
	 * the cpu comes out of reset through the identity mapped
	 * page tables, so that the thread address space is properly
	 * set-up on function return.
	 */
	ret = __cpu_suspend(arg);
	pclog();

	if (ret == 0) {
		cpu_switch_mm(mm->pgd, mm);
		flush_tlb_all();

		/*
		 * Restore per-cpu offset before any kernel
		 * subsystem relying on it has a chance to run.
		 */
		set_my_cpu_offset(per_cpu_offset(cpu));

		/*
		 * Restore HW breakpoint registers to sane values
		 * before debug exceptions are possibly reenabled
		 * through local_dbg_restore.
		 */
		if (hw_breakpoint_restore)
			hw_breakpoint_restore(NULL);
	}

	/*
	 * Restore pstate flags. OS lock and mdscr have already been
	 * restored, so from this point onwards, debugging is fully
	 * re-enabled if it was enabled when the core started shutdown.
	 */
	local_dbg_restore(flags);

	return ret;
}
Example #26
static void sync_cache(void)
{
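	/* Flush all caches and the entire TLB with local interrupts disabled. */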
	unsigned long oldIrq;

	local_irq_save(oldIrq);
	flush_cache_all();
	flush_tlb_all();
	local_irq_restore(oldIrq);
}
Example #27
void switch_to_tt(void *prev, void *next)
{
	struct task_struct *from, *to, *prev_sched;
	unsigned long flags;
	int err, vtalrm, alrm, prof, cpu;
	char c;

	from = prev;
	to = next;

	cpu = from->thread_info->cpu;
	if(cpu == 0)
		forward_interrupts(to->thread.mode.tt.extern_pid);
#ifdef CONFIG_SMP
	forward_ipi(cpu_data[cpu].ipi_pipe[0], to->thread.mode.tt.extern_pid);
#endif
	local_irq_save(flags);

	vtalrm = change_sig(SIGVTALRM, 0);
	alrm = change_sig(SIGALRM, 0);
	prof = change_sig(SIGPROF, 0);

	forward_pending_sigio(to->thread.mode.tt.extern_pid);

	c = 0;

	err = os_write_file(to->thread.mode.tt.switch_pipe[1], &c, sizeof(c));
	if(err != sizeof(c))
		panic("write of switch_pipe failed, err = %d", -err);

	if(from->thread.mode.tt.switch_pipe[0] == -1)
		os_kill_process(os_getpid(), 0);

	err = os_read_file(from->thread.mode.tt.switch_pipe[0], &c, sizeof(c));
	if(err != sizeof(c))
		panic("read of switch_pipe failed, errno = %d", -err);

	/* If the process that we have just scheduled away from has exited,
	 * then it needs to be killed here.  The reason is that, even though
	 * it will kill itself when it next runs, that may be too late.  Its
	 * stack will be freed, possibly before then, and if that happens,
	 * we have a use-after-free situation.  So, it gets killed here
	 * in case it has not already killed itself.
	 */
	prev_sched = current->thread.prev_sched;
	if(prev_sched->thread.mode.tt.switch_pipe[0] == -1)
		os_kill_process(prev_sched->thread.mode.tt.extern_pid, 1);

	change_sig(SIGVTALRM, vtalrm);
	change_sig(SIGALRM, alrm);
	change_sig(SIGPROF, prof);

	arch_switch();

	flush_tlb_all();
	local_irq_restore(flags);
}
Example #28
static void __init sbus_iommu_init(struct of_device *op)
{
    struct iommu_struct *iommu;
    unsigned int impl, vers;
    unsigned long *bitmap;
    unsigned long tmp;

    iommu = kmalloc(sizeof(struct iommu_struct), GFP_ATOMIC);
    if (!iommu) {
        prom_printf("Unable to allocate iommu structure\n");
        prom_halt();
    }

    iommu->regs = of_ioremap(&op->resource[0], 0, PAGE_SIZE * 3,
                             "iommu_regs");
    if (!iommu->regs) {
        prom_printf("Cannot map IOMMU registers\n");
        prom_halt();
    }
    impl = (iommu->regs->control & IOMMU_CTRL_IMPL) >> 28;
    vers = (iommu->regs->control & IOMMU_CTRL_VERS) >> 24;
    tmp = iommu->regs->control;
    tmp &= ~(IOMMU_CTRL_RNGE);
    tmp |= (IOMMU_RNGE_256MB | IOMMU_CTRL_ENAB);
    iommu->regs->control = tmp;
    iommu_invalidate(iommu->regs);
    iommu->start = IOMMU_START;
    iommu->end = 0xffffffff;

    /* Allocate IOMMU page table */
    /* Stupid alignment constraints give me a headache.
       We need 256K or 512K or 1M or 2M area aligned to
           its size and current gfp will fortunately give
           it to us. */
    tmp = __get_free_pages(GFP_KERNEL, IOMMU_ORDER);
    if (!tmp) {
        prom_printf("Unable to allocate iommu table [0x%08x]\n",
                    IOMMU_NPTES*sizeof(iopte_t));
        prom_halt();
    }
    iommu->page_table = (iopte_t *)tmp;

    /* Initialize new table. */
    memset(iommu->page_table, 0, IOMMU_NPTES*sizeof(iopte_t));
    flush_cache_all();
    flush_tlb_all();
    iommu->regs->base = __pa((unsigned long) iommu->page_table) >> 4;
    iommu_invalidate(iommu->regs);

    bitmap = kmalloc(IOMMU_NPTES>>3, GFP_KERNEL);
    if (!bitmap) {
        prom_printf("Unable to allocate iommu bitmap [%d]\n",
                    (int)(IOMMU_NPTES>>3));
        prom_halt();
    }
Example #29
static void init_low_mapping(pgd_t * pgd, int pgd_limit)
{
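	/*
	 * Copy each kernel-half pgd entry down into the corresponding low
	 * (user) slot, then flush the TLB so the new low mapping takes effect.
	 */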
	int pgd_ofs = 0;

	while ((pgd_ofs < pgd_limit)
	       && (pgd_ofs + USER_PTRS_PER_PGD < PTRS_PER_PGD)) {
		set_pgd(pgd, *(pgd + USER_PTRS_PER_PGD));
		pgd_ofs++, pgd++;
	}
	flush_tlb_all();
}
Example #30
static int __init sec_log_setup(char *str)
{
	unsigned size = memparse(str, &str);
	unsigned long base = 0;
	unsigned *sec_log_mag;

	if (!size || size != roundup_pow_of_two(size) || *str != '@'
	    || kstrtoul(str + 1, 0, &base))
		goto out;

	base = 0x46000000;
	size = 0x200000;

	/* If we encounter any problem parsing str ... */
	if (reserve_bootmem(base - 8, size + 8, BOOTMEM_EXCLUSIVE)) {
		pr_err("%s: failed reserving size %d + 8 " \
		       "at base 0x%lx - 8\n", __func__, size, base);
		goto out;
	}

#ifdef CONFIG_SEC_LOG_NONCACHED
	log_buf_iodesc[0].pfn = __phys_to_pfn((unsigned long)base - 0x100000);
	log_buf_iodesc[0].length = (unsigned long)(size + 0x100000);
	iotable_init(log_buf_iodesc, ARRAY_SIZE(log_buf_iodesc));
	flush_tlb_all();
	sec_log_mag = (S3C_VA_KLOG_BUF + 0x100000) - 8;
	sec_log_ptr = (S3C_VA_KLOG_BUF + 0x100000) - 4;
	sec_log_buf = S3C_VA_KLOG_BUF + 0x100000;
#else
	sec_log_mag = phys_to_virt(base) - 8;
	sec_log_ptr = phys_to_virt(base) - 4;
	sec_log_buf = phys_to_virt(base);
#endif

	sec_log_size = size;
	pr_info("%s: *sec_log_mag:%x *sec_log_ptr:%x " \
		"sec_log_buf:%p sec_log_size:%d\n",
		__func__, *sec_log_mag, *sec_log_ptr, sec_log_buf,
		sec_log_size);

	if (*sec_log_mag != LOG_MAGIC) {
		pr_info("%s: no old log found\n", __func__);
		*sec_log_ptr = 0;
		*sec_log_mag = LOG_MAGIC;
	} else
		sec_log_save_old();

	register_log_char_hook(emit_sec_log_char);

	sec_getlog_supply_kloginfo(phys_to_virt(base));

out:
	return 0;
}