Example #1
/*
 * Create the page directory entry for the identity mapping 0x0000,0000 <-> 0x0000,0000
 */
void create_mapping(void)
{
	*((volatile __u32 *)(PAGE_TBL_ADDR)) = 0xc4a;
	
	/**
	 * Clean the dcache, invalidate the icache and invalidate the TLB.
	 *
	 * Purpose:
	 *	make sure the correct PA will be accessed.
	 *
	 * cache:
	 *	the clean unit is the cache line size;
	 *	the end address is exclusive, i.e. it is not flushed.
	 *
	 * tlb:
	 *	the invalidate unit is PAGE_SIZE;
	 *	the end address is not included.
	 *
	 * Note:
	 *	the PA is only used later, during the resume period, not
	 *	immediately, and the cache is cleaned again at the end, so
	 *	this clean & invalidate is not strictly necessary.
	 *	It is done here just in case of testing, e.g. jumping
	 *	straight to the resume code.
	 **/

	// Note: 0xc000,0000 is the device area; no need to flush the cache there.
	// ref: ./arch/arm/kernel/head.S
	__cpuc_coherent_kern_range((unsigned long)(PAGE_TBL_ADDR), (unsigned long)(PAGE_TBL_ADDR + (sizeof(u32))));
	local_flush_tlb_kernel_range((unsigned long)(PAGE_TBL_ADDR), (unsigned long)(PAGE_TBL_ADDR + (sizeof(u32))));
	return;
}
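The descriptor written at PAGE_TBL_ADDR overwrites whatever first-level entry was there before, so the original value has to be saved first; Example #7 below restores it from backup_tbl. A minimal sketch of that save step, assuming a backup_tbl layout that matches the field names used in restore_mapping() (save_mapping() itself and the struct definition are illustrative, not taken from the original source):

/*
 * Hypothetical save step paired with create_mapping()/restore_mapping().
 * Field names follow restore_mapping(); everything else is assumed.
 */
struct backup_entry {
	unsigned long	vaddr;		/* VA covered by the saved entry   */
	__u32		entry_val;	/* original first-level descriptor */
};

static struct backup_entry backup_tbl[1];

static void save_mapping(unsigned long vaddr)
{
	backup_tbl[0].vaddr = vaddr & PAGE_MASK;
	backup_tbl[0].entry_val = *((volatile __u32 *)(PAGE_TBL_ADDR));
}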
Example #2
void __kunmap_atomic(void *kv)
{
	unsigned long kvaddr = (unsigned long)kv;

	if (kvaddr >= FIXMAP_BASE && kvaddr < (FIXMAP_BASE + FIXMAP_SIZE)) {

		/*
		 * Because preemption is disabled, this vaddr can be associated
		 * with the currently allocated index.
		 * But in the case of multiple live kmap_atomic() mappings, it
		 * still relies on callers to unmap in the right order.
		 */
		int cpu_idx = kmap_atomic_idx();
		int idx = cpu_idx + KM_TYPE_NR * smp_processor_id();

		WARN_ON(kvaddr != FIXMAP_ADDR(idx));

		pte_clear(&init_mm, kvaddr, fixmap_page_table + idx);
		local_flush_tlb_kernel_range(kvaddr, kvaddr + PAGE_SIZE);

		kmap_atomic_idx_pop();
	}

	pagefault_enable();
	preempt_enable();
}
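The ordering requirement mentioned in that comment matters to callers: atomic kmap slots behave like a small per-CPU stack, so nested mappings must be released in reverse order. A minimal usage sketch of the generic kmap_atomic()/kunmap_atomic() API (the helper name and its arguments are made up for illustration):

#include <linux/highmem.h>
#include <linux/string.h>

/*
 * Illustrative only: copy one highmem page to another with two nested
 * atomic kmaps, unmapping in reverse (stack) order.
 */
static void copy_page_contents(struct page *dst, struct page *src)
{
	void *vto = kmap_atomic(dst);
	void *vfrom = kmap_atomic(src);

	memcpy(vto, vfrom, PAGE_SIZE);

	kunmap_atomic(vfrom);	/* mapped last, unmapped first */
	kunmap_atomic(vto);
}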
Example #3
/** 20131102
 * Performs the TLB flush operation for a kernel address range.
 **/
static inline void ipi_flush_tlb_kernel_range(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	/** 20131102
	 * Invalidate the TLB for the addresses between start and end passed in via arg.
	 **/
	local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
}
Example #4
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	if (tlb_ops_need_broadcast()) {
		struct tlb_args ta;
		ta.ta_start = start;
		ta.ta_end = end;
		on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
	} else
		local_flush_tlb_kernel_range(start, end);
}
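on_each_cpu() simply hands every CPU a pointer to a small argument block, which ipi_flush_tlb_kernel_range() (Examples #3 and #10) unpacks again. Reconstructed from how the fields are used here, the structure looks roughly like this; check arch/arm/kernel/smp_tlb.c for the authoritative definition:

/*
 * Argument block passed by pointer through on_each_cpu(). The vma field
 * is only meaningful for user-space ranges; kernel-range flushes such as
 * the one above leave it unset.
 */
struct tlb_args {
	struct vm_area_struct	*ta_vma;
	unsigned long		ta_start;
	unsigned long		ta_end;
};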
Example #5
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	if (IS_ENABLED(CONFIG_L4)) {
		l4x_unmap_sync_all();
		return;
	}

	if (tlb_ops_need_broadcast()) {
		struct tlb_args ta;
		ta.ta_start = start;
		ta.ta_end = end;
		on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
	} else
		local_flush_tlb_kernel_range(start, end);
	broadcast_tlb_a15_erratum();
}
Example #6
/** 20131026
 * When CONFIG_SMP is enabled,
 * flush the TLB for the kernel address range start ~ end.
 **/
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	/** 20131026
	 * When TLB operations need to be broadcast
	 * (i.e. an operation is only applied to the local CPU's structures)
	 **/
	if (tlb_ops_need_broadcast()) {
		/** 20131026
		 * Fill in the tlb_args structure.
		 * Since this is a kernel range, the vm_area_struct is not filled in.
		 **/
		struct tlb_args ta;
		ta.ta_start = start;
		ta.ta_end = end;
		on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
	} else
		local_flush_tlb_kernel_range(start, end);
}
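A typical caller clears the kernel page-table entries for a region first and only then removes the stale translations from every CPU, which is the step flush_tlb_kernel_range() finishes. A hedged sketch of that pattern, mirroring what unmap_kernel_range() did in kernels of this vintage (the wrapper name here is illustrative):

#include <linux/vmalloc.h>

/*
 * Illustrative teardown of a kernel virtual range: clear the PTEs,
 * then make sure no CPU still holds a stale TLB entry for them.
 */
static void teardown_kernel_range(unsigned long start, unsigned long size)
{
	unmap_kernel_range_noflush(start, size);	/* clears the page tables */
	flush_tlb_kernel_range(start, start + size);	/* broadcasts if required */
}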
Example #7
/**
 * Restore the VA 0x0000,0000 mapping.
 * @vaddr: the VA of the MMU mapping to restore.
 */
void restore_mapping(unsigned long vaddr)
{
	unsigned long addr;
	
	addr = vaddr & PAGE_MASK;
	
	if (addr != backup_tbl[0].vaddr) {
		/* unexpected address: spin forever to make the failure visible */
		while (1)
			;
		return;
	}

	*((volatile __u32 *)(PAGE_TBL_ADDR)) = backup_tbl[0].entry_val;
	// clean dcache, invalidate icache
	__cpuc_coherent_kern_range((unsigned long)(PAGE_TBL_ADDR), (unsigned long)(PAGE_TBL_ADDR + (sizeof(u32))));
	// flush tlb after changing the mmu mapping.
	local_flush_tlb_kernel_range((unsigned long)(PAGE_TBL_ADDR), (unsigned long)(PAGE_TBL_ADDR + (sizeof(u32))));

	return;
}
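Taken together with Example #1, the intended call order around suspend becomes clearer: save the original entry, install the 1:1 mapping, suspend, and restore the entry on the way back. A rough sketch of that sequence, in which every name other than create_mapping() and restore_mapping() is hypothetical (including the save step sketched after Example #1):

/*
 * Hypothetical suspend flow using the identity mapping at VA 0.
 */
static void suspend_with_identity_map(void)
{
	save_mapping(0x00000000);	/* remember the original entry (see the Example #1 sketch) */
	create_mapping();		/* install the 1:1 mapping at VA 0 */

	platform_enter_suspend();	/* the resume path runs through the 1:1 mapping */

	restore_mapping(0x00000000);	/* put the original entry back */
}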
Example #8
void __kunmap_atomic(void *kvaddr)
{
	if (kvaddr >= (void *)FIXADDR_START &&
	    kvaddr < (void *)FIXADDR_TOP) {
		int idx = kmap_idx(kmap_atomic_idx(),
				   DCACHE_ALIAS((unsigned long)kvaddr));

		/*
		 * Force other mappings to Oops if they try to access this
		 * pte without remapping it first.  Keeping stale mappings
		 * around is also a bad idea, in case the page changes
		 * cacheability attributes or becomes a protected page in a
		 * hypervisor.
		 */
		pte_clear(&init_mm, kvaddr, kmap_pte + idx);
		local_flush_tlb_kernel_range((unsigned long)kvaddr,
					     (unsigned long)kvaddr + PAGE_SIZE);

		kmap_atomic_idx_pop();
	}

	pagefault_enable();
}
Example #9
File: smp.c Project: Einheri/wl500g
static void flush_tlb_kernel_range_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}
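This is the MIPS counterpart of the ARM helpers above; the argument block plays the same role as tlb_args. Reconstructed from the field accesses here (the authoritative definition lives in arch/mips/kernel/smp.c):

/*
 * Argument block for TLB-flush IPIs on MIPS; addr1/addr2 carry the
 * start and end of the range being invalidated.
 */
struct flush_tlb_data {
	struct vm_area_struct	*vma;
	unsigned long		addr1;
	unsigned long		addr2;
};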
Example #10
static inline void ipi_flush_tlb_kernel_range(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
}