Example No. 1
/*
 * Do all the things a cpu should do for a TLB shootdown message.
 * Other cpus may come here at the same time for this message.
 */
static void bau_process_message(struct msg_desc *mdp,
					struct bau_control *bcp)
{
	short socket_ack_count = 0;
	short *sp;
	struct atomic_short *asp;
	struct ptc_stats *stat = bcp->statp;
	struct bau_pq_entry *msg = mdp->msg;
	struct bau_control *smaster = bcp->socket_master;

	/*
	 * This must be a normal message, or a retry of a normal message
	 */
	if (msg->address == TLB_FLUSH_ALL) {
		local_flush_tlb();
		stat->d_alltlb++;
	} else {
		__flush_tlb_one(msg->address);
		stat->d_onetlb++;
	}
	stat->d_requestee++;

	/*
	 * One cpu on each uvhub has the additional job on a RETRY
	 * of releasing the resource held by the message that is
	 * being retried.  That message is identified by its sending
	 * cpu number.
	 */
	if (msg->msg_type == MSG_RETRY && bcp == bcp->uvhub_master)
		bau_process_retry_msg(mdp, bcp);

	/*
	 * This is a swack message, so we have to reply to it.
	 * Count each responding cpu on the socket. This avoids
	 * pinging the count's cache line back and forth between
	 * the sockets.
	 */
	sp = &smaster->socket_acknowledge_count[mdp->msg_slot];
	asp = (struct atomic_short *)sp;
	socket_ack_count = atom_asr(1, asp);
	if (socket_ack_count == bcp->cpus_in_socket) {
		int msg_ack_count;
		/*
		 * Both sockets dump their completed count total into
		 * the message's count.
		 */
		smaster->socket_acknowledge_count[mdp->msg_slot] = 0;
		asp = (struct atomic_short *)&msg->acknowledge_count;
		msg_ack_count = atom_asr(socket_ack_count, asp);

		if (msg_ack_count == bcp->cpus_in_uvhub) {
			/*
			 * All cpus in uvhub saw it; reply
			 */
			reply_to_message(mdp, bcp);
		}
	}

	return;
}
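
The two-level acknowledgment count above (each CPU increments its socket master's counter, and the last CPU on a socket folds the socket total into the per-message count) can be sketched outside the BAU code. The following is a hypothetical userspace analogue using C11 atomics, not kernel code; note that atomic_fetch_add() returns the value before the addition, whereas atom_asr() above returns the value after it, so the new totals are computed explicitly.

#include <stdatomic.h>
#include <stdbool.h>

struct socket_state {
	atomic_int ack;			/* per-socket acknowledgment count */
	int cpus_in_socket;
};

struct hub_msg {
	atomic_int ack;			/* hub-wide acknowledgment count */
	int cpus_in_hub;
};

/* Each CPU handling the message calls this once; it returns true on the
 * single CPU that observes the hub-wide total and should send the reply. */
static bool ack_message(struct socket_state *s, struct hub_msg *m)
{
	int socket_total = atomic_fetch_add(&s->ack, 1) + 1;

	if (socket_total != s->cpus_in_socket)
		return false;		/* other CPUs on this socket still pending */

	/* Last CPU on the socket dumps the socket total into the message
	 * and checks whether every CPU on the hub has now acknowledged. */
	atomic_store(&s->ack, 0);
	return atomic_fetch_add(&m->ack, socket_total) + socket_total ==
	       m->cpus_in_hub;
}
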
Example No. 2
void *kmap_atomic(struct page *page, enum km_type type)
{
	unsigned long idx;
	unsigned long vaddr;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	debug_kmap_atomic(type);
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);

/* XXX Fix - Anton */
#if 0
	__flush_cache_one(vaddr);
#else
	flush_cache_all();
#endif

#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(!pte_none(*(kmap_pte-idx)));
#endif
	set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));
/* XXX Fix - Anton */
#if 0
	__flush_tlb_one(vaddr);
#else
	flush_tlb_all();
#endif

	return (void*) vaddr;
}
Example No. 3
/**
 * Establish a temporary kernel mapping.
 * The type and the CPU together determine which fixed-mapping linear
 * address is used to map the requested page.
 */
void *kmap_atomic(struct page *page, enum km_type type)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	inc_preempt_count();
	/**
	 * If the page being mapped does not belong to high memory, no mapping
	 * is needed; simply return its linear address.
	 */
	if (!PageHighMem(page))
		return page_address(page);

	/**
	 * Determine the linear address from the type and the CPU.
	 */
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	if (!pte_none(*(kmap_pte-idx)))
		BUG();
#endif
	/**
	 * Establish the mapping between the linear address and the
	 * page table entry.
	 */
	set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));
	/**
	 * Finally, the TLB must be flushed before the linear address
	 * can be returned.
	 */
	__flush_tlb_one(vaddr);

	return (void*) vaddr;
}
Example No. 4
void smp_invalidate_interrupt(struct pt_regs *regs)
{
    unsigned long cpu;

    cpu = get_cpu();

    if (!cpu_isset(cpu, flush_cpumask))
        goto out;
    /*
     * This was a BUG() but until someone can quote me the
     * line from the intel manual that guarantees an IPI to
     * multiple CPUs is retried _only_ on the erroring CPUs
     * it's staying as a return
     *
     * BUG();
     */

    if (flush_mm == x86_read_percpu(cpu_tlbstate.active_mm)) {
        if (x86_read_percpu(cpu_tlbstate.state) == TLBSTATE_OK) {
            if (flush_va == TLB_FLUSH_ALL)
                local_flush_tlb();
            else
                __flush_tlb_one(flush_va);
        } else
            leave_mm(cpu);
    }
    ack_APIC_irq();
    smp_mb__before_clear_bit();
    cpu_clear(cpu, flush_cpumask);
    smp_mb__after_clear_bit();
out:
    put_cpu_no_resched();
    inc_irq_stat(irq_tlb_count);
}
Example No. 5
void kunmap_atomic(void *kvaddr, enum km_type type)
{
#ifdef CONFIG_DEBUG_HIGHMEM
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();

	if (vaddr < FIXADDR_START) { // FIXME
		dec_preempt_count();
		preempt_check_resched();
		return;
	}

	if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx))
		BUG();

	/*
	 * force other mappings to Oops if they try to access
	 * this pte without first remapping it
	 */
	pte_clear(&init_mm, vaddr, kmap_pte-idx);
	__flush_tlb_one(vaddr);
#endif

	dec_preempt_count();
	preempt_check_resched();
}
Example No. 6
/*
 * Associate a large virtual page frame with a given physical page frame
 * and protection flags for that frame. pfn is for the base of the page,
 * vaddr is what the page gets mapped to - both must be properly aligned.
 * The pmd must already be instantiated. Assumes PAE mode.
 */
void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
    pgd_t *pgd;
    pud_t *pud;
    pmd_t *pmd;

    if (vaddr & (PMD_SIZE-1)) {        /* vaddr is misaligned */
        printk(KERN_ERR "set_pmd_pfn: vaddr misaligned\n");
        return; /* BUG(); */
    }
    if (pfn & (PTRS_PER_PTE-1)) {        /* pfn is misaligned */
        printk(KERN_ERR "set_pmd_pfn: pfn misaligned\n");
        return; /* BUG(); */
    }
    pgd = swapper_pg_dir + pgd_index(vaddr);
    if (pgd_none(*pgd)) {
        printk(KERN_ERR "set_pmd_pfn: pgd_none\n");
        return; /* BUG(); */
    }
    pud = pud_offset(pgd, vaddr);
    pmd = pmd_offset(pud, vaddr);
    set_pmd(pmd, pfn_pmd(pfn, flags));
    /*
     * It's enough to flush this one mapping.
     * (PGE mappings get flushed as well)
     */
    __flush_tlb_one(vaddr);
}
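
A caller of set_pmd_pfn() must supply a PMD-aligned virtual address and a pfn aligned to PTRS_PER_PTE, as the checks above enforce. The sketch below is a hypothetical caller, not taken from any kernel source: the helper name, the page count handling and the PAGE_KERNEL_LARGE protection are illustrative assumptions.

/* Hypothetical caller: map a physically contiguous, PMD-aligned region
 * with large pages, one set_pmd_pfn() call per PMD_SIZE chunk. */
static void map_region_large(unsigned long vaddr, unsigned long pfn,
			     unsigned long nr_small_pages)
{
	unsigned long done;

	for (done = 0; done < nr_small_pages; done += PTRS_PER_PTE) {
		set_pmd_pfn(vaddr, pfn, PAGE_KERNEL_LARGE);
		vaddr += PMD_SIZE;
		pfn += PTRS_PER_PTE;
	}
}
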
Example No. 7
void *kmap_atomic(struct page *page)
{
	unsigned long vaddr;
	long idx, type;

	preempt_disable();
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);

/* XXX Fix - Anton */
#if 0
	__flush_cache_one(vaddr);
#else
	flush_cache_all();
#endif

#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(!pte_none(*(kmap_pte-idx)));
#endif
	set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));
/* XXX Fix - Anton */
#if 0
	__flush_tlb_one(vaddr);
#else
	flush_tlb_all();
#endif

	return (void*) vaddr;
}
Example No. 8
/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = swapper_pg_dir + pgd_index(vaddr);
	if (pgd_none(*pgd)) {
		BUG();
		return;
	}
	pud = pud_offset(pgd, vaddr);
	if (pud_none(*pud)) {
		BUG();
		return;
	}
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		BUG();
		return;
	}
	pte = pte_offset_kernel(pmd, vaddr);
	/* <mfn,flags> stored as-is, to permit clearing entries */
	xen_set_pte(pte, mfn_pte(mfn, flags));

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}
Example No. 9
/*
 * Map the 32bit vsyscall page on demand.
 *
 * RED-PEN: This knows too much about high level VM.
 *
 * Alternative would be to generate a vma with appropriate backing options
 * and let it be handled by generic VM.
 */
int __map_syscall32(struct mm_struct *mm, unsigned long address)
{ 
	pgd_t *pgd;
	pud_t *pud;
	pte_t *pte;
	pmd_t *pmd;
	int err = -ENOMEM;

	spin_lock(&mm->page_table_lock); 
 	pgd = pgd_offset(mm, address);
 	pud = pud_alloc(mm, pgd, address);
 	if (pud) {
 		pmd = pmd_alloc(mm, pud, address);
 		if (pmd && (pte = pte_alloc_map(mm, pmd, address)) != NULL) {
 			if (pte_none(*pte)) {
 				set_pte(pte,
 					mk_pte(virt_to_page(syscall32_page),
 					       PAGE_KERNEL_VSYSCALL32));
 			}
 			/* Flush only the local CPU.  Other CPUs taking a fault
 			   will just end up here again.
			   This is probably not needed and is just paranoia. */
 			__flush_tlb_one(address);
 			err = 0;
		}
	}
	spin_unlock(&mm->page_table_lock);
	return err;
}
Example No. 10
irqreturn_t smp_invalidate_interrupt(int irq, void *dev_id,
				     struct pt_regs *regs)
{
	unsigned long cpu;

	cpu = get_cpu();

	if (!cpu_isset(cpu, flush_cpumask))
		goto out;
		/* 
		 * This was a BUG() but until someone can quote me the
		 * line from the intel manual that guarantees an IPI to
		 * multiple CPUs is retried _only_ on the erroring CPUs
		 * it's staying as a return
		 *
		 * BUG();
		 */
		 
	if (flush_mm == per_cpu(cpu_tlbstate, cpu).active_mm) {
		if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) {
			if (flush_va == FLUSH_ALL)
				local_flush_tlb();
			else
				__flush_tlb_one(flush_va);
		} else
			leave_mm(cpu);
	}
	smp_mb__before_clear_bit();
	cpu_clear(cpu, flush_cpumask);
	smp_mb__after_clear_bit();
out:
	put_cpu_no_resched();

	return IRQ_HANDLED;
}
Example No. 11
/*
 * Associate a virtual page frame with a given physical page frame 
 * and protection flags for that frame.
 */ 
void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = swapper_pg_dir + pgd_index(vaddr);
	if (pgd_none(*pgd)) {
		BUG();
		return;
	}
	pud = pud_offset(pgd, vaddr);
	if (pud_none(*pud)) {
		BUG();
		return;
	}
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		BUG();
		return;
	}
	pte = pte_offset_kernel(pmd, vaddr);
	if (!pte_none(pteval))
		set_pte_at(&init_mm, vaddr, pte, pteval);
	else
		pte_clear(&init_mm, vaddr, pte);

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}
Example No. 12
File: tlb.c Project: AK101111/linux
void flush_tlb_page(struct vm_area_struct *vma, unsigned long start)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();

	if (current->active_mm == mm) {
		if (current->mm) {
			/*
			 * Implicit full barrier (INVLPG) that synchronizes
			 * with switch_mm.
			 */
			__flush_tlb_one(start);
		} else {
			leave_mm(smp_processor_id());

			/* Synchronize with switch_mm. */
			smp_mb();
		}
	}

	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, start, 0UL);

	preempt_enable();
}
Example No. 13
asmlinkage void smp_invalidate_interrupt (void)
{
	unsigned long cpu;

	cpu = get_cpu();

	if (!cpu_isset(cpu, flush_cpumask))
		goto out;
		/* 
		 * This was a BUG() but until someone can quote me the
		 * line from the intel manual that guarantees an IPI to
		 * multiple CPUs is retried _only_ on the erroring CPUs
		 * it's staying as a return
		 *
		 * BUG();
		 */
		 
	if (flush_mm == read_pda(active_mm)) {
		if (read_pda(mmu_state) == TLBSTATE_OK) {
			if (flush_va == FLUSH_ALL)
				local_flush_tlb();
			else
				__flush_tlb_one(flush_va);
		} else
			leave_mm(cpu);
	}
	ack_APIC_irq();
	cpu_clear(cpu, flush_cpumask);

out:
	put_cpu_no_resched();
}
Example No. 14
asmlinkage void smp_invalidate_interrupt (void)
{
	unsigned long cpu = smp_processor_id();

	if (!test_bit(cpu, &flush_cpumask))
		return;
		/* 
		 * This was a BUG() but until someone can quote me the
		 * line from the intel manual that guarantees an IPI to
		 * multiple CPUs is retried _only_ on the erroring CPUs
		 * it's staying as a return
		 *
		 * BUG();
		 */
		 
	if (flush_mm == cpu_tlbstate[cpu].active_mm) {
		if (cpu_tlbstate[cpu].state == TLBSTATE_OK) {
			if (flush_va == FLUSH_ALL)
				local_flush_tlb();
			else
				__flush_tlb_one(flush_va);
		} else
			leave_mm(cpu);
	}
	ack_APIC_irq();
	clear_bit(cpu, &flush_cpumask);
}
Example No. 15
void kunmap_atomic(void *kvaddr, enum km_type type)
{
#ifdef CONFIG_DEBUG_HIGHMEM
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	unsigned long idx = type + KM_TYPE_NR*smp_processor_id();

	if (vaddr < FIXADDR_START) { // FIXME
		pagefault_enable();
		return;
	}

	BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx));

/* XXX Fix - Anton */
#if 0
	__flush_cache_one(vaddr);
#else
	flush_cache_all();
#endif

	/*
	 * force other mappings to Oops if they try to access
	 * this pte without first remapping it
	 */
	pte_clear(&init_mm, vaddr, kmap_pte-idx);
/* XXX Fix - Anton */
#if 0
	__flush_tlb_one(vaddr);
#else
	flush_tlb_all();
#endif
#endif

	pagefault_enable();
}
Example No. 16
static void ghes_iounmap_irq(void __iomem *vaddr_ptr)
{
	unsigned long vaddr = (unsigned long __force)vaddr_ptr;
	void *base = ghes_ioremap_area->addr;

	BUG_ON(vaddr != (unsigned long)GHES_IOREMAP_IRQ_PAGE(base));
	unmap_kernel_range_noflush(vaddr, PAGE_SIZE);
	__flush_tlb_one(vaddr);
}
Example No. 17
int kmemcheck_show_addr(unsigned long address)
{
	pte_t *pte;

	pte = kmemcheck_pte_lookup(address);
	if (!pte)
		return 0;

	set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT));
	__flush_tlb_one(address);
	return 1;
}
Example No. 18
/*
 * This is only for a caller who is clever enough to page-align
 * phys_addr and virtual_source, and who also has a preference
 * about which virtual address from which to steal ptes
 */
static void __boot_ioremap(unsigned long phys_addr, unsigned long nrpages, 
		    void* virtual_source)
{
	boot_pte_t* pte;
	int i;
	char *vaddr = virtual_source;

	pte = boot_vaddr_to_pte(virtual_source);
	for (i=0; i < nrpages; i++, phys_addr += PAGE_SIZE, pte++) {
		set_pte(pte, pfn_pte(phys_addr>>PAGE_SHIFT, PAGE_KERNEL));
		__flush_tlb_one(&vaddr[i*PAGE_SIZE]);
	}
}
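
As the comment says, the caller is responsible for page alignment and for choosing which virtual range donates its ptes. A hypothetical caller might steal the ptes behind a statically allocated, page-aligned scratch buffer; the buffer and helper names below are illustrative assumptions, not part of the original code.

/* Hypothetical caller: temporarily map two pages of physical memory over
 * a page-aligned scratch buffer during early boot. */
static char boot_scratch[2 * PAGE_SIZE] __attribute__((aligned(PAGE_SIZE)));

static void *early_map_phys(unsigned long phys_addr)
{
	__boot_ioremap(phys_addr & PAGE_MASK, 2, boot_scratch);
	return boot_scratch + (phys_addr & ~PAGE_MASK);
}
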
Example No. 19
#ifdef CONFIG_X86_64
asmlinkage
#endif
void smp_invalidate_interrupt(struct pt_regs *regs)
{
	unsigned int cpu;
	unsigned int sender;
	union smp_flush_state *f;

	cpu = smp_processor_id();

#ifdef CONFIG_X86_32
	if (current->active_mm)
		load_user_cs_desc(cpu, current->active_mm);
#endif

	/*
	 * orig_rax contains the negated interrupt vector.
	 * Use that to determine where the sender put the data.
	 */
	sender = ~regs->orig_ax - INVALIDATE_TLB_VECTOR_START;
	f = &flush_state[sender];

	if (!cpumask_test_cpu(cpu, to_cpumask(f->flush_cpumask)))
		goto out;
		/*
		 * This was a BUG() but until someone can quote me the
		 * line from the intel manual that guarantees an IPI to
		 * multiple CPUs is retried _only_ on the erroring CPUs
		 * it's staying as a return
		 *
		 * BUG();
		 */

	if (f->flush_mm == percpu_read(cpu_tlbstate.active_mm)) {
		if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
			if (f->flush_va == TLB_FLUSH_ALL)
				local_flush_tlb();
			else
				__flush_tlb_one(f->flush_va);
		} else
			leave_mm(cpu);
	}
out:
	ack_APIC_irq();
	smp_mb__before_clear_bit();
	cpumask_clear_cpu(cpu, to_cpumask(f->flush_cpumask));
	smp_mb__after_clear_bit();
	inc_irq_stat(irq_tlb_count);
}
Example No. 20
void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long cpu_mask = mm->cpu_vm_mask & ~(1 << smp_processor_id());

	if (current->active_mm == mm) {
		if (current->mm)
			__flush_tlb_one(va);
		else
			leave_mm(smp_processor_id());
	}

	if (cpu_mask)
		flush_tlb_others(cpu_mask, mm, va);
}
Example No. 21
static __init void set_pte_phys(unsigned long vaddr,
			 unsigned long phys, pgprot_t prot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte, new_pte;

	Dprintk("set_pte_phys %lx to %lx\n", vaddr, phys);

	pgd = pgd_offset_k(vaddr);
	if (pgd_none(*pgd)) {
		printk("PGD FIXMAP MISSING, it should be setup in head.S!\n");
		return;
	}
	pud = pud_offset(pgd, vaddr);
	if (pud_none(*pud)) {
		pmd = (pmd_t *) spp_getpage(); 
		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
		if (pmd != pmd_offset(pud, 0)) {
			printk("PAGETABLE BUG #01! %p <-> %p\n", pmd, pmd_offset(pud,0));
			return;
		}
	}
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		pte = (pte_t *) spp_getpage();
		set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
		if (pte != pte_offset_kernel(pmd, 0)) {
			printk("PAGETABLE BUG #02!\n");
			return;
		}
	}
	new_pte = pfn_pte(phys >> PAGE_SHIFT, prot);

	pte = pte_offset_kernel(pmd, vaddr);
	if (!pte_none(*pte) &&
	    pte_val(*pte) != (pte_val(new_pte) & __supported_pte_mask))
		pte_ERROR(*pte);
	set_pte(pte, new_pte);

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}
Example No. 22
void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();

	if (current->active_mm == mm) {
		if (current->mm)
			__flush_tlb_one(va);
		else
			leave_mm(smp_processor_id());
	}

	if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(&mm->cpu_vm_mask, mm, va);

	preempt_enable();
}
Example No. 23
void kmemcheck_hide_pages(struct page *p, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; ++i) {
		unsigned long address;
		pte_t *pte;
		unsigned int level;

		address = (unsigned long) page_address(&p[i]);
		pte = lookup_address(address, &level);
		BUG_ON(!pte);
		BUG_ON(level != PG_LEVEL_4K);

		set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_PRESENT));
		set_pte(pte, __pte(pte_val(*pte) | _PAGE_HIDDEN));
		__flush_tlb_one(address);
	}
}
Example No. 24
void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	int type;

	if (vaddr < FIXADDR_START) { // FIXME
		pagefault_enable();
		return;
	}

	type = kmap_atomic_idx();

#ifdef CONFIG_DEBUG_HIGHMEM
	{
		unsigned long idx;

		idx = type + KM_TYPE_NR * smp_processor_id();
		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx));

		/* XXX Fix - Anton */
#if 0
		__flush_cache_one(vaddr);
#else
		flush_cache_all();
#endif

		/*
		 * force other mappings to Oops if they try to access
		 * this pte without first remapping it
		 */
		pte_clear(&init_mm, vaddr, kmap_pte-idx);
		/* XXX Fix - Anton */
#if 0
		__flush_tlb_one(vaddr);
#else
		flush_tlb_all();
#endif
	}
#endif

	kmap_atomic_idx_pop();
	pagefault_enable();
}
Example No. 25
void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
{
    struct mm_struct *mm = vma->vm_mm;
    cpumask_t cpu_mask;

    preempt_disable();
    cpu_mask = mm->cpu_vm_mask;
    cpu_clear(smp_processor_id(), cpu_mask);

    if (current->active_mm == mm) {
        if (current->mm)
            __flush_tlb_one(va);
        else
            leave_mm(smp_processor_id());
    }

    if (!cpus_empty(cpu_mask))
        flush_tlb_others(cpu_mask, mm, va);

    preempt_enable();
}
Example No. 26
/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However, while holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */
void *kmap_atomic(struct page *page, enum km_type type)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	inc_preempt_count();
	if (!PageHighMem(page))
		return page_address(page);

	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	if (!pte_none(*(kmap_pte-idx)))
		BUG();
#endif
	set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));
	__flush_tlb_one(vaddr);

	return (void*) vaddr;
}
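
To make the comment's constraint concrete, here is a hypothetical caller of the two-argument API shown in these examples: the atomic mapping is held only across a short, non-sleeping copy, and the ordinary kmap()/kunmap() pair is used when the work in between may sleep. The helper name and the may_sleep flag are illustrative assumptions.

#include <linux/highmem.h>
#include <linux/string.h>

/* Hypothetical helper: copy data into a (possibly highmem) page. */
static void fill_page(struct page *page, const void *src, size_t len,
		      int may_sleep)
{
	void *dst;

	if (may_sleep) {
		dst = kmap(page);			/* may sleep; global kmap pool */
		memcpy(dst, src, len);
		kunmap(page);
	} else {
		dst = kmap_atomic(page, KM_USER0);	/* per-CPU fixmap slot */
		memcpy(dst, src, len);
		kunmap_atomic(dst, KM_USER0);		/* no sleeping in between */
	}
}
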
Example No. 27
/*
 * Map the 32bit vsyscall page on demand.
 *
 * RED-PEN: This knows too much about high level VM.
 *
 * Alternative would be to generate a vma with appropriate backing options
 * and let it be handled by generic VM.
 */
int __map_syscall32(struct mm_struct *mm, unsigned long address)
{ 
	pte_t *pte;
	pmd_t *pmd;
	int err = 0;

	spin_lock(&mm->page_table_lock); 
	pmd = pmd_alloc(mm, pgd_offset(mm, address), address); 
	if (pmd && (pte = pte_alloc_map(mm, pmd, address)) != NULL) { 
		if (pte_none(*pte)) { 
			set_pte(pte, 
				mk_pte(virt_to_page(syscall32_page), 
				       PAGE_KERNEL_VSYSCALL)); 
		}
		/* Flush only the local CPU. Other CPUs taking a fault
		   will just end up here again */
		__flush_tlb_one(address); 
	} else
		err = -ENOMEM; 
	spin_unlock(&mm->page_table_lock);
	return err;
}
Example No. 28
File: tlb_64.c Project: E-LLP/n900
asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
{
	int cpu;
	int sender;
	union smp_flush_state *f;

	cpu = smp_processor_id();
	/*
	 * orig_rax contains the negated interrupt vector.
	 * Use that to determine where the sender put the data.
	 */
	sender = ~regs->orig_ax - INVALIDATE_TLB_VECTOR_START;
	f = &per_cpu(flush_state, sender);

	if (!cpu_isset(cpu, f->flush_cpumask))
		goto out;
		/*
		 * This was a BUG() but until someone can quote me the
		 * line from the intel manual that guarantees an IPI to
		 * multiple CPUs is retried _only_ on the erroring CPUs
		 * it's staying as a return
		 *
		 * BUG();
		 */

	if (f->flush_mm == read_pda(active_mm)) {
		if (read_pda(mmu_state) == TLBSTATE_OK) {
			if (f->flush_va == TLB_FLUSH_ALL)
				local_flush_tlb();
			else
				__flush_tlb_one(f->flush_va);
		} else
			leave_mm(cpu);
	}
out:
	ack_APIC_irq();
	cpu_clear(cpu, f->flush_cpumask);
	add_pda(irq_tlb_count, 1);
}
Example No. 29
void kunmap_atomic(void *kvaddr, enum km_type type)
{
#if defined(CONFIG_DEBUG_HIGHMEM) || defined(CONFIG_XEN)
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();

	if (vaddr < FIXADDR_START) { // FIXME
		dec_preempt_count();
		preempt_check_resched();
		return;
	}
#endif

#if defined(CONFIG_DEBUG_HIGHMEM)
	if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx))
		BUG();

	/*
	 * force other mappings to Oops if they try to access
	 * this pte without first remapping it
	 */
	pte_clear(kmap_pte-idx);
	__flush_tlb_one(vaddr);
#elif defined(CONFIG_XEN)
	/*
	 * We must ensure there are no dangling pagetable references when
	 * returning memory to Xen (decrease_reservation).
	 * XXX TODO: We could make this faster by only zapping when
	 * kmap_flush_unused is called but that is trickier and more invasive.
	 */
	pte_clear(kmap_pte-idx);
#endif

	dec_preempt_count();
	preempt_check_resched();
}
Example No. 30
/* this is called from assembler and passed control in long mode 64 bit
 * interrupts disabled.
 * At this stage the first 32MB have been mapped with 2MB pages.
 */
extern "C" void
kinit()
{
    extern char __bss_end[];
    struct KernelInitArgs kernelInitArgs;
    MemoryMgrPrimitiveKern *memory = &kernelInitArgs.memory;
    uval vp = 0;	/* master processor */
    uval vaddr;

    /* On this machine, like all x86 machines nowadays, the boot
     * image is loaded at 1MB.  This is hard-coded here.  */
    extern code start_real;
    codeAddress kernPhysStartAddress = &start_real;
    extern code kernVirtStart;

    early_printk("kernPhysStartAddress 0x%lx \n",
		 (unsigned long)kernPhysStartAddress);


    /* We ignore memory below the 1MB boundary.  physSize is the
     * size of memory above that boundary.
     */
    uval physSize = BOOT_MINIMUM_REAL_MEMORY -0x100000;
    uval physStart = 0x0;
    uval physEnd = physStart + 0x100000 + physSize;

    early_printk("BOOT_MINIMUM_REAL_MEMORY 0x%lx, physStart 0x%lx,"
		 " physEnd 0x%lx, physSize 0x%lx \n",
		 BOOT_MINIMUM_REAL_MEMORY,  physStart, physEnd, physSize);

    /*
     * We want to map all of physical memory into a V->R region.  We choose a
     * base for the V->R region (virtBase) that makes the kernel land correctly
     * at its link origin, &kernVirtStart.  This link origin must wind up
     * mapped to the physical location at which the kernel was loaded
     * (kernPhysStartAddress).
     */
    uval virtBase = (uval) (&kernVirtStart - kernPhysStartAddress);
    early_printk("&kernVirtStart 0x%lx virtBase 0x%lx \n",
		 (unsigned long long)&kernVirtStart,
		 (unsigned long long)virtBase);

    /*
     * Memory from __bss_end to the end of physical memory is available
     * for allocation.
     * Correct first for the 2MB page mapping the kernel.
     */
    early_printk("__bss_end is 0x%lx physEnd is 0x%lx \n", __bss_end , physEnd);
    uval allocStart = ALIGN_UP(__bss_end, SEGMENT_SIZE);
    uval allocEnd = virtBase + physEnd;

    early_printk("allocStart is 0x%lx allocEnd is 0x%lx \n",
		 allocStart, allocEnd);
    memory->init(physStart, physEnd, virtBase, allocStart, allocEnd);

    /*
     * Remove mappings between allocStart and
     * BOOT_MINIMUM_REAL_MEMORY to allow 4KB page mapping for
     * that range.  No need to invalidate the TLB unless they are
     * touched (debugging).  Actually we need to keep the first
     * 2MB mapping above allocStart so that we can initialize the
     * first 2 (or 3 if we need a PDP page as well) 4KB pages
     * which are PDE and PTE pages for the V->R mapping before
     * they are themselves mapped as 4KB pages.
     */
    early_printk("top page real address is 0x%lx \n", (uval)&level4_pgt);
    uval level1_pgt_virt = memory->virtFromPhys((uval)&level4_pgt);
    early_printk("top page real address is 0x%lx \n", (uval)level4_pgt & ~0xfff);
    early_printk("top page virtual  address is 0x%lx \n", (uval )level1_pgt_virt);

    for (vaddr = allocStart + SEGMENT_SIZE; vaddr < allocEnd; vaddr += SEGMENT_SIZE)	{

#ifndef NDEBUG
      //     early_printk("removing pde, pml4 at virtual address 0x%lx \n", EARLY_VADDR_TO_L1_PTE_P(level1_pgt_virt, vaddr, memory));
      TOUCH(EARLY_VADDR_TO_L1_PTE_P(level1_pgt_virt, vaddr, memory));

      //     early_printk("removing pde, pdp at virtual address 0x%lx \n", EARLY_VADDR_TO_L2_PTE_P(level1_pgt_virt, vaddr, memory));
      TOUCH(EARLY_VADDR_TO_L2_PTE_P(level1_pgt_virt, vaddr, memory));

      //     early_printk("removing pde at virtual address 0x%lx \n", EARLY_VADDR_TO_L3_PTE_P(level1_pgt_virt, vaddr, memory));
      TOUCH(EARLY_VADDR_TO_L3_PTE_P(level1_pgt_virt, vaddr, memory));
#endif /* #ifndef NDEBUG */


      EARLY_VADDR_TO_L3_PTE_P(level1_pgt_virt, vaddr, memory)->P = 0;
      EARLY_VADDR_TO_L3_PTE_P(level1_pgt_virt, vaddr, memory)->PS = 0;
      EARLY_VADDR_TO_L3_PTE_P(level1_pgt_virt, vaddr, memory)->G = 0;
      EARLY_VADDR_TO_L3_PTE_P(level1_pgt_virt, vaddr, memory)->Frame = 0;
      __flush_tlb_one(vaddr);
    }

    /*
     * Because of the 2MB page mapping for the kernel, no unused space
     * can be reclaimed at a 4KB page granularity.  We may want to map
     * the fringe of the bss with 4KB page(s), or alternatively free up,
     * for (pinned-only) 4KB allocation, the 4KB pages that sit unused
     * inside the 2MB pages at this point.  XXX dangerous
     */

    early_printk("Calling InitKernelMappings\n");
    InitKernelMappings(0, memory);

    // kernelInitArgs.onSim = onSim; not there anymore but where is it set XXX

    kernelInitArgs.vp = 0;
    kernelInitArgs.barrierP = 0;

#define LOOP_NUMBER 	0x000fffff	// iteration counter for delay
    init_PIC(LOOP_NUMBER);

    early_printk("Calling InitIdt\n");
    InitIdt();			// initialize int handlers

    early_printk("Calling enableHardwareInterrupts\n");
    enableHardwareInterrupts();

    early_printk("Calling thinwireInit\n");
    thinwireInit(memory);

    /* no thinwire console XXX taken from mips64 but check  */
    early_printk("Calling LocalConsole and switching to tty \n");
    LocalConsole::Init(vp, memory, CONSOLE_CHANNEL, 1, 0 );

    err_printf("Calling KernelInit.C\n");

    /* Remove the initial V=R mapping (the first 2MB), which was only used
     * for jumping to the final mapping.  XXX todo: should not do this until
     * VGABASE has been relocated; it is currently mapped V==R.  XXX cannot
     * use early_printk() from now on.  */
    L3_PTE *p;
    p = EARLY_VADDR_TO_L3_PTE_P(level1_pgt_virt,(uval)0x100000,memory);
    p->P = 0;
    p->PS = 0;
    p->G = 0;
    p->Frame = 0;
    __flush_tlb_one(0x100000);

    KernelInit(kernelInitArgs);
    /* NOTREACHED */
}