Example #1
void __init mem_init(void)
{
    unsigned long codesize, reservedpages, datasize, initsize;
    unsigned long tmp, ram = 0;

    high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
    totalram_pages += free_all_bootmem();
    totalram_pages -= setup_zero_page();	/* Setup zeroed pages. */
    reservedpages = 0;

    for (tmp = 0; tmp < max_low_pfn; tmp++)
        if (page_is_ram(tmp)) {
            ram++;
            if (PageReserved(pfn_to_page(tmp)))
                reservedpages++;
        }

    num_physpages = ram;
    codesize = (unsigned long) &_etext - (unsigned long) &_text;
    datasize = (unsigned long) &_edata - (unsigned long) &_etext;
    initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

    printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, "
           "%ldk reserved, %ldk data, %ldk init, %ldk highmem)\n",
           (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
           ram << (PAGE_SHIFT-10), codesize >> 10,
           reservedpages << (PAGE_SHIFT-10), datasize >> 10,
           initsize >> 10,
           totalhigh_pages << (PAGE_SHIFT-10));
}
Example #2
void __init add_one_highpage_init(struct page *page, int pfn, int bad_ppro)
{
	if (page_is_ram(pfn) && !(bad_ppro && page_kills_ppro(pfn))) {
		ClearPageReserved(page);
		free_new_highpage(page);
	} else
		SetPageReserved(page);
}
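For context, this helper is normally driven by a per-pfn loop over the highmem range. A minimal caller sketch, assuming the i386-style highstart_pfn/highend_pfn bounds (a hedged simplification of set_highmem_pages_init(), not the verbatim kernel code):

void __init set_highmem_pages_init(int bad_ppro)
{
	int pfn;

	/* Hand every usable highmem frame to the allocator, one by one. */
	for (pfn = highstart_pfn; pfn < highend_pfn; pfn++)
		add_one_highpage_init(pfn_to_page(pfn), pfn, bad_ppro);

	totalram_pages += totalhigh_pages;
}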
Example #3
/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain
 * address is valid. The argument is a physical page number.
 * We mimic x86 here by disallowing access to system RAM as well as
 * device-exclusive MMIO regions. This effectively disables read()/write()
 * on /dev/mem.
 */
int devmem_is_allowed(unsigned long pfn)
{
	if (iomem_is_exclusive(pfn << PAGE_SHIFT))
		return 0;
	if (!page_is_ram(pfn))
		return 1;
	return 0;
}
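A note on how this check is typically consumed: under strict /dev/mem policing, the character-device read/write path rejects a transfer if any page it touches fails devmem_is_allowed(). A minimal sketch, where range_is_allowed() is a hypothetical wrapper loosely modeled on drivers/char/mem.c:

static int range_is_allowed(unsigned long pfn, unsigned long size)
{
	unsigned long end_pfn = pfn + (size >> PAGE_SHIFT);

	/* Deny the whole access if any spanned page is off limits. */
	for (; pfn <= end_pfn; pfn++)
		if (!devmem_is_allowed(pfn))
			return 0;
	return 1;
}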
Example #4
File: mem.c Project: 03199618/linux
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (ppc_md.phys_mem_access_prot)
		return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);

	if (!page_is_ram(pfn))
		vma_prot = pgprot_noncached(vma_prot);

	return vma_prot;
}
Example #5
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (ppc_md.phys_mem_access_prot)
		return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);

	if (!page_is_ram(pfn))
		vma_prot = __pgprot(pgprot_val(vma_prot)
				    | _PAGE_GUARDED | _PAGE_NO_CACHE);
	return vma_prot;
}
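Both phys_mem_access_prot() variants above feed the generic /dev/mem mmap path, which applies the returned protection before remapping. A trimmed sketch of that consumer, loosely after drivers/char/mem.c's mmap_mem() (error handling elided, so treat it as approximate):

static int mmap_mem(struct file *file, struct vm_area_struct *vma)
{
	size_t size = vma->vm_end - vma->vm_start;

	/* Let the architecture pick cached vs. guarded/uncached. */
	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
						 size, vma->vm_page_prot);

	if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			    size, vma->vm_page_prot))
		return -EAGAIN;
	return 0;
}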
Example #6
/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 *
 */
static inline int uncached_access(struct file *file, unsigned long addr)
{
#if defined(__i386__)
	/*
	 * On the PPro and successors, the MTRRs are used to set
	 * memory types for physical addresses outside main memory,
	 * so blindly setting PCD or PWT on those pages is wrong.
	 * For Pentiums and earlier, the surround logic should disable
	 * caching for the high addresses through the KEN pin, but
	 * we maintain the tradition of paranoia in this code.
	 */
	if (file->f_flags & O_SYNC)
		return 1;
	return !(test_bit(X86_FEATURE_MTRR, boot_cpu_data.x86_capability) ||
		 test_bit(X86_FEATURE_K6_MTRR, boot_cpu_data.x86_capability) ||
		 test_bit(X86_FEATURE_CYRIX_ARR, boot_cpu_data.x86_capability) ||
		 test_bit(X86_FEATURE_CENTAUR_MCR, boot_cpu_data.x86_capability))
		&& addr >= __pa(high_memory);
#elif defined(__x86_64__)
	/* 
	 * This is broken because it can generate memory type aliases,
	 * which can cause cache corruptions
	 * But it is only available for root and we have to be bug-to-bug
	 * compatible with i386.
	 */
	if (file->f_flags & O_SYNC)
		return 1;
	/* same behaviour as i386. PAT always set to cached and MTRRs control the
	   caching behaviour. 
	   Hopefully a full PAT implementation will fix that soon. */	   
	return 0;
#elif defined(CONFIG_IA64)
	/*
	 * On ia64, we ignore O_SYNC because we cannot tolerate memory attribute aliases.
	 */
	return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#elif defined(CONFIG_PPC64)
	/* On PPC64, we always do non-cacheable access to the IO hole and
	 * cacheable elsewhere. Cache paradox can checkstop the CPU and
	 * the high_memory heuristic below is wrong on machines with memory
	 * above the IO hole... Ah, and of course, XFree86 doesn't pass
	 * O_SYNC when mapping us to tap IO space. Surprised?
	 */
	return !page_is_ram(addr);
#else
	/*
	 * Accessing memory above the top the kernel knows about or through a file pointer
	 * that was marked O_SYNC will be done non-cached.
	 */
	if (file->f_flags & O_SYNC)
		return 1;
	return addr >= __pa(high_memory);
#endif
}
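The value returned by uncached_access() is only meaningful to a caller that turns it into a page protection. A hypothetical helper showing that step (pgprot_noncached() is the standard primitive; the wrapper name is made up for illustration):

static pgprot_t mem_mmap_prot(struct file *file, unsigned long offset,
			      pgprot_t prot)
{
	/* Map non-RAM (or O_SYNC) accesses with caching disabled. */
	if (uncached_access(file, offset))
		return pgprot_noncached(prot);
	return prot;
}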
Example #7
/*
 * MEMORY_HOTPLUG depends on SPARSEMEM in mm/Kconfig, so it is
 * OK to have direct references to sparsemem variables in here.
 */
static int
memory_block_action(unsigned long phys_index, unsigned long action)
{
	int i;
	unsigned long start_pfn, start_paddr;
	unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
	struct page *first_page;
	int ret;

	first_page = pfn_to_page(phys_index << PFN_SECTION_SHIFT);

	/*
	 * The probe routines leave the pages reserved, just
	 * as the bootmem code does.  Make sure they're still
	 * that way.
	 */
	if (action == MEM_ONLINE) {
		for (i = 0; i < nr_pages; i++) {
			if (page_is_ram(page_to_pfn(first_page+i))) {
				if (PageReserved(first_page+i))
					continue;

				printk(KERN_WARNING
					"section number %ld page number"
					" %d not reserved, was it "
					"already online?\n",
					phys_index, i);
				return -EBUSY;
			}
		}
	}

	switch (action) {
		case MEM_ONLINE:
			start_pfn = page_to_pfn(first_page);
			ret = online_pages(start_pfn, nr_pages);
			break;
		case MEM_OFFLINE:
			start_paddr = page_to_pfn(first_page) << PAGE_SHIFT;
			ret = remove_memory(start_paddr,
					    nr_pages << PAGE_SHIFT);
			break;
		default:
			WARN(1, KERN_WARNING "%s(%ld, %ld) unknown action: "
			     "%ld\n", __func__, phys_index, action, action);
			ret = -EINVAL;
	}

	return ret;
}
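For orientation, memory_block_action() sits under the memory sysfs state machine: writing "online" or "offline" to a block's state file eventually reaches it with MEM_ONLINE or MEM_OFFLINE. A hedged sketch of that plumbing (field names follow older drivers/base/memory.c and should be treated as assumptions):

static int memory_block_change_state(struct memory_block *mem,
				     unsigned long to_state)
{
	int ret;

	mutex_lock(&mem->state_mutex);
	ret = memory_block_action(mem->start_section_nr, to_state);
	if (!ret)
		mem->state = to_state;	/* only commit on success */
	mutex_unlock(&mem->state_mutex);

	return ret;
}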
Example #8
/*
 * On 64-bit we don't want to invoke hash_page on user addresses from
 * interrupt context, so if the access faults, we read the page tables
 * to find which page (if any) is mapped and access it directly.
 */
static int read_user_stack_slow(void __user *ptr, void *buf, int nb)
{
	int ret = -EFAULT;
	pgd_t *pgdir;
	pte_t *ptep, pte;
	unsigned shift;
	unsigned long addr = (unsigned long) ptr;
	unsigned long offset;
	unsigned long pfn, flags;
	void *kaddr;

	pgdir = current->mm->pgd;
	if (!pgdir)
		return -EFAULT;

	local_irq_save(flags);
	ptep = find_linux_pte_or_hugepte(pgdir, addr, NULL, &shift);
	if (!ptep)
		goto err_out;
	if (!shift)
		shift = PAGE_SHIFT;

	/* align address to page boundary */
	offset = addr & ((1UL << shift) - 1);

	pte = READ_ONCE(*ptep);
	if (!pte_present(pte) || !pte_user(pte))
		goto err_out;
	pfn = pte_pfn(pte);
	if (!page_is_ram(pfn))
		goto err_out;

	/* no highmem to worry about here */
	kaddr = pfn_to_kaddr(pfn);
	memcpy(buf, kaddr + offset, nb);
	ret = 0;
err_out:
	local_irq_restore(flags);
	return ret;
}
Example #9
/*
 * On 64-bit we don't want to invoke hash_page on user addresses from
 * interrupt context, so if the access faults, we read the page tables
 * to find which page (if any) is mapped and access it directly.
 */
static int read_user_stack_slow(void __user *ptr, void *ret, int nb)
{
	pgd_t *pgdir;
	pte_t *ptep, pte;
	int pagesize;
	unsigned long addr = (unsigned long) ptr;
	unsigned long offset;
	unsigned long pfn;
	void *kaddr;

	pgdir = current->mm->pgd;
	if (!pgdir)
		return -EFAULT;

	pagesize = get_slice_psize(current->mm, addr);

	/* align address to page boundary */
	offset = addr & ((1ul << mmu_psize_defs[pagesize].shift) - 1);
	addr -= offset;

	if (is_huge_psize(pagesize))
		ptep = huge_pte_offset(current->mm, addr);
	else
		ptep = find_linux_pte(pgdir, addr);

	if (ptep == NULL)
		return -EFAULT;
	pte = *ptep;
	if (!pte_present(pte) || !(pte_val(pte) & _PAGE_USER))
		return -EFAULT;
	pfn = pte_pfn(pte);
	if (!page_is_ram(pfn))
		return -EFAULT;

	/* no highmem to worry about here */
	kaddr = pfn_to_kaddr(pfn);
	memcpy(ret, kaddr + offset, nb);
	return 0;
}
Example #10
/*
 * On 64-bit we don't want to invoke hash_page on user addresses from
 * interrupt context, so if the access faults, we read the page tables
 * to find which page (if any) is mapped and access it directly.
 */
static int read_user_stack_slow(void __user *ptr, void *ret, int nb)
{
	pgd_t *pgdir;
	pte_t *ptep, pte;
	unsigned shift;
	unsigned long addr = (unsigned long) ptr;
	unsigned long offset;
	unsigned long pfn;
	void *kaddr;

	pgdir = current->mm->pgd;
	if (!pgdir)
		return -EFAULT;

	ptep = find_linux_pte_or_hugepte(pgdir, addr, &shift);
	if (!shift)
		shift = PAGE_SHIFT;

	/* align address to page boundary */
	offset = addr & ((1UL << shift) - 1);
	addr -= offset;

	if (ptep == NULL)
		return -EFAULT;
	pte = *ptep;
	if (!pte_present(pte) || !(pte_val(pte) & _PAGE_USER))
		return -EFAULT;
	pfn = pte_pfn(pte);
	if (!page_is_ram(pfn))
		return -EFAULT;

	/* no highmem to worry about here */
	kaddr = pfn_to_kaddr(pfn);
	memcpy(ret, kaddr + offset, nb);
	return 0;
}
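All three read_user_stack_slow() variants above are fallbacks: the fast path attempts an inatomic user access first and only walks the page tables when that faults. A sketch of the 64-bit caller, after arch/powerpc/perf/callchain.c (treat the details as approximate):

static int read_user_stack_64(unsigned long __user *ptr, unsigned long *ret)
{
	if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned long) ||
	    ((unsigned long)ptr & 7))
		return -EFAULT;

	pagefault_disable();
	if (!__get_user_inatomic(*ret, ptr)) {
		pagefault_enable();
		return 0;		/* fast path succeeded */
	}
	pagefault_enable();

	/* Faulted: fall back to the page-table walk above. */
	return read_user_stack_slow(ptr, ret, sizeof(unsigned long));
}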
Example #11
/*
 *  Read from a diskdump-created dumpfile.
 */
int
read_diskdump(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr)
{
	int ret;
	physaddr_t curpaddr;
	ulong pfn, page_offset;

	pfn = paddr_to_pfn(paddr);

	if (KDUMP_SPLIT()) {
		/* Find proper dd */
		int i;
		unsigned long start_pfn;
		unsigned long end_pfn;

		for (i=0; i<num_dumpfiles; i++) {
			start_pfn = dd_list[i]->sub_header_kdump->start_pfn;
			end_pfn = dd_list[i]->sub_header_kdump->end_pfn;
			if ((pfn >= start_pfn) && (pfn <= end_pfn))	{
				dd = dd_list[i];
				break;
			}
		}

		if (i == num_dumpfiles) {
			if (CRASHDEBUG(8))
				fprintf(fp, "read_diskdump: SEEK_ERROR: "
				    "paddr/pfn %llx/%lx beyond last dumpfile\n",
					(ulonglong)paddr, pfn);
			return SEEK_ERROR;
		}
	}

	curpaddr = paddr & ~((physaddr_t)(dd->block_size-1));
	page_offset = paddr & ((physaddr_t)(dd->block_size-1));

	if ((pfn >= dd->header->max_mapnr) || !page_is_ram(pfn)) {
		if (CRASHDEBUG(8)) {
			fprintf(fp, "read_diskdump: SEEK_ERROR: "
			    "paddr/pfn: %llx/%lx ",
				(ulonglong)paddr, pfn);
			if (pfn >= dd->header->max_mapnr)
				fprintf(fp, "max_mapnr: %x\n",
					dd->header->max_mapnr);
			else
				fprintf(fp, "!page_is_ram\n");
		}

		return SEEK_ERROR;
	}

	if (!page_is_dumpable(pfn)) {
		if ((dd->flags & (ZERO_EXCLUDED|ERROR_EXCLUDED)) ==
		    ERROR_EXCLUDED) {
			if (CRASHDEBUG(8))
				fprintf(fp, "read_diskdump: PAGE_EXCLUDED: "
			    	    "paddr/pfn: %llx/%lx\n",
					(ulonglong)paddr, pfn);
			return PAGE_EXCLUDED;
		}
		if (CRASHDEBUG(8))
			fprintf(fp, "read_diskdump: zero-fill: "
		    	    "paddr/pfn: %llx/%lx\n",
				(ulonglong)paddr, pfn);
		memset(bufptr, 0, cnt);
		return cnt;
	}

	if (!page_is_cached(curpaddr)) {
		if (CRASHDEBUG(8))
			fprintf(fp, "read_diskdump: paddr/pfn: %llx/%lx"
			    " -> cache physical page: %llx\n",
				(ulonglong)paddr, pfn, (ulonglong)curpaddr);

		if ((ret = cache_page(curpaddr)) < 0) {
			if (CRASHDEBUG(8))
				fprintf(fp, "read_diskdump: " 
				    "%s: cannot cache page: %llx\n",
					ret == SEEK_ERROR ?  
					"SEEK_ERROR" : "READ_ERROR",
					(ulonglong)curpaddr);
			return ret;
		}
	} else if (CRASHDEBUG(8))
		fprintf(fp, "read_diskdump: paddr/pfn: %llx/%lx"
		    " -> physical page is cached: %llx\n", 
			(ulonglong)paddr, pfn, (ulonglong)curpaddr);
	
	memcpy(bufptr, dd->curbufptr + page_offset, cnt);
	return cnt;
}
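Since read_diskdump() serves a request out of a single cached dump page, a caller whose physical read straddles a block boundary has to split it. A hypothetical wrapper sketch (read_phys_range() is made up for illustration; READ_ERROR is the usual crash-utility error code):

static int read_phys_range(int fd, void *buf, int cnt, physaddr_t paddr)
{
	while (cnt > 0) {
		/* Bytes left in the dump block containing paddr. */
		int chunk = dd->block_size -
			    (int)(paddr & (dd->block_size - 1));

		if (chunk > cnt)
			chunk = cnt;
		if (read_diskdump(fd, buf, chunk, 0, paddr) != chunk)
			return READ_ERROR;
		buf = (char *)buf + chunk;
		paddr += chunk;
		cnt -= chunk;
	}
	return 0;
}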
Example #12
void __init mem_init(void)
{
	extern int ppro_with_ram_bug(void);
	int codesize, reservedpages, datasize, initsize;
	int tmp;
	int bad_ppro;

#ifdef CONFIG_FLATMEM
	if (!mem_map)
		BUG();
#endif
	
	bad_ppro = ppro_with_ram_bug();

#ifdef CONFIG_HIGHMEM
	/* check that fixmap and pkmap do not overlap */
	if (PKMAP_BASE+LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) {
		printk(KERN_ERR "fixmap and kmap areas overlap - this will crash\n");
		printk(KERN_ERR "pkstart: %lxh pkend: %lxh fixstart %lxh\n",
				PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE, FIXADDR_START);
		BUG();
	}
#endif
 
	set_max_mapnr_init();

#ifdef CONFIG_HIGHMEM
	high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
#else
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
#endif

	/* this will put all low memory onto the freelists */
	totalram_pages += free_all_bootmem();

	reservedpages = 0;
	for (tmp = 0; tmp < max_low_pfn; tmp++)
		/*
		 * Only count reserved RAM pages
		 */
		if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
			reservedpages++;

	set_highmem_pages_init(bad_ppro);

	codesize =  (unsigned long) &_etext - (unsigned long) &_text;
	datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;

	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT); 
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START, 
		   VMALLOC_END-VMALLOC_START);

	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		initsize >> 10,
		(unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
	       );

#ifdef CONFIG_X86_PAE
	if (!cpu_has_pae)
		panic("cannot execute a PAE-enabled kernel on a PAE-less CPU!");
#endif
	if (boot_cpu_data.wp_works_ok < 0)
		test_wp_bit();

	/*
	 * Subtle. SMP is doing its boot stuff late (because it has to
	 * fork idle threads) - but it also needs low mappings for the
	 * protected-mode entry to work. We zap these entries only after
	 * the WP-bit has been tested.
	 */
#ifndef CONFIG_SMP
	zap_low_mappings();
#endif
}
Example #13
void __iomem *
__ioremap_caller(phys_addr_t addr, unsigned long size, pgprot_t prot, void *caller)
{
	unsigned long v, i;
	phys_addr_t p;
	int err;

	/*
	 * Choose an address to map it to.
	 * Once the vmalloc system is running, we use it.
	 * Before then, we use space going down from IOREMAP_TOP
	 * (ioremap_bot records where we're up to).
	 */
	p = addr & PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - p;

	/*
	 * If the address lies within the first 16 MB, assume it's in ISA
	 * memory space
	 */
	if (p < 16*1024*1024)
		p += _ISA_MEM_BASE;

#ifndef CONFIG_CRASH_DUMP
	/*
	 * Don't allow anybody to remap normal RAM that we're using.
	 * mem_init() sets high_memory so only do the check after that.
	 */
	if (slab_is_available() && p <= virt_to_phys(high_memory - 1) &&
	    page_is_ram(__phys_to_pfn(p))) {
		printk("__ioremap(): phys addr 0x%llx is RAM lr %ps\n",
		       (unsigned long long)p, __builtin_return_address(0));
		return NULL;
	}
#endif

	if (size == 0)
		return NULL;

	/*
	 * Is it already mapped?  Perhaps overlapped by a previous
	 * mapping.
	 */
	v = p_block_mapped(p);
	if (v)
		goto out;

	if (slab_is_available()) {
		struct vm_struct *area;
		area = get_vm_area_caller(size, VM_IOREMAP, caller);
		if (area == 0)
			return NULL;
		area->phys_addr = p;
		v = (unsigned long) area->addr;
	} else {
		v = (ioremap_bot -= size);
	}

	/*
	 * Should check if it is a candidate for a BAT mapping
	 */

	err = 0;
	for (i = 0; i < size && err == 0; i += PAGE_SIZE)
		err = map_kernel_page(v + i, p + i, prot);
	if (err) {
		if (slab_is_available())
			vunmap((void *)v);
		return NULL;
	}

out:
	return (void __iomem *) (v + ((unsigned long)addr & ~PAGE_MASK));
}