Example No. 1
/*H:330
 * (i) Looking up a page table entry when the Guest faults.
 *
 * We saw this call in run_guest(): when we see a page fault in the Guest, we
 * come here.  That's because we only set up the shadow page tables lazily as
 * they're needed, so we get page faults all the time and quietly fix them up
 * and return to the Guest without it knowing.
 *
 * If we fixed up the fault (ie. we mapped the address), this routine returns
 * true.  Otherwise, it was a real fault and we need to tell the Guest.
 */
bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
{
	pgd_t gpgd;
	pgd_t *spgd;
	unsigned long gpte_ptr;
	pte_t gpte;
	pte_t *spte;

	/* Mid level for PAE. */
#ifdef CONFIG_X86_PAE
	pmd_t *spmd;
	pmd_t gpmd;
#endif

	/* First step: get the top-level Guest page table entry. */
	if (unlikely(cpu->linear_pages)) {
		/* Faking up a linear mapping. */
		gpgd = __pgd(CHECK_GPGD_MASK);
	} else {
		gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t);
		/* Toplevel not present?  We can't map it in. */
		if (!(pgd_flags(gpgd) & _PAGE_PRESENT))
			return false;
	}

	/* Now look at the matching shadow entry. */
	spgd = spgd_addr(cpu, cpu->cpu_pgd, vaddr);
	if (!(pgd_flags(*spgd) & _PAGE_PRESENT)) {
		/* No shadow entry: allocate a new shadow PTE page. */
		unsigned long ptepage = get_zeroed_page(GFP_KERNEL);
		/*
		 * This is not really the Guest's fault, but killing it is
		 * simple for this corner case.
		 */
		if (!ptepage) {
			kill_guest(cpu, "out of memory allocating pte page");
			return false;
		}
		/* We check that the Guest pgd is OK. */
		check_gpgd(cpu, gpgd);
		/*
		 * And we copy the flags to the shadow PGD entry.  The page
		 * number in the shadow PGD is the page we just allocated.
		 */
		set_pgd(spgd, __pgd(__pa(ptepage) | pgd_flags(gpgd)));
	}

#ifdef CONFIG_X86_PAE
	if (unlikely(cpu->linear_pages)) {
		/* Faking up a linear mapping. */
		gpmd = __pmd(_PAGE_TABLE);
	} else {
		gpmd = lgread(cpu, gpmd_addr(gpgd, vaddr), pmd_t);
		/* Middle level not present?  We can't map it in. */
		if (!(pmd_flags(gpmd) & _PAGE_PRESENT))
			return false;
	}

	/* Now look at the matching shadow entry. */
	spmd = spmd_addr(cpu, *spgd, vaddr);

	if (!(pmd_flags(*spmd) & _PAGE_PRESENT)) {
		/* No shadow entry: allocate a new shadow PTE page. */
		unsigned long ptepage = get_zeroed_page(GFP_KERNEL);

		/*
		 * This is not really the Guest's fault, but killing it is
		 * simple for this corner case.
		 */
		if (!ptepage) {
			kill_guest(cpu, "out of memory allocating pte page");
			return false;
		}

		/* We check that the Guest pmd is OK. */
		check_gpmd(cpu, gpmd);

		/*
		 * And we copy the flags to the shadow PMD entry.  The page
		 * number in the shadow PMD is the page we just allocated.
		 */
		set_pmd(spmd, __pmd(__pa(ptepage) | pmd_flags(gpmd)));
	}

	/*
	 * OK, now we look at the lower level in the Guest page table: keep its
	 * address, because we might update it later.
	 */
	gpte_ptr = gpte_addr(cpu, gpmd, vaddr);
#else
	/*
	 * OK, now we look at the lower level in the Guest page table: keep its
	 * address, because we might update it later.
	 */
	gpte_ptr = gpte_addr(cpu, gpgd, vaddr);
#endif

	if (unlikely(cpu->linear_pages)) {
		/* Linear?  Make up a PTE which points to the same page. */
		gpte = __pte((vaddr & PAGE_MASK) | _PAGE_RW | _PAGE_PRESENT);
	} else {
		/* Read the actual PTE value. */
		gpte = lgread(cpu, gpte_ptr, pte_t);
	}

	/* If this page isn't in the Guest page tables, we can't page it in. */
	if (!(pte_flags(gpte) & _PAGE_PRESENT))
		return false;

	/*
	 * Check they're not trying to write to a page the Guest wants
	 * read-only (bit 2 of errcode == write).
	 */
	if ((errcode & 2) && !(pte_flags(gpte) & _PAGE_RW))
		return false;

	/* User access to a kernel-only page? (bit 3 == user access) */
	if ((errcode & 4) && !(pte_flags(gpte) & _PAGE_USER))
		return false;

	/*
	 * Check that the Guest PTE flags are OK, and the page number is below
	 * the pfn_limit (ie. not mapping the Launcher binary).
	 */
	check_gpte(cpu, gpte);

	/* Add the _PAGE_ACCESSED and (for a write) _PAGE_DIRTY flag */
	gpte = pte_mkyoung(gpte);
	if (errcode & 2)
		gpte = pte_mkdirty(gpte);

	/* Get the pointer to the shadow PTE entry we're going to set. */
	spte = spte_addr(cpu, *spgd, vaddr);

	/*
	 * If there was a valid shadow PTE entry here before, we release it.
	 * This can happen with a write to a previously read-only entry.
	 */
	release_pte(*spte);

	/*
	 * If this is a write, we insist that the Guest page is writable (the
	 * final arg to gpte_to_spte()).
	 */
	if (pte_dirty(gpte))
		*spte = gpte_to_spte(cpu, gpte, 1);
	else
		/*
		 * If this is a read, don't set the "writable" bit in the page
		 * table entry, even if the Guest says it's writable.  That way
		 * we will come back here when a write does actually occur, so
		 * we can update the Guest's _PAGE_DIRTY flag.
		 */
		set_pte(spte, gpte_to_spte(cpu, pte_wrprotect(gpte), 0));

	/*
	 * Finally, we write the Guest PTE entry back: we've set the
	 * _PAGE_ACCESSED and maybe the _PAGE_DIRTY flags.
	 */
	if (likely(!cpu->linear_pages))
		lgwrite(cpu, gpte_ptr, pte_t, gpte);

	/*
	 * The fault is fixed, the page table is populated, the mapping
	 * manipulated, the result returned and the code complete.  A small
	 * delay and a trace of alliteration are the only indications the Guest
	 * has that a page fault occurred at all.
	 */
	return true;
}
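For context, here is a minimal sketch (not taken from the lguest sources) of how the run_guest() fault path described in the header comment might use demand_page()'s return value; reflect_fault_to_guest() is a hypothetical placeholder for the real trap-delivery code:

/*
 * Illustrative sketch only (not lguest source): consume demand_page()'s
 * result in the page-fault path.  reflect_fault_to_guest() is a
 * hypothetical stand-in for the real trap-delivery code.
 */
static void handle_guest_page_fault(struct lg_cpu *cpu, unsigned long vaddr,
				    int errcode)
{
	/* Try to lazily populate the shadow page tables first. */
	if (demand_page(cpu, vaddr, errcode))
		return;		/* fixed up: resume the Guest, it never notices */

	/* A genuine fault: the Guest's own handler has to see it. */
	reflect_fault_to_guest(cpu, vaddr, errcode);	/* hypothetical */
}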
Example No. 2
/* First C function to be called on Xen boot */
asmlinkage void __init xen_start_kernel(void)
{
	pgd_t *pgd;

	if (!xen_start_info)
		return;

	xen_domain_type = XEN_PV_DOMAIN;

	/* Install Xen paravirt ops */
	pv_info = xen_info;
	pv_init_ops = xen_init_ops;
	pv_time_ops = xen_time_ops;
	pv_cpu_ops = xen_cpu_ops;
	pv_apic_ops = xen_apic_ops;
	pv_mmu_ops = xen_mmu_ops;

#ifdef CONFIG_X86_64
	/*
	 * Setup percpu state.  We only need to do this for 64-bit
	 * because 32-bit already has %fs set properly.
	 */
	load_percpu_segment(0);
#endif

	xen_init_irq_ops();
	xen_init_cpuid_mask();

#ifdef CONFIG_X86_LOCAL_APIC
	/*
	 * set up the basic apic ops.
	 */
	set_xen_basic_apic_ops();
#endif

	xen_setup_features();

	if (xen_feature(XENFEAT_mmu_pt_update_preserve_ad)) {
		pv_mmu_ops.ptep_modify_prot_start = xen_ptep_modify_prot_start;
		pv_mmu_ops.ptep_modify_prot_commit = xen_ptep_modify_prot_commit;
	}

	machine_ops = xen_machine_ops;

	/*
	 * The only reliable way to retain the initial address of the
	 * percpu gdt_page is to remember it here, so we can go and
	 * mark it RW later, when the initial percpu area is freed.
	 */
	xen_initial_gdt = &per_cpu(gdt_page, 0);

	xen_smp_init();

	/* Get mfn list */
	if (!xen_feature(XENFEAT_auto_translated_physmap))
		xen_build_dynamic_phys_to_machine();

	pgd = (pgd_t *)xen_start_info->pt_base;

	/* Prevent unwanted bits from being set in PTEs. */
	__supported_pte_mask &= ~_PAGE_GLOBAL;
	if (!xen_initial_domain())
		__supported_pte_mask &= ~(_PAGE_PWT | _PAGE_PCD);

#ifdef CONFIG_X86_64
	/* Work out if we support NX */
	check_efer();
#endif

	/* Don't do the full vcpu_info placement stuff until we have a
	   possible map and a non-dummy shared_info. */
	per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0];

	local_irq_disable();
	early_boot_irqs_off();

	xen_raw_console_write("mapping kernel into physical memory\n");
	pgd = xen_setup_kernel_pagetable(pgd, xen_start_info->nr_pages);

	init_mm.pgd = pgd;

	/* keep using Xen gdt for now; no urgent need to change it */

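	/*
	 * A paravirtualized kernel does not run in ring 0, so report a
	 * non-zero requested privilege level unless the hypervisor
	 * emulates supervisor mode for us.
	 */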
	pv_info.kernel_rpl = 1;
	if (xen_feature(XENFEAT_supervisor_mode_kernel))
		pv_info.kernel_rpl = 0;

	/* set the limit of our address space */
	xen_reserve_top();

#ifdef CONFIG_X86_32
	/* set up basic CPUID stuff */
	cpu_detect(&new_cpu_data);
	new_cpu_data.hard_math = 1;
	new_cpu_data.x86_capability[0] = cpuid_edx(1);
#endif

	/* Poke various useful things into boot_params */
	boot_params.hdr.type_of_loader = (9 << 4) | 0;
	boot_params.hdr.ramdisk_image = xen_start_info->mod_start
		? __pa(xen_start_info->mod_start) : 0;
	boot_params.hdr.ramdisk_size = xen_start_info->mod_len;
	boot_params.hdr.cmd_line_ptr = __pa(xen_start_info->cmd_line);

	if (!xen_initial_domain()) {
		add_preferred_console("xenboot", 0, NULL);
		add_preferred_console("tty", 0, NULL);
		add_preferred_console("hvc", 0, NULL);
	}

	xen_raw_console_write("about to get started...\n");

	/* Start the world */
#ifdef CONFIG_X86_32
	i386_start_kernel();
#else
	x86_64_start_reservations((char *)__pa_symbol(&boot_params));
#endif
}
Example No. 3
void nommu_dump_state(struct pt_regs *regs,
		      unsigned long ea, unsigned long vector)
{
	int i;
	unsigned long addr, stack = regs->sp;

	printk("\n\r[nommu_dump_state] :: ea %lx, vector %lx\n\r", ea, vector);

	printk("CPU #: %d\n"
	       "   PC: %08lx    SR: %08lx    SP: %08lx\n",
	       0, regs->pc, regs->sr, regs->sp);
	printk("GPR00: %08lx GPR01: %08lx GPR02: %08lx GPR03: %08lx\n",
	       0L, regs->gpr[1], regs->gpr[2], regs->gpr[3]);
	printk("GPR04: %08lx GPR05: %08lx GPR06: %08lx GPR07: %08lx\n",
	       regs->gpr[4], regs->gpr[5], regs->gpr[6], regs->gpr[7]);
	printk("GPR08: %08lx GPR09: %08lx GPR10: %08lx GPR11: %08lx\n",
	       regs->gpr[8], regs->gpr[9], regs->gpr[10], regs->gpr[11]);
	printk("GPR12: %08lx GPR13: %08lx GPR14: %08lx GPR15: %08lx\n",
	       regs->gpr[12], regs->gpr[13], regs->gpr[14], regs->gpr[15]);
	printk("GPR16: %08lx GPR17: %08lx GPR18: %08lx GPR19: %08lx\n",
	       regs->gpr[16], regs->gpr[17], regs->gpr[18], regs->gpr[19]);
	printk("GPR20: %08lx GPR21: %08lx GPR22: %08lx GPR23: %08lx\n",
	       regs->gpr[20], regs->gpr[21], regs->gpr[22], regs->gpr[23]);
	printk("GPR24: %08lx GPR25: %08lx GPR26: %08lx GPR27: %08lx\n",
	       regs->gpr[24], regs->gpr[25], regs->gpr[26], regs->gpr[27]);
	printk("GPR28: %08lx GPR29: %08lx GPR30: %08lx GPR31: %08lx\n",
	       regs->gpr[28], regs->gpr[29], regs->gpr[30], regs->gpr[31]);
	printk("  RES: %08lx oGPR11: %08lx\n",
	       regs->gpr[11], regs->orig_gpr11);

	printk("Process %s (pid: %d, stackpage=%08lx)\n",
	       ((struct task_struct *)(__pa(current)))->comm,
	       ((struct task_struct *)(__pa(current)))->pid,
	       (unsigned long)current);

	printk("\nStack: ");
	printk("Stack dump [0x%08lx]:\n", (unsigned long)stack);
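	/*
	 * Dump stack contents until the address crosses a THREAD_SIZE
	 * boundary or the depth limit is reached.
	 */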
	for (i = 0; i < kstack_depth_to_print; i++) {
		if (((long)stack & (THREAD_SIZE - 1)) == 0)
			break;
		stack++;

		printk("%lx :: sp + %02d: 0x%08lx\n", stack, i * 4,
		       *((unsigned long *)(__pa(stack))));
	}
	printk("\n");

	printk("Call Trace:   ");
	i = 1;
	while (((long)stack & (THREAD_SIZE - 1)) != 0) {
		addr = *((unsigned long *)__pa(stack));
		stack++;

		if (kernel_text_address(addr)) {
			if (i && ((i % 6) == 0))
				printk("\n ");
			printk(" [<%08lx>]", addr);
			i++;
		}
	}
	printk("\n");

	printk("\nCode: ");

	for (i = -24; i < 24; i++) {
		unsigned char c;
		c = ((unsigned char *)(__pa(regs->pc)))[i];

		if (i == 0)
			printk("(%02x) ", c);
		else
			printk("%02x ", c);
	}
	printk("\n");
}
Example No. 4
void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
{
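	/* The third argument is the bootmem "goal": prefer memory above MAX_DMA_ADDRESS. */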
	return __alloc_bootmem(size, align, __pa(MAX_DMA_ADDRESS));
}
Example No. 5
asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
{
	struct rtas_args args;
	unsigned long flags;
	char *buff_copy, *errbuf = NULL;
	int nargs;
	int rc;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

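	/* The first three 32-bit words of rtas_args are token, nargs and nret. */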
	if (copy_from_user(&args, uargs, 3 * sizeof(u32)) != 0)
		return -EFAULT;

	nargs = args.nargs;
	if (nargs > ARRAY_SIZE(args.args)
	    || args.nret > ARRAY_SIZE(args.args)
	    || nargs + args.nret > ARRAY_SIZE(args.args))
		return -EINVAL;

	/* Copy in args. */
	if (copy_from_user(args.args, uargs->args,
			   nargs * sizeof(rtas_arg_t)) != 0)
		return -EFAULT;

	if (args.token == RTAS_UNKNOWN_SERVICE)
		return -EINVAL;

	args.rets = &args.args[nargs];
	memset(args.rets, 0, args.nret * sizeof(rtas_arg_t));

	/* Need to handle ibm,suspend_me call specially */
	if (args.token == ibm_suspend_me_token) {
		rc = rtas_ibm_suspend_me(&args);
		if (rc)
			return rc;
		goto copy_return;
	}

	buff_copy = get_errorlog_buffer();

	flags = lock_rtas();

	rtas.args = args;
	enter_rtas(__pa(&rtas.args));
	args = rtas.args;

	/* A -1 return code indicates that the last command couldn't
	   be completed due to a hardware error. */
	if (args.rets[0] == -1)
		errbuf = __fetch_rtas_last_error(buff_copy);

	unlock_rtas(flags);

	if (buff_copy) {
		if (errbuf)
			log_error(errbuf, ERR_TYPE_RTAS_LOG, 0);
		kfree(buff_copy);
	}

 copy_return:
	/* Copy out args. */
	if (copy_to_user(uargs->args + nargs,
			 args.args + nargs,
			 args.nret * sizeof(rtas_arg_t)) != 0)
		return -EFAULT;

	return 0;
}
Example No. 6
/* We assume to be passed big endian arguments */
asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
{
	struct rtas_args args;
	unsigned long flags;
	char *buff_copy, *errbuf = NULL;
	int nargs, nret, token;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&args, uargs, 3 * sizeof(u32)) != 0)
		return -EFAULT;

	nargs = be32_to_cpu(args.nargs);
	nret  = be32_to_cpu(args.nret);
	token = be32_to_cpu(args.token);

	if (nargs > ARRAY_SIZE(args.args)
	    || nret > ARRAY_SIZE(args.args)
	    || nargs + nret > ARRAY_SIZE(args.args))
		return -EINVAL;

	/* Copy in args. */
	if (copy_from_user(args.args, uargs->args,
			   nargs * sizeof(rtas_arg_t)) != 0)
		return -EFAULT;

	if (token == RTAS_UNKNOWN_SERVICE)
		return -EINVAL;

	args.rets = &args.args[nargs];
	memset(args.rets, 0, nret * sizeof(rtas_arg_t));

	/* Need to handle ibm,suspend_me call specially */
	if (token == ibm_suspend_me_token) {

		/*
		 * rtas_ibm_suspend_me assumes the streamid handle is in cpu
		 * endian, or at least the hcall within it requires it.
		 */
		int rc = 0;
		u64 handle = ((u64)be32_to_cpu(args.args[0]) << 32)
		              | be32_to_cpu(args.args[1]);
		rc = rtas_ibm_suspend_me(handle);
		if (rc == -EAGAIN)
			args.rets[0] = cpu_to_be32(RTAS_NOT_SUSPENDABLE);
		else if (rc == -EIO)
			args.rets[0] = cpu_to_be32(-1);
		else if (rc)
			return rc;
		goto copy_return;
	}

	buff_copy = get_errorlog_buffer();

	flags = lock_rtas();

	rtas.args = args;
	enter_rtas(__pa(&rtas.args));
	args = rtas.args;

	/* A -1 return code indicates that the last command couldn't
	   be completed due to a hardware error. */
	if (be32_to_cpu(args.rets[0]) == -1)
		errbuf = __fetch_rtas_last_error(buff_copy);

	unlock_rtas(flags);

	if (buff_copy) {
		if (errbuf)
			log_error(errbuf, ERR_TYPE_RTAS_LOG, 0);
		kfree(buff_copy);
	}

 copy_return:
	/* Copy out args. */
	if (copy_to_user(uargs->args + nargs,
			 args.args + nargs,
			 nret * sizeof(rtas_arg_t)) != 0)
		return -EFAULT;

	return 0;
}
Example No. 7
/**
 * machine_specific_memory_setup - Hook for machine specific memory setup.
 **/
char * __init xen_memory_setup(void)
{
	static struct e820entry map[E820MAX] __initdata;

	unsigned long max_pfn = xen_start_info->nr_pages;
	unsigned long long mem_end;
	int rc;
	struct xen_memory_map memmap;
	unsigned long max_pages;
	unsigned long extra_pages = 0;
	int i;
	int op;

	max_pfn = min(MAX_DOMAIN_PAGES, max_pfn);
	mem_end = PFN_PHYS(max_pfn);

	memmap.nr_entries = E820MAX;
	set_xen_guest_handle(memmap.buffer, map);

	op = xen_initial_domain() ?
		XENMEM_machine_memory_map :
		XENMEM_memory_map;
	rc = HYPERVISOR_memory_op(op, &memmap);
	if (rc == -ENOSYS) {
		BUG_ON(xen_initial_domain());
		memmap.nr_entries = 1;
		map[0].addr = 0ULL;
		map[0].size = mem_end;
		/* 8MB slack (to balance backend allocations). */
		map[0].size += 8ULL << 20;
		map[0].type = E820_RAM;
		rc = 0;
	}
	BUG_ON(rc);

	/* Make sure the Xen-supplied memory map is well-ordered. */
	sanitize_e820_map(map, memmap.nr_entries, &memmap.nr_entries);

	max_pages = xen_get_max_pages();
	if (max_pages > max_pfn)
		extra_pages += max_pages - max_pfn;

	/*
	 * Set P2M for all non-RAM pages and E820 gaps to be identity
	 * type PFNs.  Any RAM pages that would be made inaccessible by
	 * this are first released.
	 */
	xen_released_pages = xen_set_identity_and_release(
		map, memmap.nr_entries, max_pfn);
	extra_pages += xen_released_pages;

	/*
	 * Clamp the amount of extra memory to a EXTRA_MEM_RATIO
	 * factor the base size.  On non-highmem systems, the base
	 * size is the full initial memory allocation; on highmem it
	 * is limited to the max size of lowmem, so that it doesn't
	 * get completely filled.
	 *
	 * In principle there could be a problem in lowmem systems if
	 * the initial memory is also very large with respect to
	 * lowmem, but we won't try to deal with that here.
	 */
	extra_pages = min(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
			  extra_pages);

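	/*
	 * Walk the E820 map, splitting entries as needed: RAM below
	 * mem_end is kept, RAM above it is handed out as "extra" memory
	 * (up to extra_pages) or marked unusable.
	 */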
	i = 0;
	while (i < memmap.nr_entries) {
		u64 addr = map[i].addr;
		u64 size = map[i].size;
		u32 type = map[i].type;

		if (type == E820_RAM) {
			if (addr < mem_end) {
				size = min(size, mem_end - addr);
			} else if (extra_pages) {
				size = min(size, (u64)extra_pages * PAGE_SIZE);
				extra_pages -= size / PAGE_SIZE;
				xen_add_extra_mem(addr, size);
			} else
				type = E820_UNUSABLE;
		}

		xen_align_and_add_e820_region(addr, size, type);

		map[i].addr += size;
		map[i].size -= size;
		if (map[i].size == 0)
			i++;
	}

	/*
	 * In domU, the ISA region is normal, usable memory, but we
	 * reserve ISA memory anyway because too many things poke
	 * about in there.
	 */
	e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS,
			E820_RESERVED);

	/*
	 * Reserve Xen bits:
	 *  - mfn_list
	 *  - xen_start_info
	 * See comment above "struct start_info" in <xen/interface/xen.h>
	 */
	memblock_reserve(__pa(xen_start_info->mfn_list),
			 xen_start_info->pt_base - xen_start_info->mfn_list);

	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);

	return "Xen";
}
Example No. 8
static int
cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
{
	struct vcpu_guest_context *ctxt;
	struct desc_struct *gdt;
	unsigned long gdt_mfn;

	/* used to tell cpu_init() that it can proceed with initialization */
	cpumask_set_cpu(cpu, cpu_callout_mask);
	if (cpumask_test_and_set_cpu(cpu, xen_cpu_initialized_map))
		return 0;

	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
	if (ctxt == NULL)
		return -ENOMEM;

	gdt = get_cpu_gdt_rw(cpu);

#ifdef CONFIG_X86_32
	ctxt->user_regs.fs = __KERNEL_PERCPU;
	ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
#endif
	memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));

	/*
	 * Bring up the CPU in cpu_bringup_and_idle() with the stack
	 * pointing just below where pt_regs would be if it were a normal
	 * kernel entry.
	 */
	ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
	ctxt->flags = VGCF_IN_KERNEL;
	ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
	ctxt->user_regs.ds = __USER_DS;
	ctxt->user_regs.es = __USER_DS;
	ctxt->user_regs.ss = __KERNEL_DS;
	ctxt->user_regs.cs = __KERNEL_CS;
	ctxt->user_regs.esp = (unsigned long)task_pt_regs(idle);

	xen_copy_trap_info(ctxt->trap_ctxt);

	ctxt->ldt_ents = 0;

	BUG_ON((unsigned long)gdt & ~PAGE_MASK);

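	/*
	 * Xen requires the GDT frame to be read-only before it will load
	 * it, so mark both the kernel mapping and the lowmem alias of the
	 * frame RO.
	 */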
	gdt_mfn = arbitrary_virt_to_mfn(gdt);
	make_lowmem_page_readonly(gdt);
	make_lowmem_page_readonly(mfn_to_virt(gdt_mfn));

	ctxt->gdt_frames[0] = gdt_mfn;
	ctxt->gdt_ents      = GDT_ENTRIES;

	/*
	 * Set SS:SP that Xen will use when entering guest kernel mode
	 * from guest user mode.  Subsequent calls to load_sp0() can
	 * change this value.
	 */
	ctxt->kernel_ss = __KERNEL_DS;
	ctxt->kernel_sp = task_top_of_stack(idle);

#ifdef CONFIG_X86_32
	ctxt->event_callback_cs     = __KERNEL_CS;
	ctxt->failsafe_callback_cs  = __KERNEL_CS;
#else
	ctxt->gs_base_kernel = per_cpu_offset(cpu);
#endif
	ctxt->event_callback_eip    =
		(unsigned long)xen_hypervisor_callback;
	ctxt->failsafe_callback_eip =
		(unsigned long)xen_failsafe_callback;
	per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);

	ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_gfn(swapper_pg_dir));
	if (HYPERVISOR_vcpu_op(VCPUOP_initialise, xen_vcpu_nr(cpu), ctxt))
		BUG();

	kfree(ctxt);
	return 0;
}
Example No. 9
/**
 *	ps3stor_setup - Setup a storage device before use
 *	@dev: Pointer to a struct ps3_storage_device
 *	@handler: Pointer to an interrupt handler
 *
 *	Returns 0 for success, or an error code
 */
int ps3stor_setup(struct ps3_storage_device *dev, irq_handler_t handler)
{
	int error, res, alignment;
	enum ps3_dma_page_size page_size;

	error = ps3stor_open_hv_device(&dev->sbd);
	if (error) {
		dev_err(&dev->sbd.core,
			"%s:%u: ps3_open_hv_device failed %d\n", __func__,
			__LINE__, error);
		goto fail;
	}

	error = ps3_sb_event_receive_port_setup(&dev->sbd, PS3_BINDING_CPU_ANY,
						&dev->irq);
	if (error) {
		dev_err(&dev->sbd.core,
			"%s:%u: ps3_sb_event_receive_port_setup failed %d\n",
		       __func__, __LINE__, error);
		goto fail_close_device;
	}

	error = request_irq(dev->irq, handler, IRQF_DISABLED,
			    dev->sbd.core.driver->name, dev);
	if (error) {
		dev_err(&dev->sbd.core, "%s:%u: request_irq failed %d\n",
			__func__, __LINE__, error);
		goto fail_sb_event_receive_port_destroy;
	}

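	/*
	 * Pick the DMA page size from the bounce buffer's alignment:
	 * less than 4K (2^12) is an error, 4K-64K uses 4K DMA pages,
	 * 64K (2^16) or better uses 64K pages.
	 */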
	alignment = min(__ffs(dev->bounce_size),
			__ffs((unsigned long)dev->bounce_buf));
	if (alignment < 12) {
		dev_err(&dev->sbd.core,
			"%s:%u: bounce buffer not aligned (%lx at 0x%p)\n",
			__func__, __LINE__, dev->bounce_size, dev->bounce_buf);
		error = -EINVAL;
		goto fail_free_irq;
	} else if (alignment < 16)
		page_size = PS3_DMA_4K;
	else
		page_size = PS3_DMA_64K;
	dev->sbd.d_region = &dev->dma_region;
	ps3_dma_region_init(&dev->sbd, &dev->dma_region, page_size,
			    PS3_DMA_OTHER, dev->bounce_buf, dev->bounce_size);
	res = ps3_dma_region_create(&dev->dma_region);
	if (res) {
		dev_err(&dev->sbd.core, "%s:%u: cannot create DMA region\n",
			__func__, __LINE__);
		error = -ENOMEM;
		goto fail_free_irq;
	}

	dev->bounce_lpar = ps3_mm_phys_to_lpar(__pa(dev->bounce_buf));
	dev->bounce_dma = dma_map_single(&dev->sbd.core, dev->bounce_buf,
					 dev->bounce_size, DMA_BIDIRECTIONAL);
	if (!dev->bounce_dma) {
		dev_err(&dev->sbd.core, "%s:%u: map DMA region failed\n",
			__func__, __LINE__);
		error = -ENODEV;
		goto fail_free_dma;
	}

	error = ps3stor_probe_access(dev);
	if (error) {
		dev_err(&dev->sbd.core, "%s:%u: No accessible regions found\n",
			__func__, __LINE__);
		goto fail_unmap_dma;
	}
	return 0;

fail_unmap_dma:
	dma_unmap_single(&dev->sbd.core, dev->bounce_dma, dev->bounce_size,
			 DMA_BIDIRECTIONAL);
fail_free_dma:
	ps3_dma_region_free(&dev->dma_region);
fail_free_irq:
	free_irq(dev->irq, dev);
fail_sb_event_receive_port_destroy:
	ps3_sb_event_receive_port_destroy(&dev->sbd, dev->irq);
fail_close_device:
	ps3stor_close_hv_device(&dev->sbd);
fail:
	return error;
}
Example No. 10
/*
 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
 * Launcher to reboot us.
 */
static void lguest_restart(char *reason)
{
	hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
}
Example No. 11
/* The Guest calls lguest_set_pmd to set a top-level entry when !PAE. */
static void lguest_set_pmd(pmd_t *pmdp, pmd_t pmdval)
{
	native_set_pmd(pmdp, pmdval);
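	/*
	 * Tell the Host which page holds the entry (page-aligned physical
	 * address) and the index of the entry within that page.
	 */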
	lazy_hcall2(LHCALL_SET_PGD, __pa(pmdp) & PAGE_MASK,
		   (__pa(pmdp) & (PAGE_SIZE - 1)) / sizeof(pmd_t));
}
Example No. 12
/*
 * Panicking.
 *
 * Don't.  But if you did, this is what happens.
 */
static int lguest_panic(struct notifier_block *nb, unsigned long l, void *p)
{
	hcall(LHCALL_SHUTDOWN, __pa(p), LGUEST_SHUTDOWN_POWEROFF, 0, 0);
	/* The hcall won't return, but to keep gcc happy, we're "done". */
	return NOTIFY_DONE;
}
Example No. 13
/*
 * The SHUTDOWN hypercall takes a string to describe what's happening, and
 * an argument which says whether this is to restart (reboot) the Guest or not.
 *
 * Note that the Host always prefers that the Guest speak in physical addresses
 * rather than virtual addresses, so we use __pa() here.
 */
static void lguest_power_off(void)
{
	hcall(LHCALL_SHUTDOWN, __pa("Power down"),
	      LGUEST_SHUTDOWN_POWEROFF, 0, 0);
}
Example No. 14
void __init msm_msm7x2x_allocate_memory_regions(void)
{
	void *addr;
	unsigned long size;

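	/*
	 * Each enabled arena is carved out of bootmem; the platform data
	 * gets the physical address (__pa) of the allocation.
	 */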
	size = pmem_mdp_size;
	if (size) {
		addr = alloc_bootmem(size);
		android_pmem_pdata.start = __pa(addr);
		android_pmem_pdata.size = size;
		pr_info("allocating %lu bytes at %p (%lx physical) for mdp "
				"pmem arena\n", size, addr, __pa(addr));
	}

	size = pmem_adsp_size;
	if (size) {
		addr = alloc_bootmem(size);
		android_pmem_adsp_pdata.start = __pa(addr);
		android_pmem_adsp_pdata.size = size;
		pr_info("allocating %lu bytes at %p (%lx physical) for adsp "
				"pmem arena\n", size, addr, __pa(addr));
	}

	size = pmem_audio_size;
	if (size) {
		addr = alloc_bootmem(size);
		android_pmem_audio_pdata.start = __pa(addr);
		android_pmem_audio_pdata.size = size;
		pr_info("allocating %lu bytes (at %lx physical) for audio "
				"pmem arena\n", size , __pa(addr));
	}

	size = pmem_fb_size ? : MSM_FB_SIZE;
	addr = alloc_bootmem(size);
	msm_fb_resources[0].start = __pa(addr);
	msm_fb_resources[0].end = msm_fb_resources[0].start + size - 1;
	pr_info("allocating %lu bytes at %p (%lx physical) for fb\n",
			size, addr, __pa(addr));

	size = pmem_kernel_ebi1_size;
	if (size) {
		addr = alloc_bootmem_aligned(size, 0x100000);
		android_pmem_kernel_ebi1_pdata.start = __pa(addr);
		android_pmem_kernel_ebi1_pdata.size = size;
		pr_info("allocating %lu bytes at %p (%lx physical) for kernel"
				" ebi1 pmem arena\n", size, addr, __pa(addr));
	}
#ifdef CONFIG_ARCH_MSM7X27
	size = MSM_GPU_PHYS_SIZE;
	addr = alloc_bootmem(size);
	kgsl_resources[1].start = __pa(addr);
	kgsl_resources[1].end = kgsl_resources[1].start + size - 1;
	pr_info("allocating %lu bytes at %p (at %lx physical) for KGSL\n",
			size, addr, __pa(addr));
#endif

	// LGE_CHANGE_S [[email protected]] 2010-08-06, lge_mtd_direct_access
#ifdef CONFIG_MACH_MSM7X27_THUNDERC
	// PAGE_NUM_PER_BLK*PAGE_SIZE_BYTE
	lge_mtd_direct_access_addr = alloc_bootmem(64*2048);
#endif
	// LGE_CHANGE_E [[email protected]] 2010-08-06
}
Example No. 15
static int calxeda_idle_finish(unsigned long val)
{
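	/* PSCI wants the physical address of the resume entry point. */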
	return psci_ops.cpu_suspend(CALXEDA_IDLE_PARAM, __pa(cpu_resume));
}
Example No. 16
static int __init sh5pci_init(void)
{
	unsigned long memStart = __pa(memory_start);
	unsigned long memSize = __pa(memory_end) - memStart;
	u32 lsr0;
	u32 uval;

        if (request_irq(IRQ_ERR, pcish5_err_irq,
                        IRQF_DISABLED, "PCI Error",NULL) < 0) {
                printk(KERN_ERR "PCISH5: Cannot hook PCI_PERR interrupt\n");
                return -EINVAL;
        }

        if (request_irq(IRQ_SERR, pcish5_serr_irq,
                        IRQF_DISABLED, "PCI SERR interrupt", NULL) < 0) {
                printk(KERN_ERR "PCISH5: Cannot hook PCI_SERR interrupt\n");
                return -EINVAL;
        }

	pcicr_virt = (unsigned long)ioremap_nocache(SH5PCI_ICR_BASE, 1024);
	if (!pcicr_virt) {
		panic("Unable to remap PCICR\n");
	}

	PCI_IO_AREA = (unsigned long)ioremap_nocache(SH5PCI_IO_BASE, 0x10000);
	if (!PCI_IO_AREA) {
		panic("Unable to remap PCIIO\n");
	}

	/* Clear snoop registers */
        SH5PCI_WRITE(CSCR0, 0);
        SH5PCI_WRITE(CSCR1, 0);

        /* Switch off interrupts */
        SH5PCI_WRITE(INTM,  0);
        SH5PCI_WRITE(AINTM, 0);
        SH5PCI_WRITE(PINTM, 0);

        /* Set bus active, take it out of reset */
        uval = SH5PCI_READ(CR);

	/* Set command Register */
        SH5PCI_WRITE(CR, uval | CR_LOCK_MASK | CR_CFINT| CR_FTO | CR_PFE |
		     CR_PFCS | CR_BMAM);

	uval=SH5PCI_READ(CR);

        /* Allow it to be a master */
	/* NB - WE DISABLE I/O ACCESS to stop overlap */
        /* set WAIT bit to enable stepping, an attempt to improve stability */
	SH5PCI_WRITE_SHORT(CSR_CMD,
			    PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
			    PCI_COMMAND_WAIT);

        /*
        ** Set translation mapping memory in order to convert the address
        ** used for the main bus, to the PCI internal address.
        */
        SH5PCI_WRITE(MBR,0x40000000);

        /* Always set the max size 512M */
        SH5PCI_WRITE(MBMR, PCISH5_MEM_SIZCONV(512*1024*1024));

        /*
        ** I/O addresses are mapped at internal PCI specific address
        ** as is described into the configuration bridge table.
        ** These are changed to 0, to allow cards that have legacy
        ** io such as vga to function correctly. We set the SH5 IOBAR to
        ** 256K, which is a bit big as we can only have 64K of address space
        */

        SH5PCI_WRITE(IOBR,0x0);

        /* Set up a 256K window. Totally pointless waste  of address space */
        SH5PCI_WRITE(IOBMR,0);

	/* The SH5 has a HUGE 256K I/O region, which breaks the PCI spec.
	 * Ideally, we would want to map the I/O region somewhere, but it
	 * is so big this is not that easy!
         */
	SH5PCI_WRITE(CSR_IBAR0,~0);
	/* Set memory size value */
        memSize = memory_end - memory_start;

	/* Now we set up the mbars so the PCI bus can see the memory of
	 * the machine */
	if (memSize < (1024 * 1024)) {
                printk(KERN_ERR "PCISH5: Ridiculous memory size of 0x%lx?\n",
		       memSize);
                return -EINVAL;
        }

        /* Set LSR 0 */
        lsr0 = (memSize > (512 * 1024 * 1024)) ? 0x1ff00001 :
		((r2p2(memSize) - 0x100000) | 0x1);
        SH5PCI_WRITE(LSR0, lsr0);

        /* Set MBAR 0 */
        SH5PCI_WRITE(CSR_MBAR0, memory_start);
        SH5PCI_WRITE(LAR0, memory_start);

        SH5PCI_WRITE(CSR_MBAR1,0);
        SH5PCI_WRITE(LAR1,0);
        SH5PCI_WRITE(LSR1,0);

        /* Enable the PCI interrupts on the device */
        SH5PCI_WRITE(INTM,  ~0);
        SH5PCI_WRITE(AINTM, ~0);
        SH5PCI_WRITE(PINTM, ~0);

	sh5_pci_resources[0].start = PCI_IO_AREA;
	sh5_pci_resources[0].end = PCI_IO_AREA + 0x10000;

	sh5_pci_resources[1].start = memStart;
	sh5_pci_resources[1].end = memStart + memSize;

	return register_pci_controller(&sh5pci_controller);
}
Example No. 17
static unsigned long get_kcore_size(int *num_vma, size_t *elf_buflen)
{
	unsigned long size;
#ifndef NO_MM
	unsigned long try;
	struct vm_struct *m;
#endif

	*num_vma = 0;
	size = ((size_t)high_memory - PAGE_OFFSET + PAGE_SIZE);
#ifdef NO_MM
	/* vmlist is not available then */
	*elf_buflen = PAGE_SIZE;
	return size;
#else
	if (!vmlist) {
		*elf_buflen = PAGE_SIZE;
		return (size);
	}

	for (m=vmlist; m; m=m->next) {
		try = (unsigned long)m->addr + m->size;
		if (try > size)
			size = try;
		*num_vma = *num_vma + 1;
	}
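	/*
	 * The header buffer must hold the ELF header, one program header
	 * per vmalloc area plus two more (PT_NOTE and the direct-mapped
	 * PT_LOAD), and the three notes.
	 */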
	*elf_buflen =	sizeof(struct elfhdr) + 
			(*num_vma + 2)*sizeof(struct elf_phdr) + 
			3 * sizeof(struct memelfnote);
	*elf_buflen = PAGE_ALIGN(*elf_buflen);
	return (size - PAGE_OFFSET + *elf_buflen);
#endif
}


/*****************************************************************************/
/*
 * determine size of ELF note
 */
static int notesize(struct memelfnote *en)
{
	int sz;

	sz = sizeof(struct elf_note);
	sz += roundup(strlen(en->name), 4);
	sz += roundup(en->datasz, 4);

	return sz;
} /* end notesize() */

/*****************************************************************************/
/*
 * store a note in the header buffer
 */
static char *storenote(struct memelfnote *men, char *bufp)
{
	struct elf_note en;

#define DUMP_WRITE(addr,nr) do { memcpy(bufp,addr,nr); bufp += nr; } while(0)

	en.n_namesz = strlen(men->name);
	en.n_descsz = men->datasz;
	en.n_type = men->type;

	DUMP_WRITE(&en, sizeof(en));
	DUMP_WRITE(men->name, en.n_namesz);

	/* XXX - cast from long long to long to avoid need for libgcc.a */
	bufp = (char*) roundup((unsigned long)bufp,4);
	DUMP_WRITE(men->data, men->datasz);
	bufp = (char*) roundup((unsigned long)bufp,4);

#undef DUMP_WRITE

	return bufp;
} /* end storenote() */

/*
 * store an ELF coredump header in the supplied buffer
 * num_vma is the number of elements in vmlist
 */
static void elf_kcore_store_hdr(char *bufp, int num_vma, int dataoff)
{
	struct elf_prstatus prstatus;	/* NT_PRSTATUS */
	struct elf_prpsinfo prpsinfo;	/* NT_PRPSINFO */
	struct elf_phdr *nhdr, *phdr;
	struct elfhdr *elf;
	struct memelfnote notes[3];
	off_t offset = 0;
#ifndef NO_MM
	struct vm_struct *m;
#endif

	/* setup ELF header */
	elf = (struct elfhdr *) bufp;
	bufp += sizeof(struct elfhdr);
	offset += sizeof(struct elfhdr);
	memcpy(elf->e_ident, ELFMAG, SELFMAG);
	elf->e_ident[EI_CLASS]	= ELF_CLASS;
	elf->e_ident[EI_DATA]	= ELF_DATA;
	elf->e_ident[EI_VERSION]= EV_CURRENT;
	memset(elf->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);
	elf->e_type	= ET_CORE;
	elf->e_machine	= ELF_ARCH;
	elf->e_version	= EV_CURRENT;
	elf->e_entry	= 0;
	elf->e_phoff	= sizeof(struct elfhdr);
	elf->e_shoff	= 0;
	elf->e_flags	= 0;
	elf->e_ehsize	= sizeof(struct elfhdr);
	elf->e_phentsize= sizeof(struct elf_phdr);
	elf->e_phnum	= 2 + num_vma;
	elf->e_shentsize= 0;
	elf->e_shnum	= 0;
	elf->e_shstrndx	= 0;

	/* setup ELF PT_NOTE program header */
	nhdr = (struct elf_phdr *) bufp;
	bufp += sizeof(struct elf_phdr);
	offset += sizeof(struct elf_phdr);
	nhdr->p_type	= PT_NOTE;
	nhdr->p_offset	= 0;
	nhdr->p_vaddr	= 0;
	nhdr->p_paddr	= 0;
	nhdr->p_filesz	= 0;
	nhdr->p_memsz	= 0;
	nhdr->p_flags	= 0;
	nhdr->p_align	= 0;

	/* setup ELF PT_LOAD program header for the 
	 * virtual range 0xc0000000 -> high_memory */
	phdr = (struct elf_phdr *) bufp;
	bufp += sizeof(struct elf_phdr);
	offset += sizeof(struct elf_phdr);
	phdr->p_type	= PT_LOAD;
	phdr->p_flags	= PF_R|PF_W|PF_X;
	phdr->p_offset	= dataoff;
	phdr->p_vaddr	= PAGE_OFFSET;
	phdr->p_paddr	= __pa(PAGE_OFFSET);
	phdr->p_filesz	= phdr->p_memsz = ((unsigned long)high_memory - PAGE_OFFSET);
	phdr->p_align	= PAGE_SIZE;

#ifndef NO_MM
	/* setup ELF PT_LOAD program header for every vmalloc'd area */
	for (m=vmlist; m; m=m->next) {
		if (m->flags & VM_IOREMAP) /* don't dump ioremap'd stuff! (TA) */
			continue;

		phdr = (struct elf_phdr *) bufp;
		bufp += sizeof(struct elf_phdr);
		offset += sizeof(struct elf_phdr);

		phdr->p_type	= PT_LOAD;
		phdr->p_flags	= PF_R|PF_W|PF_X;
		phdr->p_offset	= (size_t)m->addr - PAGE_OFFSET + dataoff;
		phdr->p_vaddr	= (size_t)m->addr;
		phdr->p_paddr	= __pa(m->addr);
		phdr->p_filesz	= phdr->p_memsz	= m->size;
		phdr->p_align	= PAGE_SIZE;
	}
#endif /* NO_MM */

	/*
	 * Set up the notes in similar form to SVR4 core dumps made
	 * with info from their /proc.
	 */
	nhdr->p_offset	= offset;

	/* set up the process status */
	notes[0].name = "CORE";
	notes[0].type = NT_PRSTATUS;
	notes[0].datasz = sizeof(struct elf_prstatus);
	notes[0].data = &prstatus;

	memset(&prstatus, 0, sizeof(struct elf_prstatus));

	nhdr->p_filesz	= notesize(&notes[0]);
	bufp = storenote(&notes[0], bufp);

	/* set up the process info */
	notes[1].name	= "CORE";
	notes[1].type	= NT_PRPSINFO;
	notes[1].datasz	= sizeof(struct elf_prpsinfo);
	notes[1].data	= &prpsinfo;

	memset(&prpsinfo, 0, sizeof(struct elf_prpsinfo));
	prpsinfo.pr_state	= 0;
	prpsinfo.pr_sname	= 'R';
	prpsinfo.pr_zomb	= 0;

	strcpy(prpsinfo.pr_fname, "vmlinux");
	strncpy(prpsinfo.pr_psargs, saved_command_line, ELF_PRARGSZ);

	nhdr->p_filesz	= notesize(&notes[1]);
	bufp = storenote(&notes[1], bufp);

	/* set up the task structure */
	notes[2].name	= "CORE";
	notes[2].type	= NT_TASKSTRUCT;
	notes[2].datasz	= sizeof(struct task_struct);
	notes[2].data	= current;

	nhdr->p_filesz	= notesize(&notes[2]);
	bufp = storenote(&notes[2], bufp);

} /* end elf_kcore_store_hdr() */

/*****************************************************************************/
/*
 * read from the ELF header and then kernel memory
 */
static ssize_t read_kcore(struct file *file, char *buffer, size_t buflen, loff_t *fpos)
{
	ssize_t acc = 0;
	unsigned long size, tsz;
	size_t elf_buflen;
	int num_vma;
	unsigned long start;

#ifdef NO_MM
	proc_root_kcore->size = size = get_kcore_size(&num_vma, &elf_buflen);
#else
	read_lock(&vmlist_lock);
	proc_root_kcore->size = size = get_kcore_size(&num_vma, &elf_buflen);
	if (buflen == 0 || (unsigned long long)*fpos >= size) {
		read_unlock(&vmlist_lock);
		return 0;
	}
#endif /* NO_MM */

	/* trim buflen to not go beyond EOF */
	if (buflen > size - *fpos)
		buflen = size - *fpos;

	/* construct an ELF core header if we'll need some of it */
	if (*fpos < elf_buflen) {
		char * elf_buf;

		tsz = elf_buflen - *fpos;
		if (buflen < tsz)
			tsz = buflen;
		elf_buf = kmalloc(elf_buflen, GFP_ATOMIC);
		if (!elf_buf) {
			read_unlock(&vmlist_lock);
			return -ENOMEM;
		}
		memset(elf_buf, 0, elf_buflen);
		elf_kcore_store_hdr(elf_buf, num_vma, elf_buflen);
		read_unlock(&vmlist_lock);
		if (copy_to_user(buffer, elf_buf + *fpos, tsz)) {
			kfree(elf_buf);
			return -EFAULT;
		}
		kfree(elf_buf);
		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;

		/* leave now if filled buffer already */
		if (buflen == 0)
			return acc;
	} else
		read_unlock(&vmlist_lock);

	/* where page 0 not mapped, write zeros into buffer */
#if defined (__i386__) || defined (__mc68000__) || defined(__x86_64__)
	if (*fpos < PAGE_SIZE + elf_buflen) {
		/* work out how much to clear */
		tsz = PAGE_SIZE + elf_buflen - *fpos;
		if (buflen < tsz)
			tsz = buflen;

		/* write zeros to buffer */
		if (clear_user(buffer, tsz))
			return -EFAULT;
		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;

		/* leave now if filled buffer already */
		if (buflen == 0)
			return tsz;
	}
#endif
	
	/*
	 * Fill the remainder of the buffer from kernel VM space.
	 * We said in the ELF header that the data which starts
	 * at 'elf_buflen' is virtual address PAGE_OFFSET. --rmk
	 */
	start = PAGE_OFFSET + (*fpos - elf_buflen);
	if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
		tsz = buflen;
	while (buflen) {
		int err; 
	
		if ((start > PAGE_OFFSET) && (start < (unsigned long)high_memory)) {
			if (kern_addr_valid(start)) {
				err = copy_to_user(buffer, (char *)start, tsz);
			} else {
				err = clear_user(buffer, tsz);
			}
		} else {
#ifndef NO_MM
			char * elf_buf;
			struct vm_struct *m;
			unsigned long curstart = start;
			unsigned long cursize = tsz;

			elf_buf = kmalloc(tsz, GFP_KERNEL);
			if (!elf_buf)
				return -ENOMEM;
			memset(elf_buf, 0, tsz);

			read_lock(&vmlist_lock);
			for (m=vmlist; m && cursize; m=m->next) {
				unsigned long vmstart;
				unsigned long vmsize;
				unsigned long msize = m->size - PAGE_SIZE;

				if (((unsigned long)m->addr + msize) < 
								curstart)
					continue;
				if ((unsigned long)m->addr > (curstart + 
								cursize))
					break;
				vmstart = (curstart < (unsigned long)m->addr ? 
					(unsigned long)m->addr : curstart);
				if (((unsigned long)m->addr + msize) > 
							(curstart + cursize))
					vmsize = curstart + cursize - vmstart;
				else
					vmsize = (unsigned long)m->addr + 
							msize - vmstart;
				curstart = vmstart + vmsize;
				cursize -= vmsize;
				/* don't dump ioremap'd stuff! (TA) */
				if (m->flags & VM_IOREMAP)
					continue;
				memcpy(elf_buf + (vmstart - start),
					(char *)vmstart, vmsize);
			}
			read_unlock(&vmlist_lock);
			err = copy_to_user(buffer, elf_buf, tsz);
			kfree(elf_buf);
#endif /* NO_MM */
		}
		if (err)
			return -EFAULT;
		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;
		start += tsz;
		tsz = (buflen > PAGE_SIZE ? PAGE_SIZE : buflen);
	}

	return acc;
}
Example No. 18
static void __init msm_msm7x27_allocate_memory_regions(void)
{
	void *addr;
	unsigned long size;

	size = pmem_kernel_ebi1_size;
	if (size) {
		addr = alloc_bootmem_aligned(size, 0x100000);
		android_pmem_kernel_ebi1_pdata.start = __pa(addr);
		android_pmem_kernel_ebi1_pdata.size = size;
		pr_info("allocating %lu bytes at %p (%lx physical) for kernel"
			" ebi1 pmem arena\n", size, addr, __pa(addr));
	}

	size = pmem_mdp_size;
	if (size) {
		addr = alloc_bootmem(size);
		android_pmem_pdata.start = __pa(addr);
		android_pmem_pdata.size = size;
		pr_info("allocating %lu bytes at %p (%lx physical) for mdp "
			"pmem arena\n", size, addr, __pa(addr));
	}

	size = pmem_adsp_size;
	if (size) {
		addr = alloc_bootmem(size);
		android_pmem_adsp_pdata.start = __pa(addr);
		android_pmem_adsp_pdata.size = size;
		pr_info("allocating %lu bytes at %p (%lx physical) for adsp "
			"pmem arena\n", size, addr, __pa(addr));
	}

	size = pmem_gpu1_size;
	if (size) {
		addr = alloc_bootmem(size);
		android_pmem_gpu1_pdata.start = __pa(addr);
		android_pmem_gpu1_pdata.size = size;
		pr_info("allocating %lu bytes at %p (%lx physical) for gpu1 "
			"pmem arena\n", size, addr, __pa(addr));
	}

	size = fb_size ? : MSM_FB_SIZE;
	addr = alloc_bootmem(size);
	msm_fb_resources[0].start = __pa(addr);
	msm_fb_resources[0].end = msm_fb_resources[0].start + size - 1;
	pr_info("allocating %lu bytes at %p (%lx physical) for fb\n",
		size, addr, __pa(addr));

	size = gpu_phys_size ? : MSM_GPU_PHYS_SIZE;
	addr = alloc_bootmem(size);
	kgsl_resources[1].start = __pa(addr);
	kgsl_resources[1].end = kgsl_resources[1].start + size - 1;
	pr_info("allocating %lu bytes at %p (%lx physical) for KGSL\n",
		size, addr, __pa(addr));
}
Example No. 19
static ssize_t scanlog_read(struct file *file, char __user *buf,
			    size_t count, loff_t *ppos)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct proc_dir_entry *dp;
	unsigned int *data;
	int status;
	unsigned long len, off;
	unsigned int wait_time;

	dp = PDE(inode);
	data = (unsigned int *)dp->data;

	if (count > RTAS_DATA_BUF_SIZE)
		count = RTAS_DATA_BUF_SIZE;

	if (count < 1024) {
		/* This is the min supported by this RTAS call.  Rather
		 * than do all the buffering we insist the user code handle
		 * larger reads.  As long as cp works... :)
		 */
		printk(KERN_ERR "scanlog: cannot perform a small read (%ld)\n",
		       count);
		return -EINVAL;
	}

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;

	for (;;) {
		wait_time = 500;	/* default wait if no data */
		spin_lock(&rtas_data_buf_lock);
		memcpy(rtas_data_buf, data, RTAS_DATA_BUF_SIZE);
		status = rtas_call(ibm_scan_log_dump, 2, 1, NULL,
				   (u32) __pa(rtas_data_buf), (u32) count);
		memcpy(data, rtas_data_buf, RTAS_DATA_BUF_SIZE);
		spin_unlock(&rtas_data_buf_lock);

		pr_debug("scanlog: status=%d, data[0]=%x, data[1]=%x, "
			 "data[2]=%x\n", status, data[0], data[1], data[2]);
		switch (status) {
		case SCANLOG_COMPLETE:
			pr_debug("scanlog: hit eof\n");
			return 0;
		case SCANLOG_HWERROR:
			pr_debug("scanlog: hardware error reading data\n");
			return -EIO;
		case SCANLOG_CONTINUE:
			/* We may or may not have data yet */
			len = data[1];
			off = data[2];
			if (len > 0) {
				if (copy_to_user(buf, ((char *)data) + off, len))
					return -EFAULT;
				return len;
			}
			/* Break to sleep default time */
			break;
		default:
			/* Assume extended busy */
			wait_time = rtas_busy_delay_time(status);
			if (!wait_time) {
				printk(KERN_ERR "scanlog: unknown error "
				       "from rtas: %d\n", status);
				return -EIO;
			}
		}
		/* Apparently no data yet.  Wait and try again. */
		msleep_interruptible(wait_time);
	}
	/*NOTREACHED*/
}
Example No. 20
/**
 * machine_specific_memory_setup - Hook for machine specific memory setup.
 **/
char * __init xen_memory_setup(void)
{
	static struct e820entry map[E820MAX] __initdata;
	static struct e820entry map_raw[E820MAX] __initdata;

	unsigned long max_pfn = xen_start_info->nr_pages;
	unsigned long long mem_end;
	int rc;
	struct xen_memory_map memmap;
	unsigned long extra_pages = 0;
	unsigned long extra_limit;
	unsigned long identity_pages = 0;
	int i;
	int op;

	max_pfn = min(MAX_DOMAIN_PAGES, max_pfn);
	mem_end = PFN_PHYS(max_pfn);

	memmap.nr_entries = E820MAX;
	set_xen_guest_handle(memmap.buffer, map);

	op = xen_initial_domain() ?
		XENMEM_machine_memory_map :
		XENMEM_memory_map;
	rc = HYPERVISOR_memory_op(op, &memmap);
	if (rc == -ENOSYS) {
		BUG_ON(xen_initial_domain());
		memmap.nr_entries = 1;
		map[0].addr = 0ULL;
		map[0].size = mem_end;
		/* 8MB slack (to balance backend allocations). */
		map[0].size += 8ULL << 20;
		map[0].type = E820_RAM;
		rc = 0;
	}
	BUG_ON(rc);

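	/*
	 * Keep an untouched copy of the hypervisor's map: the identity
	 * mapping pass at the end wants the raw, non-sanitized E820.
	 */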
	memcpy(map_raw, map, sizeof(map));
	e820.nr_map = 0;
	xen_extra_mem_start = mem_end;
	for (i = 0; i < memmap.nr_entries; i++) {
		unsigned long long end;

		/* Guard against non-page aligned E820 entries. */
		if (map[i].type == E820_RAM)
			map[i].size -= (map[i].size + map[i].addr) % PAGE_SIZE;

		end = map[i].addr + map[i].size;
		if (map[i].type == E820_RAM && end > mem_end) {
			/* RAM off the end - may be partially included */
			u64 delta = min(map[i].size, end - mem_end);

			map[i].size -= delta;
			end -= delta;

			extra_pages += PFN_DOWN(delta);
			/*
			 * Set RAM below 4GB that is not for us to be unusable.
			 * This prevents "System RAM" address space from being
			 * used as potential resource for I/O address (happens
			 * when 'allocate_resource' is called).
			 */
			if (delta &&
				(xen_initial_domain() && end < 0x100000000ULL))
				e820_add_region(end, delta, E820_UNUSABLE);
		}

		if (map[i].size > 0 && end > xen_extra_mem_start)
			xen_extra_mem_start = end;

		/* Add region if any remains */
		if (map[i].size > 0)
			e820_add_region(map[i].addr, map[i].size, map[i].type);
	}
	/* Align the balloon area so that max_low_pfn does not get set
	 * to be at the _end_ of the PCI gap at the far end (fee01000).
	 * Note that xen_extra_mem_start gets set in the loop above to be
	 * past the last E820 region. */
	if (xen_initial_domain() && (xen_extra_mem_start < (1ULL<<32)))
		xen_extra_mem_start = (1ULL<<32);

	/*
	 * In domU, the ISA region is normal, usable memory, but we
	 * reserve ISA memory anyway because too many things poke
	 * about in there.
	 *
	 * In Dom0, the host E820 information can leave gaps in the
	 * ISA range, which would cause us to release those pages.  To
	 * avoid this, we unconditionally reserve them here.
	 */
	e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS,
			E820_RESERVED);

	/*
	 * Reserve Xen bits:
	 *  - mfn_list
	 *  - xen_start_info
	 * See comment above "struct start_info" in <xen/interface/xen.h>
	 */
	memblock_x86_reserve_range(__pa(xen_start_info->mfn_list),
		      __pa(xen_start_info->pt_base),
			"XEN START INFO");

	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);

	extra_limit = xen_get_max_pages();
	if (max_pfn + extra_pages > extra_limit) {
		if (extra_limit > max_pfn)
			extra_pages = extra_limit - max_pfn;
		else
			extra_pages = 0;
	}

	extra_pages += xen_return_unused_memory(xen_start_info->nr_pages, &e820);

	/*
	 * Clamp the amount of extra memory to a EXTRA_MEM_RATIO
	 * factor the base size.  On non-highmem systems, the base
	 * size is the full initial memory allocation; on highmem it
	 * is limited to the max size of lowmem, so that it doesn't
	 * get completely filled.
	 *
	 * In principle there could be a problem in lowmem systems if
	 * the initial memory is also very large with respect to
	 * lowmem, but we won't try to deal with that here.
	 */
	extra_limit = min(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
			  max_pfn + extra_pages);

	if (extra_limit >= max_pfn)
		extra_pages = extra_limit - max_pfn;
	else
		extra_pages = 0;

	xen_add_extra_mem(extra_pages);

	/*
	 * Set P2M for all non-RAM pages and E820 gaps to be identity
	 * type PFNs. We supply it with the non-sanitized version
	 * of the E820.
	 */
	identity_pages = xen_set_identity(map_raw, memmap.nr_entries);
	printk(KERN_INFO "Set %ld page(s) to 1-1 mapping.\n", identity_pages);
	return "Xen";
}
Example No. 21
File: init.c Project: E-LLP/linux
/*
 * First memory setup routine called from setup_arch()
 * 1. setup swapper's mm @init_mm
 * 2. Count the pages we have and setup bootmem allocator
 * 3. zone setup
 */
void __init setup_arch_memory(void)
{
	unsigned long zones_size[MAX_NR_ZONES];
	unsigned long zones_holes[MAX_NR_ZONES];

	init_mm.start_code = (unsigned long)_text;
	init_mm.end_code = (unsigned long)_etext;
	init_mm.end_data = (unsigned long)_edata;
	init_mm.brk = (unsigned long)_end;

	/* first page of system - kernel .vector starts here */
	min_low_pfn = ARCH_PFN_OFFSET;

	/* Last usable page of low mem */
	max_low_pfn = max_pfn = PFN_DOWN(low_mem_start + low_mem_sz);

#ifdef CONFIG_HIGHMEM
	min_high_pfn = PFN_DOWN(high_mem_start);
	max_pfn = PFN_DOWN(high_mem_start + high_mem_sz);
#endif

	max_mapnr = max_pfn - min_low_pfn;

	/*------------- bootmem allocator setup -----------------------*/

	/*
	 * seed the bootmem allocator after any DT memory node parsing or
	 * "mem=xxx" cmdline overrides have potentially updated @arc_mem_sz
	 *
	 * Only low mem is added, otherwise we have crashes when allocating
	 * mem_map[] itself. NO_BOOTMEM allocates mem_map[] at the end of
	 * avail memory, ending in highmem with a > 32-bit address. However
	 * it then tries to memset it with a truncated 32-bit handle, causing
	 * the crash.
	 */

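	/* Register low memory, then reserve everything up to the end of the kernel image. */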
	memblock_add(low_mem_start, low_mem_sz);
	memblock_reserve(low_mem_start, __pa(_end) - low_mem_start);

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start)
		memblock_reserve(__pa(initrd_start), initrd_end - initrd_start);
#endif

	memblock_dump_all();

	/*----------------- node/zones setup --------------------------*/
	memset(zones_size, 0, sizeof(zones_size));
	memset(zones_holes, 0, sizeof(zones_holes));

	zones_size[ZONE_NORMAL] = max_low_pfn - min_low_pfn;
	zones_holes[ZONE_NORMAL] = 0;

#ifdef CONFIG_HIGHMEM
	zones_size[ZONE_HIGHMEM] = max_pfn - max_low_pfn;

	/* This handles the peripheral address space hole */
	zones_holes[ZONE_HIGHMEM] = min_high_pfn - max_low_pfn;
#endif

	/*
	 * We can't use the helper free_area_init(zones[]) because it uses
	 * PAGE_OFFSET to compute the @min_low_pfn which would be wrong
	 * when our kernel doesn't start at PAGE_OFFSET, i.e.
	 * PAGE_OFFSET != CONFIG_LINUX_LINK_BASE
	 */
	free_area_init_node(0,			/* node-id */
			    zones_size,		/* num pages per zone */
			    min_low_pfn,	/* first pfn of node */
			    zones_holes);	/* holes */

#ifdef CONFIG_HIGHMEM
	high_memory = (void *)(min_high_pfn << PAGE_SHIFT);
	kmap_init();
#endif
}
Example No. 22
static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
{
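	/* Allocate from the CPU's own node, preferring memory above MAX_DMA_ADDRESS. */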
	return __alloc_bootmem_node(NODE_DATA(cpu_to_node(cpu)), size, align,
				    __pa(MAX_DMA_ADDRESS));
}
Example No. 23
	 * Aperture has to be naturally aligned. This means a 2GB aperture won't
	 * have much chance of finding a place in the lower 4GB of memory.
	 * Unfortunately we cannot move it up because that would make the
	 * IOMMU useless.
	 */
	p = __alloc_bootmem_nopanic(aper_size, aper_size, 0);
	if (!p || __pa(p)+aper_size > 0xffffffff) {
		printk("Cannot allocate aperture memory hole (%p,%uK)\n",
		       p, aper_size>>10);
		if (p)
			free_bootmem(__pa(p), aper_size);
		return 0;
	}
	printk("Mapping aperture over %d KB of RAM @ %lx\n",
	       aper_size >> 10, __pa(p)); 
	insert_aperture_resource((u32)__pa(p), aper_size);
	return (u32)__pa(p); 
}

static int __init aperture_valid(u64 aper_base, u32 aper_size)
{ 
	if (!aper_base) 
		return 0;
	if (aper_size < 64*1024*1024) { 
		printk("Aperture too small (%d MB)\n", aper_size>>20);
		return 0;
	}
	if (aper_base + aper_size > 0x100000000UL) {
		printk("Aperture beyond 4GB. Ignoring.\n");
		return 0; 
	}
Example No. 24
static void __init pcpu_fc_free(void *ptr, size_t size)
{
	free_bootmem(__pa(ptr), size);
}
Example No. 25
static void note_page(struct pg_state *st, unsigned long addr, unsigned level, u64 val)
{
	u64 prot = val & pg_level[level].mask;

	if (st->start_address >= start_vmalloc_allocated &&
		st->start_address <= start_vmalloc_allocated) {
		goto skip_process;
	}
	
	if (!st->level && !ro) {
		st->level = level;
		st->current_prot = prot;
	} else if (prot != st->current_prot || level != st->level ||
		   addr >= st->marker[1].start_address) {
		if (st->current_prot &&
		    addr_in_valid_range (st->marker->start_address)) {
			if(ro) {
				n_entries++;
				goto skip_process;
			}

			if (entry < n_entries) {
				int nr_pages = (addr - st->start_address) / PAGE_SIZE;
				struct kernel_map_info *info = &st->k_map->map_info[entry];

				info->start_addr = st->start_address;
				info->end_addr = addr;

				if (nr_pages >= MAX_PHYS_ADDR)
					nr_pages = MAX_PHYS_ADDR - 1;
				info->n_pages = nr_pages;

				if (addr_from_kernel (st->marker->start_address)) {
					info->phys_addr[0] = __pa (st->start_address);
					info->phys_addr[1] = __pa (addr);
					info->n_phys_addr = 2;
					entry++;
				} else if (addr_from_vmalloc (st->marker->start_address) &&
					   level == 4) {
					int i;
					unsigned long aux_addr;
					for (i = 0, aux_addr = st->start_address; i < nr_pages;
					     i++, aux_addr += PAGE_SIZE) {
						unsigned long pfn = vmalloc_to_pfn ((void *) aux_addr);
						if (!pfn_valid (pfn))
							info->phys_addr[i] = 0;
						else
							info->phys_addr[i] = (pfn << PAGE_SHIFT);
					}
					info->n_phys_addr = info->n_pages;
					entry++;
				}
			}
		}
						
skip_process:
		if (addr >= st->marker[1].start_address) {
			st->marker++;
		}
		st->start_address = addr;
		st->current_prot = prot;
		st->level = level;
	}

#ifdef CONFIG_ARM64
	if (addr >= st->marker[1].start_address) {
		st->marker++;
	}
#endif
}
Example No. 26
int default_machine_kexec_prepare(struct kimage *image)
{
	int i;
	unsigned long begin, end;	/* limits of segment */
	unsigned long low, high;	/* limits of blocked memory range */
	struct device_node *node;
	const unsigned long *basep;
	const unsigned int *sizep;

	if (!ppc_md.hpte_clear_all)
		return -ENOENT;

	/*
	 * Since we use the kernel fault handlers and paging code to
	 * handle the virtual mode, we must make sure no destination
	 * overlaps kernel static data or bss.
	 */
	for (i = 0; i < image->nr_segments; i++)
		if (image->segment[i].mem < __pa(_end))
			return -ETXTBSY;

	/*
	 * For non-LPAR, we absolutely can not overwrite the mmu hash
	 * table, since we are still using the bolted entries in it to
	 * do the copy.  Check that here.
	 *
	 * It is safe if the end is below the start of the blocked
	 * region (end <= low), or if the beginning is after the
	 * end of the blocked region (begin >= high).  Use the
	 * boolean identity !(a || b)  === (!a && !b).
	 */
	if (htab_address) {
		low = __pa(htab_address);
		high = low + htab_size_bytes;

		for (i = 0; i < image->nr_segments; i++) {
			begin = image->segment[i].mem;
			end = begin + image->segment[i].memsz;

			if ((begin < high) && (end > low))
				return -ETXTBSY;
		}
	}

	/* We also should not overwrite the tce tables */
	for (node = of_find_node_by_type(NULL, "pci"); node != NULL;
			node = of_find_node_by_type(node, "pci")) {
		basep = of_get_property(node, "linux,tce-base", NULL);
		sizep = of_get_property(node, "linux,tce-size", NULL);
		if (basep == NULL || sizep == NULL)
			continue;

		low = *basep;
		high = low + (*sizep);

		for (i = 0; i < image->nr_segments; i++) {
			begin = image->segment[i].mem;
			end = begin + image->segment[i].memsz;

			if ((begin < high) && (end > low))
				return -ETXTBSY;
		}
	}

	return 0;
}
Example No. 27
static void __init
setup_memory(void)
{
        unsigned long bootmap_size;
	unsigned long start_pfn, end_pfn;
	int i;

	/*
	 * partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	start_pfn = PFN_UP(__pa(&_end));
	end_pfn = max_pfn = PFN_DOWN(memory_end);

#ifdef CONFIG_BLK_DEV_INITRD
	/*
	 * Move the initrd in case the bitmap of the bootmem allocator
	 * would overwrite it.
	 */

	if (INITRD_START && INITRD_SIZE) {
		unsigned long bmap_size;
		unsigned long start;

		bmap_size = bootmem_bootmap_pages(end_pfn - start_pfn + 1);
		bmap_size = PFN_PHYS(bmap_size);

		if (PFN_PHYS(start_pfn) + bmap_size > INITRD_START) {
			start = PFN_PHYS(start_pfn) + bmap_size + PAGE_SIZE;

			if (start + INITRD_SIZE > memory_end) {
				pr_err("initrd extends beyond end of "
				       "memory (0x%08lx > 0x%08lx) "
				       "disabling initrd\n",
				       start + INITRD_SIZE, memory_end);
				INITRD_START = INITRD_SIZE = 0;
			} else {
				pr_info("Moving initrd (0x%08lx -> "
					"0x%08lx, size: %ld)\n",
					INITRD_START, start, INITRD_SIZE);
				memmove((void *) start, (void *) INITRD_START,
					INITRD_SIZE);
				INITRD_START = start;
			}
		}
	}
#endif

	/*
	 * Initialize the boot-time allocator
	 */
	bootmap_size = init_bootmem(start_pfn, end_pfn);

	/*
	 * Register RAM areas with the bootmem allocator.
	 */

	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
		unsigned long start_chunk, end_chunk, pfn;

		if (memory_chunk[i].type != CHUNK_READ_WRITE)
			continue;
		start_chunk = PFN_DOWN(memory_chunk[i].addr);
		end_chunk = start_chunk + PFN_DOWN(memory_chunk[i].size);
		end_chunk = min(end_chunk, end_pfn);
		if (start_chunk >= end_chunk)
			continue;
		add_active_range(0, start_chunk, end_chunk);
		pfn = max(start_chunk, start_pfn);
		for (; pfn < end_chunk; pfn++)
			page_set_storage_key(PFN_PHYS(pfn),
					     PAGE_DEFAULT_KEY, 0);
	}

	psw_set_key(PAGE_DEFAULT_KEY);

	free_bootmem_with_active_regions(0, max_pfn);

	/*
	 * Reserve memory used for lowcore/command line/kernel image.
	 */
	reserve_bootmem(0, (unsigned long)_ehead, BOOTMEM_DEFAULT);
	reserve_bootmem((unsigned long)_stext,
			PFN_PHYS(start_pfn) - (unsigned long)_stext,
			BOOTMEM_DEFAULT);
	/*
	 * Reserve the bootmem bitmap itself as well. We do this in two
	 * steps (first step was init_bootmem()) because this catches
	 * the (very unlikely) case of us accidentally initializing the
	 * bootmem allocator with an invalid RAM area.
	 */
	reserve_bootmem(start_pfn << PAGE_SHIFT, bootmap_size,
			BOOTMEM_DEFAULT);

#ifdef CONFIG_BLK_DEV_INITRD
	if (INITRD_START && INITRD_SIZE) {
		if (INITRD_START + INITRD_SIZE <= memory_end) {
			reserve_bootmem(INITRD_START, INITRD_SIZE,
					BOOTMEM_DEFAULT);
			initrd_start = INITRD_START;
			initrd_end = initrd_start + INITRD_SIZE;
		} else {
			pr_err("initrd extends beyond end of "
			       "memory (0x%08lx > 0x%08lx) "
			       "disabling initrd\n",
			       initrd_start + INITRD_SIZE, memory_end);
			initrd_start = initrd_end = 0;
		}
	}
#endif
}
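The start_pfn/end_pfn computation above relies on PFN_UP rounding the kernel's end address up to the next full page frame and PFN_DOWN rounding the memory end down, since partially used pages are unusable. A minimal sketch of that rounding follows; the macro definitions mirror the ones that appear in Example #30 below, and the addresses are made up for the demo.

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

#define PFN_UP(x)	(((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
#define PFN_DOWN(x)	((x) >> PAGE_SHIFT)

int main(void)
{
	unsigned long kernel_end = 0x123456;	/* hypothetical __pa(&_end) */
	unsigned long memory_end = 0x8000000;	/* hypothetical end of RAM */

	/* First fully free frame after the kernel image, last full frame of RAM. */
	printf("start_pfn = %lu\n", PFN_UP(kernel_end));	/* 0x124 = 292 */
	printf("end_pfn   = %lu\n", PFN_DOWN(memory_end));	/* 0x8000 = 32768 */
	return 0;
}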
Example #28
void __init setup_arch(char **cmdline_p)
{
	extern int mem_reserve(unsigned long, unsigned long, int);
	extern void bootmem_init(void);

	memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
	boot_command_line[COMMAND_LINE_SIZE-1] = '\0';
	*cmdline_p = command_line;

	/* Reserve some memory regions */

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start < initrd_end) {
		initrd_is_mapped = mem_reserve(__pa(initrd_start),
					       __pa(initrd_end), 0);
		initrd_below_start_ok = 1;
	} else {
		initrd_start = 0;
	}
#endif

	mem_reserve(__pa(&_stext), __pa(&_end), 1);

	mem_reserve(__pa(&_WindowVectors_text_start),
		    __pa(&_WindowVectors_text_end), 0);

	mem_reserve(__pa(&_DebugInterruptVector_literal_start),
		    __pa(&_DebugInterruptVector_text_end), 0);

	mem_reserve(__pa(&_KernelExceptionVector_literal_start),
		    __pa(&_KernelExceptionVector_text_end), 0);

	mem_reserve(__pa(&_UserExceptionVector_literal_start),
		    __pa(&_UserExceptionVector_text_end), 0);

	mem_reserve(__pa(&_DoubleExceptionVector_literal_start),
		    __pa(&_DoubleExceptionVector_text_end), 0);

	bootmem_init();

	platform_setup(cmdline_p);

	paging_init();
	zones_init();

#ifdef CONFIG_VT
# if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
# elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
# endif
#endif

#ifdef CONFIG_PCI
	platform_pcibios_init();
#endif
}
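setup_arch() above copies the command line into a fixed-size buffer and then forces the last byte to '\0', so the string stays NUL-terminated even when the copy fills the whole buffer. A tiny standalone sketch of that copy-then-terminate pattern follows; the buffer size is shrunk here purely for the demo.

#include <stdio.h>
#include <string.h>

#define COMMAND_LINE_SIZE 16	/* deliberately small for the demo */

int main(void)
{
	char src[] = "console=ttyS0 root=/dev/ram0";	/* longer than the buffer */
	char boot_command_line[COMMAND_LINE_SIZE];

	memcpy(boot_command_line, src, COMMAND_LINE_SIZE);
	boot_command_line[COMMAND_LINE_SIZE - 1] = '\0';

	printf("%s\n", boot_command_line);	/* prints a safely truncated string */
	return 0;
}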
Example #29
int ps3_lpm_open(enum ps3_lpm_tb_type tb_type, void *tb_cache,
	u64 tb_cache_size)
{
	int result;
	u64 tb_size;

	BUG_ON(!lpm_priv);
	BUG_ON(tb_type != PS3_LPM_TB_TYPE_NONE
		&& tb_type != PS3_LPM_TB_TYPE_INTERNAL);

	if (tb_type == PS3_LPM_TB_TYPE_NONE && tb_cache)
		dev_dbg(sbd_core(), "%s:%u: bad in vals\n", __func__, __LINE__);

	if (!atomic_add_unless(&lpm_priv->open, 1, 1)) {
		dev_dbg(sbd_core(), "%s:%u: busy\n", __func__, __LINE__);
		return -EBUSY;
	}

	/* Note tb_cache needs 128 byte alignment. */

	if (tb_type == PS3_LPM_TB_TYPE_NONE) {
		lpm_priv->tb_cache_size = 0;
		lpm_priv->tb_cache_internal = NULL;
		lpm_priv->tb_cache = NULL;
	} else if (tb_cache) {
		if (tb_cache != (void *)_ALIGN_UP((unsigned long)tb_cache, 128)
			|| tb_cache_size != _ALIGN_UP(tb_cache_size, 128)) {
			dev_err(sbd_core(), "%s:%u: unaligned tb_cache\n",
				__func__, __LINE__);
			result = -EINVAL;
			goto fail_align;
		}
		lpm_priv->tb_cache_size = tb_cache_size;
		lpm_priv->tb_cache_internal = NULL;
		lpm_priv->tb_cache = tb_cache;
	} else {
		lpm_priv->tb_cache_size = PS3_LPM_DEFAULT_TB_CACHE_SIZE;
		lpm_priv->tb_cache_internal = kzalloc(
			lpm_priv->tb_cache_size + 127, GFP_KERNEL);
		if (!lpm_priv->tb_cache_internal) {
			dev_err(sbd_core(), "%s:%u: alloc internal tb_cache "
				"failed\n", __func__, __LINE__);
			result = -ENOMEM;
			goto fail_malloc;
		}
		lpm_priv->tb_cache = (void *)_ALIGN_UP(
			(unsigned long)lpm_priv->tb_cache_internal, 128);
	}

	result = lv1_construct_lpm(lpm_priv->node_id, tb_type, 0, 0,
				ps3_mm_phys_to_lpar(__pa(lpm_priv->tb_cache)),
				lpm_priv->tb_cache_size, &lpm_priv->lpm_id,
				&lpm_priv->outlet_id, &tb_size);

	if (result) {
		dev_err(sbd_core(), "%s:%u: lv1_construct_lpm failed: %s\n",
			__func__, __LINE__, ps3_result(result));
		result = -EINVAL;
		goto fail_construct;
	}

	lpm_priv->shadow.pm_control = PS3_LPM_SHADOW_REG_INIT;
	lpm_priv->shadow.pm_start_stop = PS3_LPM_SHADOW_REG_INIT;
	lpm_priv->shadow.group_control = PS3_LPM_SHADOW_REG_INIT;
	lpm_priv->shadow.debug_bus_control = PS3_LPM_SHADOW_REG_INIT;

	dev_dbg(sbd_core(), "%s:%u: lpm_id 0x%llx, outlet_id 0x%llx, "
		"tb_size 0x%llx\n", __func__, __LINE__, lpm_priv->lpm_id,
		lpm_priv->outlet_id, tb_size);

	return 0;

fail_construct:
	kfree(lpm_priv->tb_cache_internal);
	lpm_priv->tb_cache_internal = NULL;
fail_malloc:
fail_align:
	atomic_dec(&lpm_priv->open);
	return result;
}
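When no caller-supplied cache is given, ps3_lpm_open() above over-allocates the internal trace buffer by 127 bytes and rounds the pointer up with _ALIGN_UP to meet the 128-byte alignment requirement. Below is a minimal sketch of that over-allocate-and-align pattern; align_up() is a stand-in written for this example, not the kernel's own helper.

#include <stdio.h>
#include <stdlib.h>

static unsigned long align_up(unsigned long v, unsigned long a)
{
	return (v + a - 1) & ~(a - 1);	/* a must be a power of two */
}

int main(void)
{
	size_t size = 1000;
	void *raw = malloc(size + 127);		/* raw, possibly unaligned allocation */
	void *aligned = (void *)align_up((unsigned long)raw, 128);

	/* At least 'size' bytes remain usable starting at the aligned address. */
	printf("raw=%p aligned=%p slack=%ld\n",
	       raw, aligned, (long)((char *)aligned - (char *)raw));
	free(raw);				/* always free the original pointer */
	return 0;
}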
Example #30
void __init setup_arch(char **cmdline_p)
{
	void atlas_setup(void);
	void baget_setup(void);
	void cobalt_setup(void);
	void ddb_setup(void);
	void decstation_setup(void);
	void deskstation_setup(void);
	void jazz_setup(void);
	void sni_rm200_pci_setup(void);
	void ip22_setup(void);
	void ev96100_setup(void);
	void malta_setup(void);
	void ikos_setup(void);
	void momenco_ocelot_setup(void);
	void nino_setup(void);
	void nec_osprey_setup(void);
	void jmr3927_setup(void);
	void it8172_setup(void);
	void swarm_setup(void);
	void hp_setup(void);

	unsigned long bootmap_size;
	unsigned long start_pfn, max_pfn, max_low_pfn, first_usable_pfn;
#ifdef CONFIG_BLK_DEV_INITRD
	unsigned long tmp;
	unsigned long *initrd_header;
#endif

	int i;

#ifdef CONFIG_BLK_DEV_FD
	fd_ops = &no_fd_ops;
#endif

#ifdef CONFIG_BLK_DEV_IDE
	ide_ops = &no_ide_ops;
#endif

#ifdef CONFIG_PC_KEYB
	kbd_ops = &no_kbd_ops;
#endif
	
	rtc_ops = &no_rtc_ops;

	switch(mips_machgroup)
	{
#ifdef CONFIG_BAGET_MIPS
	case MACH_GROUP_BAGET: 
		baget_setup();
		break;
#endif
#ifdef CONFIG_MIPS_COBALT
	case MACH_GROUP_COBALT:
		cobalt_setup();
		break;
#endif
#ifdef CONFIG_DECSTATION
	case MACH_GROUP_DEC:
		decstation_setup();
		break;
#endif
#ifdef CONFIG_MIPS_ATLAS
	case MACH_GROUP_UNKNOWN:
		atlas_setup();
		break;
#endif
#ifdef CONFIG_MIPS_JAZZ
	case MACH_GROUP_JAZZ:
		jazz_setup();
		break;
#endif
#ifdef CONFIG_MIPS_MALTA
	case MACH_GROUP_UNKNOWN:
		malta_setup();
		break;
#endif
#ifdef CONFIG_MOMENCO_OCELOT
	case MACH_GROUP_MOMENCO:
		momenco_ocelot_setup();
		break;
#endif
#ifdef CONFIG_SGI_IP22
	/* As of now this is only IP22.  */
	case MACH_GROUP_SGI:
		ip22_setup();
		break;
#endif
#ifdef CONFIG_SNI_RM200_PCI
	case MACH_GROUP_SNI_RM:
		sni_rm200_pci_setup();
		break;
#endif
#ifdef CONFIG_DDB5074
	case MACH_GROUP_NEC_DDB:
		ddb_setup();
		break;
#endif
#ifdef CONFIG_DDB5476
	case MACH_GROUP_NEC_DDB:
		ddb_setup();
		break;
#endif
#ifdef CONFIG_DDB5477
	case MACH_GROUP_NEC_DDB:
		ddb_setup();
		break;
#endif
#ifdef CONFIG_NEC_OSPREY
	case MACH_GROUP_NEC_VR41XX:
		nec_osprey_setup();
		break;
#endif
#ifdef CONFIG_MIPS_EV96100
	case MACH_GROUP_GALILEO:
		ev96100_setup();
		break;
#endif
#ifdef CONFIG_MIPS_EV64120
	case MACH_GROUP_GALILEO:
		ev64120_setup();
		break;
#endif
#if defined(CONFIG_MIPS_IVR) || defined(CONFIG_MIPS_ITE8172)
	case MACH_GROUP_ITE:
	case MACH_GROUP_GLOBESPAN:
		it8172_setup();
		break;
#endif
#ifdef CONFIG_NINO
	case MACH_GROUP_PHILIPS:
		nino_setup();
		break;
#endif
#ifdef CONFIG_MIPS_PB1000
	case MACH_GROUP_ALCHEMY:
		au1000_setup();
		break;
#endif
#ifdef CONFIG_MIPS_PB1500
	case MACH_GROUP_ALCHEMY:
		au1500_setup();
		break;
#endif
#ifdef CONFIG_TOSHIBA_JMR3927
	case MACH_GROUP_TOSHIBA:
		jmr3927_setup();
		break;
#endif
#ifdef CONFIG_SIBYTE_SWARM
	case MACH_GROUP_SIBYTE:
		swarm_setup();
		break;
#endif
#ifdef CONFIG_HP_LASERJET
	case MACH_GROUP_HP_LJ:
		hp_setup();
		break;
#endif
	default:
		panic("Unsupported architecture");
	}

	strncpy(command_line, arcs_cmdline, sizeof command_line);
	command_line[sizeof command_line - 1] = 0;
	strcpy(saved_command_line, command_line);
	*cmdline_p = command_line;

	parse_mem_cmdline();

#define PFN_UP(x)	(((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
#define PFN_DOWN(x)	((x) >> PAGE_SHIFT)
#define PFN_PHYS(x)	((x) << PAGE_SHIFT)

#define MAXMEM		HIGHMEM_START
#define MAXMEM_PFN	PFN_DOWN(MAXMEM)

#ifdef CONFIG_BLK_DEV_INITRD
	tmp = (((unsigned long)&_end + PAGE_SIZE - 1) & PAGE_MASK) - 8;
	if (tmp < (unsigned long)&_end)
		tmp += PAGE_SIZE;
	initrd_header = (unsigned long *)tmp;
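	/* 0x494E5244 is "INRD" in ASCII: the magic word marking an initrd header. */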
	if (initrd_header[0] == 0x494E5244) {
		initrd_start = (unsigned long)&initrd_header[2];
		initrd_end = initrd_start + initrd_header[1];
	}
	start_pfn = PFN_UP(__pa((&_end)+(initrd_end - initrd_start) + PAGE_SIZE));
#else
	/*
	 * Partially used pages are not usable - thus
	 * we are rounding upwards.
	 */
	start_pfn = PFN_UP(__pa(&_end));
#endif	/* CONFIG_BLK_DEV_INITRD */

	/* Find the highest page frame number we have available.  */
	max_pfn = 0;
	first_usable_pfn = -1UL;
	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long start, end;

		if (boot_mem_map.map[i].type != BOOT_MEM_RAM)
			continue;

		start = PFN_UP(boot_mem_map.map[i].addr);
		end = PFN_DOWN(boot_mem_map.map[i].addr
		      + boot_mem_map.map[i].size);

		if (start >= end)
			continue;
		if (end > max_pfn)
			max_pfn = end;
		if (start < first_usable_pfn) {
			if (start > start_pfn) {
				first_usable_pfn = start;
			} else if (end > start_pfn) {
				first_usable_pfn = start_pfn;
			}
		}
	}

	/*
	 * Determine low and high memory ranges
	 */
	max_low_pfn = max_pfn;
	if (max_low_pfn > MAXMEM_PFN) {
		max_low_pfn = MAXMEM_PFN;
#ifndef CONFIG_HIGHMEM
		/* Maximum memory usable is what is directly addressable */
		printk(KERN_WARNING "Warning only %ldMB will be used.\n",
		       MAXMEM>>20);
		printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
#endif
	}