Example #1
/*
 * Allocate DP-RAM and memory buffers. We need to allocate transmit and
 * receive buffer descriptors from dual-port RAM, and a character
 * buffer area from host memory. If we are allocating for the console we
 * need to do it from bootmem.
 */
int cpm_uart_allocbuf(struct uart_cpm_port *pinfo, unsigned int is_con)
{
	int dpmemsz, memsz;
	u8 *dp_mem;
	uint dp_offset;
	u8 *mem_addr;
	dma_addr_t dma_addr = 0;

	pr_debug("CPM uart[%d]:allocbuf\n", pinfo->port.line);

	dpmemsz = sizeof(cbd_t) * (pinfo->rx_nrfifos + pinfo->tx_nrfifos);
	dp_offset = cpm_dpalloc(dpmemsz, 8);
	if (IS_DPERR(dp_offset)) {
		printk(KERN_ERR
		       "cpm_uart_cpm.c: could not allocate buffer descriptors\n");
		return -ENOMEM;
	}

	dp_mem = cpm_dpram_addr(dp_offset);

	memsz = L1_CACHE_ALIGN(pinfo->rx_nrfifos * pinfo->rx_fifosize) +
	    L1_CACHE_ALIGN(pinfo->tx_nrfifos * pinfo->tx_fifosize);
	if (is_con)
		mem_addr = alloc_bootmem(memsz);
	else
		mem_addr = dma_alloc_coherent(NULL, memsz, &dma_addr,
					      GFP_KERNEL);

	if (mem_addr == NULL) {
		cpm_dpfree(dp_offset);
		printk(KERN_ERR
		       "cpm_uart_cpm.c: could not allocate coherent memory\n");
		return -ENOMEM;
	}

	pinfo->dp_addr = dp_offset;
	pinfo->mem_addr = mem_addr;
	pinfo->dma_addr = dma_addr;

	pinfo->rx_buf = mem_addr;
	pinfo->tx_buf = pinfo->rx_buf + L1_CACHE_ALIGN(pinfo->rx_nrfifos
						       * pinfo->rx_fifosize);

	pinfo->rx_bd_base = (volatile cbd_t *)dp_mem;
	pinfo->tx_bd_base = pinfo->rx_bd_base + pinfo->rx_nrfifos;

	return 0;
}
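
A minimal sketch of the matching release path (not shown in the snippet): the coherent DMA buffer goes back through dma_free_coherent(), the descriptor area through cpm_dpfree(); a bootmem-backed console buffer cannot be returned this way. Field names follow the pinfo structure used above.

void cpm_uart_freebuf(struct uart_cpm_port *pinfo)
{
	dma_free_coherent(NULL,
			  L1_CACHE_ALIGN(pinfo->rx_nrfifos * pinfo->rx_fifosize) +
			  L1_CACHE_ALIGN(pinfo->tx_nrfifos * pinfo->tx_fifosize),
			  pinfo->mem_addr, pinfo->dma_addr);

	cpm_dpfree(pinfo->dp_addr);
}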
/*
 * The trick of making the zero page strongly ordered no longer
 * works. We no longer want to make a second alias to the zero
 * page that is strongly ordered. Manually changing the bits
 * in the page table for the zero page would have side effects
 * elsewhere that aren't necessary. The result is that we need
 * to get a page from elsewhere. Given when the first call
 * to write_to_strongly_ordered_memory occurs, using bootmem
 * to get a page makes the most sense.
 */
void map_page_strongly_ordered(void)
{
#if defined(CONFIG_ARCH_MSM7X27)
	long unsigned int phys;

	if (strongly_ordered_page)
		return;

	strongly_ordered_page = alloc_bootmem(PAGE_SIZE);
	phys = __pa(strongly_ordered_page);
	ioremap_page((long unsigned int) strongly_ordered_page,
		phys,
		get_mem_type(MT_DEVICE_STRONGLY_ORDERED));
	printk(KERN_ALERT "Initialized strongly ordered page successfully\n");
#endif
}
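
The write_to_strongly_ordered_memory() caller referenced in the comment is, roughly, a one-line wrapper; a sketch assuming the same file-scope strongly_ordered_page pointer used above:

void write_to_strongly_ordered_memory(void)
{
	map_page_strongly_ordered();
	/* a store through the strongly ordered alias drains buffered writes */
	*(int *)strongly_ordered_page = 0;
}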
Example #3
static int __init xen_console_init(void)
{
	if (!is_running_on_xen())
		goto out;

	if (is_initial_xendomain()) {
		if (xc_mode == XC_DEFAULT)
			xc_mode = XC_SERIAL;
		kcons_info.write = kcons_write_dom0;
	} else {
		if (!xen_start_info->console.domU.evtchn)
			goto out;
		if (xc_mode == XC_DEFAULT)
			xc_mode = XC_XVC;
		kcons_info.write = kcons_write;
	}

	switch (xc_mode) {
	case XC_XVC:
		strcpy(kcons_info.name, "xvc");
		if (xc_num == -1)
			xc_num = 0;
		break;

	case XC_SERIAL:
		strcpy(kcons_info.name, "ttyS");
		if (xc_num == -1)
			xc_num = 0;
		break;

	case XC_TTY:
		strcpy(kcons_info.name, "tty");
		if (xc_num == -1)
			xc_num = 1;
		break;

	default:
		goto out;
	}

	wbuf = alloc_bootmem(wbuf_size);

	register_console(&kcons_info);

 out:
	return 0;
}
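
In the driver this routine is wired up as an early console initcall; the registration is roughly:

console_initcall(xen_console_init);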
Example #4
static ssize_t __init setup_pcpu_4k(size_t static_size)
{
	size_t pages_size;
	unsigned int cpu;
	int i, j;
	ssize_t ret;

	pcpu4k_nr_static_pages = PFN_UP(static_size);

	/* unaligned allocations can't be freed, round up to page size */
	pages_size = PFN_ALIGN(pcpu4k_nr_static_pages * num_possible_cpus()
			       * sizeof(pcpu4k_pages[0]));
	pcpu4k_pages = alloc_bootmem(pages_size);

	/* allocate and copy */
	j = 0;
	for_each_possible_cpu(cpu)
		for (i = 0; i < pcpu4k_nr_static_pages; i++) {
			void *ptr;

			ptr = pcpu_alloc_bootmem(cpu, PAGE_SIZE, PAGE_SIZE);
			if (!ptr)
				goto enomem;

			memcpy(ptr, __per_cpu_load + i * PAGE_SIZE, PAGE_SIZE);
			pcpu4k_pages[j++] = virt_to_page(ptr);
		}

	/* we're ready, commit */
	pr_info("PERCPU: Allocated %d 4k pages, static data %zu bytes\n",
		pcpu4k_nr_static_pages, static_size);

	ret = pcpu_setup_first_chunk(pcpu4k_get_page, static_size,
				     PERCPU_FIRST_CHUNK_RESERVE, -1,
				     -1, NULL, pcpu4k_populate_pte);
	goto out_free_ar;

enomem:
	while (--j >= 0)
		free_bootmem(__pa(page_address(pcpu4k_pages[j])), PAGE_SIZE);
	ret = -ENOMEM;
out_free_ar:
	free_bootmem(__pa(pcpu4k_pages), pages_size);
	return ret;
}
Example #5
void __init device_tree_init(void)
{
    unsigned long base, size;
    void *fdt_copy;

    set_io_port_base(KSEG1);

    /*
     * Load the builtin devicetree. This causes the chosen node to be
     * parsed resulting in our memory appearing
     */

    printk ("DTB: device_tree_init - DBG\n");

    __dt_setup_arch(&__image_dtb);


    printk ("DTB: device_tree_init - after __dt_setup_arch - DBG\n");
    if (!initial_boot_params)
        return;

    printk ("DTB: device_tree_init - initial_boot_params - DBG\n");

    base = virt_to_phys((void *)initial_boot_params);
    size = be32_to_cpu(initial_boot_params->totalsize);

    /* Before we do anything, let's reserve the dt blob */
    reserve_bootmem(base, size, BOOTMEM_DEFAULT);

    /* The strings in the flattened tree are referenced directly by the
     * device tree, so copy the flattened device tree from init memory
     * to regular memory.
     */
    fdt_copy = alloc_bootmem(size);
    memcpy(fdt_copy, initial_boot_params, size);
    initial_boot_params = fdt_copy;

    unflatten_device_tree();

    /* free the space reserved for the dt blob */
    //free_bootmem(base, size);
    printk ("DTB: device_tree_init - end - DBG\n");

}
Example #6
File: main.c  Project: kzlin129/tt-gpl
static void __init setup_per_cpu_areas(void)
{
    unsigned long size, i;
    char *ptr;

    /* Copy section for each CPU (we discard the original) */
    size = ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES);
#ifdef CONFIG_MODULES
    if (size < PERCPU_ENOUGH_ROOM)
        size = PERCPU_ENOUGH_ROOM;
#endif

    ptr = alloc_bootmem(size * NR_CPUS);

    for (i = 0; i < NR_CPUS; i++, ptr += size) {
        __per_cpu_offset[i] = ptr - __per_cpu_start;
        memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
    }
}
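
The offsets computed above are what the per-CPU accessors add back in; a sketch of the 2.6-era lookup macros from <asm-generic/percpu.h>:

#define per_cpu(var, cpu) \
	(*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset[cpu]))
#define __get_cpu_var(var) \
	per_cpu(var, smp_processor_id())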
Example #7
static void __init resource_init(void)
{
	struct resource *res;

	code_resource.start = __pa(&_text);
	code_resource.end = __pa(&_etext) - 1;
	data_resource.start = __pa(&_etext);
	data_resource.end = __pa(&_edata) - 1;

	res = alloc_bootmem(sizeof(struct resource));
	res->name = "System RAM";
	res->start = MEMORY_START;
	res->end = MEMORY_START + MEMORY_SIZE - 1;
	res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
	request_resource(&iomem_resource, res);

	request_resource(res, &code_resource);
	request_resource(res, &data_resource);
}
static void __init qsd8x50_allocate_memory_regions(void)
{
	void *addr;
	unsigned long size;
	if (machine_is_qsd8x50a_st1_5())
		size = MSM_FB_SIZE_ST15;
	else
		size = MSM_FB_SIZE;

	addr = alloc_bootmem(size); // (void *)MSM_FB_BASE;
	if (!addr)
		printk("Failed to allocate bootmem for framebuffer\n");


	msm_fb_resources[0].start = __pa(addr);
	msm_fb_resources[0].end = msm_fb_resources[0].start + size - 1;
	pr_info("using %lu bytes of SMI at %lx physical for fb\n",
		size, (unsigned long)__pa(addr));
}
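
Note that the NULL check above is effectively dead code: the generic bootmem allocator panics on failure instead of returning NULL. Roughly, from mm/bootmem.c of the same era:

void * __init __alloc_bootmem(unsigned long size, unsigned long align,
			      unsigned long goal)
{
	void *mem = __alloc_bootmem_nopanic(size, align, goal);

	if (mem)
		return mem;
	/*
	 * Whoops, we cannot satisfy the allocation request.
	 */
	printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size);
	panic("Out of memory");
	return NULL;
}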
Example #9
static int __init log_buf_len_setup(char *str)
{
	unsigned size = memparse(str, &str);
	unsigned long flags;

#ifdef CONFIG_LOGBUFFER
	/* Log buffer size is LOGBUFF_LEN bytes */
	printk(KERN_NOTICE "Ignoring log_buf_len param\n");
	return 1;
#endif
	if (size)
		size = roundup_pow_of_two(size);
	if (size > log_buf_len) {
		unsigned start, dest_idx, offset;
		char *new_log_buf;

		new_log_buf = alloc_bootmem(size);
		if (!new_log_buf) {
			printk(KERN_WARNING "log_buf_len: allocation failed\n");
			goto out;
		}

		spin_lock_irqsave(&logbuf_lock, flags);
		log_buf_len = size;
		log_buf = new_log_buf;

		offset = start = min(con_start, log_start);
		dest_idx = 0;
		while (start != log_end) {
			log_buf[dest_idx] = __log_buf[start & (__LOG_BUF_LEN - 1)];
			start++;
			dest_idx++;
		}
		log_start -= offset;
		con_start -= offset;
		log_end -= offset;
		spin_unlock_irqrestore(&logbuf_lock, flags);

		printk(KERN_NOTICE "log_buf_len: %d\n", log_buf_len);
	}
out:
	return 1;
}
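
This handler is hooked to the kernel command line with __setup(); the registration in kernel/printk.c is:

__setup("log_buf_len=", log_buf_len_setup);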
Example #10
static inline void resource_init(void)
{
	int i;

	code_resource.start = virt_to_bus(&_ftext);
	code_resource.end = virt_to_bus(&_etext) - 1;
	data_resource.start = virt_to_bus(&_fdata);
	data_resource.end = virt_to_bus(&_edata) - 1;

	/*
	 * Request address space for all standard RAM.
	 */
	for (i = 0; i < boot_mem_map.nr_map; i++) {
		struct resource *res;

		res = alloc_bootmem(sizeof(struct resource));
		switch (boot_mem_map.map[i].type) {
		case BOOT_MEM_RAM:
		case BOOT_MEM_ROM_DATA:
			res->name = "System RAM";
			break;
		case BOOT_MEM_RESERVED:
		default:
			res->name = "reserved";
		}

		res->start = boot_mem_map.map[i].addr;
		res->end = boot_mem_map.map[i].addr +
			   boot_mem_map.map[i].size - 1;

		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
		request_resource(&iomem_resource, res);

		/*
		 *  We don't know which RAM region contains kernel data,
		 *  so we try it repeatedly and let the resource manager
		 *  test it.
		 */
		request_resource(res, &code_resource);
		request_resource(res, &data_resource);
	}
}
Example #11
static void __init sec_log_save_old(void)
{
	/* provide previous log as last_kmsg */
	last_kmsg_size =
	    min((unsigned)(1 << CONFIG_LOG_BUF_SHIFT), *sec_log_ptr);
	last_kmsg_buffer = (char *)alloc_bootmem(last_kmsg_size);

	if (last_kmsg_size && last_kmsg_buffer) {
		unsigned i;
		for (i = 0; i < last_kmsg_size; i++)
			last_kmsg_buffer[i] =
			    sec_log_buf[(*sec_log_ptr - last_kmsg_size +
					 i) & (sec_log_size - 1)];

		pr_info("%s: saved old log at %d@%p\n",
			__func__, last_kmsg_size, last_kmsg_buffer);
	} else
		pr_err("%s: failed saving old log %d@%p\n",
		       __func__, last_kmsg_size, last_kmsg_buffer);
}
Example #12
static void __init setup_per_cpu_areas(void)
{
	unsigned long size, i;
	char *ptr;
	unsigned long nr_possible_cpus = num_possible_cpus();

	/* Copy section for each CPU (we discard the original) */
	size = ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES);
#ifdef CONFIG_MODULES
	if (size < PERCPU_ENOUGH_ROOM)
		size = PERCPU_ENOUGH_ROOM;
#endif
	ptr = alloc_bootmem(size * nr_possible_cpus);

	for_each_possible_cpu(i) {
		__per_cpu_offset[i] = ptr - __per_cpu_start;
		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
		ptr += size;
	}
}
Example #13
/*
 * Called at boot time while the bootmem allocator is active,
 * to allocate contiguous physical memory for the real memory
 * areas for guests.
 */
void kvm_rma_init(void)
{
	unsigned long i;
	unsigned long j, npages;
	void *rma;
	struct page *pg;

	/* Only do this on PPC970 in HV mode */
	if (!cpu_has_feature(CPU_FTR_HVMODE) ||
	    !cpu_has_feature(CPU_FTR_ARCH_201))
		return;

	if (!kvm_rma_size || !kvm_rma_count)
		return;

	/* Check that the requested size is one supported in hardware */
	if (lpcr_rmls(kvm_rma_size) < 0) {
		pr_err("RMA size of 0x%lx not supported\n", kvm_rma_size);
		return;
	}

	npages = kvm_rma_size >> PAGE_SHIFT;
	rma_info = alloc_bootmem(kvm_rma_count * sizeof(struct kvmppc_rma_info));
	for (i = 0; i < kvm_rma_count; ++i) {
		rma = alloc_bootmem_align(kvm_rma_size, kvm_rma_size);
		pr_info("Allocated KVM RMA at %p (%ld MB)\n", rma,
			kvm_rma_size >> 20);
		rma_info[i].base_virt = rma;
		rma_info[i].base_pfn = __pa(rma) >> PAGE_SHIFT;
		rma_info[i].npages = npages;
		list_add_tail(&rma_info[i].list, &free_rmas);
		atomic_set(&rma_info[i].use_count, 0);

		pg = pfn_to_page(rma_info[i].base_pfn);
		for (j = 0; j < npages; ++j) {
			atomic_inc(&pg->_count);
			++pg;
		}
	}
}
Example #14
static int __init log_buf_len_setup(char *str)
{
	unsigned long size = memparse(str, &str);
	unsigned long flags;

	if (size)
		size = roundup_pow_of_two(size);
	if (size > log_buf_len) {
		unsigned long start, dest_idx, offset;
		char * new_log_buf;

		new_log_buf = alloc_bootmem(size);
		if (!new_log_buf) {
			printk("log_buf_len: allocation failed\n");
			goto out;
		}

		spin_lock_irqsave(&logbuf_lock, flags);
		log_buf_len = size;
		log_buf = new_log_buf;

		offset = start = min(con_start, log_start);
		dest_idx = 0;
		while (start != log_end) {
			log_buf[dest_idx] = __log_buf[start & (__LOG_BUF_LEN - 1)];
			start++;
			dest_idx++;
		}
		log_start -= offset;
		con_start -= offset;
		log_end -= offset;
		spin_unlock_irqrestore(&logbuf_lock, flags);

		printk("log_buf_len: %d\n", log_buf_len);
	}
out:

	return 1;
}
Example #15
static int __init com20020isa_setup(char *s)
{
	struct net_device *dev;
	struct arcnet_local *lp;
	int ints[8];

	s = get_options(s, 8, ints);
	if (!ints[0])
		return 1;
	dev = alloc_bootmem(sizeof(struct net_device) + sizeof(struct arcnet_local));
	memset(dev, 0, sizeof(struct net_device) + sizeof(struct arcnet_local));
	lp = dev->priv = (struct arcnet_local *) (dev + 1);
	dev->init = com20020isa_probe;

	switch (ints[0]) {
	default:		/* ERROR */
		printk("com90xx: Too many arguments.\n");
	case 6:		/* Timeout */
		lp->timeout = ints[6];
	case 5:		/* CKP value */
		lp->clockp = ints[5];
	case 4:		/* Backplane flag */
		lp->backplane = ints[4];
	case 3:		/* Node ID */
		dev->dev_addr[0] = ints[3];
	case 2:		/* IRQ */
		dev->irq = ints[2];
	case 1:		/* IO address */
		dev->base_addr = ints[1];
	}
	if (*s)
		strncpy(dev->name, s, 9);
	else
		strcpy(dev->name, "arc%d");
	if (register_netdev(dev))
		printk(KERN_ERR "com20020: Cannot register arcnet device\n");

	return 1;
}
Example #16
int request_irq(unsigned int irq, 
		irqreturn_t (*handler)(int, void *, struct pt_regs *),
                unsigned long flags, const char *devname, void *dev_id)
{
	irq_handler_t *irq_handle;
	if (irq < 0 || irq >= NR_IRQS) {
		printk(KERN_ERR "Incorrect IRQ %d from %s\n", irq, devname);
		return -EINVAL;
	}

	if (irq_list[irq] || (h8300_enable_irq_pin(irq) == -EBUSY))
		return -EBUSY;

	if (use_kmalloc)
		irq_handle = kmalloc(sizeof(irq_handler_t), GFP_ATOMIC);
	else {
		/* use the bootmem allocator */
		irq_handle = (irq_handler_t *)alloc_bootmem(sizeof(irq_handler_t));
		irq_handle = (irq_handler_t *)((unsigned long)irq_handle | 0x80000000);
	}

	if (irq_handle == NULL)
		return -ENOMEM;

	irq_handle->handler = handler;
	irq_handle->flags   = flags;
	irq_handle->count   = 0;
	irq_handle->dev_id  = dev_id;
	irq_handle->devname = devname;
	irq_list[irq] = irq_handle;

	if (irq_handle->flags & IRQF_SAMPLE_RANDOM)
		rand_initialize_irq(irq);

	enable_irq(irq);
	return 0;
}
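
The 0x80000000 tag marks handles that came from bootmem and therefore can never be kfree()d. A hypothetical sketch of the matching free path that honours that tag (not the verbatim h8300 implementation):

void free_irq(unsigned int irq, void *dev_id)
{
	if (irq >= NR_IRQS || !irq_list[irq])
		return;

	disable_irq(irq);
	if (((unsigned long)irq_list[irq] & 0x80000000) == 0)
		kfree(irq_list[irq]);	/* came from kmalloc() */
	/* bootmem-backed handles (bit 31 set) are left in place */
	irq_list[irq] = NULL;
}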
Example #17
void __init msm7x30_allocate_memory_regions(void)
{
	void *addr;
	unsigned long size;
/*
   Request allocation of hardware-accessible PMEM regions
   at the beginning to make sure they are allocated in EBI-0.
   This allows a 7x30 with two memory banks to put the second
   bank into self-refresh state during idle power collapse.

    The current HW-accessible PMEM regions are:
    1. Frame buffer.
       LCDC HW can access msm_fb_resources during idle PC.

    2. Audio
       LPA HW can access android_pmem_audio_pdata during idle PC.
*/
	size = fb_size ? : MSM_FB_SIZE;
	addr = alloc_bootmem(size);
	msm_fb_resources[0].start = __pa(addr);
	msm_fb_resources[0].end = msm_fb_resources[0].start + size - 1;
	pr_info("allocating %lu bytes at %p (%lx physical) for fb\n",
		size, addr, __pa(addr));
#ifdef CONFIG_LGE_HIDDEN_RESET_PATCH
	fb_phys_addr = __pa(addr);
#endif
#ifdef CONFIG_FB_MSM_LCDC_LGDISPLAY_WVGA_OLED 
	/* LGE_CHANGE 
	* Copy the oled display screen to oled frame buffer
	* 2011-03-22, [email protected]
	*/
#ifdef CONFIG_FB_MSM_DEFAULT_DEPTH_RGB565
	memcpy(addr, __va(0x2FD00000), 480*800*2);
#elif defined (CONFIG_FB_MSM_DEFAULT_DEPTH_RGBA8888) \
    | defined (CONFIG_FB_MSM_DEFAULT_DEPTH_ARGB8888)
//	memcpy(addr, __va(0x2FD00000), 480*800*4);
#endif
#endif

	size = pmem_audio_size;
	if (size) {
		addr = alloc_bootmem(size);
		android_pmem_audio_pdata.start = __pa(addr);
		android_pmem_audio_pdata.size = size;
		pr_info("allocating %lu bytes at %p (%lx physical) for audio "
			"pmem arena\n", size, addr, __pa(addr));
	}

	size = pmem_kernel_ebi1_size;
	if (size) {
		addr = alloc_bootmem_aligned(size, 0x100000);
		android_pmem_kernel_ebi1_pdata.start = __pa(addr);
		android_pmem_kernel_ebi1_pdata.size = size;
		pr_info("allocating %lu bytes at %p (%lx physical) for kernel"
			" ebi1 pmem arena\n", size, addr, __pa(addr));
	}

	size = pmem_sf_size;
	if (size) {
		addr = alloc_bootmem(size);
		android_pmem_pdata.start = __pa(addr);
		android_pmem_pdata.size = size;
		pr_info("allocating %lu bytes at %p (%lx physical) for sf "
			"pmem arena\n", size, addr, __pa(addr));
	}

	if (machine_is_msm7x30_fluid())
		size = fluid_pmem_adsp_size;
	else
Example #18
static void __init pnv_pci_init_p5ioc2_phb(struct device_node *np, u64 hub_id,
					   void *tce_mem, u64 tce_size)
{
	struct pnv_phb *phb;
	const __be64 *prop64;
	u64 phb_id;
	int64_t rc;
	static int primary = 1;

	pr_info(" Initializing p5ioc2 PHB %s\n", np->full_name);

	prop64 = of_get_property(np, "ibm,opal-phbid", NULL);
	if (!prop64) {
		pr_err("  Missing \"ibm,opal-phbid\" property !\n");
		return;
	}
	phb_id = be64_to_cpup(prop64);
	pr_devel("  PHB-ID  : 0x%016llx\n", phb_id);
	pr_devel("  TCE AT  : 0x%016lx\n", __pa(tce_mem));
	pr_devel("  TCE SZ  : 0x%016llx\n", tce_size);

	rc = opal_pci_set_phb_tce_memory(phb_id, __pa(tce_mem), tce_size);
	if (rc != OPAL_SUCCESS) {
		pr_err("  Failed to set TCE memory, OPAL error %lld\n", rc);
		return;
	}

	phb = alloc_bootmem(sizeof(struct pnv_phb));
	if (phb) {
		memset(phb, 0, sizeof(struct pnv_phb));
		phb->hose = pcibios_alloc_controller(np);
	}
	if (!phb || !phb->hose) {
		pr_err("  Failed to allocate PCI controller\n");
		return;
	}

	spin_lock_init(&phb->lock);
	phb->hose->first_busno = 0;
	phb->hose->last_busno = 0xff;
	phb->hose->private_data = phb;
	phb->hub_id = hub_id;
	phb->opal_id = phb_id;
	phb->type = PNV_PHB_P5IOC2;
	phb->model = PNV_PHB_MODEL_P5IOC2;

	phb->regs = of_iomap(np, 0);

	if (phb->regs == NULL)
		pr_err("  Failed to map registers !\n");
	else {
		pr_devel("  P_BUID     = 0x%08x\n", in_be32(phb->regs + 0x100));
		pr_devel("  P_IOSZ     = 0x%08x\n", in_be32(phb->regs + 0x1b0));
		pr_devel("  P_IO_ST    = 0x%08x\n", in_be32(phb->regs + 0x1e0));
		pr_devel("  P_MEM1_H   = 0x%08x\n", in_be32(phb->regs + 0x1a0));
		pr_devel("  P_MEM1_L   = 0x%08x\n", in_be32(phb->regs + 0x190));
		pr_devel("  P_MSZ1_L   = 0x%08x\n", in_be32(phb->regs + 0x1c0));
		pr_devel("  P_MEM_ST   = 0x%08x\n", in_be32(phb->regs + 0x1d0));
		pr_devel("  P_MEM2_H   = 0x%08x\n", in_be32(phb->regs + 0x2c0));
		pr_devel("  P_MEM2_L   = 0x%08x\n", in_be32(phb->regs + 0x2b0));
		pr_devel("  P_MSZ2_H   = 0x%08x\n", in_be32(phb->regs + 0x2d0));
		pr_devel("  P_MSZ2_L   = 0x%08x\n", in_be32(phb->regs + 0x2e0));
	}

	/* Interpret the "ranges" property */
	/* This also maps the I/O region and sets isa_io/mem_base */
	pci_process_bridge_OF_ranges(phb->hose, np, primary);
	primary = 0;

	phb->hose->ops = &pnv_pci_ops;

	/* Setup MSI support */
	pnv_pci_init_p5ioc2_msis(phb);

	/* Setup TCEs */
	phb->dma_dev_setup = pnv_pci_p5ioc2_dma_dev_setup;
	pnv_pci_setup_iommu_table(&phb->p5ioc2.iommu_table,
				  tce_mem, tce_size, 0);
}
Example #19
void __init setup_arch(char **cmdline_p)
{
	unsigned long kernel_end;

#if defined(CONFIG_XEN_PRIVILEGED_GUEST)
	struct e820entry *machine_e820;
	struct xen_memory_map memmap;
#endif

#ifdef CONFIG_XEN
	/* Register a call for panic conditions. */
	atomic_notifier_chain_register(&panic_notifier_list, &xen_panic_block);

 	ROOT_DEV = MKDEV(RAMDISK_MAJOR,0); 
	kernel_end = 0;		/* dummy */
 	screen_info = SCREEN_INFO;

	if (xen_start_info->flags & SIF_INITDOMAIN) {
		/* This is drawn from a dump from vgacon:startup in
		 * standard Linux. */
		screen_info.orig_video_mode = 3;
		screen_info.orig_video_isVGA = 1;
		screen_info.orig_video_lines = 25;
		screen_info.orig_video_cols = 80;
		screen_info.orig_video_ega_bx = 3;
		screen_info.orig_video_points = 16;
	} else
		screen_info.orig_video_isVGA = 0;

	edid_info = EDID_INFO;
	saved_video_mode = SAVED_VIDEO_MODE;
	bootloader_type = LOADER_TYPE;

#ifdef CONFIG_BLK_DEV_RAM
	rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
	rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
	rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);


#endif

	setup_xen_features();

	HYPERVISOR_vm_assist(VMASST_CMD_enable,
			     VMASST_TYPE_writable_pagetables);

	ARCH_SETUP
#else
 	ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);
 	screen_info = SCREEN_INFO;
	edid_info = EDID_INFO;
	saved_video_mode = SAVED_VIDEO_MODE;
	bootloader_type = LOADER_TYPE;

#ifdef CONFIG_BLK_DEV_RAM
	rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
	rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
	rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
#endif
#endif	/* !CONFIG_XEN */
	setup_memory_region();
	copy_edd();

	if (!MOUNT_ROOT_RDONLY)
		root_mountflags &= ~MS_RDONLY;
	init_mm.start_code = (unsigned long) &_text;
	init_mm.end_code = (unsigned long) &_etext;
	init_mm.end_data = (unsigned long) &_edata;
	init_mm.brk = (unsigned long) &_end;

#ifndef CONFIG_XEN
	code_resource.start = virt_to_phys(&_text);
	code_resource.end = virt_to_phys(&_etext)-1;
	data_resource.start = virt_to_phys(&_etext);
	data_resource.end = virt_to_phys(&_edata)-1;
#endif

	parse_cmdline_early(cmdline_p);

	early_identify_cpu(&boot_cpu_data);

	/*
	 * partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	end_pfn = e820_end_of_ram();
	num_physpages = end_pfn;		/* for pfn_valid */

	check_efer();

#ifndef CONFIG_XEN
	discover_ebda();
#endif

	init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT));

#ifdef CONFIG_ACPI_NUMA
	/*
	 * Parse SRAT to discover nodes.
	 */
	acpi_numa_init();
#endif

#ifdef CONFIG_NUMA
	numa_initmem_init(0, end_pfn); 
#else
	contig_initmem_init(0, end_pfn);
#endif

	/* Reserve direct mapping */
	reserve_bootmem_generic(table_start << PAGE_SHIFT, 
				(table_end - table_start) << PAGE_SHIFT);

	/* reserve kernel */
	kernel_end = round_up(__pa_symbol(&_end),PAGE_SIZE);
	reserve_bootmem_generic(HIGH_MEMORY, kernel_end - HIGH_MEMORY);

#ifdef CONFIG_XEN
	/* reserve physmap, start info and initial page tables */
	reserve_bootmem(kernel_end, (table_start<<PAGE_SHIFT)-kernel_end);
#else
	/*
	 * reserve physical page 0 - it's a special BIOS page on many boxes,
	 * enabling clean reboots, SMP operation, laptop functions.
	 */
	reserve_bootmem_generic(0, PAGE_SIZE);

	/* reserve ebda region */
	if (ebda_addr)
		reserve_bootmem_generic(ebda_addr, ebda_size);
#endif

#ifdef CONFIG_SMP
	/*
	 * But first pinch a few for the stack/trampoline stuff
	 * FIXME: Don't need the extra page at 4K, but need to fix
	 * trampoline before removing it. (see the GDT stuff)
	 */
	reserve_bootmem_generic(PAGE_SIZE, PAGE_SIZE);

	/* Reserve SMP trampoline */
	reserve_bootmem_generic(SMP_TRAMPOLINE_BASE, PAGE_SIZE);
#endif

#ifdef CONFIG_ACPI_SLEEP
       /*
        * Reserve low memory region for sleep support.
        */
       acpi_reserve_bootmem();
#endif
#ifdef CONFIG_XEN
#ifdef CONFIG_BLK_DEV_INITRD
	if (xen_start_info->mod_start) {
		if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
			/*reserve_bootmem_generic(INITRD_START, INITRD_SIZE);*/
			initrd_start = INITRD_START + PAGE_OFFSET;
			initrd_end = initrd_start+INITRD_SIZE;
			initrd_below_start_ok = 1;
		} else {
			printk(KERN_ERR "initrd extends beyond end of memory "
				"(0x%08lx > 0x%08lx)\ndisabling initrd\n",
				(unsigned long)(INITRD_START + INITRD_SIZE),
				(unsigned long)(end_pfn << PAGE_SHIFT));
			initrd_start = 0;
		}
	}
#endif
#else	/* CONFIG_XEN */
#ifdef CONFIG_BLK_DEV_INITRD
	if (LOADER_TYPE && INITRD_START) {
		if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
			reserve_bootmem_generic(INITRD_START, INITRD_SIZE);
			initrd_start =
				INITRD_START ? INITRD_START + PAGE_OFFSET : 0;
			initrd_end = initrd_start+INITRD_SIZE;
		}
		else {
			printk(KERN_ERR "initrd extends beyond end of memory "
			    "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
			    (unsigned long)(INITRD_START + INITRD_SIZE),
			    (unsigned long)(end_pfn << PAGE_SHIFT));
			initrd_start = 0;
		}
	}
#endif
#endif	/* !CONFIG_XEN */
#ifdef CONFIG_KEXEC
	if (crashk_res.start != crashk_res.end) {
		reserve_bootmem(crashk_res.start,
			crashk_res.end - crashk_res.start + 1);
	}
#endif

	paging_init();
#ifdef CONFIG_X86_LOCAL_APIC
	/*
	 * Find and reserve possible boot-time SMP configuration:
	 */
	find_smp_config();
#endif
#ifdef CONFIG_XEN
	{
		int i, j, k, fpp;
		unsigned long va;

		/* 'Initial mapping' of initrd must be destroyed. */
		for (va = xen_start_info->mod_start;
		     va < (xen_start_info->mod_start+xen_start_info->mod_len);
		     va += PAGE_SIZE) {
			HYPERVISOR_update_va_mapping(va, __pte_ma(0), 0);
		}

		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
			/* Make sure we have a large enough P->M table. */
			phys_to_machine_mapping = alloc_bootmem(
				end_pfn * sizeof(unsigned long));
			memset(phys_to_machine_mapping, ~0,
			       end_pfn * sizeof(unsigned long));
			memcpy(phys_to_machine_mapping,
			       (unsigned long *)xen_start_info->mfn_list,
			       xen_start_info->nr_pages * sizeof(unsigned long));
			free_bootmem(
				__pa(xen_start_info->mfn_list),
				PFN_PHYS(PFN_UP(xen_start_info->nr_pages *
						sizeof(unsigned long))));

			/* Destroyed 'initial mapping' of old p2m table. */
			for (va = xen_start_info->mfn_list;
			     va < (xen_start_info->mfn_list +
				   (xen_start_info->nr_pages*sizeof(unsigned long)));
			     va += PAGE_SIZE) {
				HYPERVISOR_update_va_mapping(va, __pte_ma(0), 0);
			}

			/*
			 * Initialise the list of the frames that specify the
			 * list of frames that make up the p2m table. Used by
			 * save/restore.
			 */
			pfn_to_mfn_frame_list_list = alloc_bootmem(PAGE_SIZE);
			HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
				virt_to_mfn(pfn_to_mfn_frame_list_list);

			fpp = PAGE_SIZE/sizeof(unsigned long);
			for (i=0, j=0, k=-1; i< end_pfn; i+=fpp, j++) {
				if ((j % fpp) == 0) {
					k++;
					BUG_ON(k>=fpp);
					pfn_to_mfn_frame_list[k] =
						alloc_bootmem(PAGE_SIZE);
					pfn_to_mfn_frame_list_list[k] =
						virt_to_mfn(pfn_to_mfn_frame_list[k]);
					j=0;
				}
				pfn_to_mfn_frame_list[k][j] =
					virt_to_mfn(&phys_to_machine_mapping[i]);
			}
			HYPERVISOR_shared_info->arch.max_pfn = end_pfn;
		}

	}

	if (xen_start_info->flags & SIF_INITDOMAIN)
		dmi_scan_machine();

	if ( ! (xen_start_info->flags & SIF_INITDOMAIN))
	{
		acpi_disabled = 1;
#ifdef  CONFIG_ACPI
		acpi_ht = 0;
#endif
	}
#endif

#ifndef CONFIG_XEN
	check_ioapic();
#endif

	zap_low_mappings(0);

	/*
	 * set this early, so we dont allocate cpu0
	 * if MADT list doesnt list BSP first
	 * mpparse.c/MP_processor_info() allocates logical cpu numbers.
	 */
	cpu_set(0, cpu_present_map);
#ifdef CONFIG_ACPI
	/*
	 * Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
	 * Call this early for SRAT node setup.
	 */
	acpi_boot_table_init();

	/*
	 * Read APIC and some other early information from ACPI tables.
	 */
	acpi_boot_init();
#endif

	init_cpu_to_node();

#ifdef CONFIG_X86_LOCAL_APIC
	/*
	 * get boot-time SMP configuration:
	 */
	if (smp_found_config)
		get_smp_config();
#ifndef CONFIG_XEN
	init_apic_mappings();
#endif
#endif
#if defined(CONFIG_XEN) && defined(CONFIG_SMP) && !defined(CONFIG_HOTPLUG_CPU)
	prefill_possible_map();
#endif

	/*
	 * Request address space for all standard RAM and ROM resources
	 * and also for regions reported as reserved by the e820.
	 */
#if defined(CONFIG_XEN_PRIVILEGED_GUEST)
	probe_roms();
	if (xen_start_info->flags & SIF_INITDOMAIN) {
		machine_e820 = alloc_bootmem_low_pages(PAGE_SIZE);

		memmap.nr_entries = E820MAX;
		set_xen_guest_handle(memmap.buffer, machine_e820);

		BUG_ON(HYPERVISOR_memory_op(XENMEM_machine_memory_map, &memmap));

		e820_reserve_resources(machine_e820, memmap.nr_entries);
	}
#elif !defined(CONFIG_XEN)
	probe_roms();
	e820_reserve_resources(e820.map, e820.nr_map);
#endif

	request_resource(&iomem_resource, &video_ram_resource);

	{
	unsigned i;
	/* request I/O space for devices used on all i[345]86 PCs */
	for (i = 0; i < STANDARD_IO_RESOURCES; i++)
		request_resource(&ioport_resource, &standard_io_resources[i]);
	}

#if defined(CONFIG_XEN_PRIVILEGED_GUEST)
	if (xen_start_info->flags & SIF_INITDOMAIN) {
		e820_setup_gap(machine_e820, memmap.nr_entries);
		free_bootmem(__pa(machine_e820), PAGE_SIZE);
	}
#elif !defined(CONFIG_XEN)
	e820_setup_gap(e820.map, e820.nr_map);
#endif

#ifdef CONFIG_GART_IOMMU
	iommu_hole_init();
#endif

#ifdef CONFIG_XEN
	{
		struct physdev_set_iopl set_iopl;

		set_iopl.iopl = 1;
		HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);

		if (xen_start_info->flags & SIF_INITDOMAIN) {
			if (!(xen_start_info->flags & SIF_PRIVILEGED))
				panic("Xen granted us console access "
				      "but not privileged status");
		       
#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
			conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
			conswitchp = &dummy_con;
#endif
#endif
		} else {
			extern int console_use_vt;
			console_use_vt = 0;
		}
	}
#else	/* CONFIG_XEN */

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

#endif /* !CONFIG_XEN */
}
Example #20
asmlinkage void __init start_kernel(void)
{
	char * command_line;
	extern char saved_command_line[];

/*
 * Interrupts are still disabled. Do necessary setups, then
 * enable them
 */
	lock_kernel();
	printk(linux_banner);
	setup_arch(&command_line);
	printk("Kernel command line: %s\n", saved_command_line);
	parse_options(command_line);
	trap_init();
	init_IRQ();
	sched_init();
	softirq_init();
	time_init();

	/*
	 * HACK ALERT! This is early. We're enabling the console before
	 * we've done PCI setups etc, and console_init() must be aware of
	 * this. But we do want output early, in case something goes wrong.
	 */
	console_init();
#ifdef CONFIG_MODULES
	init_modules();
#endif
	if (prof_shift) {
		unsigned int size;
		/* only text is profiled */
		prof_len = (unsigned long) &_etext - (unsigned long) &_stext;
		prof_len >>= prof_shift;
		
		size = prof_len * sizeof(unsigned int) + PAGE_SIZE-1;
		prof_buffer = (unsigned int *) alloc_bootmem(size);
	}

	kmem_cache_init();
	sti();
	calibrate_delay();
#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start && !initrd_below_start_ok &&
			initrd_start < min_low_pfn << PAGE_SHIFT) {
		printk(KERN_CRIT "initrd overwritten (0x%08lx < 0x%08lx) - "
		    "disabling it.\n",initrd_start,min_low_pfn << PAGE_SHIFT);
		initrd_start = 0;
	}
#endif
	mem_init();
	kmem_cache_sizes_init();
	pgtable_cache_init();

	/*
	 * For architectures that have highmem, num_mappedpages represents
	 * the amount of memory the kernel can use.  For other architectures
	 * it's the same as the total pages.  We need both numbers because
	 * some subsystems need to initialize based on how much memory the
	 * kernel can use.
	 */
	if (num_mappedpages == 0)
		num_mappedpages = num_physpages;
  
	fork_init(num_mappedpages);
	proc_caches_init();
	vfs_caches_init(num_physpages);
	buffer_init(num_physpages);
	page_cache_init(num_physpages);
#if defined(CONFIG_ARCH_S390)
	ccwcache_init();
#endif
	signals_init();
#ifdef CONFIG_PROC_FS
	proc_root_init();
#endif
	check_bugs();
	printk("POSIX conformance testing by UNIFIX\n");

	/* 
	 *	We count on the initial thread going ok 
	 *	Like idlers init is an unlocked kernel thread, which will
	 *	make syscalls (and thus be locked).
	 */
	smp_init();
#if defined(CONFIG_SYSVIPC)
	ipc_init();
#endif
	rest_init();
}
void __init msm_msm7x2x_allocate_memory_regions(void)
{
	void *addr;
	unsigned long size;

	size = pmem_mdp_size;
	if (size) {
		addr = alloc_bootmem(size);
		android_pmem_pdata.start = __pa(addr);
		android_pmem_pdata.size = size;
		pr_info("allocating %lu bytes at %p (%lx physical) for mdp "
				"pmem arena\n", size, addr, __pa(addr));
	}

	size = pmem_adsp_size;
	if (size) {
		addr = alloc_bootmem(size);
		android_pmem_adsp_pdata.start = __pa(addr);
		android_pmem_adsp_pdata.size = size;
		pr_info("allocating %lu bytes at %p (%lx physical) for adsp "
				"pmem arena\n", size, addr, __pa(addr));
	}

	size = pmem_audio_size;
	if (size) {
		addr = alloc_bootmem(size);
		android_pmem_audio_pdata.start = __pa(addr);
		android_pmem_audio_pdata.size = size;
		pr_info("allocating %lu bytes (at %lx physical) for audio "
				"pmem arena\n", size , __pa(addr));
	}

	size = pmem_fb_size ? : MSM_FB_SIZE;
	addr = alloc_bootmem(size);
	msm_fb_resources[0].start = __pa(addr);
	msm_fb_resources[0].end = msm_fb_resources[0].start + size - 1;
	pr_info("allocating %lu bytes at %p (%lx physical) for fb\n",
			size, addr, __pa(addr));

	size = pmem_kernel_ebi1_size;
	if (size) {
		addr = alloc_bootmem_aligned(size, 0x100000);
		android_pmem_kernel_ebi1_pdata.start = __pa(addr);
		android_pmem_kernel_ebi1_pdata.size = size;
		pr_info("allocating %lu bytes at %p (%lx physical) for kernel"
				" ebi1 pmem arena\n", size, addr, __pa(addr));
	}
#ifdef CONFIG_ARCH_MSM7X27
	size = MSM_GPU_PHYS_SIZE;
	addr = alloc_bootmem(size);
	kgsl_resources[1].start = __pa(addr);
	kgsl_resources[1].end = kgsl_resources[1].start + size - 1;
	pr_info("allocating %lu bytes at %p (at %lx physical) for KGSL\n",
			size, addr, __pa(addr));
#endif

	// LGE_CHANGE_S [[email protected]] 2010-08-06, lge_mtd_direct_access
#ifdef CONFIG_MACH_MSM7X27_THUNDERC
	// PAGE_NUM_PER_BLK*PAGE_SIZE_BYTE
	lge_mtd_direct_access_addr = alloc_bootmem(64*2048);
#endif
	// LGE_CHANGE_E [[email protected]] 2010-08-06
}
int __init pq2ads_pci_init_irq(void)
{
	struct pq2ads_pci_pic *priv;
	struct irq_host *host;
	struct device_node *np;
	int ret = -ENODEV;
	int irq;

	np = of_find_compatible_node(NULL, NULL, "fsl,pq2ads-pci-pic");
	if (!np) {
		printk(KERN_ERR "No pci pic node in device tree.\n");
		of_node_put(np);
		goto out;
	}

	irq = irq_of_parse_and_map(np, 0);
	if (irq == NO_IRQ) {
		printk(KERN_ERR "No interrupt in pci pic node.\n");
		of_node_put(np);
		goto out;
	}

	priv = alloc_bootmem(sizeof(struct pq2ads_pci_pic));
	if (!priv) {
		of_node_put(np);
		ret = -ENOMEM;
		goto out_unmap_irq;
	}

	/* PCI interrupt controller registers: status and mask */
	priv->regs = of_iomap(np, 0);
	if (!priv->regs) {
		printk(KERN_ERR "Cannot map PCI PIC registers.\n");
		goto out_free_bootmem;
	}

	/* mask all PCI interrupts */
	out_be32(&priv->regs->mask, ~0);
	mb();

	host = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, NUM_IRQS,
	                      &pci_pic_host_ops, NUM_IRQS);
	if (!host) {
		ret = -ENOMEM;
		goto out_unmap_regs;
	}

	host->host_data = priv;

	priv->host = host;
	host->host_data = priv;
	set_irq_data(irq, priv);
	set_irq_chained_handler(irq, pq2ads_pci_irq_demux);

	of_node_put(np);
	return 0;

out_unmap_regs:
	iounmap(priv->regs);
out_free_bootmem:
	free_bootmem((unsigned long)priv,
	             sizeof(struct pq2ads_pci_pic));
	of_node_put(np);
out_unmap_irq:
	irq_dispose_mapping(irq);
out:
	return ret;
}
Example #23
static void __init sec_last_log_buf_reserve(void)
{
	last_log_buf = (char *)alloc_bootmem(s_log_buf_msk + 1);
}
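
For reference, alloc_bootmem() and its variants are thin macro wrappers over __alloc_bootmem(); in 2.6-era <linux/bootmem.h> they expand roughly to:

#define alloc_bootmem(x) \
	__alloc_bootmem(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
#define alloc_bootmem_low(x) \
	__alloc_bootmem_low(x, SMP_CACHE_BYTES, 0)
#define alloc_bootmem_pages(x) \
	__alloc_bootmem(x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
#define alloc_bootmem_low_pages(x) \
	__alloc_bootmem_low(x, PAGE_SIZE, 0)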
Example #24
File: bootmem.c  Project: B-Rich/codezero
pmd_table_t *alloc_boot_pmd(void)
{
	return alloc_bootmem(sizeof(pmd_table_t), sizeof(pmd_table_t));
}
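
Unlike the single-argument Linux form used elsewhere on this page, codezero's alloc_bootmem() takes an explicit (size, alignment) pair. A hypothetical analogue for a top-level table (pgd_table_t is assumed here by analogy with pmd_table_t):

pgd_table_t *alloc_boot_pgd(void)
{
	return alloc_bootmem(sizeof(pgd_table_t), sizeof(pgd_table_t));
}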
Example #25
File: page.c  Project: gurugio/gurugio
void paging_init(unsigned int phy_mem_size)
{
	int l;
	pte_t *pte;
	pgd_t *pgd;
	unsigned long addr;
	unsigned long pte_start;
	int pgd_count;


#ifdef DEBUG
	caos_printf("Init Page Global Dir\n");
#endif

	swapper_pg_dir = (pgd_t *)SWAPPER_PG_DIR_ADDR;

	// PRINT PAGE DIR ENTRY!!
	//
#ifdef DEBUG
	caos_printf("## PGD check : %x -> 0x23=accessed, su, R/W, Pre\n", swapper_pg_dir[0] ); 
	

	caos_printf("## Physical memory 0~4MB is mapping to 0xC0000000~0xC0400000\n");
	caos_printf("0xB8000 -> 0xC00B8000, First char of scrren C->A\n");
	do { char *t=(char *)0xC00b8000; *t = 'A'; } while (0);
#endif

	//
	// swapper_pg_dir[300] is already allocated for 0~4MB in setup.asm
	// This is mapping PAGE DIR for 4MB~ end of physical memory
	//
	pgd = swapper_pg_dir+0x301;
	pte_start = (unsigned long)alloc_bootmem();
	addr = pte_start + (_PAGE_RW | _PAGE_PRESENT);
	pgd_count = phy_mem_size/4;

	for (l=1; l<pgd_count; l++) {
		set_pgd(pgd, __pgd(addr));
#ifdef DEBUG
		caos_printf("swapper[%d]=%x ", l, addr);
#endif
		addr = (unsigned long)alloc_bootmem() + (_PAGE_RW|_PAGE_PRESENT);
		pgd++;
	}

#ifdef DEBUG
	for (l=0x2ff; l<0x309; l++)
		caos_printf("swapper_pg_dir[%d]=%x  ", l, swapper_pg_dir[l]);
	caos_printf("\n");
#endif

	//
	// mapping PAGE TABLE for 4MB ~ end of physical memory
	//
	pte = (pte_t *)pte_start;	// page table at 0x5000;
	addr = (4*0x100000) + (_PAGE_RW | _PAGE_PRESENT);	// 4MB +
	pgd_count = phy_mem_size/4;

	for (l=1024; l<PTRS_PER_PTE*pgd_count; l++) {	// fill out 7 tables
		set_pte(pte, __pte(addr));
		addr += PAGE_SIZE;
		pte++;
	}

#ifdef DEBUG
	caos_printf("TEST PHY mapping..");
	do {
		char *pt;
		for (pt = (char *)0xC0400000; pt<(char *)0xC0000000+phy_mem_size*0x100000; pt+=0x100000)
			*pt = 'a';
		caos_printf("MEM test %x ", (unsigned long)pt);
		if (*pt == 'a')
			caos_printf("OK\n");
		else
			caos_printf("FAIL\n");
	} while (0);
#endif
}
Example #26
File: page.c  Project: yulin724/StarxOS
int init_paging()
{
    u32int addr;

    page_directory = (u32int *)alloc_bootmem(0x1000, 1);
    memset(page_directory, 0, 0x1000);

    printf_bochs("page_directory at %x\n", page_directory);

/*
struct page_directory_entry {
    u8int present:1;
    u8int read_or_write:1;
    u8int level:1;
    u8int reserved1:2;
    u8int accessed:1;
    u8int dirty:1;
    u8int reserved2:2;
    u8int avail:3;
    u32int page_frame_address:20;
} __attribute__((packed));
*/
    printf_bochs("_brk_end: %x\n", _brk_end);
    for (addr = 0; addr < _brk_end; addr += 0x1000) {
        u32int pd_index;
        u32int pt_index;
        page_directory_entry_t pde;
        page_table_entry_t pte;
        u32int *page_table;

        pd_index = addr / 0x400000; // addr/4M
        pt_index = addr / 0x1000 % 1024;

        //printf_bochs("pd_index:%x page_directory[pd_index]=%x, pt_index:%x\n", pd_index, page_directory[pd_index], pt_index);

        if (!page_directory[pd_index]) {
            pde.present = 1;
            pde.read_or_write = 1;
            pde.level = 0;
            pde.reserved1 = 0;
            pde.accessed = 1;
            pde.dirty = 0;
            pde.reserved2 = 0;
            pde.avail = 0;

            page_table = (u32int *)alloc_bootmem(0x1000,1);
            pde.page_frame_address = (long)page_table / 0x1000; // addr/4k
            page_directory[pd_index] = pde.page_frame_address << 12 | 0x23 ;

            //printf_bochs("page_table at %x\n", page_table);
            //printf_bochs("page_directory[%x]@%x=%x, %x ~ %x\n", pd_index, &(page_directory[pd_index]), page_directory[pd_index], addr, addr + 0x400000);
        } else {
            /* PDE already present: recover the page-table address from it,
             * otherwise page_table below would be used uninitialized */
            page_table = (u32int *)(page_directory[pd_index] & 0xFFFFF000);
        }

        if (!page_table[pt_index]) {
            pte.present = 1;
            pte.read_or_write = 1;
            pte.level = 0;
            pte.reserved1 = 0;
            pte.accessed = 1;
            pte.dirty = 0;
            pte.reserved2 = 0;
            pte.avail = 0;

            pte.page_frame_address = addr / 0x1000; // addr/4k
            page_table[pt_index] = pte.page_frame_address << 12 | 0x23 ;

            //printf_bochs( "page_table[%x]=%x, %x ~ %x\n", pt_index, page_table[pt_index], addr, addr+0x1000);
        }
    }

    write_cr3((u32int)page_directory);
    write_cr0(read_cr0() | 0x80000000);
    page_enabled=1;
    //BOCHS_DEBUGGER_ENTER;

    register_irq_service(14, page_fault);

    return 0;
}
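
The control-register accessors used at the end of init_paging() are assumed to be the usual one-line inline-asm wrappers, roughly:

static inline void write_cr3(u32int val)
{
    asm volatile("mov %0, %%cr3" : : "r"(val));
}

static inline u32int read_cr0(void)
{
    u32int val;
    asm volatile("mov %%cr0, %0" : "=r"(val));
    return val;
}

static inline void write_cr0(u32int val)
{
    asm volatile("mov %0, %%cr0" : : "r"(val));
}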
Example #27
asmlinkage void __init start_kernel(void)
{
	char * command_line;
	extern struct kernel_param __start___param[], __stop___param[];
#ifdef CONFIG_RTAI_RTSPMM
        unsigned int indice_part;
        /* Size of the needed memory block by the configuration */
        unsigned long rt_mem_block_size = 0;
#endif
/*
 * Interrupts are still disabled. Do necessary setups, then
 * enable them
 */
	lock_kernel();
	page_address_init();
	printk(linux_banner);
	setup_arch(&command_line);
	setup_per_cpu_areas();

	/*
	 * Mark the boot cpu "online" so that it can call console drivers in
	 * printk() and can access its per-cpu storage.
	 */
	smp_prepare_boot_cpu();

	/*
	 * Set up the scheduler prior starting any interrupts (such as the
	 * timer interrupt). Full topology setup happens at smp_init()
	 * time - but meanwhile we still have a functioning scheduler.
	 */
	sched_init();
	/*
	 * Disable preemption - early bootup scheduling is extremely
	 * fragile until we cpu_idle() for the first time.
	 */
	preempt_disable();
	build_all_zonelists();
	page_alloc_init();
	early_init_hardirqs();
	printk("Kernel command line: %s\n", saved_command_line);
	parse_early_param();
	parse_args("Booting kernel", command_line, __start___param,
		   __stop___param - __start___param,
		   &unknown_bootoption);
	sort_main_extable();
	trap_init();
	rcu_init();
	init_IRQ();
	pidhash_init();
	init_timers();
	softirq_init();
	time_init();

	/*
	 * HACK ALERT! This is early. We're enabling the console before
	 * we've done PCI setups etc, and console_init() must be aware of
	 * this. But we do want output early, in case something goes wrong.
	 */
	console_init();
	if (panic_later)
		panic(panic_later, panic_param);

#ifdef CONFIG_RTAI_RTSPMM
        /* Allocate a big and continuous memory block for the module SPMM
           included in the RTAI functionalities */
        printk("--- Memory Allocation for the module rt_spmm ---\n");
        /* WARNING
           We need to add some space for the structures vrtxptext and vrtxpt and the partitions bitmap
           that the module rt_spmm uses to handle the blocks in each partition */
        /* for each defined partitions */
        for(indice_part = 0; indice_part <  RT_MAX_PART_NUM; indice_part ++)
          {
            if ((rt_partitions_table[indice_part].block_size != 0) &&
                (rt_partitions_table[indice_part].num_of_blocks != 0))
              {
                rt_partitions_table[indice_part].part_size =
                  (rt_partitions_table[indice_part].block_size + XN_NBBY)
                  *rt_partitions_table[indice_part].num_of_blocks
                  + sizeof(vrtxptext_t) + sizeof(vrtxpt_t);
                rt_mem_block_size += rt_partitions_table[indice_part].part_size;
              }
          }
#ifdef CONFIG_RTAI_PART_DMA
        printk("Allocate memory in the low part of memory\n");
        rt_mem_block_ptr=(void*)alloc_bootmem_low(rt_mem_block_size + PAGE_SIZE-1);
#else
        printk("Allocate memory in the standard part of memory\n");
        rt_mem_block_ptr=(void*)alloc_bootmem(rt_mem_block_size + PAGE_SIZE-1);
#endif /* CONFIG_PART_DMA */
        printk("Needed Memory Size : %lu\n", rt_mem_block_size);
        printk("Allocated Memory Size : %lu\n", rt_mem_block_size + PAGE_SIZE-1);
        printk("Memory block address : 0x%x\n", (unsigned int)rt_mem_block_ptr);
        printk("-----------------------------------------------\n");
#endif /* CONFIG_RTAI_RTSPMM */

	profile_init();
	local_irq_enable();
#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start && !initrd_below_start_ok &&
			initrd_start < min_low_pfn << PAGE_SHIFT) {
		printk(KERN_CRIT "initrd overwritten (0x%08lx < 0x%08lx) - "
		    "disabling it.\n",initrd_start,min_low_pfn << PAGE_SHIFT);
		initrd_start = 0;
	}
#endif
	vfs_caches_init_early();
	mem_init();
	kmem_cache_init();
	numa_policy_init();
	if (late_time_init)
		late_time_init();
	calibrate_delay();
	pidmap_init();
	pgtable_cache_init();
	prio_tree_init();
	anon_vma_init();
#ifdef CONFIG_X86
	if (efi_enabled)
		efi_enter_virtual_mode();
#endif
	fork_init(num_physpages);
	proc_caches_init();
	buffer_init();
	unnamed_dev_init();
	security_init();
	vfs_caches_init(num_physpages);

#ifdef CONFIG_MOT_FEAT_DEVICE_TREE
	mothwcfg_init();
#endif /* CONFIG_MOT_FEAT_DEVICE_TREE */

	radix_tree_init();
	signals_init();
	/* rootfs populating might need page-writeback */
	page_writeback_init();
#ifdef CONFIG_PROC_FS
	proc_root_init();
#endif
	check_bugs();

	acpi_early_init(); /* before LAPIC and SMP init */

	/* Do the rest non-__init'ed, we're now alive */
	rest_init();
}
Example #28
/**
 * alloc_bootmem_cpumask_var - allocate a struct cpumask from the bootmem arena.
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
 * a nop (in <linux/cpumask.h>).
 * Either returns an allocated (zero-filled) cpumask, or causes the
 * system to panic.
 */
void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask)
{
	*mask = alloc_bootmem(cpumask_size());
}
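
When CONFIG_CPUMASK_OFFSTACK is not set, cpumask_var_t is a plain one-element array and the bootmem variant collapses to a no-op; roughly, from <linux/cpumask.h> of the same era:

static inline void alloc_bootmem_cpumask_var(cpumask_var_t *mask)
{
}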
/*
 * Initialize the context management stuff.
 */
void __init mmu_context_init(void)
{
	/* Mark init_mm as being active on all possible CPUs since
	 * we'll get called with prev == init_mm the first time
	 * we schedule on a given CPU
	 */
	init_mm.context.active = NR_CPUS;

	/*
	 *   The MPC8xx has only 16 contexts.  We rotate through them on each
	 * task switch.  A better way would be to keep track of tasks that
	 * own contexts, and implement an LRU usage.  That way very active
	 * tasks don't always have to pay the TLB reload overhead.  The
	 * kernel pages are mapped shared, so the kernel can run on behalf
	 * of any task that makes a kernel entry.  Shared does not mean they
	 * are not protected, just that the ASID comparison is not performed.
	 *      -- Dan
	 *
	 * The IBM4xx has 256 contexts, so we can just rotate through these
	 * as a way of "switching" contexts.  If the TID of the TLB is zero,
	 * the PID/TID comparison is disabled, so we can use a TID of zero
	 * to represent all kernel pages as shared among all contexts.
	 * 	-- Dan
	 */
	if (mmu_has_feature(MMU_FTR_TYPE_8xx)) {
		first_context = 0;
		last_context = 15;
	} else {
		first_context = 1;
		last_context = 255;
	}

#ifdef DEBUG_CLAMP_LAST_CONTEXT
	last_context = DEBUG_CLAMP_LAST_CONTEXT;
#endif
	/*
	 * Allocate the maps used by context management
	 */
	context_map = alloc_bootmem(CTX_MAP_SIZE);
	context_mm = alloc_bootmem(sizeof(void *) * (last_context + 1));
	stale_map[0] = alloc_bootmem(CTX_MAP_SIZE);

#ifdef CONFIG_SMP
	register_cpu_notifier(&mmu_context_cpu_nb);
#endif

	printk(KERN_INFO
	       "MMU: Allocated %zu bytes of context maps for %d contexts\n",
	       2 * CTX_MAP_SIZE + (sizeof(void *) * (last_context + 1)),
	       last_context - first_context + 1);

	/*
	 * Some processors have too few contexts to reserve one for
	 * init_mm, and require using context 0 for a normal task.
	 * Other processors reserve the use of context zero for the kernel.
	 * This code assumes first_context < 32.
	 */
	context_map[0] = (1 << first_context) - 1;
	next_context = first_context;
	nr_free_contexts = last_context - first_context + 1;
}
Example #30
int smu_init (void)
{
	struct device_node *np;
	u32 *data;

        np = of_find_node_by_type(NULL, "smu");
        if (np == NULL)
		return -ENODEV;

	if (smu_cmdbuf_abs == 0) {
		printk(KERN_ERR "SMU: Command buffer not allocated !\n");
		return -EINVAL;
	}

	smu = alloc_bootmem(sizeof(struct smu_device));
	if (smu == NULL)
		return -ENOMEM;
	memset(smu, 0, sizeof(*smu));

	spin_lock_init(&smu->lock);
	smu->of_node = np;
	/* smu_cmdbuf_abs is in the low 2G of RAM, can be converted to a
	 * 32 bits value safely
	 */
	smu->cmd_buf_abs = (u32)smu_cmdbuf_abs;
	smu->cmd_buf = (struct smu_cmd_buf *)abs_to_virt(smu_cmdbuf_abs);

	np = of_find_node_by_name(NULL, "smu-doorbell");
	if (np == NULL) {
		printk(KERN_ERR "SMU: Can't find doorbell GPIO !\n");
		goto fail;
	}
	data = (u32 *)get_property(np, "reg", NULL);
	of_node_put(np);
	if (data == NULL) {
		printk(KERN_ERR "SMU: Can't find doorbell GPIO address !\n");
		goto fail;
	}

	/* Current setup has one doorbell GPIO that does both doorbell
	 * and ack. GPIOs are at 0x50, best would be to find that out
	 * in the device-tree though.
	 */
	smu->db_req = 0x50 + *data;
	smu->db_ack = 0x50 + *data;

	/* Doorbell buffer is currently hard-coded, I didn't find a proper
	 * device-tree entry giving the address. Best would probably to use
	 * an offset for K2 base though, but let's do it that way for now.
	 */
	smu->db_buf = ioremap(0x8000860c, 0x1000);
	if (smu->db_buf == NULL) {
		printk(KERN_ERR "SMU: Can't map doorbell buffer pointer !\n");
		goto fail;
	}

	sys_ctrler = SYS_CTRLER_SMU;
	return 0;

 fail:
	smu = NULL;
	return -ENXIO;

}