Example #1
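A task-creation routine from a hobby x86 kernel. init_task() zeroes the task struct, allocates and clears a fresh page directory, shares the kernel's low identity mapping through the first PDE, fills in the TSS, and gives the task a one-page stack mapped at the very top of its address space.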
void init_task(struct Task_t *t, uint16_t cs, uint16_t ds, uint16_t ss)
{
    memset(t, 0, sizeof(struct Task_t));
    /* Take the page directory from the first 4 MiB (pages 0-1023) so the
     * kernel's identity mapping can reach it. */
    uint32_t page_dir_pn = alloc_phy_page(0, 1023);
    uint32_t stack;
    t->block_event = NONBLOCKED;
    t->page_dir = (uint32_t *)(page_dir_pn << 12);
    if(CURRENT_TASK)
        t->ppid = CURRENT_TASK->pid;
    else
        t->ppid = 0;    /* first task: no parent */
    /* Identity-map the new page directory, clear it, and share the
     * kernel's low 4 MiB mapping through the first PDE. */
    map_kernel_page(page_dir_pn, page_dir_pn, true, false, true);
    memset(t->page_dir, 0, 4096);
    t->page_dir[0] = PAGE_DIR[0];
    t->next = NULL;
    t->status = READY;
    t->tss.cr3 = (uint32_t)t->page_dir;
    t->tss.eflags = 0x202;  /* IF set, plus the always-one reserved bit 1 */
    t->tss.cs = cs;
    t->tss.ds = ds;
    t->tss.es = ds;
    t->tss.fs = ds;
    t->tss.gs = ds;
    t->tss.ss = ss;

    t->sem_list_size = 0;
    t->sigint_handler = sigint_default;

    /* One page of user stack taken from above 4 MiB and mapped at the
     * highest virtual page; esp starts at the last byte of that page. */
    stack = alloc_phy_page(1024, max_page_num);
    map_page(t->page_dir, (0xFFFFFFFF - 4096 + 1) >> 12, stack, false, true, true);
    t->tss.esp = 0xFFFFFFFF;
}
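A hypothetical call site, for orientation only (the KERNEL_* selector constants are illustrative and not defined anywhere in this snippet):

struct Task_t task;
init_task(&task, KERNEL_CODE_SEL, KERNEL_DATA_SEL, KERNEL_DATA_SEL);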
Example #2
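The map_page() helper the previous example relies on. It installs a single 4 KiB mapping in an arbitrary page directory, allocating and zeroing a page table on demand, reference-counting the physical page, and flushing the TLB by reloading CR3.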
void map_page(uint32_t *page_dir, uint32_t virt_page_num, uint32_t phy_page_num,
        bool global, bool user, bool read_write)
{
    uint16_t pd_idx = PD_IDX(virt_page_num),
             pt_idx = PT_IDX(virt_page_num);
    uint32_t *page_table;

    /* No page table yet for this directory slot: allocate one from low
     * memory, identity-map it so it can be written, and zero it. */
    if(!(page_dir[pd_idx] & 0x1)) {
        uint32_t pt_page_num = alloc_phy_page(0, 1023);
        map_kernel_page(pt_page_num, pt_page_num, true, false, true);
        memset((void *)(pt_page_num << 12), 0, 4096);
        page_dir[pd_idx] = make_pde(pt_page_num, user, read_write);
    }

    page_table = (uint32_t *)(page_dir[pd_idx] & 0xfffff000);
    page_table[pt_idx] = make_pte(phy_page_num, global, user, read_write);
    ++phy_mem_rc[phy_page_num];     /* reference-count the physical page */
    /* Reload CR3 to flush the entire (non-global) TLB. */
    __asm__ volatile(
        ".intel_syntax noprefix;"
        "mov eax, cr3;"
        "mov cr3, eax;"
        ".att_syntax;"
        :::"eax"
    );
}
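Reloading CR3 discards every non-global TLB entry even though only one page changed. On 486+ CPUs a single invlpg is cheaper; a minimal sketch, applicable only when page_dir is the directory currently loaded in CR3 (if it is not, no flush is needed at all):

static inline void invlpg_page(uint32_t virt_page_num)
{
    /* Invalidate just the TLB entry covering this virtual page. */
    __asm__ volatile("invlpg (%0)" :: "r"(virt_page_num << 12) : "memory");
}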
Example #3
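From the Linux powerpc 8xx code: mmu_mapin_immr() maps the IMMR register block page by page with non-cacheable, guarded kernel permissions, using the (virtual, physical, flags) form of map_kernel_page().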
static void __init mmu_mapin_immr(void)
{
	unsigned long p = PHYS_IMMR_BASE;
	unsigned long v = VIRT_IMMR_BASE;
	unsigned long f = pgprot_val(PAGE_KERNEL_NCG);
	int offset;

	for (offset = 0; offset < IMMR_SIZE; offset += PAGE_SIZE)
		map_kernel_page(v + offset, p + offset, f);
}
Example #4
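Also Linux/powerpc: __ioremap_at() builds the page tables for an IO mapping at a caller-chosen effective address, forcing the present bit into the flags, rejecting the 4K PFN hack, and warning on any unaligned input.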
/**
 * __ioremap_at - Low level function to establish the page tables
 *                for an IO mapping
 */
void __iomem * __ioremap_at(phys_addr_t pa, void *ea, unsigned long size,
			    unsigned long flags)
{
	unsigned long i;

	/* Make sure we have the base flags */
	if ((flags & _PAGE_PRESENT) == 0)
		flags |= pgprot_val(PAGE_KERNEL);

	/* We don't support the 4K PFN hack with ioremap */
	if (flags & H_PAGE_4K_PFN)
		return NULL;

	WARN_ON(pa & ~PAGE_MASK);
	WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
	WARN_ON(size & ~PAGE_MASK);

	for (i = 0; i < size; i += PAGE_SIZE)
		if (map_kernel_page((unsigned long)ea+i, pa+i, flags))
			return NULL;

	return (void __iomem *)ea;
}
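Note that a failure partway through the loop returns NULL without tearing down the pages mapped by earlier iterations; the caller owns the effective-address range and any cleanup.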
Example #5
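Linux/powerpc Book3E: vmemmap_create_mapping() fills the vmemmap area with large-page PTEs. Every 4 KiB slice of the area gets the same physical address, with the page-size encoding packed into flag bits 8-11.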
/*
 * On Book3E CPUs, the vmemmap is currently mapped in the top half of
 * the vmalloc space using normal page tables, though the size of
 * pages encoded in the PTEs can be different
 */
int __meminit vmemmap_create_mapping(unsigned long start,
				     unsigned long page_size,
				     unsigned long phys)
{
	/* Create a PTE encoding without page size */
	unsigned long i, flags = _PAGE_PRESENT | _PAGE_ACCESSED |
		_PAGE_KERNEL_RW;

	/* PTEs only contain page size encodings up to 32M */
	BUG_ON(mmu_psize_defs[mmu_vmemmap_psize].enc > 0xf);

	/* Encode the size in the PTE */
	flags |= mmu_psize_defs[mmu_vmemmap_psize].enc << 8;

	/* For each PTE for that area, map things. Note that we don't
	 * increment phys because all PTEs are of the large size and
	 * thus must have the low bits clear
	 */
	for (i = 0; i < page_size; i += PAGE_SIZE)
		BUG_ON(map_kernel_page(start + i, phys, __pgprot(flags)));

	return 0;
}
Example #6
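Finally, the 32-bit powerpc __ioremap_caller(), which wraps map_kernel_page() in the full ioremap policy: page-align the request, redirect the first 16 MB into ISA memory space, refuse to remap RAM once the page allocator is up, reuse existing block mappings, and take virtual space either from vmalloc or from the grow-down ioremap_bot allocator depending on boot stage.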
void __iomem *
__ioremap_caller(phys_addr_t addr, unsigned long size, pgprot_t prot, void *caller)
{
	unsigned long v, i;
	phys_addr_t p;
	int err;

	/*
	 * Choose an address to map it to.
	 * Once the vmalloc system is running, we use it.
	 * Before then, we use space going down from IOREMAP_TOP
	 * (ioremap_bot records where we're up to).
	 */
	p = addr & PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - p;

	/*
	 * If the address lies within the first 16 MB, assume it's in ISA
	 * memory space
	 */
	if (p < 16*1024*1024)
		p += _ISA_MEM_BASE;

#ifndef CONFIG_CRASH_DUMP
	/*
	 * Don't allow anybody to remap normal RAM that we're using.
	 * mem_init() sets high_memory so only do the check after that.
	 */
	if (slab_is_available() && p <= virt_to_phys(high_memory - 1) &&
	    page_is_ram(__phys_to_pfn(p))) {
		printk("__ioremap(): phys addr 0x%llx is RAM lr %ps\n",
		       (unsigned long long)p, __builtin_return_address(0));
		return NULL;
	}
#endif

	if (size == 0)
		return NULL;

	/*
	 * Is it already mapped?  Perhaps overlapped by a previous
	 * mapping.
	 */
	v = p_block_mapped(p);
	if (v)
		goto out;

	if (slab_is_available()) {
		struct vm_struct *area;
		area = get_vm_area_caller(size, VM_IOREMAP, caller);
		if (area == 0)
			return NULL;
		area->phys_addr = p;
		v = (unsigned long) area->addr;
	} else {
		v = (ioremap_bot -= size);
	}

	/*
	 * Should check if it is a candidate for a BAT mapping
	 */

	err = 0;
	for (i = 0; i < size && err == 0; i += PAGE_SIZE)
		err = map_kernel_page(v + i, p + i, prot);
	if (err) {
		if (slab_is_available())
			vunmap((void *)v);
		return NULL;
	}

out:
	return (void __iomem *) (v + ((unsigned long)addr & ~PAGE_MASK));
}
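The final return adds back the sub-page offset of the original addr, so the caller receives a pointer to the exact requested byte even though the mapping itself is built in whole pages.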