Example #1
/*
 * To avoid races, we repeat for each process after having
 * swapped something in. That gets rid of a few pesky races,
 * and "swapoff" isn't exactly timing critical.
 */
static int try_to_unuse(unsigned int type)
{
	int nr;
	unsigned long page = get_free_page(GFP_KERNEL);

	if (!page)
		return -ENOMEM;
	nr = 0;
	while (nr < NR_TASKS) {
		struct task_struct * p = task[nr];
		if (p) {
			if (unuse_process(p->mm, type, page)) {
				page = get_free_page(GFP_KERNEL);
				if (!page)
					return -ENOMEM;
				continue;
			}
		}
		nr++;
	}
	free_page(page);
#ifdef CONFIG_SYSVIPC	
	shm_unuse(type);
#endif
	return 0;
}
Example #2
/* Must be called with interrupts enabled */
int gs_init_port(struct gs_port *port)
{
	unsigned long flags;
	unsigned long page;

	save_flags (flags);
	if (!tmp_buf) {
		page = get_free_page(GFP_KERNEL);

		cli (); /* Don't expect this to make a difference. */ 
		if (tmp_buf)
			free_page(page);
		else
			tmp_buf = (unsigned char *) page;
		restore_flags (flags);

		if (!tmp_buf) {
			return -ENOMEM;
		}
	}

	if (port->flags & ASYNC_INITIALIZED)
		return 0;

	if (!port->xmit_buf) {
		/* We may sleep in get_free_page() */
		unsigned long tmp;

		tmp = get_free_page(GFP_KERNEL);

		/* Spinlock? */
		cli ();
		if (port->xmit_buf) 
			free_page (tmp);
		else
			port->xmit_buf = (unsigned char *) tmp;
		restore_flags (flags);

		if (!port->xmit_buf)
			return -ENOMEM;
	}

	cli();

	if (port->tty) 
		clear_bit(TTY_IO_ERROR, &port->tty->flags);

	port->xmit_cnt = port->xmit_head = port->xmit_tail = 0;

	gs_set_termios(port->tty, NULL);

	port->flags |= ASYNC_INITIALIZED;
	port->flags &= ~GS_TX_INTEN;

	restore_flags(flags);
	return 0;
}
Example #3
 * may not have its own private page directory (the page directory is also shared
 * when the process is created), so we must also decide whether to allocate a page
 * directory for some of these processes. Second-level page-table pages need the
 * same treatment.
 * I: vaddr - virtual address of the page that raised the write-protection fault.
 */
BOOL un_page_wp(unsigned long vaddr)
{
	unsigned long addr, tmp;

	if(vaddr >= V_KERNEL_ZONE_START)	/* used for debugging kernel code */
		panic("un_page_wp: BAD, write to kernel address space.");
	if(vaddr < current->code_start || vaddr >= current->data_end
		|| (vaddr >= current->code_end && vaddr < current->data_start))
		panic("un_page_wp: out of code or data space limit.");

	/* The process does not yet have its own page directory, so allocate one */
	if(1 < mem_map[M2MAP(current->tss.cr3)]) {
		addr = current->tss.cr3;
		if(!copy_page_dir(&(current->tss.cr3))) {
			k_printf("un_page_wp: have no free-page!");
			return FALSE;
		}
		load_cr3(current->tss.cr3);
		--mem_map[M2MAP(addr)];
	}
	addr = ADDR_IN_1PT(current->tss.cr3, vaddr);

	/* The process does not yet have its own second-level page-table page, so allocate one */
	if(1 < mem_map[M2MAP(ALIGN_PAGE(*(unsigned long *)addr))]) {
		/* get_free_page rather than get_clean_page, because the whole page is copied right below */
		if(NULL == (tmp=get_free_page())) {
			k_printf("un_page_wp: have no free-page!");
			return FALSE;
		}
		memcpy((char *)tmp, (char *)ALIGN_PAGE(*(unsigned long *)addr),
			PAGE_SIZE);
		--mem_map[M2MAP(ALIGN_PAGE(*(unsigned long *)addr))];
		*(unsigned long *)addr = tmp + P_UWP;
	}
	addr = ALIGN_PAGE(*(unsigned long *)addr);
	addr = ADDR_IN_2PT(addr, vaddr);

	/* If the final page's reference count is 1, just mark it writable; otherwise allocate a private copy */
	if(1 == mem_map[M2MAP(ALIGN_PAGE(*(unsigned long *)addr))]) {
		*(unsigned long *)addr |= 0x00000002;	/* mark the page writable */
	} else {
		if(NULL == (tmp=get_free_page())) {
			k_printf("un_page_wp: have no enough memory!");
			return FALSE;
		}
		memcpy((char *)tmp, (char *)ALIGN_PAGE(*(unsigned long *)addr),
			PAGE_SIZE);
		--mem_map[M2MAP(ALIGN_PAGE(*(unsigned long *)addr))];
		*(unsigned long *)addr = tmp + P_UWP;
	}

	/* Flush the TLB */
Example #4
// XXX horrible back door to map a page quickly regardless of translation map object, etc.
// used only during VM setup.
int vm_translation_map_quick_map(kernel_args *ka, addr_t va, addr_t pa, unsigned int attributes, addr_t (*get_free_page)(kernel_args *))
{
	addr_t pgtable_phys;
	unsigned long *pgtable;
	int index;

	TMAP_TRACE("quick_map: va 0x%lx pa 0x%lx, attributes 0x%x\n", va, pa, attributes);

	// look up and dereference the first entry
	pgtable_phys = ka->arch_args.phys_pgdir;
	get_physical_page_tmap(pgtable_phys, (addr_t *)&pgtable, PHYSICAL_PAGE_NO_WAIT);
//	dprintf("phys 0x%lx, virt %p\n", pgtable_phys, pgtable);
	index = PGTABLE0_ENTRY(va);
	ASSERT(PGENT_PRESENT(pgtable[index]));

	// level 2
	pgtable_phys = PGENT_TO_ADDR(pgtable[index]);
	get_physical_page_tmap(pgtable_phys, (addr_t *)&pgtable, PHYSICAL_PAGE_NO_WAIT);
	index = PGTABLE1_ENTRY(va);
	if (!PGENT_PRESENT(pgtable[index])) {
		pgtable_phys = get_free_page(ka);
		pgtable[index] = pgtable_phys | 3;
		TMAP_TRACE("had to allocate level 2: paddr 0x%lx\n", pgtable_phys);
	} else {
		pgtable_phys = PGENT_TO_ADDR(pgtable[index]);
//		dprintf("level 2: paddr 0x%lx\n", pgtable_phys);
	}

	// level 3
	get_physical_page_tmap(pgtable_phys, (addr_t *)&pgtable, PHYSICAL_PAGE_NO_WAIT);
	index = PGTABLE2_ENTRY(va);
	if (!PGENT_PRESENT(pgtable[index])) {
		pgtable_phys = get_free_page(ka);
		pgtable[index] = pgtable_phys | 3;
		TMAP_TRACE("had to allocate level 3: paddr 0x%lx\n", pgtable_phys);
	} else {
		pgtable_phys = PGENT_TO_ADDR(pgtable[index]);
//		dprintf("level 3: paddr 0x%lx\n", pgtable_phys);
	}

	// map the page
	get_physical_page_tmap(pgtable_phys, (addr_t *)&pgtable, PHYSICAL_PAGE_NO_WAIT);
	index = PGTABLE3_ENTRY(va);
	pa = ROUNDOWN(pa, PAGE_SIZE);
	pgtable[index] = pa | ((attributes & LOCK_RW) ? (1<<1) : 0) | ((attributes & LOCK_KERNEL) ? 0 : (1<<2)) | 1;

	return 0;
}
Example #5
/*
 * This function obtains a free page and clears it to zero. Returns the page's physical address.
 */
unsigned long get_clean_page(void)
{
	unsigned long addr = get_free_page();
	if(addr)
		zeromem((char *)addr, PAGE_SIZE);
	return addr;
}
Example #6
/*
 * This function hooks a memory page into the page tables.
 * I: page  - starting physical address of the page;
 *    vaddr - virtual linear address at which the page is mapped;
 *    flag  - page attribute flags.
 * O: returns FALSE on error.
 */
BOOL put_page(unsigned long page, unsigned long vaddr, unsigned long flag)
{
	if(page<buffer_mem_end || page>(high_mem-PAGE_SIZE))
		panic("put_page: page address out of the limit.");
	if(vaddr > (V_USER_ZONE_END-PAGE_SIZE))
		panic("put_page: vaddr out of the limit.");
	if(page & (PAGE_SIZE-1))	/* the page's start address is not PAGE_SIZE-aligned */
		panic("put_page: page address with wrong alignment.");
	if(vaddr & (PAGE_SIZE-1))	/* the virtual linear address is not PAGE_SIZE-aligned */
		panic("put_page: vaddr with wrong alignment.");
	if(flag!=P_URP && flag!=P_UWP && flag!=P_SRP && flag!=P_SWP)
		panic("put_page: ERROR page-flag.");

	/* Address of the page-directory entry we are about to fill */
	unsigned long put_addr = ADDR_IN_1PT(current->tss.cr3, vaddr);
	/* Is the P (present) bit set in the page-directory entry? */
	if(!(*(unsigned long *)put_addr & 1)) {
		unsigned long tmp;
		if(NULL == (tmp = get_free_page()))
			return FALSE;
		*(unsigned long *)put_addr = tmp + P_UWP; /* user mode, read/write */
	}
	/* Get the base address of the page that holds the second-level page table */
	put_addr = ALIGN_PAGE(*(unsigned long *)put_addr);
	/* Get the address of the second-level page-table entry */
	put_addr = ADDR_IN_2PT(put_addr, vaddr);
	*(unsigned long *)put_addr = page + flag;

	return TRUE;
}
Example #7
void create_kernel_thread(void *function)
{
	unsigned long page;
	struct thread *thread;
	uint32_t esp0;
	uint32_t *esp;

	page = get_free_page(GFP_KERNEL);
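	/* One page holds both the thread structure (at its bottom) and the kernel stack (growing down from its top) */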
	thread = PAGE2PTR(page);
	esp0 = PAGE2BYTE(page + 1);
	esp = (uint32_t *) (esp0 - (6 * sizeof(uint32_t)));

	thread->cr3 = kernel_cr3;
	thread->esp0 = esp0;
	thread->esp = (uint32_t) esp;

	thread->state = THREAD_UNREADY;

	thread->need_reschedule = 0;
	thread->priority = MAX_PRIORITY;
	thread->counter = MAX_PRIORITY;

	esp[0] = 0;			/* ebx */
	esp[1] = 0;			/* edi */
	esp[2] = 0;			/* esi */
	esp[3] = 0;			/* ebp */
	esp[4] = EF_IF;			/* eflags */
	esp[5] = (uint32_t) function;	/* eip */

	add_thread(thread);
	resume_thread(thread);

	return;
}
Example #8
/**
 * Create a kernel task. Its state will be TASK_RUNNABLE and
 * the type will be TASK_KERN.
 * @start: function the task will start at, no arguments and returns void
 */
struct task_struct *ktask_create(void (*start)(void), const char *name) {
    struct task_struct *task;
    uint64_t *stack;

    stack = (uint64_t *)get_free_page(0);
    if(!stack)
        return NULL;

    task = kmalloc(sizeof(*task));
    if(!task)
        goto out_stack;

    memset(task, 0, sizeof(*task));

    task->type = TASK_KERN;
    task->state = TASK_RUNNABLE;
    /* Put the start function on the stack for switch_to  */
    task->first_switch = 1;
    task->foreground = 1; /* all kernel threads can read input */
    stack[510] = (uint64_t)start;
    task->kernel_rsp = (uint64_t)&stack[510];
    task->mm = &kernel_mm;
    kernel_mm.mm_count++;
    task->pid = get_next_pid();
    task_set_cmdline(task, name);
    strcpy(task->cwd, "/"); /* set cwd to root for ktasks */
    task->timeslice = TIMESLICE_BASE;

    task_add_new(task); /* add to run queue and task list */
    return task;

out_stack:
    free_page((uint64_t)stack);
    return NULL;
}
Example #9
void flush_merge_buffer()
{
	UINT32 new_row, new_psn;
	UINT32 new_bank = g_target_bank;

	int i;
	if( g_target_sect != 0 ){
		// get free page from target bank
		new_row = get_free_page(new_bank);

		// set registers to write a data to nand flash memory
		SETREG(FCP_CMD, FC_COL_ROW_IN_PROG);
		SETREG(FCP_OPTION, FO_P | FO_E | FO_B_W_DRDY);
		// Address is merge buffer address which contains actual data
		SETREG(FCP_DMA_ADDR, MERGE_BUFFER_ADDR + new_bank * BYTES_PER_PAGE);
		SETREG(FCP_DMA_CNT, BYTES_PER_SECTOR * g_target_sect);
		SETREG(FCP_COL,0);
		SETREG(FCP_ROW_L(new_bank),new_row);
		SETREG(FCP_ROW_H(new_bank),new_row);

		flash_issue_cmd(new_bank,RETURN_ON_ISSUE);
		
		// for lba -> psn mapping information 
		new_psn = new_bank * SECTORS_PER_BANK + new_row * SECTORS_PER_PAGE;
		// Update mapping information
		for(i = 0 ;i < g_target_sect; i++ )
		{
			set_psn( g_merge_buffer_lsn[i],
					new_psn + i );
		}
	}
}
Example #10
struct pdir_ptr_t* init_pdir_ptr(){
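	// get_free_page() apparently returns a page index here; multiplying by FOUR_KB_HEXA (4 KiB) yields the page's address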
	uint64_t page_address = get_free_page() * FOUR_KB_HEXA;
	struct pdir_ptr_t* pdir_ptr = (struct pdir_ptr_t*)(page_address);
	debug("Initializing pdir_ptr at free page:%p\n",(page_address));
	memset(pdir_ptr,0,512*sizeof(struct pdir_ptr_t)); // initialize to zero
	return pdir_ptr;
}
Example #11
/*
 * upon a create, we allocate an empty protocol data, and grab a page to
 * buffer writes
 */
static int
unix_proto_create(struct socket *sock, int protocol)
{
	struct unix_proto_data *upd;

	PRINTK("unix_proto_create: socket 0x%x, proto %d\n", sock, protocol);
	if (protocol != 0) {
		PRINTK("unix_proto_create: protocol != 0\n");
		return -EINVAL;
	}
	if (!(upd = unix_data_alloc())) {
		printk("unix_proto_create: can't allocate buffer\n");
		return -ENOMEM;
	}
	if (!(upd->buf = (char *)get_free_page())) {
		printk("unix_proto_create: can't get page!\n");
		unix_data_deref(upd);
		return -ENOMEM;
	}
	upd->protocol = protocol;
	upd->socket = sock;
	UN_DATA(sock) = upd;
	PRINTK("unix_proto_create: allocated data 0x%x\n", upd);
	return 0;
}
Example #12
static int startup(struct LEON_serial * info)
{
	unsigned long flags;
	
	if (info->flags & S_INITIALIZED)
		return 0;

	if (!info->xmit_buf) {
		info->xmit_buf = (unsigned char *) get_free_page(GFP_KERNEL);
		if (!info->xmit_buf)
			return -ENOMEM;
	}

	save_flags(flags); cli();

	/*
	 * Clear the FIFO buffers and disable them
	 * (they will be reenabled in change_speed())
	 */

	change_speed(info);

	info->xmit_fifo_size = 1;
	leon->uartctrl1 = UCTRL_RE | UCTRL_RI | UCTRL_TE /*| UCTRL_TI*/;
	//leon->uartdata1;

	if (info->tty)
		clear_bit(TTY_IO_ERROR, &info->tty->flags);
	info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;

	info->flags |= S_INITIALIZED;
	restore_flags(flags);
	return 0;
}
Example #13
/*
 * page not present ... go through shm_pages
 */
static unsigned long shm_nopage(struct vm_area_struct * shmd, unsigned long address, int no_share)
{
    pte_t pte;
    struct shmid_kernel *shp;
    unsigned int id, idx;

    id = SWP_OFFSET(shmd->vm_pte) & SHM_ID_MASK;
    idx = (address - shmd->vm_start + shmd->vm_offset) >> PAGE_SHIFT;

#ifdef DEBUG_SHM
    if (id > max_shmid) {
        printk ("shm_nopage: id=%d too big. proc mem corrupted\n", id);
        return 0;
    }
#endif
    shp = shm_segs[id];

#ifdef DEBUG_SHM
    if (shp == IPC_UNUSED || shp == IPC_NOID) {
        printk ("shm_nopage: id=%d invalid. Race.\n", id);
        return 0;
    }
#endif
    /* This can occur on a remap */

    if (idx >= shp->shm_npages) {
        return 0;
    }

    pte = __pte(shp->shm_pages[idx]);
    if (!pte_present(pte)) {
        unsigned long page = get_free_page(GFP_USER);
        if (!page)
            return -1;
        pte = __pte(shp->shm_pages[idx]);
        if (pte_present(pte)) {
            free_page (page); /* doesn't sleep */
            goto done;
        }
        if (!pte_none(pte)) {
            rw_swap_page_nocache(READ, pte_val(pte), (char *)page);
            pte = __pte(shp->shm_pages[idx]);
            if (pte_present(pte))  {
                free_page (page); /* doesn't sleep */
                goto done;
            }
            swap_free(pte_val(pte));
            shm_swp--;
        }
        shm_rss++;
        pte = pte_mkdirty(mk_pte(page, PAGE_SHARED));
        shp->shm_pages[idx] = pte_val(pte);
    } else
        --current->maj_flt;  /* was incremented in do_no_page */

done:	/* pte_val(pte) == shp->shm_pages[idx] */
    current->min_flt++;
    atomic_inc(&mem_map[MAP_NR(pte_page(pte))].count);
    return pte_page(pte);
}
Example #14
void attach_vidc(struct address_info *hw_config)
{
	char name[32];
	int i;

	sprintf(name, "VIDC %d-bit sound", hw_config->card_subtype);
	conf_printf(name, hw_config);

	for (i = 0; i < 2; i++)
	{
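		/* one page-sized DMA buffer per channel; record both its virtual and physical address */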
		dma_buf[i] = get_free_page(GFP_KERNEL);
		dma_pbuf[i] = virt_to_phys(dma_buf[i]);
	}

	if (sound_alloc_dma(hw_config->dma, "VIDCsound"))
	{
		printk(KERN_ERR "VIDCsound: can't allocate virtual DMA channel\n");
		return;
	}
	if (request_irq(hw_config->irq, vidc_sound_dma_irq, 0, "VIDCsound", &dma_start))
	{
		printk(KERN_ERR "VIDCsound: can't allocate DMA interrupt\n");
		return;
	}
//	vidc_synth_init(hw_config);
	vidc_audio_init(hw_config);
	vidc_mixer_init(hw_config);
}
Example #15
/*
 * Request a page from the page allocator and tokenize
 * it to buffers of the given bucket's buffer sizes.
 *
 * BIG-FAT-NOTE! Call with bucket lock held
 */
static void *get_tokenized_page(int bucket_idx)
{
	struct page *page;
	char *buf, *start, *end;
	int buf_len;

	page = get_free_page(ZONE_ANY);
	page->in_bucket = 1;
	page->bucket_idx = bucket_idx;

	start = page_address(page);
	end = start + PAGE_SIZE;

	buf = start;
	buf_len = 1 << bucket_idx;
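	/* chain the buffers into a singly linked free list: the first word of each buffer points to the next, the last to NULL */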
	while (buf < (end - buf_len)) {
		*(void **)buf = buf + buf_len;
		sign_buf(buf, FREEBUF_SIG);
		buf += buf_len;
	}
	*(void **)buf = NULL;
	sign_buf(buf, FREEBUF_SIG);

	return start;
}
Example #16
static void expand(u32int new_size, heap_t *heap)
{
	ASSERT(new_size > heap->end_address - heap->start_address);

	if (new_size & 0xfffff000)
	{
		new_size &= 0xfffff000;
		new_size += 0x1000;
	}

	ASSERT(heap->start_address + new_size <= heap->max_address);

	u32int old_size = heap->end_address - heap->start_address;

	u32int i = old_size;
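	/* map a fresh frame for every not-yet-mapped page between the old and the new heap end */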
	while (i < new_size)
	{
		if (!get_page_addr(heap->start_address + i))
			put_page((u32int)get_free_page(), heap->start_address + i);

		i += 0x1000;
	}

	heap->end_address = heap->start_address + new_size;
}
Example #17
void swap_in(unsigned long *table_ptr)
{
	int swap_nr;
	unsigned long page;

	if (!swap_bitmap) {
		printk("Trying to swap in without swap bit-map");
		return;
	}
	if (1 & *table_ptr) {
		printk("trying to swap in present page\n\r");
		return;
	}
	swap_nr = *table_ptr >> 1;
	if (!swap_nr) {
		printk("No swap page in swap_in\n\r");
		return;
	}
	if (!(page = get_free_page()))
		oom();
	read_swap_page(swap_nr, (char *) page);
	if (setbit(swap_bitmap,swap_nr))
		printk("swapping in multiply from same page\n\r");
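	/* map the page in: 7 = present | read/write | user, plus the dirty bit */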
	*table_ptr = page | (PAGE_DIRTY | 7);
}
Example #18
static inline unsigned char *alloc_buf(int prio)
{
	if (PAGE_SIZE != N_TTY_BUF_SIZE)
		return (unsigned char *)kmalloc(N_TTY_BUF_SIZE, prio);
	else
		return (unsigned char *)get_free_page(prio);
}
Example #19
struct pml4_t* init_pml4(){
	uint64_t page_address = get_free_page() * FOUR_KB_HEXA;
	struct pml4_t* pml4 = (struct pml4_t*)(page_address);
	debug("Initializing pml4 at free page:%p\n",(page_address));
	memset(pml4,0,512*sizeof(struct pml4_t)); // initialize to zero
	return pml4;
}
Example #20
/* *
 * usbd_proc_read - implement proc file system read.
 * @file
 * @buf
 * @count
 * @pos
 *
 * Standard proc file system read function.
 */
static ssize_t usbd_proc_read (struct file *file, char *buf, size_t count, loff_t * pos)
{
        struct usb_device_instance *device;
	unsigned long page;
	int len = 0;
	int index;

	MOD_INC_USE_COUNT;
	// get a page, max 4095 bytes of data...
	if (!(page = get_free_page (GFP_KERNEL))) {
		MOD_DEC_USE_COUNT;
		return -ENOMEM;
	}

	len = 0;
	index = (*pos)++;
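	/* each read call emits one chunk of output; the file position selects which one */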

	switch (index) {
        case 0:
		len += sprintf ((char *) page + len, "USBD Status\n");
                break;

        case 1:
		len += sprintf ((char *) page + len, "Cable: %s\n", udc_connected ()? "Plugged" : "Unplugged");
                break;

        case 2:
                if ((device = device_array[0])) {
                        struct usb_function_instance * function_instance;
                        struct usb_bus_instance * bus;

                        len += sprintf ((char *) page + len, "Device status: %s\n", USBD_DEVICE_STATUS(device->status));
                        len += sprintf ((char *) page + len, "Device state: %s\n", USBD_DEVICE_STATE(device->device_state));

                        if ((function_instance = device->function_instance_array+0)) {
                                len += sprintf ((char *) page + len, "Function: %s\n", 
                                                function_instance->function_driver->name);
                        }
                        if ((bus= device->bus)) {
                                len += sprintf ((char *) page + len, "Bus interface: %s\n", 
                                                bus->driver->name);
                        }
                }
                break;

        default:
                break;
        }


	if (len > count) {
		len = -EINVAL;
	} else if (len > 0 && copy_to_user (buf, (char *) page, len)) {
		len = -EFAULT;
	}
	free_page (page);
	MOD_DEC_USE_COUNT;
	return len;
}
Example #21
/* this routine handles present pages, when users try to write
   to a shared page.
   */
void do_wp_page(struct vm_area_struct *vma, unsigned long address, int write_access)
{
    pgd_t *pgd;
    pmd_t *pmd;
    pte_t *page_table,pte;
    unsigned long old_page, new_page;

    new_page = get_free_page(GFP_KERNEL);

    pgd = pgd_offset(vma->vm_task, address);
    if(pgd_none(*pgd))
        goto end_wp_page;
    if(pgd_bad(*pgd))
        goto bad_wp_page;
    pmd = pmd_offset(pgd,address);
    if(pmd_none(*pmd))
        goto end_wp_page;
    if(pmd_bad(*pmd))
        goto bad_wp_page;
    page_table = pte_offset(pmd,address);
    pte = *page_table;
    if(!pte_present(pte))
        goto end_wp_page;
    if(pte_write(pte))
        goto  end_wp_page;
    old_page = pte_page(pte);
    if(old_page >= main_memory_end)
        goto bad_wp_page;

    (vma->vm_task->mm->min_flt)++;

    if(mem_map[MAP_NR(old_page)].flags & PAGE_PRESENT)
    {
        if(new_page)
        {
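            /* copy the old page's contents into the new page and map the writable copy in its place */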
            if(mem_map[MAP_NR(old_page)].flags & MAP_PAGE_RESERVED)
                ++(vma->vm_task->mm->rss);
            copy_page(old_page, new_page);
            *page_table = pte_mkwrite(pte_mkdirty(mk_pte(new_page, vma->vm_page_prot)));
            free_page(old_page);
            return;
        }
        pte_val(*page_table) &= PAGE_BAD;
        free_page(old_page);
        oom();
        return;
    }
    *page_table = pte_mkdirty(pte_mkwrite(pte));
    if(new_page)
        free_page(new_page);
    return;
bad_wp_page:
    printk("do_wp_page: bogus page at address %08lx (%08lx)\n",address,old_page);
    goto end_wp_page;
end_wp_page:
    if(new_page)
        free_page(new_page);
    return;
}
Example #22
int init_module(void){
    pagemem = get_free_page(0);
    printk("<1> pagemem=%s",pagemem);
    kmallocmem = kmalloc(100,0);
    printk("<1> kmallocmem=%s",kmallocmem);
    vmallocmem = vmalloc(1000000);
    printk("<1> vmallocmem=%s",vmallocmem);
    return 0;
}
Example #23
uint64_t create_empty_page(uint64_t address,uint64_t cr3_content){
	address = align_4KB(address);
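	/* get_free_page() presumably returns a kernel virtual address; subtracting VIRTUAL_BASE_ADDRESS gives the physical address */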
	uint64_t physical_addr = get_free_page() - VIRTUAL_BASE_ADDRESS;
//	print("[create_empty_page] mapping = %x to %x\n",address,physical_addr);
	createPageTableEntry(address,physical_addr,cr3_content);
	//		//	check_page_mapping(address,physical_addr,cr3_content);
	return physical_addr;			
}			
Example #24
/*
 * This function obtains a free page and installs it in the page tables. Returns the page's physical address.
 */
unsigned long get_mapped_free_page(unsigned long vaddr, unsigned long flag)
{
	unsigned long addr = get_free_page();
	if(addr && !put_page(addr, vaddr, flag))
		return NULL;
	else
		return addr;
}
Example #25
int sys_swapon(const char * specialfile)
{
	struct inode * swap_inode;
	int i,j;

	if (!suser())
		return -EPERM;
	if (!(swap_inode  = namei(specialfile)))
		return -ENOENT;
	if (swap_file || swap_device || swap_bitmap) {
		iput(swap_inode);
		return -EBUSY;
	}
	if (S_ISBLK(swap_inode->i_mode)) {
		swap_device = swap_inode->i_rdev;
		iput(swap_inode);
	} else if (S_ISREG(swap_inode->i_mode))
		swap_file = swap_inode;
	else {
		iput(swap_inode);
		return -EINVAL;
	}
	swap_bitmap = (char *) get_free_page();
	if (!swap_bitmap) {
		iput(swap_file);
		swap_device = 0;
		swap_file = NULL;
		printk("Unable to start swapping: out of memory :-)\n");
		return -ENOMEM;
	}
	read_swap_page(0,swap_bitmap);
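	/* the "SWAP-SPACE" signature occupies the last 10 bytes of the first page (offset 4096 - 10 = 4086) */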
	if (strncmp("SWAP-SPACE",swap_bitmap+4086,10)) {
		printk("Unable to find swap-space signature\n\r");
		free_page((long) swap_bitmap);
		iput(swap_file);
		swap_device = 0;
		swap_file = NULL;
		swap_bitmap = NULL;
		return -EINVAL;
	}
	memset(swap_bitmap+4086,0,10);
	j = 0;
	for (i = 1 ; i < SWAP_BITS ; i++)
		if (bit(swap_bitmap,i))
			j++;
	if (!j) {
		printk("Empty swap-file\n");
		free_page((long) swap_bitmap);
		iput(swap_file);
		swap_device = 0;
		swap_file = NULL;
		swap_bitmap = NULL;
		return -EINVAL;
	}
	printk("Adding Swap: %d pages (%d bytes) swap-space\n\r",j,j*4096);
	return 0;
}
Example #26
void *malloc(unsigned int len)
{
	struct _bucket_dir	*bdir;
	struct bucket_desc	*bdesc;
	void			*retval;

	/*
	 * First we search the bucket_dir to find the right bucket change
	 * for this request.
	 */
	for (bdir = bucket_dir; bdir->size; bdir++)
		if (bdir->size >= len)
			break;
	if (!bdir->size) {
		printk("malloc called with impossibly large argument (%d)\n",
			len);
		panic("malloc: bad arg");
	}
	/*
	 * Now we search for a bucket descriptor which has free space
	 */
	cli();	/* Avoid race conditions */
	for (bdesc = bdir->chain; bdesc; bdesc = bdesc->next) 
		if (bdesc->freeptr)
			break;
	/*
	 * If we didn't find a bucket with free space, then we'll 
	 * allocate a new one.
	 */
	if (!bdesc) {
		char		*cp;
		int		i;

		if (!free_bucket_desc)	
			init_bucket_desc();
		bdesc = free_bucket_desc;
		free_bucket_desc = bdesc->next;
		bdesc->refcnt = 0;
		bdesc->bucket_size = bdir->size;
		bdesc->page = bdesc->freeptr = (void *) (cp = (char *) get_free_page());
		if (!cp)
			panic("Out of memory in kernel malloc()");
		/* Set up the chain of free objects */
		for (i=PAGE_SIZE/bdir->size; i > 1; i--) {
			*((char **) cp) = cp + bdir->size;
			cp += bdir->size;
		}
		*((char **) cp) = 0;
		bdesc->next = bdir->chain; /* OK, link it in! */
		bdir->chain = bdesc;
	}
	retval = (void *) bdesc->freeptr;
	bdesc->freeptr = *((void **) retval);
	bdesc->refcnt++;
	sti();	/* OK, we're safe again */
	return(retval);
}
Example #27
/*
 * Try to increase the number of buffers available: the size argument
 * is used to determine what kind of buffers we want. Currently only
 * 1024-byte buffers are supported by the rest of the system, but I
 * think this will change eventually.
 */
void grow_buffers(int size)
{
	unsigned long page;
	int i;
	struct buffer_head *bh, *tmp;

	if ((size & 511) || (size > 4096)) {
		printk("grow_buffers: size = %d\n",size);
		return;
	}
	page = get_free_page(GFP_BUFFER);
	if (!page)
		return;
	tmp = NULL;
	i = 0;
	for (i = 0 ; i+size <= 4096 ; i += size) {
		bh = get_unused_buffer_head();
		if (!bh)
			goto no_grow;
		bh->b_this_page = tmp;
		tmp = bh;
		bh->b_data = (char * ) (page+i);
		bh->b_size = size;
	}
	tmp = bh;
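	/* link every buffer head on this page into the circular doubly linked free list */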
	while (1) {
		if (free_list) {
			tmp->b_next_free = free_list;
			tmp->b_prev_free = free_list->b_prev_free;
			free_list->b_prev_free->b_next_free = tmp;
			free_list->b_prev_free = tmp;
		} else {
			tmp->b_prev_free = tmp;
			tmp->b_next_free = tmp;
		}
		free_list = tmp;
		++nr_buffers;
		if (tmp->b_this_page)
			tmp = tmp->b_this_page;
		else
			break;
	}
	tmp->b_this_page = bh;
	return;
/*
 * In case anything failed, we just free everything we got.
 */
no_grow:
	bh = tmp;
	while (bh) {
		tmp = bh;
		bh = bh->b_this_page;
		put_unused_buffer_head(tmp);
	}	
	free_page(page);
}
Example #28
/*
 * 'copy_strings()' copies argument/envelope strings from user
 * memory to free pages in kernel mem. These are in a format ready
 * to be put directly into the top of new user memory.
 *
 * Modified by TYT, 11/24/91 to add the from_kmem argument, which specifies
 * whether the string and the string array are from user or kernel segments:
 * 
 * from_kmem     argv *        argv **
 *    0          user space    user space
 *    1          kernel space  user space
 *    2          kernel space  kernel space
 * 
 * We do this by playing games with the fs segment register.  Since it
 * is expensive to load a segment register, we try to avoid calling
 * set_fs() unless we absolutely have to.
 */
unsigned long copy_strings(int argc,char ** argv,unsigned long *page,
		unsigned long p, int from_kmem)
{
	char *tmp, *pag = NULL;
	int len, offset = 0;
	unsigned long old_fs, new_fs;

	if ((long)p <= 0)
		return p;	/* bullet-proofing */
	new_fs = get_ds();
	old_fs = get_fs();
	if (from_kmem==2)
		set_fs(new_fs);
	while (argc-- > 0) {
		if (from_kmem == 1)
			set_fs(new_fs);
		if (!(tmp = get_user(argv+argc)))
			panic("VFS: argc is wrong");
		if (from_kmem == 1)
			set_fs(old_fs);
		len = count(tmp, 1, p);
		if (len < 0 || len >= p) {	/* EFAULT or E2BIG */
			set_fs(old_fs);
			return len < 0 ? len : -E2BIG;
		}
		tmp += ++len;
		while (len) {
			--p; --tmp; --len;
			if (--offset < 0) {
				offset = p % PAGE_SIZE;
				if (from_kmem==2)
					set_fs(old_fs);
				if (!(pag = (char *) page[p/PAGE_SIZE]) &&
				    !(pag = (char *) page[p/PAGE_SIZE] =
				      (unsigned long *) get_free_page(GFP_USER))) 
					return -EFAULT;
				if (from_kmem==2)
					set_fs(new_fs);

			}
			if (len == 0 || offset == 0)
			  *(pag + offset) = get_user(tmp);
			else {
			  int bytes_to_copy = (len > offset) ? offset : len;
			  tmp -= bytes_to_copy;
			  p -= bytes_to_copy;
			  offset -= bytes_to_copy;
			  len -= bytes_to_copy;
			  memcpy_fromfs(pag + offset, tmp, bytes_to_copy + 1);
			}
		}
	}
	if (from_kmem==2)
		set_fs(old_fs);
	return p;
}
Example #29
/**
 * Return a copy of the current task.
 */
struct task_struct *fork_curr_task(void) {
    struct task_struct *task;
    uint64_t *kstack, *curr_kstack;
    int i;

    kstack = (uint64_t *)get_free_page(0);
    if(!kstack)
        return NULL;

    task = kmalloc(sizeof(*task));
    if(!task)
        goto out_stack;

    /* Halve the remaining timeslice (split between parent and child) */
    curr_task->timeslice >>= 1;

    memcpy(task, curr_task, sizeof(*task));     /* Exact copy of parent */

    /* deep copy the current mm */
    task->mm = mm_deep_copy();
    if(task->mm == NULL)
        goto out_task;

    /* Copy the curr_task's kstack */
    curr_kstack = (uint64_t *)ALIGN_DOWN(read_rsp(), PAGE_SIZE);
    memcpy(kstack, curr_kstack, PAGE_SIZE);
    task->kernel_rsp = (uint64_t)&kstack[510];  /* new kernel stack */
    task->pid = get_next_pid();                 /* new pid */
    task->parent = curr_task;                   /* new parent */
    task->chld = task->sib = NULL;              /* no children/siblings yet */
    task->next_task = task->prev_task = task->next_rq = NULL;

    /* Increment reference counts on any open files */
    for(i = 0; i < TASK_FILES_MAX; i++) {
        struct file *fp = task->files[i];
        if(fp) {
            fp->f_count++;
        }
    }

    /* Add this new child to the parent */
    add_child(curr_task, task);

    /* TODO: Here we steal our parent's foreground status */
    if(curr_task->pid > 2)
        curr_task->foreground = 0;/* change to 1; to let all tasks read */

    task_add_new(task); /* add to run queue and task list */

    return task;
out_task:
    kfree(task);
out_stack:
    free_page((uint64_t)kstack);
    return NULL;
}
Example #30
/*
 * 'copy_strings()' copies argument/envelope strings from user
 * memory to free pages in kernel mem. These are in a format ready
 * to be put directly into the top of new user memory.
 *
 * Modified by TYT, 11/24/91 to add the from_kmem argument, which specifies
 * whether the string and the string array are from user or kernel segments:
 * 
 * from_kmem     argv *        argv **
 *    0          user space    user space
 *    1          kernel space  user space
 *    2          kernel space  kernel space
 * 
 * We do this by playing games with the fs segment register.  Since it
 * is expensive to load a segment register, we try to avoid calling
 * set_fs() unless we absolutely have to.
 */
unsigned long copy_strings(int argc,char ** argv,unsigned long *page,
		unsigned long p, int from_kmem)
{
	char *tmp, *tmp1, *pag = NULL;
	int len, offset = 0;
	unsigned long old_fs, new_fs;

	if (!p)
		return 0;	/* bullet-proofing */
	new_fs = get_ds();
	old_fs = get_fs();
	if (from_kmem==2)
		set_fs(new_fs);
	while (argc-- > 0) {
		if (from_kmem == 1)
			set_fs(new_fs);
		if (!(tmp1 = tmp = get_user(argv+argc)))
			panic("VFS: argc is wrong");
		if (from_kmem == 1)
			set_fs(old_fs);
		while (get_user(tmp++));
		len = tmp - tmp1;
		if (p < len) {	/* this shouldn't happen - 128kB */
			set_fs(old_fs);
			return 0;
		}
		while (len) {
			--p; --tmp; --len;
			if (--offset < 0) {
				offset = p % PAGE_SIZE;
				if (from_kmem==2)
					set_fs(old_fs);
				if (!(pag = (char *) page[p/PAGE_SIZE]) &&
				    !(pag = (char *) page[p/PAGE_SIZE] =
				      (unsigned long *) get_free_page(GFP_USER))) 
					return 0;
				if (from_kmem==2)
					set_fs(new_fs);

			}
			if (len == 0 || offset == 0)
			  *(pag + offset) = get_user(tmp);
			else {
			  int bytes_to_copy = (len > offset) ? offset : len;
			  tmp -= bytes_to_copy;
			  p -= bytes_to_copy;
			  offset -= bytes_to_copy;
			  len -= bytes_to_copy;
			  memcpy_fromfs(pag + offset, tmp, bytes_to_copy + 1);
			}
		}
	}
	if (from_kmem==2)
		set_fs(old_fs);
	return p;
}