Example #1
static inline struct page_descriptor *get_kmalloc_pages( unsigned long priority, unsigned long order, int dma )
{
	uint32 nPtr;
	int nRetryCount = 0;

	for ( ;; )
	{
		nPtr = get_free_pages(  /* priority, */ ( 1 << order ), dma | MEMF_CLEAR );
		if ( ( priority & MEMF_NOBLOCK ) || nPtr != 0 )
		{
			if ( nPtr != 0 )
			{
				atomic_add( &g_sSysBase.ex_nKernelMemPages, ( 1 << order ) );
			}
			break;
		}
		if ( shrink_caches( ( 1 << order ) * PAGE_SIZE ) == 0 && ( priority & MEMF_OKTOFAIL ) )
		{
			break;
		}
		if ( nRetryCount++ > 100 )
		{
			if ( nRetryCount % 10 == 0 )
			{
				printk( "get_kmalloc_pages( %d ) retried %d times\n", ( 1 << order ), nRetryCount );
			}
		}
	}

	return ( ( struct page_descriptor * )nPtr );
}
Example #2
File: shm.c Project: WareX97/aPlus
static void* shm_map(shm_node_t* node) {
    void* p = (void*) get_free_pages(node->size / PAGE_SIZE, 0, 0);
    KASSERT(p);

    map_page((virtaddr_t) p, node->physaddr, node->size);
    return p;
}
Example #3
unsigned long __get_free_pages(const int priority, const int order)
{
    if(priority == GFP_ATOMIC)
        return get_free_pages(order);

    return 0;
}
Example #4
int register_hash_table(u32 rows, u32 (*hash_fn)(u32 key))
{
    struct hash_table_s *hash_table;
    struct hash_entry_s *hash_entry;
    int page_order = 0;  
    int hashd;  
    int i;

    hashd = get_empty_hash_table();
    if (hashd == -1)
	return -EAGAIN;

    for (i = 0; i < MAX_HASH_ORDER; i++) {
        if (rows < (((1 << i) * PAGE_SIZE) / sizeof(struct hash_entry_s))) {
	    page_order = i;
	    break;
	}
    }

    if (i == MAX_HASH_ORDER)
	return -EINVAL;

    /* this would be done by the slab allocator, but we don't have one :( */
    hash_table = &hash_tables[hashd];
    hash_entry = (struct hash_entry_s *)get_free_pages(page_order);
    
    if (!hash_entry) {
	put_empty_hash_table(hashd);
	return -ENOMEM;
    }

    if (hash_fn)
	hash_table->hash_fn = hash_fn;
    else
	hash_table->hash_fn = hash_fn_default;

    hash_table->max_entries = ((1 << page_order) * PAGE_SIZE) / sizeof(struct hash_entry_s);
    hash_table->cur_entries = 0;
    hash_table->page_order  = page_order;
    hash_table->hashd       = hashd;        
    hash_table->hash_entry  = hash_entry;
    memset((void *)(hash_table->hash_entry), 0xFF, (1 << page_order) * PAGE_SIZE);
//	   hash_table->max_entries * sizeof(struct hash_entry_s));

    printk(MOD_NAME "registering hashing table (%d rows/%d pages)\n", hash_table->max_entries, 1 << page_order);

    spin_lock_init(&hash_table->hash_lock);
    
    return hashd;
}
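
For scale (both numbers are assumptions, not given in the example): with a 4 KiB PAGE_SIZE and a 16-byte struct hash_entry_s, order 0 holds 4096 / 16 = 256 entries, so a request for rows = 300 selects page_order = 1 and max_entries = 512.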
Example #5
void plat_boot(void){
	int i;
	for(i=0;init[i];i++){
		init[i]();
	}
	init_sys_mmu();
	start_mmu();
	test_mmu();
	test_printk();
//	timer_init();
	init_page_map();
	char *p1,*p2,*p3,*p4;
	p1=(char *)get_free_pages(0,6);
	printk("the return address of get_free_pages %x\n",p1);
	p2=(char *)get_free_pages(0,6);
	printk("the return address of get_free_pages %x\n",p2);
	put_free_pages(p2,6);
	put_free_pages(p1,6);
	p3=(char *)get_free_pages(0,7);
	printk("the return address of get_free_pages %x\n",p3);
	p4=(char *)get_free_pages(0,7);
	printk("the return address of get_free_pages %x\n",p4);
	while(1);
}
Example #6
/**
 * Initialize the buffer for kmalloc.
 * Gets some pages and initializes the linked list.
 * @return -1 if there are not enough pages, 0 on success.
 */
int init_kmalloc_area(void)
{
	unsigned long n;

	kmalloc_area = (char *) get_free_pages(32);
	if (!kmalloc_area)
		return -1;

	memset(kmalloc_area, 0, KMALLOC_MEMORY_SIZE);

	n = (unsigned long) kmalloc_area;
	base = (struct kmalloc_header *) n;

	base->size = KMALLOC_MEMORY_SIZE;
	base->prev = 0;
	next_free_area = base;

//	printk("base address is 0x%x\n", base);
	return 0;
}
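
The definition of struct kmalloc_header is not shown in this example; from the fields used above it presumably looks roughly like the sketch below (an assumption, not the project's actual declaration).

/* Hypothetical sketch of the header inferred from the usage above. */
struct kmalloc_header {
	unsigned long size;             /* size of this free area */
	struct kmalloc_header *prev;    /* previous area in the free list */
};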
Example #7
/* alloc slab pages default for 4 pages */
static inline void * slab_alloc(struct kmem_cache_t *cachep)
{
    int i = 0;
    uint64_t start_addr= 0, address = 0;
    uint32_t index = 0;

    if(cachep == NULL)
        return NULL;

    start_addr = get_free_pages(DEFAULT_SLAB_PAGES);
    address = start_addr;
    /*printf("slab address = %x.\n",start_addr);*/

    for(i = 0; i < DEFAULT_SLAB_PAGES; ++i)
    {
        index = (start_addr - start_page_mem) >> PAGE_SHIFT;
        /*printf("index = %d.\n",index);*/
        mem_map[index]->lru.next = (void *)cachep;   /* record the owning cache */
        start_addr += PAGE_SIZE;                     /* advance to the next page */
    }

    return (void *)address;
}
Example #8
/* alloc slab pages default for 4 pages */
static inline void * slab_alloc(kmem_cache_t *cachep)
{
    if(cachep == NULL)
    {
        return NULL;
    }

    int i = 0;
    addr_t start_addr = 0, address = 0;
    struct page *page = NULL;

    start_addr = page_address(get_free_pages(MIGRATE_RESERVE, DEFAULT_SLAB_PAGES));
    address = start_addr;

    for(i = 0; i < DEFAULT_SLAB_PAGES; ++i)
    {
        page = pfn_to_page(start_addr);
        page->slab_list.next = (void *)cachep;
        start_addr += PAGE_SIZE;
    }

    return (void *)address;
}
Example #9
int32 get_free_page( int nFlags )
{
	return ( get_free_pages( 1, nFlags ) );
}
Example #10
void *get_free_page(int flags) {

  return get_free_pages(1, flags);
}
Example #11
asmlinkage int sys_fork(struct pt_regs regs) {
    struct task_struct *tsk;
    int nr;
    long flags;
    unsigned long used_memory = 0;

    save_flags(flags); cli();

    // find a free entry in the process table
    nr = find_empty_process();

    if (nr < 0) {
        printk("fork: pids not available at the moment!\n");
        goto fork_no_entry;
    }

    // allocate a page for the process descriptor
    tsk = (struct task_struct *) get_free_page();

    if (!tsk) {
        goto fork_no_mem;
    }

    // copy descriptor: pid and counter will contain different values for
    // parent and child
    *tsk = *current;
    tsk->pid = nr;
    tsk->counter = tsk->priority;

    // If we are forking the idle process, we assume its child will call
    // exec right after the fork; in that case we do not duplicate code/data.
    // For any other process we must allocate memory for it (containing
    // code and data) and set up its LDT for the new address space.
    if (current->pid != 0) {
        // allocate memory for code/data
        tsk->mem = (char *) get_free_pages(current->used_pages);

        if (!tsk->mem) {
            goto fork_data_no_mem;
        }

        // total memory used by current process
        used_memory = current->used_pages * PAGE_SIZE;

        // copy process data
        memcpy(tsk->mem, current->mem, used_memory);

        // set up LDT
        set_code_desc(&(tsk->ldt[1]), (u_long) tsk->mem, used_memory);
        set_data_desc(&(tsk->ldt[2]), (u_long) tsk->mem, used_memory);
    }

    // setup TSS
    tsk->tss.back_link = 0;
    tsk->tss.esp0 = PAGE_SIZE + (unsigned long) tsk;
    tsk->tss.ss0 = KERNEL_DS;

    tsk->tss.esp1 = 0;
    tsk->tss.ss1 = 0;
    tsk->tss.esp2 = 0;
    tsk->tss.ss2 = 0;
    tsk->tss.cr3 = 0;

    tsk->tss.eip = regs.eip;
    tsk->tss.eflags = regs.eflags;

    tsk->tss.eax = 0;
    tsk->tss.ecx = regs.ecx;
    tsk->tss.edx = regs.edx;
    tsk->tss.ebx = regs.ebx;

    tsk->tss.esp = regs.esp;
    tsk->tss.ebp = regs.ebp;
    tsk->tss.esi = regs.esi;
    tsk->tss.edi = regs.edi;

    tsk->tss.es = regs.xes & 0xffff;
    tsk->tss.cs = regs.xcs & 0xffff;
    tsk->tss.ss = regs.xss & 0xffff;
    tsk->tss.ds = regs.xds & 0xffff;

    // it is not necessary to set FS and GS

    tsk->tss.ldt = _LDT(nr);
    tsk->tss.trace = 0;
    tsk->tss.bitmap = 0xDFFF;
    tsk->tss.tr = _TSS(nr);

    set_tss_desc(gdt+(nr<<1)+FIRST_TSS_ENTRY, &(tsk->tss));
    set_ldt_desc(gdt+(nr<<1)+FIRST_LDT_ENTRY, &(tsk->ldt), 3);
    task[nr] = tsk;

    restore_flags(flags);
    return nr;

fork_data_no_mem:
    free_page(tsk);
fork_no_mem:
    restore_flags(flags);
    return -ENOMEM;
fork_no_entry:
    restore_flags(flags);
    return -EAGAIN;
}
Example #12
unsigned long alloc_mem(const size_t size)
{
    int order = power(size>>PAGE_SHIFT);
    return get_free_pages(order);
}
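
The power() helper is not shown; from the call site it presumably rounds the requested page count up to a buddy order. A minimal sketch of such a conversion, under that assumption (pages_to_order is a hypothetical name, not part of the project above):

/* Smallest order such that (1 << order) pages cover the request. */
static int pages_to_order(unsigned long pages)
{
	int order = 0;

	while ((1UL << order) < pages)
		order++;

	return order;
}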