Example 1
static void
zfs_init(void)
{
	/*
	 * Initialize our context globals
	 */
	zfs_context_init();

	/*
	 * Initialize slab allocator and taskq layers
	 */
	kmem_init();

	/*
	 * Initialize .zfs directory structures
	 */
#if 0
	zfsctl_init();
#endif
	/*
	 * Initialize znode cache, vnode ops, etc...
	 */
	zfs_znode_init();

	/*
	 * Initialize /dev/zfs
	 */
	zfs_ioctl_init();
}
Example 2
void kmain (void)
{
    cpu = &cpus[0];

    uart_init (P2V(UART0));

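    // set up virtual memory, then bring up the kernel allocator in two
    // stages (kmem_init2 adds the range INIT_KERNMAP..PHYSTOP)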
    init_vmm ();
    kpt_freerange (align_up(&end, PT_SZ), P2V_WO(INIT_KERNMAP));
    paging_init (INIT_KERNMAP, PHYSTOP);

    kmem_init ();
    kmem_init2(P2V(INIT_KERNMAP), P2V(PHYSTOP));

    trap_init ();				// vector table and stacks for exception modes
   
    gic_init(P2V(VIC_BASE));			// arm v2 gic init
    uart_enable_rx ();				// interrupt for uart
    consoleinit ();				// console
    pinit ();					// process (locks)

    binit ();					// buffer cache
    fileinit ();				// file table
    iinit ();					// inode cache
    ideinit ();					// ide (memory block device)

#ifdef INCLUDE_REMOVED
    timer_init (HZ);				// the timer (ticker)
#endif

    sti ();
    userinit();					// first user process
    scheduler();				// start running processes
}
Example 3
void vm_mem_bootstrap(void)
{
	vm_offset_t	start, end;

	/*
	 *	Initializes resident memory structures.
	 *	From here on, all physical memory is accounted for,
	 *	and we use only virtual addresses.
	 */

	vm_page_bootstrap(&start, &end);

	/*
	 *	Initialize other VM packages
	 */

	slab_bootstrap();
	vm_object_bootstrap();
	vm_map_init();
	kmem_init(start, end);
	pmap_init();
	slab_init();
	kalloc_init();
	vm_fault_init();
	vm_page_module_init();
	memory_manager_default_init();
}
Example 4
File: kinit.c Project: Zeke-OS/zeke
/**
 * Run all kernel module initializers.
 */
void exec_init_array(void)
{
    extern void dyndebug_early_boot_init(void);
    extern void kmem_init(void);
    extern void dynmem_init(void);
    extern void vralloc_init(void);
    int n;

#ifdef configDYNDEBUG
    dyndebug_early_boot_init();
#endif

    kputs("\n\nZeKe PreInit\n");
    n = __hw_preinit_array_end - __hw_preinit_array_start;
    exec_array(__hw_preinit_array_start, n);

    /*
     * Memory allocator initializers.
     */
    kmem_init();
    dynmem_init();
    vralloc_init();

    kputs("SubsysInit\n");
    n  = __init_array_end - __init_array_start;
    exec_array(__init_array_start, n);

    kputs("PostInit\n");
    disable_interrupt();
    n = __hw_postinit_array_end - __hw_postinit_array_start;
    exec_array(__hw_postinit_array_start, n);
    enable_interrupt();
}
Example 5
void mm_init() {

    /* Initialize physical memory: */
    pmem_init();

    /* Initialize kernel memory: */
    kmem_init();

#if 0
    /* Output statistics: */
    printk("\n");
    printk("Physical Memory Manager Statistics: \n");
    printk("====================================\n");
    printk("kernel: start: %x, end: %x\n",
            KERNEL_PHYSICAL_START, KERNEL_PHYSICAL_END);
    printk("Accessible RAM Size (Approximate): %dMB.\n",
            ((pmem_usable_pages + 0x100 - 1) & 0xFFFFFF00)>>8);

    printk("\n");
    printk("Kernel Space Memory:\n");
    printk("=====================\n");
    printk("0x%x:0x%x - Kernel image.\n",
            KERNEL_VIRTUAL_START, KERNEL_VIRTUAL_END);
    printk("0x%x:0x%x - Lower memory image.\n",
            LOWMEM_VIRTUAL_START, LOWMEM_VIRTUAL_END);
#endif

}
Example 6
void
vm_mem_bootstrap(void)
{
	vm_offset_t	start, end;

	/*
	 *	Initializes resident memory structures.
	 *	From here on, all physical memory is accounted for,
	 *	and we use only virtual addresses.
	 */

	vm_page_bootstrap(&start, &end);

	/*
	 *	Initialize other VM packages
	 */

	zone_bootstrap();
	vm_object_bootstrap();
	vm_map_init();
	kmem_init(start, end);
	pmap_init();
	zone_init((vm_size_t)ptoa(vm_page_free_count));
	kalloc_init();
#if	MACH_RT
	rtalloc_init();
#endif	/* MACH_RT */
	vm_fault_init();
	vm_page_module_init();
	memory_manager_default_init();
}
Example 7
/*
 * Initialization code.
 *
 * Called from kernel_start() routine that is
 * implemented in HAL.
 * We assume that the following machine state has
 * been already set before this routine.
 *	- Kernel BSS section is filled with 0.
 *	- Kernel stack is configured.
 *	- All interrupts are disabled.
 *	- Minimum page table is set. (MMU systems only)
 */
int
main(void)
{

	sched_lock();
	diag_init();
	DPRINTF((BANNER));

	/*
	 * Initialize memory managers.
	 */
	page_init();
	kmem_init();

	/*
	 * Do machine-dependent
	 * initialization.
	 */
	machine_startup();

	/*
	 * Initialize kernel core.
	 */
	vm_init();
	task_init();
	thread_init();
	sched_init();
	exception_init();
	timer_init();
	object_init();
	msg_init();

	/*
	 * Enable interrupt and
	 * initialize devices.
	 */
	irq_init();
	clock_init();
	device_init();

	/*
	 * Set up boot tasks.
	 */
	task_bootstrap();

	/*
	 * Start scheduler and
	 * enter idle loop.
	 */
	sched_unlock();
	thread_idle();

	/* NOTREACHED */
	return 0;
}
Example 8
/* ARGSUSED*/
static void
vm_mem_init(void *dummy)
{
	/*
	 * Initializes resident memory structures. From here on, all physical
	 * memory is accounted for, and we use only virtual addresses.
	 */
	vm_set_page_size();
	vm_page_startup();

	/*
	 * Initialize other VM packages
	 */
	vm_object_init1();
	vm_map_startup();
	kmem_init();
	pmap_init();
}
Example 9
/*
	This is pman init stage two: we run this code and then jump to the process
	manager's main processing loop.

	Here we set up the page pool and initialize the system services and their
	structures. Note that we are now task 0 on the system.
*/	
void pman_init_stage2()
{
	UINT32 linear, physical; 
	struct pm_thread *pmthr = NULL;
	struct pm_task *pmtsk = NULL;
	int i = 0;
    int init_size = 0;
    
	/* get rid of the init stuff */
	destroy_thread(INIT_THREAD_NUM);
	destroy_task(INIT_TASK_NUM);
	
	/*
	Open used ports
	*/
	for(i = 0; i <= 12; i++)
	{
		open_port(i, 3, PRIV_LEVEL_ONLY);
	}
	
	/* 
		Init stage 1 has placed bootinfo at PMAN_MULTIBOOTINFO_PHYS 
		before initializing the pool we need to know memory size
		and that information is there. So lets map it on our page table.
	*/
	linear = PMAN_MULTIBOOT_LINEAR + SARTORIS_PROCBASE_LINEAR;
  	physical = PMAN_MULTIBOOT_PHYS; 

	map_pages(PMAN_TASK, linear, physical, PMAN_MULTIBOOT_PAGES, PGATT_WRITE_ENA, 2);

	/* Relocate the init image */
	init_size = init_reloc();

    pman_print_set_color(0x7);
	pman_print("Mapping Malloc %i pages", PMAN_MALLOC_PAGES);
       
	/* Pagein remaining pages for kmalloc */
	linear = PMAN_MALLOC_LINEAR + SARTORIS_PROCBASE_LINEAR; // place after multiboot (this will invalidate the map src/dest linear address, 
                                                            // we cannot use that area anymore, but it's ok, we used it for init copy only.)
  	physical = PMAN_MALLOC_PHYS; 

	map_pages(PMAN_TASK, linear, physical, PMAN_MALLOC_PAGES, PGATT_WRITE_ENA, 2);

	pman_print("Initializing tasks/threads.");

    /* Show MMAP information */
	if(((struct multiboot_info*)PMAN_MULTIBOOT_LINEAR)->flags & MB_INFO_MMAP && ((struct multiboot_info*)PMAN_MULTIBOOT_LINEAR)->mmap_length > 0)
	{		 
		//Calculate multiboot mmap linear address.
		//Sartoris loader left MMAP just after multiboot info structure.
		
		((struct multiboot_info*)PMAN_MULTIBOOT_LINEAR)->mmap_addr = PMAN_MULTIBOOT_LINEAR + sizeof(struct multiboot_info);

		pman_print("Multiboot MMAP Size: %i ", ((struct multiboot_info*)PMAN_MULTIBOOT_LINEAR)->mmap_length);
		pman_print("Multiboot mmap linear address: %x", ((struct multiboot_info*)PMAN_MULTIBOOT_LINEAR)->mmap_addr);

		struct mmap_entry *entry = NULL;
		entry = (struct mmap_entry *)((struct multiboot_info*)PMAN_MULTIBOOT_LINEAR)->mmap_addr;

		int kk = 0, mmlen = ((struct multiboot_info*)PMAN_MULTIBOOT_LINEAR)->mmap_length / entry->size;
		for(kk = 0; kk < mmlen; kk++)
		{
			pman_print("Multiboot entry size: %i start: %x end: %x type: %i", entry->size, (UINT32)entry->start, (UINT32)entry->end, entry->type);		

			entry = (struct mmap_entry *)((UINT32)entry + entry->size);
		}
	}
	else
	{
		pman_print("No MMAP present.");
	}

    /* Initialize vmm subsystem */
	vmm_init((struct multiboot_info*)PMAN_MULTIBOOT_LINEAR, PMAN_INIT_RELOC_PHYS, PMAN_INIT_RELOC_PHYS + init_size);
	
    tsk_init();
	thr_init();

	/* Mark SCHED_THR as taken! */
	pmtsk = tsk_create(PMAN_TASK);
	pmtsk->state = TSK_NORMAL;

    pmthr = thr_create(SCHED_THR, pmtsk);
	pmthr->state = THR_INTHNDL;		// ehm... well... it IS an interrupt handler :D
	pmthr->task_id = PMAN_TASK;
    
	pman_print("Initializing allocator and interrupts.");
    /* Initialize kernel memory allocator */
	kmem_init(PMAN_MALLOC_LINEAR, PMAN_MALLOC_PAGES);
	
	/* get our own interrupt handlers, override microkernel defaults */
	int_init();
	
	/* Initialize Scheduler subsystem */
	sch_init();
    
	pman_print("InitFS2 Service loading...");
	
	/* Load System Services and init Loader */
	loader_init((ADDR)PHYSICAL2LINEAR(PMAN_INIT_RELOC_PHYS));

	//pman_print_clr(7);
	pman_print("Loading finished, return INIT image memory to POOL...");

	/* Put now unused Init-Fs pages onto vmm managed address space again. */
	vmm_add_mem((struct multiboot_info*)PMAN_MULTIBOOT_LINEAR
				,PHYSICAL2LINEAR(PMAN_INIT_RELOC_PHYS)
				,PHYSICAL2LINEAR(PMAN_INIT_RELOC_PHYS + init_size));
	
	pman_print("Signals Initialization...");

	/* Initialize global signals container */
	init_signals();

	pman_print("Commands Initialization...");

	/* Initialize Commands subsystem. */
	cmd_init();

	pman_print_set_color(12);
	pman_print("PMAN: Initialization step 2 completed.");

	/* Create Scheduler int handler */
	if(create_int_handler(32, SCHED_THR, FALSE, 0) < 0)
		pman_print_and_stop("Could not create Scheduler thread.");

	/* This is it, we are finished! */
	process_manager();
}
Example 10
void
uvm_init(void)
{
	vaddr_t kvm_start, kvm_end;

	/*
	 * step 0: ensure that the hardware set the page size
	 */

	if (uvmexp.pagesize == 0) {
		panic("uvm_init: page size not set");
	}

	/*
	 * step 1: zero the uvm structure
	 */

	memset(&uvm, 0, sizeof(uvm));
	averunnable.fscale = FSCALE;
	uvm_amap_init();

	/*
	 * step 2: init the page sub-system.  this includes allocating the
	 * vm_page structures, and setting up all the page queues (and
	 * locks).  available memory will be put in the "free" queue.
	 * kvm_start and kvm_end will be set to the area of kernel virtual
	 * memory which is available for general use.
	 */

	uvm_page_init(&kvm_start, &kvm_end);

	/*
	 * step 3: init the map sub-system.  allocates the static pool of
	 * vm_map_entry structures that are used for "special" kernel maps
	 * (e.g. kernel_map, kmem_map, etc...).
	 */

	uvm_map_init();

	/*
	 * step 4: setup the kernel's virtual memory data structures.  this
	 * includes setting up the kernel_map/kernel_object.
	 */

	uvm_km_init(kvm_start, kvm_end);

	/*
	 * step 5: init the pmap module.   the pmap module is free to allocate
	 * memory for its private use (e.g. pvlists).
	 */

	pmap_init();

	/*
	 * step 6: init the kernel memory allocator.   after this call the
	 * kernel memory allocator (malloc) can be used. this includes
	 * setting up the kmem_map.
	 */

	kmeminit();

#ifdef DEBUG
	debug_init();
#endif

	/*
	 * step 7: init all pagers and the pager_map.
	 */

	uvm_pager_init();

	/*
	 * step 8: init the uvm_loan() facility.
	 */

	uvm_loan_init();

	/*
	 * Initialize pools.  This must be done before anyone manipulates
	 * any vm_maps because we use a pool for some map entry structures.
	 */

	pool_subsystem_init();

	/*
	 * init slab memory allocator kmem(9).
	 */

	kmem_init();

	/*
	 * the VM system is now up!  now that kmem is up we can resize the
	 * <obj,off> => <page> hash table for general use and enable paging
	 * of kernel objects.
	 */

	uao_create(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS,
	    UAO_FLAG_KERNSWAP);

	uvmpdpol_reinit();

	/*
	 * init anonymous memory systems
	 */

	uvm_anon_init();

	uvm_uarea_init();

	/*
	 * init readahead module
	 */

	uvm_ra_init();
}
Example 11
void* kmalloc(size_t size)
{
        kmem_chunk* cur_elem;
        kmem_chunk* new_elem;

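        /* round the request up to a multiple of sizeof(dword) */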
        size = (size % sizeof(dword)) ? 
                (size / sizeof(dword) + 1) * sizeof(dword) :
                size;
        
        if (kernel_heap == NULL) {
                kmem_init(); /* if kernel_heap is NULL */
                             /* we need initialization */
                if (kernel_heap == NULL) kdie("Can't init kernel heap.");

                if ((kernel_heap->area_size - 2 * sizeof(kmem_chunk)) >= size) {
                        kernel_heap->size = size;
#ifdef TEDIOS_MAL_DEBUG
                        kprint("Memory first allocation with size %u\r\n", size);
                        kmem_chunk_dump(kernel_heap);
#endif
                        return ((void*) kernel_heap + sizeof(kmem_chunk));
                } else {
                        errno = ENOMEM;
                        return NULL;
                }
        }

        if (size == 0) return NULL;

        cur_elem = kernel_heap;
        
        while (!(cur_elem == NULL || FREE_AFTER(cur_elem) 
                 >= (size + sizeof(kmem_chunk)))) {
              cur_elem = cur_elem->next;
        }
#ifdef TEDIOS_MAL_DEBUG
        kprint("I found FREE SPACE! (or end :) )\r\n"
                "Chunk addr: %p\r\n"
                "Chunk size: %x\r\n"
                "Free size:  %x\r\n", cur_elem, cur_elem->size,
                FREE_AFTER(cur_elem));
#endif
        if (cur_elem == NULL) return NULL;

        new_elem = (kmem_chunk*) ((void*) cur_elem 
                                  + cur_elem->size + sizeof(kmem_chunk));

        CHUNK_INIT(new_elem,
                   cur_elem->next,
                   cur_elem,
                   size,
                   0)

        /* guard against a NULL dereference when appending after the last chunk */
        if (cur_elem->next != NULL)
                cur_elem->next->prev = new_elem;
        cur_elem->next = new_elem;

#ifdef TEDIOS_MAL_DEBUG
        kprint("Memory was allocated with size %d\r\n", size);
        kmem_chunk_dump(new_elem);
#endif

        return ((void*) new_elem + sizeof(kmem_chunk));
}
Example 12
File: i686.c Project: via/akaris-ng
void
i686_kmain(unsigned long magic, multiboot_info_t *info) {

  bootvideo_cls();

  parse_cmdline(info->cmdline);

  if (use_serial)
    i686_tty_init(0, 9600);

  i686_kernel.debug = i686_debug;

  if (magic != MULTIBOOT_BOOTLOADER_MAGIC) {
    i686_debug("Not booted from multiboot loader!\n");
    while (1);
  }
 
  i686_debug("mods_addr: %x\nmod_start: %x\n", info->mods_addr,
      0);

  i686_kernel.mutex = &i686_mutex;
  i686_kernel.bsp = (struct cpu *)i686_cpu_alloc();
  i686_kernel.bsp->kvirt = i686_virtmem_init(&i686_kernel);
  i686_kernel.phys = i686_physmem_alloc(&i686_kernel, info);

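  /* bring up the kernel object allocator for the boot CPU, then run the
     CPU's own init hook */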
  kmem_init(i686_kernel.bsp->allocator);
  i686_kernel.bsp->v.init(i686_kernel.bsp);

  i686_debug("Location GDT entry: %x\n", ((struct i686_cpu *)i686_kernel.bsp)->gdt);

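  /* smoke test: allocate a kernel virtual page, back it with a physical
     page, map it, and write a string through the new mapping */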
  virtaddr_t a;
  physaddr_t p;
  virtmem_error_t e1 = virtmem_kernel_alloc(i686_kernel.bsp->kvirt, &a, 1);
  assert(e1 == VIRTMEM_SUCCESS);
  physmem_error_t e2 = physmem_page_alloc(i686_kernel.bsp->localmem, 0, &p);
  assert(e2 == PHYSMEM_SUCCESS);
  virtmem_kernel_map_virt_to_phys(i686_kernel.bsp->kvirt, p, a);
  i686_debug("Allocated address: %x(->%x)\n", a, p);

  char *s = (char *)a;

  strcpy(s, "This shows the validity of this memory");
  i686_debug("%x contains: %s\n", a, s);

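  /* exercise the slab layer: create a 128-byte "test" cache and allocate
     one object from it */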
  struct kmem_cache *s1 = kmem_alloc(i686_kernel.bsp->allocator);
  kmem_cache_init(i686_kernel.bsp->allocator,
      s1, i686_kernel.bsp, "test", 128, NULL, NULL);

  char *t1 = kmem_cache_alloc(s1);
  i686_debug("cache at %x provided us with %x\n", s1, t1);
  strcpy(t1, "This shows the validity of the slab allocation");
  i686_debug("%x contains: %s\n", t1, t1);

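  /* build the first user address space with a 0x2000-byte region mapped
     at 0x1000000 */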
  i686_address_space_init();
  struct address_space *as;
  struct memory_region *mr;
  address_space_alloc(&as);
  memory_region_alloc(&mr);

  e1 = virtmem_kernel_alloc(i686_kernel.bsp->kvirt, &a, 1);
  virtmem_kernel_map_virt_to_phys(i686_kernel.bsp->kvirt, (physaddr_t)as->pd, a);
  
  address_space_init_region(as, mr, (virtaddr_t)0x1000000, 0x2000);
  memory_region_set_flags(mr, 1, 1);
  memory_region_map(as, mr, NULL);

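  /* copy a test string into user memory and back, then plant 0xeb 0xfe
     (x86 "jmp $", an infinite loop) at the start of the user region */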
  const char *teststr = "This is a test string to be copied to userspace.";
  char testcpybuf[128];
  char opcodes[] = {0xeb, 0xfe};
  virtmem_copy_kernel_to_user(i686_kernel.bsp->kvirt, as->pd, (void *)0x1000ffc, 
      (const void *)teststr, strlen(teststr) + 1);
  virtmem_copy_user_to_kernel(i686_kernel.bsp->kvirt, (void *)&testcpybuf, 
      as->pd, (const void *)0x1000ffc, strlen(teststr) + 1);
  i686_debug("testcpybuf contains '%s'\n", testcpybuf);
  virtmem_copy_kernel_to_user(i686_kernel.bsp->kvirt, as->pd, (void *)0x1000000, 
      (const void *)opcodes, 2);


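  /* create the first runnable thread in the new address space and hand
     control to the scheduler */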
  struct thread *thr1;
  scheduler_thread_alloc(cpu()->sched, &thr1);
  thread_init(thr1, as);
  thr1->state = THREAD_RUNNABLE;
  scheduler_thread_add(cpu()->sched, thr1);
  scheduler_reschedule(cpu()->sched);
  virtmem_user_setup_kernelspace(i686_kernel.bsp->kvirt, as->pd);
  virtmem_set_context(i686_kernel.bsp->kvirt, as->pd);
  scheduler_resume(cpu()->sched);
  while (1);
}