Code Example #1
File: portability.c Project: affinitydb/server
BOOL WINAPI DllMain( HINSTANCE dll, DWORD reason, LPVOID reserved ) {
    if ( reason == DLL_PROCESS_ATTACH ) { dl_init(); }
    else if ( reason == DLL_PROCESS_DETACH ) { dl_fini(); }
    else if ( reason == DLL_THREAD_ATTACH ) { pt_init(); }
    else if ( reason == DLL_THREAD_DETACH ) { pt_fini(); }
    else { }                    /* ignore */ 
    return 1;
}
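
Note: the bodies of dl_init(), dl_fini(), pt_init() and pt_fini() are not part of this example. As a rough, hypothetical sketch (not the affinitydb/server implementation) of the kind of per-process and per-thread bookkeeping such DllMain hooks commonly perform on Windows, one might reserve a TLS slot at process attach and tear per-thread data down at thread detach:

#include <windows.h>

static DWORD tls_slot = TLS_OUT_OF_INDEXES;

/* Process attach: reserve one TLS slot for the whole DLL. */
void dl_init_sketch(void) {
    tls_slot = TlsAlloc();
}

/* Thread attach: give the new thread its own zeroed scratch buffer. */
void pt_init_sketch(void) {
    if (tls_slot != TLS_OUT_OF_INDEXES)
        TlsSetValue(tls_slot, HeapAlloc(GetProcessHeap(), HEAP_ZERO_MEMORY, 64));
}

/* Thread detach: release whatever this thread stored in its slot. */
void pt_fini_sketch(void) {
    void *p = (tls_slot != TLS_OUT_OF_INDEXES) ? TlsGetValue(tls_slot) : NULL;
    if (p != NULL) {
        HeapFree(GetProcessHeap(), 0, p);
        TlsSetValue(tls_slot, NULL);
    }
}

/* Process detach: give the TLS index back to the system. */
void dl_fini_sketch(void) {
    if (tls_slot != TLS_OUT_OF_INDEXES) {
        TlsFree(tls_slot);
        tls_slot = TLS_OUT_OF_INDEXES;
    }
}

These sketch functions would be wired into the DLL_PROCESS_ATTACH/DETACH and DLL_THREAD_ATTACH/DETACH branches exactly as in the DllMain above.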
Code Example #2
File: pt_stag_types.C Project: DeanHowarth/QUDA-CPS
ParTransStagTypes::ParTransStagTypes(Lattice & latt) :
                                   ParTrans(latt)
{
  cname = "ParTransStagTypes";
  char *fname = "ParTransStagTypes(Lattice&)";
  VRB.Func(cname,fname);
  old_str_ord = lat.StrOrd();
  if (old_str_ord != STAG){
    lat.Convert(STAG);
  }
  pt_init(lat);
  pt_init_g();
}
Code Example #3
File: kmain.c Project: lygood2007/Weenix
/**
 * This is the first real C function ever called. It performs a lot of
 * hardware-specific initialization, then creates a pseudo-context to
 * execute the bootstrap function in.
 */
void
kmain()
{
        GDB_CALL_HOOK(boot);

        dbg_init();
        dbgq(DBG_CORE, "Kernel binary:\n");
        dbgq(DBG_CORE, "  text: 0x%p-0x%p\n", &kernel_start_text, &kernel_end_text);
        dbgq(DBG_CORE, "  data: 0x%p-0x%p\n", &kernel_start_data, &kernel_end_data);
        dbgq(DBG_CORE, "  bss:  0x%p-0x%p\n", &kernel_start_bss, &kernel_end_bss);

        page_init();

        pt_init();
        slab_init();
        pframe_init();

        acpi_init();
        apic_init();
        pci_init();
        intr_init();

        gdt_init();

        /* initialize slab allocators */
#ifdef __VM__
        anon_init();
        shadow_init();
#endif
        vmmap_init();
        proc_init();
        kthread_init();

#ifdef __DRIVERS__
        bytedev_init();
        blockdev_init();
#endif

        void *bstack = page_alloc();
        pagedir_t *bpdir = pt_get();
        KASSERT(NULL != bstack && "Ran out of memory while booting.");
        context_setup(&bootstrap_context, bootstrap, 0, NULL, bstack, PAGE_SIZE, bpdir);
        context_make_active(&bootstrap_context);

        panic("\nReturned to kmain()!!!\n");
}
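
Note: KASSERT() above stops the boot with a diagnostic when an invariant fails; its definition is not part of this example. A minimal stand-alone sketch of such an assert macro, and of the `pointer && "message"` idiom used with it (panic() here is a local stub for illustration, not the Weenix routine):

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for a kernel's never-returning panic(). */
static void panic(const char *msg) {
    fprintf(stderr, "panic: %s\n", msg);
    abort();
}

/* KASSERT-style macro: panic with the stringified expression when the
 * condition is false.  (Sketch only; the real Weenix macro may differ.) */
#define KASSERT(expr)                                           \
        do {                                                    \
                if (!(expr))                                    \
                        panic("assertion failed: " #expr);      \
        } while (0)

int main(void) {
    void *bstack = malloc(4096);
    /* The string literal is always true, so it only serves as an inline
     * explanation that shows up in the panic message if bstack is NULL. */
    KASSERT(NULL != bstack && "Ran out of memory while booting.");
    free(bstack);
    return 0;
}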
Code Example #4
File: kmain.c Project: Alicehang/bubble
/**
 * This is the first real C function ever called. It performs a lot of
 * hardware-specific initialization, then creates a pseudo-context to
 * execute the bootstrap function in.
 */
void
kmain()
{
        GDB_CALL_HOOK(boot);

        dbg_init();
        dbgq(DBG_CORE, "Kernel binary:\n");
        dbgq(DBG_CORE, "  text: 0x%p-0x%p\n", &kernel_start_text, &kernel_end_text);
        dbgq(DBG_CORE, "  data: 0x%p-0x%p\n", &kernel_start_data, &kernel_end_data);
        dbgq(DBG_CORE, "  bss:  0x%p-0x%p\n", &kernel_start_bss, &kernel_end_bss);

        page_init();

        pt_init();
        slab_init();
        pframe_init();

        acpi_init();
        apic_init();
        pci_init();
        intr_init();

        gdt_init();

        /* initialize slab allocators */
#ifdef __VM__
        anon_init();
        shadow_init();
#endif
        vmmap_init();
        proc_init();
        kthread_init();

#ifdef __DRIVERS__
        bytedev_init();
        blockdev_init();
#endif

        void *bstack = page_alloc();
        pagedir_t *bpdir = pt_get();
        KASSERT(NULL != bstack && "Ran out of memory while booting.");
        /* This little loop gives gdb a place to sync up with weenix.  In the
         * past the weenix command started qemu with -S, which allowed gdb to
         * connect and start before the boot loader ran, but since then a bug
         * has appeared where breakpoints fail if gdb connects before the boot
         * loader runs.  See
         *
         * https://bugs.launchpad.net/qemu/+bug/526653
         *
         * This loop (along with an additional command in init.gdb setting
         * gdb_wait to 0) sticks weenix at a known place so gdb can join a
         * running weenix, set gdb_wait to zero, and catch the breakpoint in
         * bootstrap below.  See Config.mk for how to set GDBWAIT correctly.
         *
         * DANGER: if GDBWAIT != 0 and gdb is not running, this loop will never
         * exit and weenix will not run.  Make SURE GDBWAIT is set the way you
         * expect.
         */
        while (gdb_wait) ;
        context_setup(&bootstrap_context, bootstrap, 0, NULL, bstack, PAGE_SIZE, bpdir);
        context_make_active(&bootstrap_context);

        panic("\nReturned to kmain()!!!\n");
}
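
Note: the `while (gdb_wait) ;` handshake only behaves as the comment describes if the flag is re-read on every pass of the loop. A minimal stand-alone sketch of the idiom (only the variable name is taken from the example; the declaration and surrounding program are illustrative):

#include <stdio.h>

/* The flag must be volatile so the compiler reloads it on every pass of the
 * empty loop; otherwise the spin can be optimized into an unconditional
 * infinite loop.  Set it to 1 to reproduce the wait-for-gdb behavior; a
 * debugger would then release it with:  set var gdb_wait = 0 */
static volatile int gdb_wait = 0;

int main(void) {
    while (gdb_wait)
        ;                       /* spin until an external writer clears it */
    printf("gdb_wait cleared, continuing\n");
    return 0;
}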
Code Example #5
File: main.c Project: michalwojciech/cs170s10proj2
/*===========================================================================*
 *				sef_cb_init_fresh			     *
 *===========================================================================*/
PRIVATE int sef_cb_init_fresh(int type, sef_init_info_t *info)
{
/* Initialize the vm server. */
	int s, i;
	int click, clicksforgotten = 0;
	struct memory mem_chunks[NR_MEMS];
	struct boot_image image[NR_BOOT_PROCS];
	struct boot_image *ip;
	struct rprocpub rprocpub[NR_BOOT_PROCS];
	phys_bytes limit = 0;

#if SANITYCHECKS
	incheck = nocheck = 0;
	FIXME("VM SANITYCHECKS are on");
#endif

	vm_paged = 1;
	env_parse("vm_paged", "d", 0, &vm_paged, 0, 1);
#if SANITYCHECKS
	env_parse("vm_sanitychecklevel", "d", 0, &vm_sanitychecklevel, 0, SCL_MAX);
#endif

	/* Get chunks of available memory. */
	get_mem_chunks(mem_chunks);

	/* Initialize VM's process table. Request a copy of the system
	 * image table that is defined at the kernel level to see which
	 * slots to fill in.
	 */
	if (OK != (s=sys_getimage(image)))
		vm_panic("couldn't get image table: %d\n", s);

	/* Set table to 0. This invalidates all slots (clear VMF_INUSE). */
	memset(vmproc, 0, sizeof(vmproc));

	for(i = 0; i < ELEMENTS(vmproc); i++) {
		vmproc[i].vm_slot = i;
	}

	/* Walk through boot-time system processes that are alive
	 * now and make valid slot entries for them.
	 */
	for (ip = &image[0]; ip < &image[NR_BOOT_PROCS]; ip++) {
		phys_bytes proclimit;
		struct vmproc *vmp;

		if(ip->proc_nr >= _NR_PROCS) { vm_panic("proc", ip->proc_nr); }
		if(ip->proc_nr < 0 && ip->proc_nr != SYSTEM) continue;

#define GETVMP(v, nr)						\
		if(nr >= 0) {					\
			vmp = &vmproc[ip->proc_nr];		\
		} else if(nr == SYSTEM) {			\
			vmp = &vmproc[VMP_SYSTEM];		\
		} else {					\
			vm_panic("init: crazy proc_nr", nr);	\
		}

		/* Initialize normal process table slot or special SYSTEM
		 * table slot. Kernel memory is already reserved.
		 */
		GETVMP(vmp, ip->proc_nr);

		/* reset fields as if exited */
		clear_proc(vmp);

		/* Get memory map for this process from the kernel. */
		if ((s=get_mem_map(ip->proc_nr, vmp->vm_arch.vm_seg)) != OK)
			vm_panic("couldn't get process mem_map",s);

		/* Remove this memory from the free list. */
		reserve_proc_mem(mem_chunks, vmp->vm_arch.vm_seg);

		/* Set memory limit. */
		proclimit = CLICK2ABS(vmp->vm_arch.vm_seg[S].mem_phys +
			vmp->vm_arch.vm_seg[S].mem_len) - 1;

		if(proclimit > limit)
			limit = proclimit;

		vmp->vm_flags = VMF_INUSE;
		vmp->vm_endpoint = ip->endpoint;
		vmp->vm_stacktop =
			CLICK2ABS(vmp->vm_arch.vm_seg[S].mem_vir +
				vmp->vm_arch.vm_seg[S].mem_len);

		if (vmp->vm_arch.vm_seg[T].mem_len != 0)
			vmp->vm_flags |= VMF_SEPARATE;
	}

	/* Architecture-dependent initialization. */
	pt_init(limit);

	/* Initialize tables to all physical memory. */
	mem_init(mem_chunks);
	meminit_done = 1;

	/* Give these processes their own page table. */
	for (ip = &image[0]; ip < &image[NR_BOOT_PROCS]; ip++) {
		int s;
		struct vmproc *vmp;
		vir_bytes old_stacktop, old_stack;

		if(ip->proc_nr < 0) continue;

		GETVMP(vmp, ip->proc_nr);

		if(!(ip->flags & PROC_FULLVM))
			continue;

		old_stack =
			vmp->vm_arch.vm_seg[S].mem_vir +
			vmp->vm_arch.vm_seg[S].mem_len -
			vmp->vm_arch.vm_seg[D].mem_len;

		if(pt_new(&vmp->vm_pt) != OK)
			vm_panic("VM: no new pagetable", NO_NUM);
#define BASICSTACK VM_PAGE_SIZE
		old_stacktop = CLICK2ABS(vmp->vm_arch.vm_seg[S].mem_vir +
				vmp->vm_arch.vm_seg[S].mem_len);
		if(sys_vmctl(vmp->vm_endpoint, VMCTL_INCSP,
			VM_STACKTOP - old_stacktop) != OK) {
			vm_panic("VM: vmctl for new stack failed", NO_NUM);
		}

		FREE_MEM(vmp->vm_arch.vm_seg[D].mem_phys +
			vmp->vm_arch.vm_seg[D].mem_len,
			old_stack);

		if(proc_new(vmp,
			VM_PROCSTART,
			CLICK2ABS(vmp->vm_arch.vm_seg[T].mem_len),
			CLICK2ABS(vmp->vm_arch.vm_seg[D].mem_len),
			BASICSTACK,
			CLICK2ABS(vmp->vm_arch.vm_seg[S].mem_vir +
				vmp->vm_arch.vm_seg[S].mem_len -
				vmp->vm_arch.vm_seg[D].mem_len) - BASICSTACK,
			CLICK2ABS(vmp->vm_arch.vm_seg[T].mem_phys),
			CLICK2ABS(vmp->vm_arch.vm_seg[D].mem_phys),
				VM_STACKTOP) != OK) {
			vm_panic("failed proc_new for boot process", NO_NUM);
		}
	}

	/* Set up table of calls. */
#define CALLMAP(code, func) { int i;			      \
	if((i=CALLNUMBER(code)) < 0) { vm_panic(#code " invalid", (code)); } \
	if(i >= NR_VM_CALLS) { vm_panic(#code " invalid", (code)); } \
	vm_calls[i].vmc_func = (func); 				      \
	vm_calls[i].vmc_name = #code; 				      \
}

	/* Set call table to 0. This invalidates all calls (clear
	 * vmc_func).
	 */
	memset(vm_calls, 0, sizeof(vm_calls));

	/* Basic VM calls. */
	CALLMAP(VM_MMAP, do_mmap);
	CALLMAP(VM_MUNMAP, do_munmap);
	CALLMAP(VM_MUNMAP_TEXT, do_munmap);
	CALLMAP(VM_MAP_PHYS, do_map_phys);
	CALLMAP(VM_UNMAP_PHYS, do_unmap_phys);

	/* Calls from PM. */
	CALLMAP(VM_EXIT, do_exit);
	CALLMAP(VM_FORK, do_fork);
	CALLMAP(VM_BRK, do_brk);
	CALLMAP(VM_EXEC_NEWMEM, do_exec_newmem);
	CALLMAP(VM_PUSH_SIG, do_push_sig);
	CALLMAP(VM_WILLEXIT, do_willexit);
	CALLMAP(VM_ADDDMA, do_adddma);
	CALLMAP(VM_DELDMA, do_deldma);
	CALLMAP(VM_GETDMA, do_getdma);
	CALLMAP(VM_NOTIFY_SIG, do_notify_sig);

	/* Calls from RS */
	CALLMAP(VM_RS_SET_PRIV, do_rs_set_priv);

	/* Generic calls. */
	CALLMAP(VM_REMAP, do_remap);
	CALLMAP(VM_GETPHYS, do_get_phys);
	CALLMAP(VM_SHM_UNMAP, do_shared_unmap);
	CALLMAP(VM_GETREF, do_get_refcount);
	CALLMAP(VM_INFO, do_info);
	CALLMAP(VM_QUERY_EXIT, do_query_exit);

	/* Sanity checks */
	if(find_kernel_top() >= VM_PROCSTART)
		vm_panic("kernel loaded too high", NO_NUM);

	/* Initialize the structures for queryexit */
	init_query_exit();

	/* Unmap our own low pages. */
	unmap_ok = 1;
	_minix_unmapzero();

	/* Map all the services in the boot image. */
	if((s = sys_safecopyfrom(RS_PROC_NR, info->rproctab_gid, 0,
		(vir_bytes) rprocpub, sizeof(rprocpub), S)) != OK) {
		panic("VM", "sys_safecopyfrom failed", s);
	}
	for(i=0;i < NR_BOOT_PROCS;i++) {
		if(rprocpub[i].in_use) {
			if((s = map_service(&rprocpub[i])) != OK) {
				vm_panic("unable to map service", s);
			}
		}
	}

	return(OK);
}
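
Note: the CALLMAP macro above fills a dispatch table indexed by call number, recording both the handler and the stringified call name. A self-contained sketch of the same pattern with made-up call codes (none of the MINIX names or numbers are used):

#include <stdio.h>
#include <string.h>

#define NR_CALLS 8
#define CALLNUMBER(c) ((c) - CALL_BASE)
enum { CALL_BASE = 100, CALL_PING = 100, CALL_ECHO, CALL_QUIT };

struct call_entry {
    int        (*func)(int arg);
    const char  *name;              /* filled from the stringified enum name */
};

static struct call_entry calls[NR_CALLS];

/* Register one handler, mirroring the CALLMAP idiom: compute the slot,
 * bounds-check it, then record the function pointer and the call name. */
#define CALLMAP(code, fn) do {                     \
    int i_ = CALLNUMBER(code);                     \
    if (i_ >= 0 && i_ < NR_CALLS) {                \
        calls[i_].func = (fn);                     \
        calls[i_].name = #code;                    \
    }                                              \
} while (0)

static int do_ping(int arg) { return arg + 1; }
static int do_echo(int arg) { return arg; }

int main(void) {
    memset(calls, 0, sizeof(calls));    /* NULL func marks an invalid slot */
    CALLMAP(CALL_PING, do_ping);
    CALLMAP(CALL_ECHO, do_echo);

    int slot = CALLNUMBER(CALL_PING);
    if (calls[slot].func)
        printf("%s -> %d\n", calls[slot].name, calls[slot].func(41));
    return 0;
}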
Code Example #6
File: main.c Project: junzhe/minix_pandaboard_es
void init_vm(void)
{
	int s, i;
	static struct memory mem_chunks[NR_MEMS];
	static struct boot_image *ip;
	extern void __minix_init(void);
	multiboot_module_t *mod;
	vir_bytes kern_dyn, kern_static;

#if SANITYCHECKS
	incheck = nocheck = 0;
#endif

	/* Retrieve various crucial boot parameters */
	if(OK != (s=sys_getkinfo(&kernel_boot_info))) {
		panic("couldn't get bootinfo: %d", s);
	}

	/* Turn file mmap on? */
	env_parse("filemap", "d", 0, &enable_filemap, 0, 1);

	/* Sanity check */
	assert(kernel_boot_info.mmap_size > 0);
	assert(kernel_boot_info.mods_with_kernel > 0);

	/* Get chunks of available memory. */
	get_mem_chunks(mem_chunks);

	/* Set table to 0. This invalidates all slots (clear VMF_INUSE). */
	memset(vmproc, 0, sizeof(vmproc));

	for(i = 0; i < ELEMENTS(vmproc); i++) {
		vmproc[i].vm_slot = i;
	}

	/* Initialize ACL data structures. */
	acl_init();

	/* region management initialization. */
	map_region_init();

	/* Initialize tables to all physical memory. */
	mem_init(mem_chunks);

	/* Architecture-dependent initialization. */
	init_proc(VM_PROC_NR);
	pt_init();

	/* The kernel's freelist does not include boot-time modules; let
	 * the allocator know that the total memory is bigger.
	 */
	for (mod = &kernel_boot_info.module_list[0];
		mod < &kernel_boot_info.module_list[kernel_boot_info.mods_with_kernel-1]; mod++) {
		phys_bytes len = mod->mod_end-mod->mod_start+1;
		len = roundup(len, VM_PAGE_SIZE);
		mem_add_total_pages(len/VM_PAGE_SIZE);
	}

	kern_dyn = kernel_boot_info.kernel_allocated_bytes_dynamic;
	kern_static = kernel_boot_info.kernel_allocated_bytes;
	kern_static = roundup(kern_static, VM_PAGE_SIZE);
	mem_add_total_pages((kern_dyn + kern_static)/VM_PAGE_SIZE);

	/* Give these processes their own page table. */
	for (ip = &kernel_boot_info.boot_procs[0];
		ip < &kernel_boot_info.boot_procs[NR_BOOT_PROCS]; ip++) {
		struct vmproc *vmp;

		if(ip->proc_nr < 0) continue;

		assert(ip->start_addr);

		/* VM has already been set up by the kernel and pt_init().
		 * Any other boot process is already in memory and is set up
		 * here.
		 */
		if(ip->proc_nr == VM_PROC_NR) continue;

		vmp = init_proc(ip->proc_nr);

		exec_bootproc(vmp, ip);

		/* Free the file blob */
		assert(!(ip->start_addr % VM_PAGE_SIZE));
		ip->len = roundup(ip->len, VM_PAGE_SIZE);
		free_mem(ABS2CLICK(ip->start_addr), ABS2CLICK(ip->len));
	}

	/* Set up table of calls. */
#define CALLMAP(code, func) { int i;		      \
	i=CALLNUMBER(code);				\
	assert(i >= 0);					\
	assert(i < NR_VM_CALLS);			\
	vm_calls[i].vmc_func = (func); 				      \
	vm_calls[i].vmc_name = #code; 				      \
}

	/* Set call table to 0. This invalidates all calls (clear
	 * vmc_func).
	 */
	memset(vm_calls, 0, sizeof(vm_calls));

	/* Basic VM calls. */
	CALLMAP(VM_MMAP, do_mmap);
	CALLMAP(VM_MUNMAP, do_munmap);
	CALLMAP(VM_MAP_PHYS, do_map_phys);
	CALLMAP(VM_UNMAP_PHYS, do_munmap);

	/* Calls from PM. */
	CALLMAP(VM_EXIT, do_exit);
	CALLMAP(VM_FORK, do_fork);
	CALLMAP(VM_BRK, do_brk);
	CALLMAP(VM_WILLEXIT, do_willexit);
	CALLMAP(VM_NOTIFY_SIG, do_notify_sig);

	/* Calls from VFS. */
	CALLMAP(VM_VFS_REPLY, do_vfs_reply);
	CALLMAP(VM_VFS_MMAP, do_vfs_mmap);

	/* Calls from RS */
	CALLMAP(VM_RS_SET_PRIV, do_rs_set_priv);
	CALLMAP(VM_RS_UPDATE, do_rs_update);
	CALLMAP(VM_RS_MEMCTL, do_rs_memctl);

	/* Calls from RS/VFS */
	CALLMAP(VM_PROCCTL, do_procctl);

	/* Generic calls. */
	CALLMAP(VM_REMAP, do_remap);
	CALLMAP(VM_REMAP_RO, do_remap);
	CALLMAP(VM_GETPHYS, do_get_phys);
	CALLMAP(VM_SHM_UNMAP, do_munmap);
	CALLMAP(VM_GETREF, do_get_refcount);
	CALLMAP(VM_INFO, do_info);
	CALLMAP(VM_QUERY_EXIT, do_query_exit);
	CALLMAP(VM_WATCH_EXIT, do_watch_exit);

	/* Cache blocks. */
	CALLMAP(VM_MAPCACHEPAGE, do_mapcache);
	CALLMAP(VM_SETCACHEPAGE, do_setcache);

	/* getrusage */
	CALLMAP(VM_GETRUSAGE, do_getrusage);

	/* Initialize the structures for queryexit */
	init_query_exit();

	/* Acquire kernel ipc vectors that weren't available
	 * before VM had determined kernel mappings
	 */
	__minix_init();
}
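
Note: the module-accounting loop above rounds each boot module's byte length up to whole pages before crediting it to the allocator. A small stand-alone sketch of that arithmetic (the ROUNDUP definition here is assumed for illustration, not taken from the MINIX headers):

#include <stdio.h>

#define PAGE_SIZE 4096ul
/* Round x up to the next multiple of align (align must be a power of two). */
#define ROUNDUP(x, align) (((x) + (align) - 1) & ~((align) - 1))

int main(void) {
    unsigned long mod_start = 0x100000ul, mod_end = 0x1010fful;
    unsigned long len = mod_end - mod_start + 1;        /* 4352 bytes */
    len = ROUNDUP(len, PAGE_SIZE);                      /* -> 8192 bytes */
    printf("module occupies %lu page(s)\n", len / PAGE_SIZE);
    return 0;
}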
Code Example #7
/*===========================================================================*
 *				vm_init					     *
 *===========================================================================*/
static void vm_init(void)
{
    int s, i;
    int click, clicksforgotten = 0;
    struct memory mem_chunks[NR_MEMS];
    struct boot_image image[NR_BOOT_PROCS];
    struct boot_image *ip;
    phys_bytes limit = 0;

    /* The initrd is put right after boot image */
    if ((s = sys_getbootparam(&bootparam)) != 0) {
        panic("VM","Couldn't get boot parameters!",s);
    }

    /* get what setup found out */
    memcpy(mem_chunks, bootparam.nucleos_kludge.mem, sizeof(bootparam.nucleos_kludge.mem));

    /* Get chunks of available memory. */
    get_mem_chunks(mem_chunks);

    /* Initialize VM's process table. Request a copy of the system
     * image table that is defined at the kernel level to see which
     * slots to fill in.
     */
    if ((s=sys_getimage(image)) != 0)
        vm_panic("couldn't get image table: %d\n", s);

    /* Set table to 0. This invalidates all slots (clear VMF_INUSE). */
    memset(vmproc, 0, sizeof(vmproc));

    for(i = 0; i < ELEMENTS(vmproc); i++) {
        vmproc[i].vm_slot = i;
    }

    /* Walk through boot-time system processes that are alive
     * now and make valid slot entries for them.
     */
    for (ip = &image[0]; ip < &image[NR_BOOT_PROCS]; ip++) {
        phys_bytes proclimit;
        struct vmproc *vmp;

        if(ip->proc_nr >= NR_PROCS) {
            vm_panic("proc", ip->proc_nr);
        }
        if(ip->proc_nr < 0 && ip->proc_nr != SYSTEM) continue;

#define GETVMP(v, nr)						\
		if(nr >= 0) {					\
			vmp = &vmproc[ip->proc_nr];		\
		} else if(nr == SYSTEM) {			\
			vmp = &vmproc[VMP_SYSTEM];		\
		} else {					\
			vm_panic("init: crazy proc_nr", nr);	\
		}

        /* Initialize normal process table slot or special SYSTEM
         * table slot. Kernel memory is already reserved.
         */
        GETVMP(vmp, ip->proc_nr);

        /* reset fields as if exited */
        clear_proc(vmp);

        /* Get memory map for this process from the kernel. */
        if ((s=get_mem_map(ip->proc_nr, vmp->vm_arch.vm_seg)) != 0)
            vm_panic("couldn't get process mem_map",s);

        /* Remove this memory from the free list. */
        reserve_proc_mem(mem_chunks, vmp->vm_arch.vm_seg);

        /* Set memory limit. */
        proclimit = CLICK2ABS(vmp->vm_arch.vm_seg[S].mem_phys +
                              vmp->vm_arch.vm_seg[S].mem_len) - 1;

        if(proclimit > limit)
            limit = proclimit;

        vmp->vm_flags = VMF_INUSE;
        vmp->vm_endpoint = ip->endpoint;
        vmp->vm_stacktop =
            CLICK2ABS(vmp->vm_arch.vm_seg[S].mem_vir +
                      vmp->vm_arch.vm_seg[S].mem_len);
    }

#ifndef CONFIG_BUILTIN_INITRD
    /* Remove initrd memory from the free list. We must do it right after we
       have reserved memory for boot image otherwise it may happen that initrd
       will be overwritten by other process (in arch_init_vm).
     */
    if ((s = reserve_initrd_mem(mem_chunks, bootparam.hdr.ramdisk_image,
                                bootparam.hdr.ramdisk_size)) < 0) {
        panic("VM", "Couldn't reserve memory for initial ramdisk!", s);
    }
#endif
    /* Architecture-dependent initialization. */
    pt_init(limit);

    /* Initialize tables to all physical memory. */
    mem_init(mem_chunks);
    meminit_done = 1;

    /* Give these processes their own page table. */
    for (ip = &image[0]; ip < &image[NR_BOOT_PROCS]; ip++) {
        int s;
        struct vmproc *vmp;
        vir_bytes old_stacktop, old_stack;

        if(ip->proc_nr < 0) continue;

        GETVMP(vmp, ip->proc_nr);

        if(!(ip->flags & PROC_FULLVM))
            continue;

        old_stack =
            vmp->vm_arch.vm_seg[S].mem_vir +
            vmp->vm_arch.vm_seg[S].mem_len -
            vmp->vm_arch.vm_seg[D].mem_len;

        if(pt_new(&vmp->vm_pt) != 0)
            vm_panic("vm_init: no new pagetable", NO_NUM);
#define BASICSTACK VM_PAGE_SIZE
        old_stacktop = CLICK2ABS(vmp->vm_arch.vm_seg[S].mem_vir +
                                 vmp->vm_arch.vm_seg[S].mem_len);
        if(sys_vmctl(vmp->vm_endpoint, VMCTL_INCSP,
                     VM_STACKTOP - old_stacktop) != 0) {
            vm_panic("VM: vmctl for new stack failed", NO_NUM);
        }

        FREE_MEM(vmp->vm_arch.vm_seg[D].mem_phys +
                 vmp->vm_arch.vm_seg[D].mem_len,
                 old_stack);

        if(proc_new(vmp,
                    VM_PROCSTART,
                    CLICK2ABS(vmp->vm_arch.vm_seg[T].mem_len),
                    CLICK2ABS(vmp->vm_arch.vm_seg[D].mem_len),
                    BASICSTACK,
                    CLICK2ABS(vmp->vm_arch.vm_seg[S].mem_vir +
                              vmp->vm_arch.vm_seg[S].mem_len -
                              vmp->vm_arch.vm_seg[D].mem_len) - BASICSTACK,
                    CLICK2ABS(vmp->vm_arch.vm_seg[T].mem_phys),
                    CLICK2ABS(vmp->vm_arch.vm_seg[D].mem_phys),
                    VM_STACKTOP) != 0) {
            vm_panic("failed proc_new for boot process", NO_NUM);
        }
    }

    /* Set up table of calls. */
#define CALLMAP(code, func, thecaller) { int i;			      \
	if((i=CALLNUMBER(code)) < 0) { vm_panic(#code " invalid", (code)); } \
	if(i >= VM_NCALLS) { vm_panic(#code " invalid", (code)); } \
	vm_calls[i].vmc_func = (func); 				      \
	vm_calls[i].vmc_name = #code; 				      \
	if(((thecaller) < MINEPM || (thecaller) > MAXEPM) 		\
		&& (thecaller) != ANYEPM				\
		&& (thecaller) != NEEDACL ) {				\
		vm_panic(#thecaller " invalid", (code));  		\
	}								\
	vm_calls[i].vmc_callers |= EPM(thecaller);		      \
}

    /* Set call table to 0. This invalidates all calls (clear
     * vmc_func).
     */
    memset(vm_calls, 0, sizeof(vm_calls));

    /* Requests from PM (restricted to be from PM only). */
    CALLMAP(VM_EXIT, do_exit, PM_PROC_NR);
    CALLMAP(VM_FORK, do_fork, PM_PROC_NR);
    CALLMAP(VM_BRK, do_brk, PM_PROC_NR);
    CALLMAP(VM_EXEC_NEWMEM, do_exec_newmem, PM_PROC_NR);
    CALLMAP(VM_PUSH_SIG, do_push_sig, PM_PROC_NR);
    CALLMAP(VM_WILLEXIT, do_willexit, PM_PROC_NR);
    CALLMAP(VM_ADDDMA, do_adddma, PM_PROC_NR);
    CALLMAP(VM_DELDMA, do_deldma, PM_PROC_NR);
    CALLMAP(VM_GETDMA, do_getdma, PM_PROC_NR);
    CALLMAP(VM_ALLOCMEM, do_allocmem, PM_PROC_NR);
    CALLMAP(VM_NOTIFY_SIG, do_notify_sig, PM_PROC_NR);

    /* Requests from RS */
    CALLMAP(VM_RS_SET_PRIV, do_rs_set_priv, RS_PROC_NR);

    /* Requests from userland (source unrestricted). */
    CALLMAP(VM_MMAP, do_mmap, ANYEPM);
    CALLMAP(VM_MUNMAP, do_munmap, ANYEPM);
    CALLMAP(VM_MUNMAP_TEXT, do_munmap, ANYEPM);
    CALLMAP(VM_MAP_PHYS, do_map_phys, ANYEPM); /* Does its own checking. */
    CALLMAP(VM_UNMAP_PHYS, do_unmap_phys, ANYEPM);

    CALLMAP(NNR_VM_MMAP, scall_mmap, ANYEPM);
    CALLMAP(NNR_VM_MUNMAP, scall_munmap, ANYEPM);
    CALLMAP(NNR_VM_MUNMAP_TEXT, scall_munmap, ANYEPM);

    /* Requests from userland (anyone can call but need an ACL bit). */
    CALLMAP(VM_REMAP, do_remap, NEEDACL);
    CALLMAP(VM_GETPHYS, do_get_phys, NEEDACL);
    CALLMAP(VM_SHM_UNMAP, do_shared_unmap, NEEDACL);
    CALLMAP(VM_GETREF, do_get_refcount, NEEDACL);
    CALLMAP(VM_CTL, do_ctl, NEEDACL);
    CALLMAP(VM_QUERY_EXIT, do_query_exit, NEEDACL);

    /* Sanity checks */
    if(find_kernel_top() >= VM_PROCSTART)
        vm_panic("kernel loaded too high", NO_NUM);

    /* Initialize the structures for queryexit */
    init_query_exit();

    /* Unmap our own low pages. */
    unmap_ok = 1;
    unmap_page_zero();
}
Code Example #8
File: interpreter.c Project: ross-t/pbrain_vm
int main()
{
    struct master_table mastertable;

    //mark all of the entries in the master table as free
    for (int i = 0; i < 100; i++) {
        mastertable.mem[i].free = 1;
        mastertable.mem[i].base = i * 10;
    }
    srand(time(NULL));
    //initialize our semaphores to 1 and create their queues
    for (int i = 0; i < 10; i++) {
        SEM[i].count = 1;
        SEM[i].sem_queue = pQueueNew();
    }

    //initialize space for n pcbs
    struct pbrain_pcb *pcbMem = (struct pbrain_pcb *)malloc(NUM_PROCESSES * sizeof(struct pbrain_pcb));

    //as well as memory for the entire system (1000 words)
    memBase = (char *)malloc(NUM_PROCESSES * sizeof(char[100][6]));
    mainmem = (char *)malloc(1000 * sizeof(char[100][6]));
    //"zero" out all of the memory we allocated
    memset(memBase, '0', NUM_PROCESSES * sizeof(char[100][6]));
    memset(mainmem, '0', 1000 * sizeof(char[100][6]));

    //assign each process a unique PID
    for (int i = 0; i < NUM_PROCESSES; i++) {
        pcbMem[i].memory = (char (*)[100][6])memBase;
        //set each process's PID to i
        pcbMem[i].pid = i;

        //give the process a random timeslice between 1 and 10
        pcbMem[i].timeSlice = (rand() % 10) + 1;
        pcbMem[i].remaining_quantum = pcbMem[i].timeSlice;

        //load the program into pcb storage
        loadProgramToMem(i, memBase, i);
        
        //set the BAR to 100 * i
        char temp[5];
        sprintf(temp, "%04d", 100 * i);
        memcpy(pcbMem[i].BAR, temp, 4);

        //set the PC to 2 to skip the two parameters
        pcbMem[i].PC = 2;
        pcbMem[i].N = charToInt(pcbMem[i].memory[0][pcbBaseAddress(&pcbMem[i])], 6);
        pcbMem[i].memSize = charToInt(pcbMem[i].memory[0][pcbBaseAddress(&pcbMem[i]) + 1], 6);
        printf("Process %d: N = %d, size = %d\n", i, pcbMem[i].N, pcbMem[i].memSize);
        pcbMem[i].table = (struct page_table *)malloc(sizeof(struct page_table));
        pt_init(pcbMem[i].table);
        
        for (int j = 0; j < 10; j++) {    
            pcbMem[i].table->entries[j].valid = 1;
        }
    }

    struct pqueue *readyQueue = pQueueNew();
    struct pqueue *waitQueue = pQueueNew();

    //load processes into memory until we can't fit any more
    int currentLoaded = 0;
    while (currentLoaded != NUM_PROCESSES && canFit(&pcbMem[currentLoaded], &mastertable)) {
        printf("Process %d can fit (free pages: %d)\n", currentLoaded, freeCount(&mastertable));

        //set up the process's page table
        for (int i = 0; i < getPageSize(&pcbMem[currentLoaded]); i++) {
            struct master_entry *freeEntry = getFree(&mastertable);
            pcbMem[currentLoaded].table->entries[i].baseAddress = freeEntry->base;
            printf("Process %d: table entry %d base set to %d\n", pcbMem[currentLoaded].pid, i, freeEntry->base);
            freeEntry->free = 0;
        }

        for (int i = getPageSize(&pcbMem[currentLoaded]); i < 10; i++) {
            pcbMem[currentLoaded].table->entries[i].valid = 0;
            printf("Process %d: table entry %d set to invalid\n", pcbMem[currentLoaded].pid, i);
        }

        currentLoaded++;
    }
    //load all processes that were placed into memory in the ready queue

    for (int i = 0; i < currentLoaded; i++) {
        printf("Process %d is loaded into memory and placed on the ready queue\n", pcbMem[i].pid);
        pEnqueue(readyQueue, &pcbMem[i]);
    }

    while (currentLoaded != NUM_PROCESSES) {
        pEnqueue(waitQueue, &pcbMem[currentLoaded]);
        printf("Process %d couldn't fit in memory; placed in queue\n", pcbMem[currentLoaded].pid);
        currentLoaded++;
    }

    int numFinished = 0;
    struct pbrain_pcb *current = pDequeue(readyQueue);

    while (numFinished != NUM_PROCESSES) {
        //execute a step
        if (!current->halted) {
            updateEffectiveAddress(current);
            interpretStep(current);
            current->remaining_quantum--;
        }

        //if it made a system call
        if (current->trap) {
            current->trap = false;
            //the cases have braces around them because otherwise we'd have cases in protected scope
            switch (current->syscall) {
            case 0: {//getpid
                printf("Process %d called getpid\n", current->pid);
                current->R0 = current->pid;
                break;
            }

            case 1: { //wait
                int index = current->R0;

                if (index < 0 || index > 9) {
                    current->halted = true;
                    printf("Process %d: attempt to wait on invalid semaphore at index %d\n", current->pid, index);
                    break;
                }

                SEM[index].count--;

                if (SEM[index].count < 0) {
                    pEnqueue(SEM[index].sem_queue, current);
                    printf("Process %d: wait on semaphore %d\n", current->pid, index);
                    current->queueRemoved = true;
                    break;
                } else {
                    printf("Process %d: wait on semaphore %d; continues\n", current->pid, index);
                    break;
                }
                break;
            }

            case 2: { //signal
                int index = current->R0;

                if (index < 0 || index > 9) {
                    current->halted = true;
                    printf("Process %d: attempt to signal invalid semaphore at index %d\n", current->pid, index);
                    continue;
                }

                SEM[index].count++;

                if (SEM[index].count <= 0) {
                    printf("Process %d calls SIGNAL on semaphore %d; ", current->pid, index);
                    struct pbrain_pcb *blocked = pDequeue(SEM[index].sem_queue);
                    pEnqueue(readyQueue, blocked);
                    printf("; process %d was waiting for semaphore %d and moves to the ready queue\n", blocked->pid, index);
                    continue;
                } else {
                    printf("Process %d calls SIGNAL to release semaphore %d. No processes were waiting.\n", current->pid, index);
                    continue;
                }
            }
            } //end switch/case
        } //end if(current->trap)

        if (current->queueRemoved) {
            printf("Process %d is waiting and removed from ready queue\n", current->pid);
            current->queueRemoved = false;

            current = pDequeue(readyQueue);
            if (current == NULL) {
                printf("\nProcesses are deadlocked\n");
                exit(0);
                break;
            }

            printf("Process %d is now executing\n", current->pid);
            current->remaining_quantum = current->timeSlice;
        }

        //if it finished
        if (current->halted) {
            printf("Process %d halted\n", current->pid);
            numFinished++;
            //free the table entries used by that process
            freeEntries(current, &mastertable);
            //see if we can put another process in
            struct pbrain_pcb *potential = pDequeue(waitQueue);
            if (potential != NULL) {
                if (canFit(potential, &mastertable)) {
                    for (int i = 0; i < getPageSize(potential); i++) {
                        struct master_entry *freeEntry = getFree(&mastertable);
                        potential->table->entries[i].baseAddress = freeEntry->base;
                        printf("Process %d: table entry %d base set to %d\n", potential->pid, i, freeEntry->base);
                        freeEntry->free = 0;
                    }

                    for (int i = getPageSize(potential); i < 10; i++) {
                        potential->table->entries[i].valid = 0;
                        printf("Process %d: table entry %d set to invalid\n", potential->pid, i);
                    }

                    printf("Loaded process %d into memory and placed on ready queue\n", potential->pid);
                    pEnqueue(readyQueue, potential);
                } else {
                    pEnqueue(waitQueue, potential);
                }
            }
            struct pbrain_pcb *next = pDequeue(readyQueue);
            

            if (next != NULL) {
                current = next;
                current->remaining_quantum = current->timeSlice;
            }
        }

        if (current == NULL) {
            break; //there are no more processes if we reach this point and this is true
        }

        if (current->remaining_quantum == 0) {
            pEnqueue(readyQueue, current);
            current = pDequeue(readyQueue);

            if (current != NULL) {
                current->remaining_quantum = current->timeSlice;
                continue;
            } else {
                numFinished = NUM_PROCESSES;
            }
        }
    }

    writeMain(pcbMem);
/*
    for (int i = 0; i < NUM_PROCESSES; i++) {
        writeCPU(&pcbMem[i]);
        writeMem(&pcbMem[i]);
    }
*/
    return 0;
}
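
Note: the WAIT/SIGNAL handling in the interpreter follows the classic counting-semaphore-with-queue scheme: wait decrements the count and blocks the caller when it goes negative; signal increments it and wakes one waiter while the count is still non-positive. A tiny stand-alone sketch of that bookkeeping (all types and limits here are illustrative, not the pbrain_vm ones):

#include <stdio.h>

#define MAX_WAITERS 16

/* A toy counting semaphore: a negative count equals the number of waiters. */
struct toy_sem {
    int count;
    int queue[MAX_WAITERS];     /* pids of blocked processes, FIFO */
    int head, tail, len;
};

static void sem_init(struct toy_sem *s, int count) {
    s->count = count;
    s->head = s->tail = s->len = 0;
}

/* Returns 1 if pid may continue, 0 if it must block. */
static int sem_wait(struct toy_sem *s, int pid) {
    if (--s->count < 0) {
        s->queue[s->tail] = pid;                /* enqueue the waiter */
        s->tail = (s->tail + 1) % MAX_WAITERS;
        s->len++;
        return 0;
    }
    return 1;
}

/* Returns the pid to wake up, or -1 if nobody was waiting. */
static int sem_signal(struct toy_sem *s) {
    if (++s->count <= 0 && s->len > 0) {
        int pid = s->queue[s->head];            /* dequeue one waiter */
        s->head = (s->head + 1) % MAX_WAITERS;
        s->len--;
        return pid;
    }
    return -1;
}

int main(void) {
    struct toy_sem s;
    sem_init(&s, 1);
    printf("P0 wait -> %d\n", sem_wait(&s, 0));     /* continues */
    printf("P1 wait -> %d\n", sem_wait(&s, 1));     /* blocks */
    printf("signal wakes pid %d\n", sem_signal(&s));
    return 0;
}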
Code Example #9
File: dev.c Project: 0xffea/gnumach
int net_dev_init(void)
{
	struct device *dev, **dp;

	/*
	 *	Initialise the packet receive queue.
	 */

#ifndef MACH
	skb_queue_head_init(&backlog);
#endif
	
	/*
	 *	The bridge has to be up before the devices
	 */

#ifdef CONFIG_BRIDGE	 
	br_init();
#endif	
	
	/*
	 * This is Very Ugly(tm).
	 *
	 * Some devices want to be initialized early..
	 */
#if defined(CONFIG_PI)
	pi_init();
#endif	
#if defined(CONFIG_PT)
	pt_init();
#endif
#if defined(CONFIG_DLCI)
	dlci_setup();
#endif
#if defined(CONFIG_SDLA)
	sdla_setup();
#endif
	/*
	 *	SLHC if present needs attaching so other people see it
	 *	even if not opened.
	 */
#if (defined(CONFIG_SLIP) && defined(CONFIG_SLIP_COMPRESSED)) \
	 || defined(CONFIG_PPP) \
    || (defined(CONFIG_ISDN) && defined(CONFIG_ISDN_PPP))
	slhc_install();
#endif	

	/*
	 *	Add the devices.
	 *	If the call to dev->init fails, the dev is removed
	 *	from the chain disconnecting the device until the
	 *	next reboot.
	 */

	dp = &dev_base;
	while ((dev = *dp) != NULL)
	{
		int i;
		for (i = 0; i < DEV_NUMBUFFS; i++)  {
			skb_queue_head_init(dev->buffs + i);
		}

		if (dev->init && dev->init(dev)) 
		{
			/*
			 *	It failed to come up. Unhook it.
			 */
			*dp = dev->next;
		} 
		else
		{
			dp = &dev->next;
		}
	}

#ifdef CONFIG_PROC_FS
	proc_net_register(&(struct proc_dir_entry) {
		PROC_NET_DEV, 3, "dev",
		S_IFREG | S_IRUGO, 1, 0, 0,
		0, &proc_net_inode_operations,
		dev_get_info
	});
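
Note: the device-registration loop in net_dev_init() deletes devices whose init() failed using the pointer-to-a-pointer idiom, so the head of the list needs no special case. A minimal stand-alone version of the same idiom (the node type and "keep" flag are illustrative):

#include <stdio.h>
#include <stdlib.h>

struct node {
    int          keep;          /* stands in for dev->init() succeeding */
    struct node *next;
};

/* Remove every node whose init "failed", as in net_dev_init(): dp always
 * points at the link that would have to change if this node is removed. */
static void unhook_failed(struct node **head) {
    struct node **dp = head, *n;
    while ((n = *dp) != NULL) {
        if (!n->keep) {
            *dp = n->next;      /* bypass n; dp itself does not advance */
            free(n);
        } else {
            dp = &n->next;      /* advance to the next link */
        }
    }
}

int main(void) {
    struct node *head = NULL, **tail = &head;
    int keep[] = { 0, 1, 0, 1 };
    for (int i = 0; i < 4; i++) {
        struct node *n = malloc(sizeof(*n));
        n->keep = keep[i];
        n->next = NULL;
        *tail = n;
        tail = &n->next;
    }
    unhook_failed(&head);
    for (struct node *n = head; n != NULL; n = n->next)
        printf("kept node with keep=%d\n", n->keep);
    return 0;
}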
Code Example #10
File: main.c Project: vooon/imuahrs-fw
/*
 * Application entry point.
 */
int main(void)
{
	enum led_status lstat = LST_INIT;
	EventListener el0;
	alert_status_t proto_st = ALST_INIT;
	alert_status_t bmp085_st = ALST_INIT;
	alert_status_t mpu6050_st = ALST_INIT;
	alert_status_t hmc5883_st = ALST_INIT;

	/*
	 * System initializations.
	 * - HAL initialization, this also initializes the configured device drivers
	 *   and performs the board-specific initializations.
	 * - Kernel initialization, the main() function becomes a thread and the
	 *   RTOS is active.
	 */
	halInit();
	chSysInit();

#ifdef BOARD_IMU_AHRF
	/* Clear DRDY pad */
	palClearPad(GPIOA, GPIOA_DRDY);

	/* Activates serial */
	sdStart(&SD1, NULL);
	sdStart(&SD2, NULL);

	/* Activate pwm */
	pwmStart(&PWMD1, &pwm1cfg);

	/* Activate i2c */
	i2cStart(&I2CD1, &i2c1cfg);

	/* Activate exti */
	extStart(&EXTD1, &extcfg);

#endif /* BOARD_IMU_AHRF */
#ifdef BOARD_CAPTAIN_PRO2

	/* Activates serial */
	sdStart(&SD3, NULL);
	sdStart(&SD4, NULL);

	/* Activate pwm */
	pwmStart(&PWMD3, &pwm3cfg);
	pwmStart(&PWMD4, &pwm4cfg);
	pwmStart(&PWMD5, &pwm5cfg);

	/* Activate i2c */
	i2cStart(&I2CD1, &i2c1cfg);

	/* Activate exti */
	extStart(&EXTD1, &extcfg);

	/* Activate adc */
	adcStart(&ADCD1, NULL);

#endif /* BOARD_CAPTAIN_PRO2 */

	/* alert subsys */
	chEvtInit(&alert_event_source);
	chEvtRegister(&alert_event_source, &el0, 0);

	/* init devices */
	pt_init();
	chThdSleepMilliseconds(10); /* power on delay */
#ifdef HAS_DEV_BMP085
	bmp085_init();
	chThdSleepMilliseconds(50); /* init delay */
#endif
#ifdef HAS_DEV_MS5611
	ms5611_init(&ms5611cfg);
	chThdSleepMilliseconds(50); /* init delay */
#endif
#ifdef HAS_DEV_MPU6050
	mpu6050_init(&mpu6050cfg);
	chThdSleepMilliseconds(250); /* give some time for mpu6050 configuration */
#endif
#ifdef HAS_DEV_HMC5883
	hmc5883_init(&hmc5883cfg);
#endif
#ifdef HAS_DEV_SERVOPWM
	servopwm_init(&servopwmcfg);
#endif
#ifdef HAS_DEV_NTC10K
	ntc10k_init();
#endif
#ifdef HAS_DEV_RPM
	rpm_init();
#endif

#ifdef BOARD_IMU_AHRF
	/* Set DRDY pad */
	palSetPad(GPIOA, GPIOA_DRDY);
#endif

	while (TRUE) {
		eventmask_t msk = chEvtWaitOneTimeout(ALL_EVENTS, MS2ST(100));

		if (msk & EVENT_MASK(0)) {
			flagsmask_t fl = chEvtGetAndClearFlags(&el0);

			if (fl & ALERT_FLAG_PROTO)
				proto_st = pt_get_status();

#ifdef HAS_DEV_MPU6050
			if (fl & ALERT_FLAG_MPU6050)
				mpu6050_st = mpu6050_get_status();
#endif

#ifdef HAS_DEV_HMC5883
			if (fl & ALERT_FLAG_HMC5883)
				hmc5883_st = hmc5883_get_status();
#endif

#ifdef HAS_DEV_BMP085
			if (fl & ALERT_FLAG_BMP085)
				bmp085_st = bmp085_get_status();
#endif

#ifdef HAS_DEV_MS5611
			if (fl & ALERT_FLAG_BMP085)
				bmp085_st = ms5611_get_status();
#endif

			pt_set_sens_state(mpu6050_st, hmc5883_st, bmp085_st);
		}

		if (proto_st == ALST_FAIL || mpu6050_st == ALST_FAIL || hmc5883_st == ALST_FAIL || bmp085_st == ALST_FAIL)
			lstat = LST_FAIL;
		else if (proto_st == ALST_INIT || mpu6050_st == ALST_INIT || hmc5883_st == ALST_INIT || bmp085_st == ALST_INIT)
			lstat = LST_INIT;
		else if (proto_st == ALST_NORMAL && mpu6050_st == ALST_NORMAL && hmc5883_st == ALST_NORMAL && bmp085_st == ALST_NORMAL)
			lstat = LST_NORMAL;

		led_update(lstat);
	}
}
Code Example #11
File: main.c Project: Ga-vin/MINIX3
void init_vm(void)
{
	int s, i;
	static struct memory mem_chunks[NR_MEMS];
	static struct boot_image *ip;
	extern void __minix_init(void);

#if SANITYCHECKS
	incheck = nocheck = 0;
#endif

	/* Retrieve various crucial boot parameters */
	if(OK != (s=sys_getkinfo(&kernel_boot_info))) {
		panic("couldn't get bootinfo: %d", s);
	}

	/* Sanity check */
	assert(kernel_boot_info.mmap_size > 0);
	assert(kernel_boot_info.mods_with_kernel > 0);

#if SANITYCHECKS
	env_parse("vm_sanitychecklevel", "d", 0, &vm_sanitychecklevel, 0, SCL_MAX);
#endif

	/* Get chunks of available memory. */
	get_mem_chunks(mem_chunks);

	/* Set table to 0. This invalidates all slots (clear VMF_INUSE). */
	memset(vmproc, 0, sizeof(vmproc));

	for(i = 0; i < ELEMENTS(vmproc); i++) {
		vmproc[i].vm_slot = i;
	}

	/* region management initialization. */
	map_region_init();

	/* Initialize tables to all physical memory. */
	mem_init(mem_chunks);

	/* Architecture-dependent initialization. */
	init_proc(VM_PROC_NR);
	pt_init();

	/* Give these processes their own page table. */
	for (ip = &kernel_boot_info.boot_procs[0];
		ip < &kernel_boot_info.boot_procs[NR_BOOT_PROCS]; ip++) {
		struct vmproc *vmp;

		if(ip->proc_nr < 0) continue;

		assert(ip->start_addr);

		/* VM has already been set up by the kernel and pt_init().
		 * Any other boot process is already in memory and is set up
		 * here.
		 */
		if(ip->proc_nr == VM_PROC_NR) continue;

		vmp = init_proc(ip->proc_nr);

		exec_bootproc(vmp, ip);

		/* Free the file blob */
		assert(!(ip->start_addr % VM_PAGE_SIZE));
		ip->len = roundup(ip->len, VM_PAGE_SIZE);
		free_mem(ABS2CLICK(ip->start_addr), ABS2CLICK(ip->len));
	}

	/* Set up table of calls. */
#define CALLMAP(code, func) { int i;		      \
	i=CALLNUMBER(code);				\
	assert(i >= 0);					\
	assert(i < NR_VM_CALLS);			\
	vm_calls[i].vmc_func = (func); 				      \
	vm_calls[i].vmc_name = #code; 				      \
}

	/* Set call table to 0. This invalidates all calls (clear
	 * vmc_func).
	 */
	memset(vm_calls, 0, sizeof(vm_calls));

	/* Basic VM calls. */
	CALLMAP(VM_MMAP, do_mmap);
	CALLMAP(VM_MUNMAP, do_munmap);
	CALLMAP(VM_MAP_PHYS, do_map_phys);
	CALLMAP(VM_UNMAP_PHYS, do_munmap);

	/* Calls from PM. */
	CALLMAP(VM_EXIT, do_exit);
	CALLMAP(VM_FORK, do_fork);
	CALLMAP(VM_BRK, do_brk);
	CALLMAP(VM_WILLEXIT, do_willexit);
	CALLMAP(VM_NOTIFY_SIG, do_notify_sig);

	/* Calls from RS */
	CALLMAP(VM_RS_SET_PRIV, do_rs_set_priv);
	CALLMAP(VM_RS_UPDATE, do_rs_update);
	CALLMAP(VM_RS_MEMCTL, do_rs_memctl);

	/* Calls from RS/VFS */
	CALLMAP(VM_PROCCTL, do_procctl);

	/* Generic calls. */
	CALLMAP(VM_REMAP, do_remap);
	CALLMAP(VM_REMAP_RO, do_remap);
	CALLMAP(VM_GETPHYS, do_get_phys);
	CALLMAP(VM_SHM_UNMAP, do_munmap);
	CALLMAP(VM_GETREF, do_get_refcount);
	CALLMAP(VM_INFO, do_info);
	CALLMAP(VM_QUERY_EXIT, do_query_exit);
	CALLMAP(VM_WATCH_EXIT, do_watch_exit);
	CALLMAP(VM_FORGETBLOCKS, do_forgetblocks);
	CALLMAP(VM_FORGETBLOCK, do_forgetblock);
	CALLMAP(VM_YIELDBLOCKGETBLOCK, do_yieldblockgetblock);

	/* Initialize the structures for queryexit */
	init_query_exit();

	/* Acquire kernel ipc vectors that weren't available
	 * before VM had determined kernel mappings
	 */
	__minix_init();
}