Esempio n. 1
0
/*
 * vmem_mmap_arena -- return the singleton arena that sources memory from
 * the OS mapping primitives (VirtualAlloc on Windows, mmap elsewhere),
 * creating it on first call.  Optionally hands the arena's alloc/free
 * entry points back through a_out/f_out.
 *
 * NOTE(review): the non-Windows path calls _sysconf(), a reserved-prefix
 * name -- confirm this is an intentional internal alias of sysconf(3)
 * and not a typo for sysconf(_SC_PAGESIZE).
 */
vmem_t *
vmem_mmap_arena(vmem_alloc_t **a_out, vmem_free_t **f_out)
{
#ifdef _WIN32
	SYSTEM_INFO info;
	size_t pagesize;
#else
	size_t pagesize = _sysconf(_SC_PAGESIZE);
#endif
	
#ifdef _WIN32
	GetSystemInfo(&info);
	pagesize = info.dwPageSize;
	/* VirtualAlloc reservations start on allocation-granularity boundaries */
	CHUNKSIZE = info.dwAllocationGranularity;
#elif !defined(MAP_ALIGN)
	/* without MAP_ALIGN, fall back to page-sized import chunks */
	CHUNKSIZE = pagesize;
#endif
	
	/* lazily create the singleton arena on first use */
	if (mmap_heap == NULL) {
		mmap_heap = vmem_init("mmap_top", 
			CHUNKSIZE,
		    vmem_mmap_top_alloc, vmem_free,
		    "mmap_heap", NULL, 0, pagesize,
		    vmem_mmap_alloc, vmem_mmap_free);
	}

	if (a_out != NULL)
		*a_out = vmem_mmap_alloc;
	if (f_out != NULL)
		*f_out = vmem_mmap_free;

	return (mmap_heap);
}
Esempio n. 2
0
/*
 * Kernel entry point: bring each subsystem online in dependency order,
 * then park the CPU in a halt loop.  Never returns.
 */
void _kmain(struct multiboot_info *mboot) {
	clrscr();

	// Give dlmalloc its initial break so roughly 4K is usable for malloc
	// until the physical memory manager enables proper virtual memory.
	kprintf("Initialising malloc()...\n");
	dlmalloc_sbrk(0);

	kprintf("Initialising physical memory manager...\n");
	pmem_init(mboot);

	kprintf("Completing virtual memory initialisation...\n");
	vmem_init();

	kprintf("Configuring software and hardware interrupts...\n");
	interrupts_init();

	kprintf("Initialising machine devices...\n");
	init_devices();

	kprintf("Enabling interrupts...\n");
	interrupts_enable();

	kprintf("Startup complete!\n");

	// Idle forever: hlt wakes only to service interrupts.
	for (;;)
		__asm__ volatile("hlt");
}
Esempio n. 3
0
/*
 * vmem_check -- memory pool consistency check
 *
 * Skips past the opaque vmem header at the start of the pool and asks
 * jemalloc to validate the remainder.  Returns the underlying check's
 * result.
 */
int
vmem_check(VMEM *vmp)
{
	pool_t *pool;

	vmem_init();
	LOG(3, "vmp %p", vmp);

	pool = (pool_t *)((uintptr_t)vmp + Header_size);
	return je_vmem_pool_check(pool);
}
Esempio n. 4
0
/*
 * vmem_subsystem_init -- bootstrap the arenas and the pool that the vmem
 * subsystem itself allocates its metadata from, importing address space
 * from the supplied arena vm.
 */
void
vmem_subsystem_init(vmem_t *vm)
{

	/* metadata VA arena, importing from vm via vmem_alloc/vmem_free */
	kmem_va_meta_arena = vmem_init(&kmem_va_meta_arena_store, "vmem-va",
	    0, 0, PAGE_SIZE, vmem_alloc, vmem_free, vm,
	    0, VM_NOSLEEP | VM_BOOTSTRAP | VM_LARGEIMPORT,
	    IPL_VM);

	/* backed metadata arena layered on the VA arena above */
	kmem_meta_arena = vmem_init(&kmem_meta_arena_store, "vmem-meta",
	    0, 0, PAGE_SIZE,
	    uvm_km_kmem_alloc, uvm_km_kmem_free, kmem_va_meta_arena,
	    0, VM_NOSLEEP | VM_BOOTSTRAP, IPL_VM);

	/* boundary tags come from a pool backed by the metadata arena */
	pool_init(&vmem_btag_pool, sizeof(bt_t), 0, 0, PR_PHINPAGE,
		    "vmembt", &pool_allocator_vmem_meta, IPL_VM);
}
Esempio n. 5
0
/*
 * vmem_create: construct a new arena with the given quantum, optional
 * import/release callbacks and source arena.  Cross-import (VM_XIMPORT)
 * is not permitted through this interface.
 */
vmem_t *
vmem_create(const char *name, vmem_addr_t base, vmem_size_t size,
    vmem_size_t quantum, vmem_import_t *importfn, vmem_release_t *releasefn,
    vmem_t *source, vmem_size_t qcache_max, vm_flag_t flags, int ipl)
{
	vmem_t *vm;

	KASSERT((flags & (VM_XIMPORT)) == 0);

	vm = vmem_init(NULL, name, base, size, quantum,
	    importfn, releasefn, source, qcache_max, flags, ipl);
	return vm;
}
Esempio n. 6
0
/*
 * Memory-manager daemon: set up the page file, the logger and the shared
 * vmem region, install one recording handler for SIGUSR1/SIGUSR2/SIGINT,
 * then loop servicing page faults (USR1), page-table dumps (USR2) and
 * interrupts (INT) raised by the client process.
 *
 * NOTE(review): the loop never exits, so `return 0` is unreachable;
 * presumably sighandler() performs cleanup/exit on SIGINT -- confirm.
 */
int main(int argc, char **argv) {
	struct sigaction sigact;

	init_pagefile(); // init page file
	open_logger();   // open logfile

	/* Create shared memory and init vmem structure */
	vmem_init();
	TEST_AND_EXIT_ERRNO(!vmem, "Error initialising vmem");
	PRINT_DEBUG((stderr, "vmem successfully created\n"));

	// scan parameter
	vmem->adm.program_name = argv[0];
	vmem->adm.page_rep_algo = VMEM_ALGO_AGING; // default; scan_params may override
	scan_params(argc, argv);

	/* Setup signal handler */
	/* one handler for all three signals; it records the signal number */
	sigact.sa_handler = sighandler;
	sigemptyset(&sigact.sa_mask);
	sigact.sa_flags = 0;

	TEST_AND_EXIT_ERRNO(sigaction(SIGUSR1, &sigact, NULL) == -1,
			"Error installing signal handler for USR1");
	PRINT_DEBUG((stderr, "USR1 handler successfully installed\n"));

	TEST_AND_EXIT_ERRNO(sigaction(SIGUSR2, &sigact, NULL) == -1,
			"Error installing signal handler for USR2");
	PRINT_DEBUG((stderr, "USR2 handler successfully installed\n"));

	TEST_AND_EXIT_ERRNO(sigaction(SIGINT, &sigact, NULL) == -1,
			"Error installing signal handler for INT");
	PRINT_DEBUG((stderr, "INT handler successfully installed\n"));

	/* Signal processing loop */
	/* NOTE(review): signal_number is read after pause(); assumed to be a
	 * volatile sig_atomic_t set by sighandler() -- confirm declaration. */
	while (1) {
		signal_number = 0;
		pause();
		if (signal_number == SIGUSR1) { /* Page fault */
			PRINT_DEBUG((stderr, "Processed SIGUSR1\n"));
			signal_number = 0;
		} else if (signal_number == SIGUSR2) { /* PT dump */
			PRINT_DEBUG((stderr, "Processed SIGUSR2\n"));
			signal_number = 0;
		} else if (signal_number == SIGINT) {
			PRINT_DEBUG((stderr, "Processed SIGINT\n"));
		}
	}

	return 0;
}
Esempio n. 7
0
/*
 * vmem_set_funcs -- allow overriding libvmem's call to malloc, etc.
 *
 * Ensures the library is initialized, then distributes the caller's
 * hooks to the util allocation layer, the output/print layer and the
 * embedded jemalloc pool allocator.
 *
 * NOTE(review): whether a NULL hook keeps the current default is decided
 * by util_set_alloc_funcs() -- confirm before documenting it for users.
 */
void
vmem_set_funcs(
		void *(*malloc_func)(size_t size),
		void (*free_func)(void *ptr),
		void *(*realloc_func)(void *ptr, size_t size),
		char *(*strdup_func)(const char *s),
		void (*print_func)(const char *s))
{
	vmem_init();
	LOG(3, NULL);

	util_set_alloc_funcs(malloc_func, free_func,
			realloc_func, strdup_func);
	out_set_print_func(print_func);
	je_vmem_pool_set_alloc_funcs(malloc_func, free_func);
}
Esempio n. 8
0
/*
 * Initialize the MemGuard mock allocator.  All objects from MemGuard come
 * out of a single VM map (contiguous chunk of address space) carved from
 * the parent arena; the cursor/base globals track that range.
 */
void
memguard_init(vmem_t *parent)
{
	vm_offset_t base;

	/* Reserve the whole MemGuard submap up front from the parent. */
	vmem_alloc(parent, memguard_mapsize, M_BESTFIT | M_WAITOK, &base);
	vmem_init(memguard_arena, "memguard arena", base, memguard_mapsize,
	    PAGE_SIZE, 0, M_WAITOK);
	memguard_cursor = base;
	memguard_base = base;

	printf("MEMGUARD DEBUGGING ALLOCATOR INITIALIZED:\n");
	printf("\tMEMGUARD map base: 0x%lx\n", (u_long)base);
	/*
	 * %ju matches the unsigned uintmax_t argument; the previous %jd
	 * (signed intmax_t) was a format/type mismatch (UB per C11 7.21.6.1).
	 */
	printf("\tMEMGUARD map size: %ju KBytes\n",
	    (uintmax_t)memguard_mapsize >> 10);
}
Esempio n. 9
0
/*
 * vmem_create -- create a memory pool in a temp file
 *
 * Maps a temporary file (rounded up to a page multiple, at least
 * VMEM_MIN_POOL bytes) under dir, stores the opaque struct vmem header at
 * the start of the mapping, and hands the remainder to jemalloc as the
 * pool's backing store.  Returns the pool handle, or NULL on failure.
 */
VMEM *
vmem_create(const char *dir, size_t size)
{
	vmem_init();
	LOG(3, "dir \"%s\" size %zu", dir, size);

	if (size < VMEM_MIN_POOL) {
		ERR("size %zu smaller than %zu", size, VMEM_MIN_POOL);
		errno = EINVAL;
		return NULL;
	}

	/* silently enforce multiple of page size */
	size = roundup(size, Pagesize);

	void *addr;
	if ((addr = util_map_tmpfile(dir, size, 4 << 20)) == NULL)
		return NULL;

	/* store opaque info at beginning of mapped area */
	struct vmem *vmp = addr;
	memset(&vmp->hdr, '\0', sizeof (vmp->hdr));
	memcpy(vmp->hdr.signature, VMEM_HDR_SIG, POOL_HDR_SIG_LEN);
	vmp->addr = addr;
	vmp->size = size;
	vmp->caller_mapped = 0; /* mapping owned by library (cf. vmem_create_in_region) */

	/* Prepare pool for jemalloc */
	if (je_vmem_pool_create((void *)((uintptr_t)addr + Header_size),
			size - Header_size, 1) == NULL) {
		ERR("pool creation failed");
		/* we created the mapping, so we must tear it down on failure */
		util_unmap(vmp->addr, vmp->size);
		return NULL;
	}

	/*
	 * If possible, turn off all permissions on the pool header page.
	 *
	 * The prototype PMFS doesn't allow this when large pages are in
	 * use. It is not considered an error if this fails.
	 */
	util_range_none(addr, sizeof (struct pool_hdr));

	LOG(3, "vmp %p", vmp);
	return vmp;
}
Esempio n. 10
0
/*
 * Register save_sig_no for one signal, or abort the process.
 * errmsg goes to perror() on failure; okmsg is the debug-only success line.
 */
static void install_or_die(int signo, struct sigaction *act,
                           const char *errmsg, const char *okmsg) {
    if (sigaction(signo, act, NULL) == -1) {
        perror(errmsg);
        exit(EXIT_FAILURE);
    }
    else {
        DEBUG(fprintf(stderr, "%s", okmsg));
    }
}

/*
 * Memory-manager entry point: initialise the page file, the logfile and
 * the shared vmem structure, hook SIGUSR1/SIGUSR2/SIGINT to the recording
 * handler, then hand control to the signal processing loop.
 */
int main(void) {
    struct sigaction sigact;

    /* Init pagefile */
    init_pagefile(MMANAGE_PFNAME);

    /* Open logfile */
    open_logfile();

    /* Create shared memory and init vmem structure */
    vmem_init();

    /* All three signals share one handler that records the signal number. */
    sigact.sa_handler = save_sig_no;
    sigemptyset(&sigact.sa_mask);
    sigact.sa_flags = 0;

    install_or_die(SIGUSR1, &sigact,
                   "Error installing signal handler for USR1",
                   "USR1 handler successfully installed\n");
    install_or_die(SIGUSR2, &sigact,
                   "Error installing signal handler for USR2",
                   "USR2 handler successfully installed\n");
    install_or_die(SIGINT, &sigact,
                   "Error installing signal handler for INT",
                   "INT handler successfully installed\n");

    /* Signal processing loop */
    signal_proccessing_loop();
    exit(EXIT_SUCCESS);
}
Esempio n. 11
0
/*
 * vmem_create_in_region -- create a memory pool in a given range
 *
 * Like vmem_create(), but the caller supplies an already-mapped,
 * page-aligned region of at least VMEM_MIN_POOL bytes.  The opaque
 * struct vmem header is placed at the start of the region and the rest
 * becomes the jemalloc pool.  Returns the pool handle, or NULL with
 * errno set on failure.  The caller retains ownership of the mapping.
 */
VMEM *
vmem_create_in_region(void *addr, size_t size)
{
	vmem_init();
	LOG(3, "addr %p size %zu", addr, size);

	if (((uintptr_t)addr & (Pagesize - 1)) != 0) {
		ERR("addr %p not aligned to pagesize %lu", addr, Pagesize);
		errno = EINVAL;
		return NULL;
	}

	if (size < VMEM_MIN_POOL) {
		ERR("size %zu smaller than %zu", size, VMEM_MIN_POOL);
		errno = EINVAL;
		return NULL;
	}

	/* store opaque info at beginning of mapped area */
	struct vmem *vmp = addr;
	memset(&vmp->hdr, '\0', sizeof (vmp->hdr));
	memcpy(vmp->hdr.signature, VMEM_HDR_SIG, POOL_HDR_SIG_LEN);
	vmp->addr = addr;
	vmp->size = size;
	vmp->caller_mapped = 1; /* caller owns the mapping; never unmapped here */

	/* Prepare pool for jemalloc */
	if (je_vmem_pool_create((void *)((uintptr_t)addr + Header_size),
				size - Header_size, 0) == NULL) {
		ERR("pool creation failed");
		return NULL;
	}

	/*
	 * If possible, turn off all permissions on the pool header page.
	 *
	 * The prototype PMFS doesn't allow this when large pages are in
	 * use. It is not considered an error if this fails.
	 */
	util_range_none(addr, sizeof (struct pool_hdr));

	LOG(3, "vmp %p", vmp);
	return vmp;
}
Esempio n. 12
0
/*
 * vmem_check_version -- see if library meets application version requirements
 *
 * Returns NULL when the library satisfies the requested major/minor pair,
 * otherwise a human-readable reason string (the last logged error).
 */
const char *
vmem_check_version(unsigned major_required, unsigned minor_required)
{
	const char *reason = NULL;

	vmem_init();
	LOG(3, "major_required %u minor_required %u",
			major_required, minor_required);

	/* major must match exactly; minor must be no newer than ours */
	if (major_required != VMEM_MAJOR_VERSION) {
		ERR("libvmem major version mismatch (need %u, found %u)",
			major_required, VMEM_MAJOR_VERSION);
		reason = out_get_errormsg();
	} else if (minor_required > VMEM_MINOR_VERSION) {
		ERR("libvmem minor version mismatch (need %u, found %u)",
			minor_required, VMEM_MINOR_VERSION);
		reason = out_get_errormsg();
	}

	return reason;
}
Esempio n. 13
0
File: kernel.c Progetto: ALoay94/os
/*
 * Kernel entry point: set up higher-half page-table pointers, map memory,
 * bring up interrupts/devices, mount the multiboot ramdisk as the root
 * filesystem, and spawn /bin/init.  Never returns.
 *
 * NOTE(review): VMA is the higher-half link offset symbol and the
 * 0x3000/0x4000 offsets are fixed page-table slots after the kernel
 * image -- confirm against the linker script before changing.
 */
void kmain(void)
{
	/* page tables live immediately after the kernel image */
	kernel_end = (uint64_t)heap_addr;
	kernel_heap_ptr = (uint8_t*)((uint64_t)&VMA + kernel_end + heap_size) ;
	UPD = (uint64_t*)((uint64_t)&VMA + kernel_end + 0x3000);
	KPD = (uint64_t*)((uint64_t)&VMA + kernel_end + 0x4000);
	PML4 = (uint64_t*)((uint64_t)&VMA + kernel_end);
	/* last KPD slot -> KPD itself, flags present|writable (|3) */
	*(KPD + 511) = (kernel_end + 0x4000) | 3;
	map_mem(mboot_info);

#if _GFX_
	init_video();
#endif
	
	idt_install();
	isr_install();

	vmem_init();
	
	serial.init();

	/* install the TSS and point its stack at the fixed kernel stack VA */
	extern void load_tss(void);
	load_tss();
	extern uint64_t k_tss64_sp;
	*(uint64_t*)( (uint64_t)&VMA + (uint64_t)&k_tss64_sp ) = 0xFFFFFFFFC0008000;

	pit_set_freq(2000);	// timer tick = 500 us
	pit_install();
	irq_install();

	extern void mouse_init();
	//mouse_init();
	extern void mouse_handler(void*);
	//irq_install_handler(12, mouse_handler);

	/* first multiboot module is the initramfs image */
	multiboot_module_t *mod = (multiboot_module_t*)(uint64_t)mboot_info->mods_addr;

	ramdev_private_t ramdev_private = 
		(ramdev_private_t) 
		{ 
			.ptr  = (void*)((uint64_t)&VMA + mod->mod_start),
			.size = (uint32_t)(mod->mod_end - mod->mod_start),
		};
		
 	inode_t ramdisk_inode =
	 	(inode_t) 
 		{
 			.name 	= "ramdisk",
 			.type	= FS_CHRDEV,
 			.fs		= &devfs,
 			.dev	= &ramdev,
 			.p 		= &ramdev_private, 
 		};
 	
 	vfs_create(&dev_root, "/", &ramdisk_inode);

	/* unpack the initramfs and mount it as the VFS root */
	inode_t *rootfs = initramfs.load(&ramdisk_inode);
	
	vfs_mount_root(rootfs);
	
	irq_install_handler(1, kbd_handler);
	
	devman.init();
	fsman.init();
	
#if _DBG_CON_
	// We should disable debugging by this stage!
	serial.end();
#endif

	process_t *init = load_elf("/bin/init");
		
	extern void spawn_init(process_t*);
	spawn_init(init);
	for(;;);
}
Esempio n. 14
0
File: mmanage.c Progetto: Slaan/bs3
/*
 * Memory-manager daemon (DEBUG_MESSAGES variant): create the page file,
 * the logfile and the shared vmem structure, install one recording
 * handler for SIGUSR1/SIGUSR2/SIGINT, then loop servicing page faults
 * (USR1), page-table dumps (USR2) and interrupts (INT).
 *
 * NOTE(review): the loop never breaks, so `return 0` is unreachable;
 * presumably sighandler() exits on SIGINT -- confirm.
 */
int
main(void)
{
  struct sigaction sigact;
  /* Init pagefile */
  init_pagefile(MMANAGE_PFNAME);
  if(!pagefile) {
    perror("Error creating pagefile\n");
    exit(EXIT_FAILURE);
  }
#ifdef DEBUG_MESSAGES
  else {
    fprintf(stderr, "pagefile successfully created\n");
  }
#endif /* DEBUG_MESSAGES */
  /* Open logfile */
  logfile = fopen(MMANAGE_LOGFNAME, "w");
  if(!logfile) {
    perror("Error creating logfile");
    exit(EXIT_FAILURE);
  }

  /* Create shared memory and init vmem structure */
  vmem_init();
  if(!vmem) {
    perror("Error initialising vmem");
    exit(EXIT_FAILURE);
  }
#ifdef DEBUG_MESSAGES
  else {
    fprintf(stderr, "vmem successfully created\n");
  }
#endif /* DEBUG_MESSAGES */
  /* Setup signal handler */
  /* Handler for USR1 */
  /* one handler records the number of each of the three signals */
  sigact.sa_handler = sighandler;
  sigemptyset(&sigact.sa_mask);
  sigact.sa_flags = 0;
  if(sigaction(SIGUSR1, &sigact, NULL) == -1) {
    perror("Error installing signal handler for USR1");
    exit(EXIT_FAILURE);
  }
#ifdef DEBUG_MESSAGES
  else {
    fprintf(stderr, "USR1 handler successfully installed\n");
  }
#endif /* DEBUG_MESSAGES */
  if(sigaction(SIGUSR2, &sigact, NULL) == -1) {
    perror("Error installing signal handler for USR2");
    exit(EXIT_FAILURE);
  }
#ifdef DEBUG_MESSAGES
  else {
    fprintf(stderr, "USR2 handler successfully installed\n");
  }
#endif /* DEBUG_MESSAGES */
  if(sigaction(SIGINT, &sigact, NULL) == -1) {
    perror("Error installing signal handler for INT");
    exit(EXIT_FAILURE);
  }
#ifdef DEBUG_MESSAGES
  else {
    fprintf(stderr, "INT handler successfully installed\n");
  }
#endif /* DEBUG_MESSAGES */
  /* Signal processing loop */
  /* NOTE(review): signal_number is assumed to be a volatile sig_atomic_t
   * written by sighandler() -- confirm its declaration. */
  while(1) 
  {
    signal_number = 0;
    pause();
    if(signal_number == SIGUSR1) 
    {  /* Page fault */
#ifdef DEBUG_MESSAGES
      fprintf(stderr, "Processed SIGUSR1\n");
#endif /* DEBUG_MESSAGES */
      signal_number = 0;
    }
    else if(signal_number == SIGUSR2) 
    {     /* PT dump */
#ifdef DEBUG_MESSAGES
      fprintf(stderr, "Processed SIGUSR2\n");
#endif /* DEBUG_MESSAGES */
      signal_number = 0;
    }
    else if(signal_number == SIGINT) 
    {
#ifdef DEBUG_MESSAGES
      fprintf(stderr, "Processed SIGINT\n");
#endif /* DEBUG_MESSAGES */
    }
  }
  return 0;
}
Esempio n. 15
0
/*
 * Initialize kernel heap boundaries.
 *
 * heap_start/heap_end   bounds of the general-purpose kernel heap
 * first_avail           first address in the heap not already spoken for
 * core_start/core_end   optional 'core' heap range (empty if equal)
 *
 * Creates heap_arena and the arenas layered on it (core, heaptext,
 * 32-bit, static, hat_memload) and pre-reserves ranges that are already
 * in use so they are never handed out again.
 */
void
kernelheap_init(
	void *heap_start,
	void *heap_end,
	char *first_avail,
	void *core_start,
	void *core_end)
{
	uintptr_t textbase;
	size_t core_size;
	size_t heap_size;
	vmem_t *heaptext_parent;
	size_t	heap_lp_size = 0;
#ifdef __sparc
	size_t kmem64_sz = kmem64_aligned_end - kmem64_base;
#endif	/* __sparc */

	kernelheap = heap_start;
	ekernelheap = heap_end;

#ifdef __sparc
	/* reserve the top quarter of the heap for the large-page heap */
	heap_lp_size = (((uintptr_t)heap_end - (uintptr_t)heap_start) / 4);
	/*
	 * Bias heap_lp start address by kmem64_sz to reduce collisions
	 * in 4M kernel TSB between kmem64 area and heap_lp
	 */
	kmem64_sz = P2ROUNDUP(kmem64_sz, MMU_PAGESIZE256M);
	if (kmem64_sz <= heap_lp_size / 2)
		heap_lp_size -= kmem64_sz;
	heap_lp_base = ekernelheap - heap_lp_size;
	heap_lp_end = heap_lp_base + heap_lp_size;
#endif	/* __sparc */

	/*
	 * If this platform has a 'core' heap area, then the space for
	 * overflow module text should be carved out of the end of that
	 * heap.  Otherwise, it gets carved out of the general purpose
	 * heap.
	 */
	core_size = (uintptr_t)core_end - (uintptr_t)core_start;
	if (core_size > 0) {
		ASSERT(core_size >= HEAPTEXT_SIZE);
		textbase = (uintptr_t)core_end - HEAPTEXT_SIZE;
		core_size -= HEAPTEXT_SIZE;
	}
#ifndef __sparc
	else {
		/* no core heap: shrink the general heap to make room */
		ekernelheap -= HEAPTEXT_SIZE;
		textbase = (uintptr_t)ekernelheap;
	}
#endif

	/* the root arena all other kernel heap arenas import from */
	heap_size = (uintptr_t)ekernelheap - (uintptr_t)kernelheap;
	heap_arena = vmem_init("heap", kernelheap, heap_size, PAGESIZE,
	    segkmem_alloc, segkmem_free);

	if (core_size > 0) {
		heap_core_arena = vmem_create("heap_core", core_start,
		    core_size, PAGESIZE, NULL, NULL, NULL, 0, VM_SLEEP);
		heap_core_base = core_start;
	} else {
		heap_core_arena = heap_arena;
		heap_core_base = kernelheap;
	}

	/*
	 * reserve space for the large page heap. If large pages for kernel
	 * heap is enabled large page heap arean will be created later in the
	 * boot sequence in segkmem_heap_lp_init(). Otherwise the allocated
	 * range will be returned back to the heap_arena.
	 */
	if (heap_lp_size) {
		(void) vmem_xalloc(heap_arena, heap_lp_size, PAGESIZE, 0, 0,
		    heap_lp_base, heap_lp_end,
		    VM_NOSLEEP | VM_BESTFIT | VM_PANIC);
	}

	/*
	 * Remove the already-spoken-for memory range [kernelheap, first_avail).
	 */
	(void) vmem_xalloc(heap_arena, first_avail - kernelheap, PAGESIZE,
	    0, 0, kernelheap, first_avail, VM_NOSLEEP | VM_BESTFIT | VM_PANIC);

#ifdef __sparc
	heap32_arena = vmem_create("heap32", (void *)SYSBASE32,
	    SYSLIMIT32 - SYSBASE32 - HEAPTEXT_SIZE, PAGESIZE, NULL,
	    NULL, NULL, 0, VM_SLEEP);
	/*
	 * Prom claims the physical and virtual resources used by panicbuf
	 * and inter_vec_table. So reserve space for panicbuf, intr_vec_table,
	 * reserved interrupt vector data structures from 32-bit heap.
	 */
	(void) vmem_xalloc(heap32_arena, PANICBUFSIZE, PAGESIZE, 0, 0,
	    panicbuf, panicbuf + PANICBUFSIZE,
	    VM_NOSLEEP | VM_BESTFIT | VM_PANIC);

	(void) vmem_xalloc(heap32_arena, IVSIZE, PAGESIZE, 0, 0,
	    intr_vec_table, (caddr_t)intr_vec_table + IVSIZE,
	    VM_NOSLEEP | VM_BESTFIT | VM_PANIC);

	textbase = SYSLIMIT32 - HEAPTEXT_SIZE;
	heaptext_parent = NULL;
#else	/* __sparc */
	heap32_arena = heap_core_arena;
	heaptext_parent = heap_core_arena;
#endif	/* __sparc */

	heaptext_arena = vmem_create("heaptext", (void *)textbase,
	    HEAPTEXT_SIZE, PAGESIZE, NULL, NULL, heaptext_parent, 0, VM_SLEEP);

	/*
	 * Create a set of arenas for memory with static translations
	 * (e.g. VA -> PA translations cannot change).  Since using
	 * kernel pages by physical address implies it isn't safe to
	 * walk across page boundaries, the static_arena quantum must
	 * be PAGESIZE.  Any kmem caches that require static memory
	 * should source from static_arena, while direct allocations
	 * should only use static_alloc_arena.
	 */
	static_arena = vmem_create("static", NULL, 0, PAGESIZE,
	    segkmem_alloc_permanent, segkmem_free, heap_arena, 0, VM_SLEEP);
	static_alloc_arena = vmem_create("static_alloc", NULL, 0,
	    sizeof (uint64_t), vmem_alloc, vmem_free, static_arena,
	    0, VM_SLEEP);

	/*
	 * Create an arena for translation data (ptes, hmes, or hblks).
	 * We need an arena for this because hat_memload() is essential
	 * to vmem_populate() (see comments in common/os/vmem.c).
	 *
	 * Note: any kmem cache that allocates from hat_memload_arena
	 * must be created as a KMC_NOHASH cache (i.e. no external slab
	 * and bufctl structures to allocate) so that slab creation doesn't
	 * require anything more than a single vmem_alloc().
	 */
	hat_memload_arena = vmem_create("hat_memload", NULL, 0, PAGESIZE,
	    hat_memload_alloc, segkmem_free, heap_arena, 0,
	    VM_SLEEP | VMC_POPULATOR | VMC_DUMPSAFE);
}
Esempio n. 16
0
/*
 * pbsdboot -- load the kernel image named by wkernel_name and transfer
 * control to it, passing argc/argv and the boot information block.
 *
 * On success vmem_exec() does not return; -1 is returned only on
 * failure or user cancellation (CheckCancel), after closing the image
 * and releasing the memory map via vmem_free().
 */
int
pbsdboot(TCHAR *wkernel_name, int argc, char *argv[], struct bootinfo* bi)
{
	int i;
	caddr_t start, end;
	caddr_t argbuf, p;
	struct bootinfo *bibuf;
	int fd = -1;

	stat_printf(TEXT("open %s..."), wkernel_name);
	if (CheckCancel(0) || (fd = open((char*)wkernel_name, O_RDONLY)) < 0) {
		msg_printf(MSG_ERROR, whoami, TEXT("open failed.\n"));
		stat_printf(TEXT("open %s...failed"), wkernel_name);
		goto cancel;
	}

	/* determine the kernel's load address range */
	stat_printf(TEXT("read information from %s..."), wkernel_name);
	/* NOTE(review): format below has no conversion for wkernel_name;
	 * the extra argument is ignored -- likely meant "...%s..." */
	if (CheckCancel(0) || getinfo(fd, &start, &end) < 0) {
		stat_printf(TEXT("read information failed"), wkernel_name);
		goto cancel;
	}

	stat_printf(TEXT("create memory map..."));
	if (CheckCancel(0) || vmem_init(start, end) < 0) {
		stat_printf(TEXT("create memory map...failed"));
		goto cancel;
	}
	//vmem_dump_map();

	stat_printf(TEXT("prepare boot information..."));
	if ((argbuf = vmem_alloc()) == NULL ||
		(bibuf = (struct bootinfo*)vmem_alloc()) == NULL) {
		msg_printf(MSG_ERROR, whoami, TEXT("can't allocate argument page\n"));
		stat_printf(TEXT("prepare boot information...failed"));
		goto cancel;
	}

	/*
	 * Argument page layout: argc char* slots at the front, then the
	 * argument strings packed immediately after.
	 */
	memcpy(bibuf, bi, sizeof(struct bootinfo));
	for (p = &argbuf[sizeof(char*) * argc], i = 0; i < argc; i++) {
		int arglen = strlen(argv[i]) + 1;
		((char**)argbuf)[i] = p;
		memcpy(p, argv[i], arglen);
		p += arglen;
	}

	stat_printf(TEXT("loading..."));
	if (CheckCancel(0) || loadfile(fd, &start) < 0) {
		stat_printf(TEXT("loading...failed"));
		goto cancel;
	}

	/* last chance to cancel */
	if (CheckCancel(-1)) {
		goto cancel;
	}

	stat_printf(TEXT("execute kernel..."));
	vmem_exec(start, argc, (char**)argbuf, bibuf);
	/* reached only if the jump into the kernel failed */
	stat_printf(TEXT("execute kernel...failed"));

cancel:
	if (0 <= fd) {
		close(fd);
	}
	vmem_free();

	return (-1);
}
Esempio n. 17
0
File: vmem.c Progetto: AmesianX/nvml
/*
 * vmem_construct -- load-time initialization for vmem
 *
 * Called automatically by the run-time loader.  vmem_init() is also
 * invoked from each API entry point, so it must tolerate repeat calls.
 */
ATTR_CONSTRUCTOR
void
vmem_construct(void)
{
	vmem_init();
}
Esempio n. 18
0
/*
 * vmem_construct -- one-shot initialization hook; simply forwards to
 * vmem_init().
 */
static void
vmem_construct(void)
{
	vmem_init();
}