Example #1
int main(void)
{
	/* Hardware initialization */
	init();

	proc_new(usb_serial_process, (iptr_t)USB_TO_SERIAL,
			KERN_MINSTACKSIZE * 2, NULL);
	proc_new(usb_serial_process, (iptr_t)SERIAL_TO_USB,
			KERN_MINSTACKSIZE * 2, NULL);

	/* Main process will never be scheduled. */
	sig_wait(SIG_USER0);
	ASSERT(0);
}
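Example #1 spawns the same entry function twice, once per data direction, passing the direction as the iptr_t argument of proc_new(). The body of usb_serial_process is not part of the snippet; the following is only a minimal sketch of how such a process could recover its per-instance argument, assuming a BeRTOS-style proc_currentUserData() accessor and hypothetical usb_to_serial() / serial_to_usb() helpers:

/* Sketch only: proc_currentUserData() is assumed to return the iptr_t
 * passed to proc_new(); the two forwarding helpers are hypothetical. */
static void usb_serial_process(void)
{
	int direction = (int)proc_currentUserData();

	for (;;)
	{
		if (direction == USB_TO_SERIAL)
			usb_to_serial();   /* move pending bytes from USB to the UART */
		else
			serial_to_usb();   /* move pending bytes from the UART to USB */
	}
}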
Example #2
void kernel_footprint(void)
{
	init();

	// generate code for process
	struct Process *p =
		proc_new(proc1_main, 0, sizeof(proc1_stack), proc1_stack);
	proc_setPri(p, 5);
	proc_yield();

	// generate code for msg
	Msg msg;
	msg_initPort(&in_port, event_createSignal(p, SIG_USER1));
	msg_put(&in_port, &msg);
	msg_peek(&in_port);
	Msg *msg_re = msg_get(&in_port);
	msg_reply(msg_re);

	// generate code for signals
	sig_send(p, SIG_USER0);

	// generate code for semaphores
	Semaphore sem;
	sem_init(&sem);
	sem_obtain(&sem);
	sem_release(&sem);

	sig_wait(SIG_USER0);
}
Example #3
static VALUE
rb_proc_s_new(int argc, VALUE *argv, VALUE klass)
{
    VALUE block = proc_new(klass, Qfalse);

    rb_obj_call_init(block, argc, argv);
    return block;
}
Example #4
void hadarp_init(unsigned port, unsigned long baudrate)
{
	ser_init(&ser, port);
	ser_setbaudrate(&ser, baudrate);
	hadarp_cnt = -1;

	hadarp_proc = proc_new(hadarp_process, NULL, KERN_MINSTACKSIZE * 3, NULL);
	ASSERT(hadarp_proc);
}
Example #5
void measures_init(void)
{
	sem_init(&i2c_sem);
	i2c_init(&i2c_bus, I2C_BITBANG0, CONFIG_I2C_FREQ);
	bool ret = mma845x_init(&i2c_bus, 0, MMADYN_4G);
	ASSERT(ret);

	Process *p = proc_new(acc_process, NULL, KERN_MINSTACKSIZE * 4, NULL);
	ASSERT(p);

	aux_init();

	config_register(&measures);
	config_load(&measures);

	/* Start current check process */
	p = proc_new(curr_process, NULL, KERN_MINSTACKSIZE * 4, NULL);
	ASSERT(p);

	uplink_registerCmd("curr_override", cmd_curr_override);
	uplink_registerCmd("curr_reset", cmd_curr_reset);
}
Example #6
void NORETURN context_switch(void)
{
	IRQ_ENABLE;
	timer_init();
	proc_init();

	#if CONFIG_USE_HP_TIMER
		ser_init(&out, CONFIG_CTX_DEBUG_PORT);
		ser_setbaudrate(&out, CONFIG_CTX_DEBUG_BAUDRATE);
	#endif

	#if CONFIG_USE_LED
		LED_INIT();
	#endif

	proc_forbid();
	hp_proc = proc_new(hp_process, NULL, PROC_STACK_SIZE, hp_stack);
	lp_proc = proc_new(lp_process, NULL, PROC_STACK_SIZE, lp_stack);
	main_proc = proc_current();
	proc_setPri(hp_proc, 2);
	proc_setPri(lp_proc, 1);
	proc_permit();

	while (1)
	{
		timer_delay(100);

		sig_send(lp_proc, SIG_USER0);
		sig_wait(SIG_USER0);

		#if CONFIG_USE_HP_TIMER
			kfile_printf(&out.fd,
				"Switch: %lu.%lu usec\n\r",
				hptime_to_us((end - start)),
				hptime_to_us((end - start) * 1000) % 1000);
		#endif
	}
}
Example #7
int main(void)
{
	struct netconn *server;

	/* Hardware initialization */
	init();

	proc_new(monitor_process, NULL, KERN_MINSTACKSIZE * 2, NULL);

	dhcp_start(&netif);
	/*
	 * Here we wait for an ip address, but it's not strictly
	 * necessary. The address is obtained in background and
	 * as long as we don't use network functions, we could go
	 * on with initialization
	 */
	while (!netif.ip_addr.addr)
		timer_delay(200);
	kprintf(">>> dhcp ok: ip = ip = %s (kernel %s)\n",
		ip_ntoa(&netif.ip_addr.addr),
		CONFIG_KERN_PREEMPT ? "preempt" : "coop");

	server = netconn_new(NETCONN_TCP);
	netconn_bind(server, IP_ADDR_ANY, 80);
	netconn_listen(server);

	while (1)
	{
		struct netconn *client;
		struct netbuf *rx_buf_conn;
		char *rx_buf;
		u16_t len;

		client = netconn_accept(server);
		if (!client)
			continue;

		tot_req++;
		rx_buf_conn = netconn_recv(client);
		if (rx_buf_conn)
		{
			netbuf_data(rx_buf_conn, (void **)&rx_buf, &len);
			if (rx_buf)
				netconn_write(client, rx_buf, len, NETCONN_COPY);
			netbuf_delete(rx_buf_conn);
		}
		while (netconn_delete(client) != ERR_OK)
			cpu_relax();
	}
}
Example #8
int main(void)
{
	IRQ_ENABLE;

	kdbg_init();
	LED_INIT();
	timer_init();
	proc_init();

	spi_dma_init(&spi);
	spi_dma_setclock(LCD_SPICLOCK);
	lcd_ili9225_init(&spi.fd);
	LCD_BACKLIGHT_INIT();
	lcd_setBacklight(lcd_brightness);

	gfx_bitmapInit(&lcd_bitmap, raster, LCD_WIDTH, LCD_HEIGHT);
	gfx_setFont(&lcd_bitmap, &font_luBS14);
	lcd_ili9225_blitBitmap(&lcd_bitmap);

	kbd_init();

	hp_proc = proc_new(hp_process, NULL, PROC_STACK_SIZE, hp_stack);
	lp_proc = proc_new(lp_process, NULL, PROC_STACK_SIZE, lp_stack);
	led_proc = proc_new(led_process, NULL, PROC_STACK_SIZE, led_stack);

	proc_setPri(hp_proc, 2);
	proc_setPri(lp_proc, 1);

	lcd_ili9225_blitBitmap24(0, 50, BMP_LOGO_WIDTH, BMP_LOGO_HEIGHT, bmp_logo);
	timer_delay(3000);

	while (1)
	{
		menu_handle(&main_menu);
		cpu_relax();
	}
}
Example #9
void cutoff_init(void)
{
	CUTOFF_INIT();
	config_register(&cutoff);
	config_load(&cutoff);
	uplink_registerCmd("cutoff", cutoff_procedure);
	uplink_registerCmd("cutoff_reset", cmd_cutoff_reset);
	uplink_registerCmd("cutoff_pause", cmd_cutoff_pause);
	uplink_registerCmd("cutoff_resume", cmd_cutoff_resume);

	#if !(ARCH & ARCH_UNITTEST)
		//start process
		LOG_INFO("Starting cutoff process\n");
		Process *p = proc_new(cutoff_process, NULL, KERN_MINSTACKSIZE * 6, NULL);
		ASSERT(p);
	#endif
}
Example #10
int main(void)
{
	/* Hardware initialization */
	init();

	/* Create a new child process */
	proc_new(led_process, NULL, KERN_MINSTACKSIZE * 2, NULL);

	/*
	 * The main process is kept to periodically report the stack
	 * utilization of all the processes (1 probe per second).
	 */
	while (1)
	{
		monitor_report();
		timer_delay(1000);
	}
}
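The led_process body is not shown in Example #10. A minimal sketch, reusing the timer_delay() call seen above and a hypothetical LED_TOGGLE() board macro:

/* Sketch only: LED_TOGGLE() is a hypothetical board-support macro. */
static void led_process(void)
{
	for (;;)
	{
		LED_TOGGLE();     /* flip the LED state */
		timer_delay(500); /* wait 500 ms between toggles */
	}
}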
Example #11
/** create a struct proc for each argv */
struct job *job_new(char ***argvp, int jid, struct redir *redir)
{
	struct job *j;
	struct proc **tail;

	j = umalloc(sizeof *j);
	memset(j, 0, sizeof *j);

	j->cmd    = ustrdup_argvp(argvp);
	j->argvp  = argvp;
	j->redir  = redir;
	j->job_id = jid;
	j->state  = JOB_BEGIN;

	tail = &j->proc;
	while(*argvp){
		*tail = proc_new(*argvp++);
		tail = &(*tail)->next;
	}
	*tail = NULL;
	return j;
}
Example #12
VALUE
rb_block_proc(void)
{
    return proc_new(rb_cProc, Qfalse);
}
Example #13
VALUE
rb_block_lambda(void)
{
    return proc_new(rb_cProc, Qtrue);
}
Example #14
/*===========================================================================*
 *				do_fork					     *
 *===========================================================================*/
PUBLIC int do_fork(message *msg)
{
  int r, proc, s, childproc, fullvm;
  struct vmproc *vmp, *vmc;
  pt_t origpt;
  vir_bytes msgaddr;

  SANITYCHECK(SCL_FUNCTIONS);

  if(vm_isokendpt(msg->VMF_ENDPOINT, &proc) != OK) {
	printf("VM: bogus endpoint VM_FORK %d\n", msg->VMF_ENDPOINT);
  SANITYCHECK(SCL_FUNCTIONS);
	return EINVAL;
  }

  childproc = msg->VMF_SLOTNO;
  if(childproc < 0 || childproc >= NR_PROCS) {
	printf("VM: bogus slotno VM_FORK %d\n", msg->VMF_SLOTNO);
  SANITYCHECK(SCL_FUNCTIONS);
	return EINVAL;
  }

  vmp = &vmproc[proc];		/* parent */
  vmc = &vmproc[childproc];	/* child */
  assert(vmc->vm_slot == childproc);

  if(vmp->vm_flags & VMF_HAS_DMA) {
	printf("VM: %d has DMA memory and may not fork\n", msg->VMF_ENDPOINT);
	return EINVAL;
  }

  fullvm = vmp->vm_flags & VMF_HASPT;

  /* The child is basically a copy of the parent. */
  origpt = vmc->vm_pt;
  *vmc = *vmp;
  vmc->vm_slot = childproc;
  vmc->vm_regions = NULL;
  yielded_init(&vmc->vm_yielded_blocks);
  vmc->vm_endpoint = NONE;	/* In case someone tries to use it. */
  vmc->vm_pt = origpt;
  vmc->vm_flags &= ~VMF_HASPT;

#if VMSTATS
  vmc->vm_bytecopies = 0;
#endif

  if(pt_new(&vmc->vm_pt) != OK) {
	printf("VM: fork: pt_new failed\n");
	return ENOMEM;
  }

  vmc->vm_flags |= VMF_HASPT;

  if(fullvm) {
	SANITYCHECK(SCL_DETAIL);

	if(map_proc_copy(vmc, vmp) != OK) {
		printf("VM: fork: map_proc_copy failed\n");
		pt_free(&vmc->vm_pt);
		return(ENOMEM);
	}

	if(vmp->vm_heap) {
		vmc->vm_heap = map_region_lookup_tag(vmc, VRT_HEAP);
		assert(vmc->vm_heap);
	}

	SANITYCHECK(SCL_DETAIL);
  } else {
	vir_bytes sp;
	struct vir_region *heap, *stack;
	vir_bytes text_bytes, data_bytes, stack_bytes, parent_gap_bytes,
		child_gap_bytes;

	/* Get SP of new process (using parent). */
	if(get_stack_ptr(vmp->vm_endpoint, &sp) != OK) {
		printf("VM: fork: get_stack_ptr failed for %d\n",
			vmp->vm_endpoint);
		return ENOMEM;
	}

	/* Update size of stack segment using current SP. */
	if(adjust(vmp, vmp->vm_arch.vm_seg[D].mem_len, sp) != OK) {
		printf("VM: fork: adjust failed for %d\n",
			vmp->vm_endpoint);
		return ENOMEM;
	}

	/* Copy newly adjust()ed stack segment size to child. */
	vmc->vm_arch.vm_seg[S] = vmp->vm_arch.vm_seg[S];

	text_bytes = CLICK2ABS(vmc->vm_arch.vm_seg[T].mem_len);
	data_bytes = CLICK2ABS(vmc->vm_arch.vm_seg[D].mem_len);
	stack_bytes = CLICK2ABS(vmc->vm_arch.vm_seg[S].mem_len);

	/* how much space after break and before lower end (which is the
	 * logical top) of stack for the parent
	 */
	parent_gap_bytes = CLICK2ABS(vmc->vm_arch.vm_seg[S].mem_vir -
		vmc->vm_arch.vm_seg[D].mem_len);

	/* how much space can the child stack grow downwards, below
	 * the current SP? The rest of the gap is available for the
	 * heap to grow upwards.
	 */
	child_gap_bytes = VM_PAGE_SIZE;

	if((r=proc_new(vmc, VM_PROCSTART,
		text_bytes, data_bytes, stack_bytes, child_gap_bytes, 0, 0,
		CLICK2ABS(vmc->vm_arch.vm_seg[S].mem_vir +
			vmc->vm_arch.vm_seg[S].mem_len), 1)) != OK) {
		printf("VM: fork: proc_new failed\n");
		return r;
	}

	if(!(heap = map_region_lookup_tag(vmc, VRT_HEAP)))
		panic("couldn't lookup heap");
	assert(heap->phys);
	if(!(stack = map_region_lookup_tag(vmc, VRT_STACK)))
		panic("couldn't lookup stack");
	assert(stack->phys);

	/* Now copy the memory regions. */

	if(vmc->vm_arch.vm_seg[T].mem_len > 0) {
		struct vir_region *text;
		if(!(text = map_region_lookup_tag(vmc, VRT_TEXT)))
			panic("couldn't lookup text");
		assert(text->phys);
		if(copy_abs2region(CLICK2ABS(vmp->vm_arch.vm_seg[T].mem_phys),
			text, 0, text_bytes) != OK)
				panic("couldn't copy text");
	}

	if(copy_abs2region(CLICK2ABS(vmp->vm_arch.vm_seg[D].mem_phys),
		heap, 0, data_bytes) != OK)
			panic("couldn't copy heap");

	if(copy_abs2region(CLICK2ABS(vmp->vm_arch.vm_seg[D].mem_phys +
			vmc->vm_arch.vm_seg[D].mem_len) + parent_gap_bytes,
			stack, child_gap_bytes, stack_bytes) != OK)
			panic("couldn't copy stack");
  }

  /* Only inherit these flags. */
  vmc->vm_flags &= (VMF_INUSE|VMF_SEPARATE|VMF_HASPT);

  /* inherit the priv call bitmaps */
  memcpy(&vmc->vm_call_mask, &vmp->vm_call_mask, sizeof(vmc->vm_call_mask));

  /* Tell kernel about the (now successful) FORK. */
  if((r=sys_fork(vmp->vm_endpoint, childproc,
	&vmc->vm_endpoint, vmc->vm_arch.vm_seg,
	PFF_VMINHIBIT, &msgaddr)) != OK) {
        panic("do_fork can't sys_fork: %d", r);
  }

  if(fullvm) {
	vir_bytes vir;
	/* making these messages writable is an optimisation
	 * and its return value needn't be checked.
	 */
	vir = arch_vir2map(vmc, msgaddr);
	handle_memory(vmc, vir, sizeof(message), 1);
	vir = arch_vir2map(vmp, msgaddr);
	handle_memory(vmp, vir, sizeof(message), 1);
  }

  if((r=pt_bind(&vmc->vm_pt, vmc)) != OK)
	panic("fork can't pt_bind: %d", r);

  /* Inform caller of new child endpoint. */
  msg->VMF_CHILD_ENDPOINT = vmc->vm_endpoint;

  SANITYCHECK(SCL_FUNCTIONS);
  return OK;
}
Example #15
static VALUE
rb_proc_s_new(VALUE klass)
{
    return proc_new(klass, Qfalse);
}
Example #16
processor_t
proc_new_from_other(processor_t other_proc)
{
  return proc_new(other_proc->stask.sync_data);
}
Example #17
/*===========================================================================*
 *				vm_init					     *
 *===========================================================================*/
static void vm_init(void)
{
    int s, i;
    int click, clicksforgotten = 0;
    struct memory mem_chunks[NR_MEMS];
    struct boot_image image[NR_BOOT_PROCS];
    struct boot_image *ip;
    phys_bytes limit = 0;

    /* The initrd is put right after boot image */
    if ((s = sys_getbootparam(&bootparam)) != 0) {
        panic("VM","Couldn't get boot parameters!",s);
    }

    /* get what setup found out */
    memcpy(mem_chunks, bootparam.nucleos_kludge.mem, sizeof(bootparam.nucleos_kludge.mem));

    /* Get chunks of available memory. */
    get_mem_chunks(mem_chunks);

    /* Initialize VM's process table. Request a copy of the system
     * image table that is defined at the kernel level to see which
     * slots to fill in.
     */
    if ((s=sys_getimage(image)) != 0)
        vm_panic("couldn't get image table: %d\n", s);

    /* Set table to 0. This invalidates all slots (clear VMF_INUSE). */
    memset(vmproc, 0, sizeof(vmproc));

    for(i = 0; i < ELEMENTS(vmproc); i++) {
        vmproc[i].vm_slot = i;
    }

    /* Walk through boot-time system processes that are alive
     * now and make valid slot entries for them.
     */
    for (ip = &image[0]; ip < &image[NR_BOOT_PROCS]; ip++) {
        phys_bytes proclimit;
        struct vmproc *vmp;

        if(ip->proc_nr >= NR_PROCS) {
            vm_panic("proc", ip->proc_nr);
        }
        if(ip->proc_nr < 0 && ip->proc_nr != SYSTEM) continue;

#define GETVMP(v, nr)						\
		if(nr >= 0) {					\
			vmp = &vmproc[ip->proc_nr];		\
		} else if(nr == SYSTEM) {			\
			vmp = &vmproc[VMP_SYSTEM];		\
		} else {					\
			vm_panic("init: crazy proc_nr", nr);	\
		}

        /* Initialize normal process table slot or special SYSTEM
         * table slot. Kernel memory is already reserved.
         */
        GETVMP(vmp, ip->proc_nr);

        /* reset fields as if exited */
        clear_proc(vmp);

        /* Get memory map for this process from the kernel. */
        if ((s=get_mem_map(ip->proc_nr, vmp->vm_arch.vm_seg)) != 0)
            vm_panic("couldn't get process mem_map",s);

        /* Remove this memory from the free list. */
        reserve_proc_mem(mem_chunks, vmp->vm_arch.vm_seg);

        /* Set memory limit. */
        proclimit = CLICK2ABS(vmp->vm_arch.vm_seg[S].mem_phys +
                              vmp->vm_arch.vm_seg[S].mem_len) - 1;

        if(proclimit > limit)
            limit = proclimit;

        vmp->vm_flags = VMF_INUSE;
        vmp->vm_endpoint = ip->endpoint;
        vmp->vm_stacktop =
            CLICK2ABS(vmp->vm_arch.vm_seg[S].mem_vir +
                      vmp->vm_arch.vm_seg[S].mem_len);
    }

#ifndef CONFIG_BUILTIN_INITRD
    /* Remove initrd memory from the free list. We must do it right after we
       have reserved memory for boot image otherwise it may happen that initrd
       will be overwritten by other process (in arch_init_vm).
     */
    if ((s = reserve_initrd_mem(mem_chunks, bootparam.hdr.ramdisk_image,
                                bootparam.hdr.ramdisk_size)) < 0) {
        panic("VM", "Couldn't reserve memory for initial ramdisk!", s);
    }
#endif
    /* Architecture-dependent initialization. */
    pt_init(limit);

    /* Initialize tables to all physical memory. */
    mem_init(mem_chunks);
    meminit_done = 1;

    /* Give these processes their own page table. */
    for (ip = &image[0]; ip < &image[NR_BOOT_PROCS]; ip++) {
        int s;
        struct vmproc *vmp;
        vir_bytes old_stacktop, old_stack;

        if(ip->proc_nr < 0) continue;

        GETVMP(vmp, ip->proc_nr);

        if(!(ip->flags & PROC_FULLVM))
            continue;

        old_stack =
            vmp->vm_arch.vm_seg[S].mem_vir +
            vmp->vm_arch.vm_seg[S].mem_len -
            vmp->vm_arch.vm_seg[D].mem_len;

        if(pt_new(&vmp->vm_pt) != 0)
            vm_panic("vm_init: no new pagetable", NO_NUM);
#define BASICSTACK VM_PAGE_SIZE
        old_stacktop = CLICK2ABS(vmp->vm_arch.vm_seg[S].mem_vir +
                                 vmp->vm_arch.vm_seg[S].mem_len);
        if(sys_vmctl(vmp->vm_endpoint, VMCTL_INCSP,
                     VM_STACKTOP - old_stacktop) != 0) {
            vm_panic("VM: vmctl for new stack failed", NO_NUM);
        }

        FREE_MEM(vmp->vm_arch.vm_seg[D].mem_phys +
                 vmp->vm_arch.vm_seg[D].mem_len,
                 old_stack);

        if(proc_new(vmp,
                    VM_PROCSTART,
                    CLICK2ABS(vmp->vm_arch.vm_seg[T].mem_len),
                    CLICK2ABS(vmp->vm_arch.vm_seg[D].mem_len),
                    BASICSTACK,
                    CLICK2ABS(vmp->vm_arch.vm_seg[S].mem_vir +
                              vmp->vm_arch.vm_seg[S].mem_len -
                              vmp->vm_arch.vm_seg[D].mem_len) - BASICSTACK,
                    CLICK2ABS(vmp->vm_arch.vm_seg[T].mem_phys),
                    CLICK2ABS(vmp->vm_arch.vm_seg[D].mem_phys),
                    VM_STACKTOP) != 0) {
            vm_panic("failed proc_new for boot process", NO_NUM);
        }
    }

    /* Set up table of calls. */
#define CALLMAP(code, func, thecaller) { int i;			      \
	if((i=CALLNUMBER(code)) < 0) { vm_panic(#code " invalid", (code)); } \
	if(i >= VM_NCALLS) { vm_panic(#code " invalid", (code)); } \
	vm_calls[i].vmc_func = (func); 				      \
	vm_calls[i].vmc_name = #code; 				      \
	if(((thecaller) < MINEPM || (thecaller) > MAXEPM) 		\
		&& (thecaller) != ANYEPM				\
		&& (thecaller) != NEEDACL ) {				\
		vm_panic(#thecaller " invalid", (code));  		\
	}								\
	vm_calls[i].vmc_callers |= EPM(thecaller);		      \
}

    /* Set call table to 0. This invalidates all calls (clear
     * vmc_func).
     */
    memset(vm_calls, 0, sizeof(vm_calls));

    /* Requests from PM (restricted to be from PM only). */
    CALLMAP(VM_EXIT, do_exit, PM_PROC_NR);
    CALLMAP(VM_FORK, do_fork, PM_PROC_NR);
    CALLMAP(VM_BRK, do_brk, PM_PROC_NR);
    CALLMAP(VM_EXEC_NEWMEM, do_exec_newmem, PM_PROC_NR);
    CALLMAP(VM_PUSH_SIG, do_push_sig, PM_PROC_NR);
    CALLMAP(VM_WILLEXIT, do_willexit, PM_PROC_NR);
    CALLMAP(VM_ADDDMA, do_adddma, PM_PROC_NR);
    CALLMAP(VM_DELDMA, do_deldma, PM_PROC_NR);
    CALLMAP(VM_GETDMA, do_getdma, PM_PROC_NR);
    CALLMAP(VM_ALLOCMEM, do_allocmem, PM_PROC_NR);
    CALLMAP(VM_NOTIFY_SIG, do_notify_sig, PM_PROC_NR);

    /* Requests from RS */
    CALLMAP(VM_RS_SET_PRIV, do_rs_set_priv, RS_PROC_NR);

    /* Requests from userland (source unrestricted). */
    CALLMAP(VM_MMAP, do_mmap, ANYEPM);
    CALLMAP(VM_MUNMAP, do_munmap, ANYEPM);
    CALLMAP(VM_MUNMAP_TEXT, do_munmap, ANYEPM);
    CALLMAP(VM_MAP_PHYS, do_map_phys, ANYEPM); /* Does its own checking. */
    CALLMAP(VM_UNMAP_PHYS, do_unmap_phys, ANYEPM);

    CALLMAP(NNR_VM_MMAP, scall_mmap, ANYEPM);
    CALLMAP(NNR_VM_MUNMAP, scall_munmap, ANYEPM);
    CALLMAP(NNR_VM_MUNMAP_TEXT, scall_munmap, ANYEPM);

    /* Requests from userland (anyone can call but need an ACL bit). */
    CALLMAP(VM_REMAP, do_remap, NEEDACL);
    CALLMAP(VM_GETPHYS, do_get_phys, NEEDACL);
    CALLMAP(VM_SHM_UNMAP, do_shared_unmap, NEEDACL);
    CALLMAP(VM_GETREF, do_get_refcount, NEEDACL);
    CALLMAP(VM_CTL, do_ctl, NEEDACL);
    CALLMAP(VM_QUERY_EXIT, do_query_exit, NEEDACL);

    /* Sanity checks */
    if(find_kernel_top() >= VM_PROCSTART)
        vm_panic("kernel loaded too high", NO_NUM);

    /* Initialize the structures for queryexit */
    init_query_exit();

    /* Unmap our own low pages. */
    unmap_ok = 1;
    unmap_page_zero();
}
Example #18
int main(int argc, char** argv)
{
  struct sigaction	action;
  int			ch, i, count = 0;
  int			ret, passed = 0;
  addr_t		addr;

  memset(&tracee, 0, sizeof(tracee_t));

  /*
  ** Catch sigint & sigquit
  */
  action.sa_handler = quit_ftrace;
  action.sa_flags = SA_RESTART;
  if (sigaction(SIGQUIT, &action, NULL))
    errexit("ftrace: sigaction(SIGQUIT), %s", ftrace_strerror(errno));
  if (sigaction(SIGINT, &action, NULL))
    errexit("ftrace: sigaction(SIGKILL), %s", ftrace_strerror(errno));

  /*
  ** Count Args Before -p, -e or -c Option
  */
  for (i = 0; i < argc; i++)
    {
      if (!strcmp(argv[i], "-p") ||
	  !strcmp(argv[i], "-e") ||
	  !strcmp(argv[i], "-c")
	  )
	{
	  if (i != argc - 1)
	    argc = i + 2;
	  break;
	}
    }

  if (atexit(clean_ftrace) == -1)
    errexit("ftrace: atexit:, %s", ftrace_strerror(errno));

  for (count = 0; (ch = getopt(argc, argv,"hv:b:p:e:c:")) != -1; count++)
    {
      switch ((char) ch)
	{
	case 'b':
	  /*
	  ** Add Breakpoint
	  */
	  tracee.brkps_toset = list_add(tracee.brkps_toset, optarg);
	  if (tracee.brkps_toset == NULL)
	    errexit("ftrace: list_add:, %s", ftrace_strerror(errno));
	  break;
	case 'p':
	  passed = 1;
	  /*
	  ** Attach To Pid
	  */
	  tracee.proc = proc_new();
	  if (tracee.proc == NULL)
	    errexit("ftrace: proc_new:, %s", ftrace_strerror(errno));
	  if (proc_attach(tracee.proc, atoi(optarg)) == -1)
	    errexit("ftrace: proc_attach:, %s", ftrace_strerror(errno));
	  if (*(argv + i + 2))
	    tracee.proc->path = *(argv + i + 2);
	  else
	    tracee.proc->path = proc_get_bin_path(tracee.proc);
	  break;
	case 'e':
	  passed = 1;
	  /*
	  ** Exec Process
	  */
	  tracee.proc = proc_new();
	  if (tracee.proc == NULL)
	    errexit("ftrace: proc_new:, %s", ftrace_strerror(errno));
	  if (proc_create(tracee.proc, optarg, argv + i + 1) == -1)
	    errexit("ftrace: proc_create:, %s", ftrace_strerror(errno));
	  break;
	case 'c':
	  passed = 1;
	  /*
	  ** Backtrace Core
	  */
	  core_trace(optarg, *(argv + i + 2));
	  return (0);
	  break;
	case 'v':
	  verbose = atoi(optarg);
	  if (verbose > 0 && verbose < 5)
	    break;
	case 'h':
	default:
	  errexit(USAGE);
	  break;
	}
      if (passed == 1)
	break;
    }

  if (!passed)
    errexit(USAGE);

  /*
  ** Tracee Ready !
  */
  ret = init_loop(&tracee, &addr);
  if (tracer_quit)
    return (0);
  switch (ret)
    {
    case TRACEE_OK:
      if (tracing_loop(&tracee, addr) == -1)
	errexit("ftrace: tracing_loop:, %s", ftrace_strerror(errno));
      break;
    case TRACEE_EXIT:
      return (0);
    default:
      errexit("ftrace: init_loop:, %s", ftrace_strerror(errno));
    }

  return (0);
}
Example #19
/*===========================================================================*
 *				sef_cb_init_fresh			     *
 *===========================================================================*/
PRIVATE int sef_cb_init_fresh(int type, sef_init_info_t *info)
{
/* Initialize the vm server. */
	int s, i;
	int click, clicksforgotten = 0;
	struct memory mem_chunks[NR_MEMS];
	struct boot_image image[NR_BOOT_PROCS];
	struct boot_image *ip;
	struct rprocpub rprocpub[NR_BOOT_PROCS];
	phys_bytes limit = 0;

#if SANITYCHECKS
	incheck = nocheck = 0;
	FIXME("VM SANITYCHECKS are on");
#endif

	vm_paged = 1;
	env_parse("vm_paged", "d", 0, &vm_paged, 0, 1);
#if SANITYCHECKS
	env_parse("vm_sanitychecklevel", "d", 0, &vm_sanitychecklevel, 0, SCL_MAX);
#endif

	/* Get chunks of available memory. */
	get_mem_chunks(mem_chunks);

	/* Initialize VM's process table. Request a copy of the system
	 * image table that is defined at the kernel level to see which
	 * slots to fill in.
	 */
	if (OK != (s=sys_getimage(image)))
		vm_panic("couldn't get image table: %d\n", s);

	/* Set table to 0. This invalidates all slots (clear VMF_INUSE). */
	memset(vmproc, 0, sizeof(vmproc));

	for(i = 0; i < ELEMENTS(vmproc); i++) {
		vmproc[i].vm_slot = i;
	}

	/* Walk through boot-time system processes that are alive
	 * now and make valid slot entries for them.
	 */
	for (ip = &image[0]; ip < &image[NR_BOOT_PROCS]; ip++) {
		phys_bytes proclimit;
		struct vmproc *vmp;

		if(ip->proc_nr >= _NR_PROCS) { vm_panic("proc", ip->proc_nr); }
		if(ip->proc_nr < 0 && ip->proc_nr != SYSTEM) continue;

#define GETVMP(v, nr)						\
		if(nr >= 0) {					\
			vmp = &vmproc[ip->proc_nr];		\
		} else if(nr == SYSTEM) {			\
			vmp = &vmproc[VMP_SYSTEM];		\
		} else {					\
			vm_panic("init: crazy proc_nr", nr);	\
		}

		/* Initialize normal process table slot or special SYSTEM
		 * table slot. Kernel memory is already reserved.
		 */
		GETVMP(vmp, ip->proc_nr);

		/* reset fields as if exited */
		clear_proc(vmp);

		/* Get memory map for this process from the kernel. */
		if ((s=get_mem_map(ip->proc_nr, vmp->vm_arch.vm_seg)) != OK)
			vm_panic("couldn't get process mem_map",s);

		/* Remove this memory from the free list. */
		reserve_proc_mem(mem_chunks, vmp->vm_arch.vm_seg);

		/* Set memory limit. */
		proclimit = CLICK2ABS(vmp->vm_arch.vm_seg[S].mem_phys +
			vmp->vm_arch.vm_seg[S].mem_len) - 1;

		if(proclimit > limit)
			limit = proclimit;

		vmp->vm_flags = VMF_INUSE;
		vmp->vm_endpoint = ip->endpoint;
		vmp->vm_stacktop =
			CLICK2ABS(vmp->vm_arch.vm_seg[S].mem_vir +
				vmp->vm_arch.vm_seg[S].mem_len);

		if (vmp->vm_arch.vm_seg[T].mem_len != 0)
			vmp->vm_flags |= VMF_SEPARATE;
	}

	/* Architecture-dependent initialization. */
	pt_init(limit);

	/* Initialize tables to all physical memory. */
	mem_init(mem_chunks);
	meminit_done = 1;

	/* Give these processes their own page table. */
	for (ip = &image[0]; ip < &image[NR_BOOT_PROCS]; ip++) {
		int s;
		struct vmproc *vmp;
		vir_bytes old_stacktop, old_stack;

		if(ip->proc_nr < 0) continue;

		GETVMP(vmp, ip->proc_nr);

		if(!(ip->flags & PROC_FULLVM))
			continue;

		old_stack = 
			vmp->vm_arch.vm_seg[S].mem_vir +
			vmp->vm_arch.vm_seg[S].mem_len - 
			vmp->vm_arch.vm_seg[D].mem_len;

		if(pt_new(&vmp->vm_pt) != OK)
			vm_panic("VM: no new pagetable", NO_NUM);
#define BASICSTACK VM_PAGE_SIZE
		old_stacktop = CLICK2ABS(vmp->vm_arch.vm_seg[S].mem_vir +
				vmp->vm_arch.vm_seg[S].mem_len);
		if(sys_vmctl(vmp->vm_endpoint, VMCTL_INCSP,
			VM_STACKTOP - old_stacktop) != OK) {
			vm_panic("VM: vmctl for new stack failed", NO_NUM);
		}

		FREE_MEM(vmp->vm_arch.vm_seg[D].mem_phys +
			vmp->vm_arch.vm_seg[D].mem_len,
			old_stack);

		if(proc_new(vmp,
			VM_PROCSTART,
			CLICK2ABS(vmp->vm_arch.vm_seg[T].mem_len),
			CLICK2ABS(vmp->vm_arch.vm_seg[D].mem_len),
			BASICSTACK,
			CLICK2ABS(vmp->vm_arch.vm_seg[S].mem_vir +
				vmp->vm_arch.vm_seg[S].mem_len -
				vmp->vm_arch.vm_seg[D].mem_len) - BASICSTACK,
			CLICK2ABS(vmp->vm_arch.vm_seg[T].mem_phys),
			CLICK2ABS(vmp->vm_arch.vm_seg[D].mem_phys),
				VM_STACKTOP) != OK) {
			vm_panic("failed proc_new for boot process", NO_NUM);
		}
	}

	/* Set up table of calls. */
#define CALLMAP(code, func) { int i;			      \
	if((i=CALLNUMBER(code)) < 0) { vm_panic(#code " invalid", (code)); } \
	if(i >= NR_VM_CALLS) { vm_panic(#code " invalid", (code)); } \
	vm_calls[i].vmc_func = (func); 				      \
	vm_calls[i].vmc_name = #code; 				      \
}

	/* Set call table to 0. This invalidates all calls (clear
	 * vmc_func).
	 */
	memset(vm_calls, 0, sizeof(vm_calls));

	/* Basic VM calls. */
	CALLMAP(VM_MMAP, do_mmap);
	CALLMAP(VM_MUNMAP, do_munmap);
	CALLMAP(VM_MUNMAP_TEXT, do_munmap);
	CALLMAP(VM_MAP_PHYS, do_map_phys);
	CALLMAP(VM_UNMAP_PHYS, do_unmap_phys);

	/* Calls from PM. */
	CALLMAP(VM_EXIT, do_exit);
	CALLMAP(VM_FORK, do_fork);
	CALLMAP(VM_BRK, do_brk);
	CALLMAP(VM_EXEC_NEWMEM, do_exec_newmem);
	CALLMAP(VM_PUSH_SIG, do_push_sig);
	CALLMAP(VM_WILLEXIT, do_willexit);
	CALLMAP(VM_ADDDMA, do_adddma);
	CALLMAP(VM_DELDMA, do_deldma);
	CALLMAP(VM_GETDMA, do_getdma);
	CALLMAP(VM_NOTIFY_SIG, do_notify_sig);

	/* Calls from RS */
	CALLMAP(VM_RS_SET_PRIV, do_rs_set_priv);

	/* Generic calls. */
	CALLMAP(VM_REMAP, do_remap);
	CALLMAP(VM_GETPHYS, do_get_phys);
	CALLMAP(VM_SHM_UNMAP, do_shared_unmap);
	CALLMAP(VM_GETREF, do_get_refcount);
	CALLMAP(VM_INFO, do_info);
	CALLMAP(VM_QUERY_EXIT, do_query_exit);

	/* Sanity checks */
	if(find_kernel_top() >= VM_PROCSTART)
		vm_panic("kernel loaded too high", NO_NUM);

	/* Initialize the structures for queryexit */
	init_query_exit();

	/* Unmap our own low pages. */
	unmap_ok = 1;
	_minix_unmapzero();

	/* Map all the services in the boot image. */
	if((s = sys_safecopyfrom(RS_PROC_NR, info->rproctab_gid, 0,
		(vir_bytes) rprocpub, sizeof(rprocpub), S)) != OK) {
		panic("VM", "sys_safecopyfrom failed", s);
	}
	for(i=0;i < NR_BOOT_PROCS;i++) {
		if(rprocpub[i].in_use) {
			if((s = map_service(&rprocpub[i])) != OK) {
				vm_panic("unable to map service", s);
			}
		}
	}

	return(OK);
}
Example #20
int main(int argc, char **argv) {
	/* SIMULATION VARIABLES */
	int n = 0;      // processes
	int cs_t = 13;  // context switch time (in ms)
	FILE *inf;      // input file (processes.txt)
	FILE *outf;   // output file (simout.txt)

	// recording variables
	float a_cpu_t = 0.0;  // average cpu burst time
	float a_turn_t = 0.0; // average turnaround time
	float a_wait_t = 0.0; // average wait time
	int total_cs = 0;     // total context switches
	int total_burst = 0;  // total cpu bursts
	int total_mem = 256;  // total virtual memory
	int total_t = 0;      // total time
	int total_d = 0;      // total defrag time
	int t_slice = 80;     // round robin time slice
	int t_memmove = 10;   // defragmentation memmove time

	if(argc != 1) {
		printf("There are no arguments associated with this simulation. Input is read from processes.txt.\n");
		return 1;
	}

	// priority queues and process
	priority_queue srtf = pq_new(n), srtn = pq_new(n), srtb = pq_new(n), rrf = pq_new(n), rrn = pq_new(n), rrb = pq_new(n);
	process *p1, *p2, *p3, *p4, *p5, *p6;

	/* read file */
	if((inf = fopen("processes.txt", "r")) == NULL) {
		perror("open() failed");
		return EXIT_FAILURE;
	}

	if((outf = fopen("simout.txt", "w")) == NULL) {
		perror("fopen() failed");
		return EXIT_FAILURE;
	}

	// get current line
	char *line = NULL;
	size_t nbytes = 0;

	while(getline(&line, &nbytes, inf) != -1) {
		// trim the string first
		char *newline;
		newline = trim(line);

		if(*newline != 0) {
			// get values
			char pn;
			int at, bt, bn, it, mem;
			sscanf(newline, "%c|%i|%i|%i|%i|%i", &pn, &at, &bt, &bn, &it, &mem);

			// allocate and init
			p1 = malloc(sizeof(process));
			p2 = malloc(sizeof(process));
			p3 = malloc(sizeof(process));
			p4 = malloc(sizeof(process));
			p5 = malloc(sizeof(process));
			p6 = malloc(sizeof(process));
			*p1 = proc_new(pn, at, bt, bn, it, 0, mem);
			*p2 = *p1;
			*p3 = *p1;
			*p4 = *p1;
			*p5 = *p1;
			*p6 = *p1;

			// calculate burst statistics
			a_cpu_t += bt * bn;
			total_burst += bn;

			// push to queues
			++n;
			pq_push(srtf, p1, at);
			pq_push(srtn, p2, at);
			pq_push(srtb, p3, at);
			pq_push(rrf, p4, at);
			pq_push(rrn, p5, at);
			pq_push(rrb, p6, at);
		}
	}

	// free memory
	free(line);

	// create virtual memory
	v_memory vm_ff = vm_new(total_mem, 'f');
	v_memory vm_nf = vm_new(total_mem, 'n');
	v_memory vm_bf = vm_new(total_mem, 'b');

	/* ===begin simulations=== */
	a_cpu_t /= total_burst;
	s_srt(srtf, vm_ff, n, cs_t, &a_wait_t, &total_cs, &total_t, &total_d, t_memmove);
	simout("SRT", "First-Fit", outf, &a_cpu_t, &a_turn_t, &a_wait_t, &total_cs, total_burst, total_t, total_d);
	printf("\n\n");
	s_srt(srtn, vm_nf, n, cs_t, &a_wait_t, &total_cs, &total_t, &total_d, t_memmove);
	simout("SRT", "First-Fit", outf, &a_cpu_t, &a_turn_t, &a_wait_t, &total_cs, total_burst, total_t, total_d);
	printf("\n\n");
	s_srt(srtb, vm_bf, n, cs_t, &a_wait_t, &total_cs, &total_t, &total_d, t_memmove);
	simout("SRT", "First-Fit", outf, &a_cpu_t, &a_turn_t, &a_wait_t, &total_cs, total_burst, total_t, total_d);
	printf("\n\n");
	s_rr(rrf, vm_ff, n, cs_t, &a_wait_t, &total_cs, &total_t, &total_d, t_memmove, t_slice);
	simout("RR", "First-Fit", outf, &a_cpu_t, &a_turn_t, &a_wait_t, &total_cs, total_burst, total_t, total_d);
	printf("\n\n");
	s_rr(rrn, vm_nf, n, cs_t, &a_wait_t, &total_cs, &total_t, &total_d, t_memmove, t_slice);
	simout("RR", "First-Fit", outf, &a_cpu_t, &a_turn_t, &a_wait_t, &total_cs, total_burst, total_t, total_d);
	printf("\n\n");
	s_rr(rrb, vm_bf, n, cs_t, &a_wait_t, &total_cs, &total_t, &total_d, t_memmove, t_slice);
	simout("RR", "First-Fit", outf, &a_cpu_t, &a_turn_t, &a_wait_t, &total_cs, total_burst, total_t, total_d);

	// close streams
	fclose(inf);
	fclose(outf);

	// free memory and exit gracefully
	vm_free(vm_ff);
	vm_free(vm_nf);
	vm_free(vm_bf);
	pq_free(srtf);
	pq_free(srtn);
	pq_free(srtb);
	pq_free(rrf);
	pq_free(rrn);
	pq_free(rrb);
	free(srtf);
	free(srtn);
	free(srtb);
	free(rrf);
	free(rrn);
	free(rrb);
	return EXIT_SUCCESS;
}