Example #1: ipc_deliver()
uint32_t ipc_deliver(void *data)
{
	tcb_t *thr = NULL, *from_thr = NULL, *to_thr = NULL;
	l4_thread_t receiver;
	int i;

	for (i = 1; i < thread_count; ++i) {
		thr = thread_map[i];
		switch (thr->state) {
		case T_RECV_BLOCKED:
			if (thr->ipc_from != L4_NILTHREAD &&
				thr->ipc_from != L4_ANYTHREAD &&
				thr->ipc_from != TID_TO_GLOBALID(THREAD_INTERRUPT)) {
				from_thr = thread_by_globalid(thr->ipc_from);
				if (from_thr->state == T_SEND_BLOCKED)
					do_ipc(from_thr, thr);
			}
			break;
		case T_SEND_BLOCKED:
			receiver = thr->utcb->intended_receiver;
			if (receiver != L4_NILTHREAD && receiver != L4_ANYTHREAD) {
				to_thr = thread_by_globalid(receiver);
				if (to_thr->state == T_RECV_BLOCKED)
					do_ipc(thr, to_thr);
			}
			break;
		default:
			break;
		}
	}

	return 4096;
}
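
A note on the examples from this microkernel (Examples 1, 2, 5, 6 and 11): the do_ipc() routine they all converge on is not part of this listing. The usual L4-style rendezvous copies the sender's message registers into the receiver's UTCB and makes both threads runnable again; the sketch below illustrates that idea only. IPC_MR_COUNT and the mr/sender field names are assumptions for illustration, not the real kernel's definitions.

/* Minimal sketch of an L4-style rendezvous; not the actual do_ipc().
 * IPC_MR_COUNT, utcb->mr and utcb->sender are assumed names. */
static void do_ipc(tcb_t *from, tcb_t *to)
{
	int i;

	/* Copy the untyped message registers from sender to receiver */
	for (i = 0; i < IPC_MR_COUNT; ++i)
		to->utcb->mr[i] = from->utcb->mr[i];

	/* Let the receiver know who the message came from */
	to->utcb->sender = from->t_globalid;
	to->ipc_from = L4_NILTHREAD;

	/* Both sides have completed their IPC phase */
	from->state = T_RUNNABLE;
	to->state = T_RUNNABLE;
}
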
Example #2: sys_ipc()
void sys_ipc(uint32_t *param1)
{
	/* TODO: Checking of recv-mask */
	tcb_t *to_thr = NULL;
	l4_thread_t to_tid = param1[REG_R0], from_tid = param1[REG_R1];

	if (to_tid != L4_NILTHREAD) {
		to_thr = thread_by_globalid(to_tid);

		if (to_tid == TID_TO_GLOBALID(THREAD_LOG)) {
			user_log(caller);
			caller->state = T_RUNNABLE;
		} else if ((to_thr && to_thr->state == T_RECV_BLOCKED)
		           || to_tid == caller->t_globalid) {
			/* To a thread that is waiting for us, or a send to myself */
			do_ipc(caller, to_thr);
		} else if (to_thr && to_thr->state == T_INACTIVE &&
		           GLOBALID_TO_TID(to_thr->utcb->t_pager) == GLOBALID_TO_TID(caller->t_globalid)) {
			if (ipc_read_mr(caller, 0) == 0x00000003) {
				/* thread start protocol */

				memptr_t sp = ipc_read_mr(caller, 2);
				size_t stack_size = ipc_read_mr(caller, 3);

				dbg_printf(DL_IPC, "IPC: %t thread start\n", to_tid);

				to_thr->stack_base = sp - stack_size;
				to_thr->stack_size = stack_size;

				thread_init_ctx((void *) sp, (void *) ipc_read_mr(caller, 1), to_thr);
				caller->state = T_RUNNABLE;

				/* Start thread */
				to_thr->state = T_RUNNABLE;
			} else {
				do_ipc(caller, to_thr);
				to_thr->state = T_INACTIVE;
			}
		} else  {
			/* No waiting, block myself */
			caller->state = T_SEND_BLOCKED;
			caller->utcb->intended_receiver = to_tid;

			dbg_printf(DL_IPC, "IPC: %t sending\n", caller->t_globalid);

			return;
		}
	}

	if (from_tid != L4_NILTHREAD) {
		/* Only receive phases, simply lock myself */
		caller->state = T_RECV_BLOCKED;
		caller->ipc_from = from_tid;

		dbg_printf(DL_IPC, "IPC: %t receiving\n", caller->t_globalid);
	}
}
Example #3: arch_do_syscall()
PUBLIC void arch_do_syscall(struct proc *proc)
{
  /* do_ipc assumes that it's running because of the current process */
  assert(proc == get_cpulocal_var(proc_ptr));
  /* Make the system call, for real this time. */
  proc->p_reg.retreg =
	  do_ipc(proc->p_reg.cx, proc->p_reg.retreg, proc->p_reg.bx);
}
Example #4: arch_do_syscall()
void arch_do_syscall(struct proc *proc)
{
  /* do_ipc assumes that it's running because of the current process */
  assert(proc == get_cpulocal_var(proc_ptr));
  /* Make the system call, for real this time. */
  assert(proc->p_misc_flags & MF_SC_DEFER);
  proc->p_reg.retreg =
	  do_ipc(proc->p_defer.r1, proc->p_defer.r2, proc->p_defer.r3);
}
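
In these two MINIX-style wrappers (Examples 3 and 4), do_ipc() receives three saved user registers and its result is written back into the caller's return register. A hedged guess at the shape of such a routine is sketched below; reg_t and the meaning attached to each argument are inferred from the call sites above, not taken from the actual MINIX declaration.

/* Assumed shape only: argument meanings are inferred from the callers. */
typedef unsigned long reg_t;

reg_t do_ipc(reg_t r1, reg_t r2, reg_t r3)
{
  int call_nr   = (int) r1;  /* e.g. SEND, RECEIVE, SENDREC, NOTIFY */
  int src_dst_e = (int) r2;  /* endpoint of the peer process */
  /* r3 would be the user-space pointer to the message buffer */

  (void) call_nr; (void) src_dst_e; (void) r3;  /* placeholder body */

  /* ... validate the caller, then dispatch to the send/receive paths ... */
  return 0;                  /* status that ends up in p_reg.retreg */
}
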
Example #5: ipc_deliver()
uint32_t ipc_deliver(void* data) {
	tcb_t* thr = NULL, *from_thr = NULL;
	int i;

	for(i = 1; i < thread_count; ++i) {
		thr = thread_map[i];

		if(thr->state == T_RECV_BLOCKED && thr->ipc_from != L4_NILTHREAD) {
			from_thr = thread_by_globalid(thr->ipc_from);

			do_ipc(from_thr, thr);
		}
	}

	return 4096;
}
Example #6: sys_ipc()
void sys_ipc(uint32_t* param1) {
	/*TODO: Checking of recv-mask*/
	tcb_t *to_thr = NULL;
	l4_thread_t to_tid = param1[REG_R0], from_tid = param1[REG_R1];

	if(to_tid != L4_NILTHREAD) {
		to_thr =  thread_by_globalid(to_tid);

		if((to_thr && to_thr->state == T_RECV_BLOCKED)
				|| to_tid == caller->t_globalid ) {
			/* To a thread that is waiting for us, or a send to myself */
			do_ipc(caller, to_thr);
		}
		else if(to_thr && to_thr->state == T_INACTIVE &&
				GLOBALID_TO_TID(to_thr->utcb->t_pager) == GLOBALID_TO_TID(caller->t_globalid)) {
			/* That is thread start protocol */

			dbg_printf(DL_IPC, "IPC: %t thread start\n", to_tid);

			thread_init_ctx((void*) ipc_read_mr(caller, 2),
							(void*) ipc_read_mr(caller, 1), to_thr);
			caller->state = T_RUNNABLE;

			/*Start thread*/
			to_thr->state = T_RUNNABLE;
		}
		else  {
			/*No waiting, block myself*/
			caller->state = T_SEND_BLOCKED;
			caller->utcb->intended_receiver = to_tid;

			dbg_printf(DL_IPC, "IPC: %t sending\n", caller->t_globalid);
		}
	}

	if(from_tid != L4_NILTHREAD) {
		/*Only receive phases, simply lock myself*/
		caller->state = T_RECV_BLOCKED;
		caller->ipc_from = from_tid;

		dbg_printf(DL_IPC, "IPC: %t receiving\n", caller->t_globalid);
	}
}
Example #7: play_hmi()
void play_hmi (void * arg)
{
	int i;
	int pos = 0x308;
	int n_chunks = 0;
	int low_dtime;
	int low_chunk;
	int csec;
//	pid_t loc_pid;
	int qid;
	int ipc_read = 0;
	int k=0;
	
	struct msgbuf *rcv;
	
	Track_info *t_info;

	//printf ("play_hmi\n");//#########
	
	stop = 0;
	ipc_read=0;
//	loc_pid=fork();
    
/*	switch (loc_pid)
	{
	 case 0:
		break;
	 case -1:
		return -1;
	 default:
		atexit(kill_ipc);
		return loc_pid;
	}*/
	
//	signal(SIGTERM, my_quit);
	rcv=d_malloc(sizeof(long) + 16);
	
	rcv->mtype=1;
	rcv->mtext[0]='0';
	
	sleep(2);
	
	qid=msgget ((key_t) ('l'<<24) | ('d'<<16) | ('e'<<8) | 's', 0660);
	if(qid == -1)
	{	
		return;
	}
	
	do
	{
		ipc_read=do_ipc(qid,rcv,0);
	}
	while(rcv->mtext[0] != 'p');
	
	stop=0;
	rcv->mtext[0] = '0';
	
	seq_init();
	
	n_chunks=data[0x30];
	
	t_info = d_malloc(sizeof(Track_info)*n_chunks);
	
	while(1)
	{
		
		for(i=0;i<n_chunks;i++)
		{
			t_info[i].position = pos + 12;
			t_info[i].status = PLAYING;
			t_info[i].time = get_dtime(data,&t_info[i].position);
			pos += (( (0xff & data[pos + 5]) << 8 ) + (0xff & data[pos + 4]));
		}
		
		SEQ_START_TIMER();
		do
		{
			low_chunk = -1;
			k++;
			i=0;
			do
			{
				if (t_info[i].status == PLAYING)
				  low_chunk = i;
				i++;
			}
			while((low_chunk <=0) && (i<n_chunks));
			
			if (low_chunk == -1)
			  break;
			
			low_dtime = t_info[low_chunk].time;
			
			for(i=1;i<n_chunks;i++)
			{
				if ((t_info[i].time < low_dtime) && 
				    (t_info[i].status == PLAYING))
				{
					low_dtime = t_info[i].time;
					low_chunk = i;
				}
			}
			
			//if (low_dtime < 0)
			  //printf("Serious warning: d_time negative!!!!!!\n");
			
			csec = 0.86 * low_dtime;
			
			//flush sequencer buffer after 20 events
			if (k == 20)
			{
				ioctl(seqfd, SNDCTL_SEQ_SYNC);
				k = 0;
			}
			
			SEQ_WAIT_TIME(csec);
			
			t_info[low_chunk].status = do_track_event(data,&t_info[low_chunk].position);
			
			if (t_info[low_chunk].status == 3)
			{
				//printf("Error playing data in chunk %d\n",low_chunk);
				t_info[low_chunk].status = STOPPED;
			}
			
			if (t_info[low_chunk].status == PLAYING)
			  t_info[low_chunk].time += get_dtime(data,&t_info[low_chunk].position);
			
			//Check if the song has reached the end
			stop = t_info[0].status;
			for(i=1;i<n_chunks;i++)
			  stop &= t_info[i].status;
			
			if((do_ipc(qid,rcv,IPC_NOWAIT) > 0) && (rcv->mtext[0]=='p'))
			{
				n_chunks=data[0x30];
				t_info = realloc(t_info,sizeof(Track_info)*n_chunks);
				stop = 1;
				rcv->mtext[0] = '0';  
				stop_all();
			}
		}
		while(!stop);
		SEQ_STOP_TIMER();
		if( stop == 2)
		{
			stop_all();	    
			do
			{
				ipc_read=do_ipc(qid,rcv,0);
			}
			while(rcv->mtext[0] != 'p');
			rcv->mtext[0] = '0';
			n_chunks=data[0x30];
			t_info = realloc(t_info,sizeof(Track_info)*n_chunks);
			stop = 0;
		}
		pos=0x308;
	}
	d_free(data);
	d_free(t_info);
	d_free(rcv);
	
}
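
The do_ipc() helper this player relies on is not included in the listing. From its call sites (a System V queue id obtained with msgget(), a struct msgbuf pointer, and a flag of either 0 or IPC_NOWAIT, with the result compared against 0), it behaves like a thin wrapper around msgrcv(2). A plausible sketch under those assumptions, using the 16-byte message body and mtype 1 set up above:

/* Assumed wrapper, not the project's actual code: receive one message of
 * type 1 into buf. Returns the byte count, or -1 on error / empty queue
 * (with IPC_NOWAIT), which matches the "do_ipc(...) > 0" checks above. */
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/msg.h>

struct msgbuf {
	long mtype;
	char mtext[16];
};

static int do_ipc(int qid, struct msgbuf *buf, int flags)
{
	return (int) msgrcv(qid, buf, sizeof buf->mtext, 1, flags);
}
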
Example #8: do_service()
static void do_service(config_t *cpe, config_t *config, struct rs_config *rs_config)
{
	struct rs_start *rs_start = &rs_config->rs_start;
	config_t *cp;

	/* At this point we expect one sublist that contains the various
	 * resource allocations
	 */
	if (!(cpe->flags & CFG_SUBLIST))
	{
		fatal("do_service: expected list at %s:%d",
			cpe->file, cpe->line);
	}
	if (cpe->next != NULL)
	{
		cpe= cpe->next;
		fatal("do_service: expected end of list at %s:%d",
			cpe->file, cpe->line);
	}
	cpe= cpe->list;

	/* Process the list */
	for (cp= cpe; cp; cp= cp->next)
	{
		if (!(cp->flags & CFG_SUBLIST))
		{
			fatal("do_service: expected list at %s:%d",
				cp->file, cp->line);
		}
		cpe= cp->list;
		if ((cpe->flags & CFG_STRING) || (cpe->flags & CFG_SUBLIST))
		{
			fatal("do_service: expected word at %s:%d",
				cpe->file, cpe->line);
		}

		if (strcmp(cpe->word, KW_CLASS) == 0)
		{
			do_class(cpe->next, config, rs_config);
			continue;
		}
		if (strcmp(cpe->word, KW_UID) == 0)
		{
			do_uid(cpe->next, rs_start);
			continue;
		}
		if (strcmp(cpe->word, KW_SIGMGR) == 0)
		{
			do_sigmgr(cpe->next, rs_start);
			continue;
		}
		if (strcmp(cpe->word, KW_TYPE) == 0)
		{
			do_type(cpe->next, rs_config);
			continue;
		}
		if (strcmp(cpe->word, KW_DESCR) == 0)
		{
			do_descr(cpe->next, rs_config);
			continue;
		}
		if (strcmp(cpe->word, KW_SCHEDULER) == 0)
		{
			do_scheduler(cpe->next, rs_start);
			continue;
		}
		if (strcmp(cpe->word, KW_PRIORITY) == 0)
		{
			do_priority(cpe->next, rs_start);
			continue;
		}
		if (strcmp(cpe->word, KW_QUANTUM) == 0)
		{
			do_quantum(cpe->next, rs_start);
			continue;
		}
		if (strcmp(cpe->word, KW_CPU) == 0)
		{
			do_cpu(cpe->next, rs_start);
			continue;
		}
		if (strcmp(cpe->word, KW_IRQ) == 0)
		{
			do_irq(cpe->next, rs_start);
			continue;
		}
		if (strcmp(cpe->word, KW_IO) == 0)
		{
			do_io(cpe->next, rs_start);
			continue;
		}
		if (strcmp(cpe->word, KW_PCI) == 0)
		{
			do_pci(cpe->next, rs_start);
			continue;
		}
		if (strcmp(cpe->word, KW_SYSTEM) == 0)
		{
			do_system(cpe->next, rs_start);
			continue;
		}
		if (strcmp(cpe->word, KW_IPC) == 0)
		{
			do_ipc(cpe->next, rs_start);
			continue;
		}
		if (strcmp(cpe->word, KW_VM) == 0)
		{
			do_vm(cpe->next, rs_start);
			continue;
		}
		if (strcmp(cpe->word, KW_CONTROL) == 0)
		{
			do_control(cpe->next, rs_start);
			continue;
		}
	}
}
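
In this MINIX service-configuration parser, do_ipc() is just one of the keyword handlers dispatched above: it is handed the remainder of the sublist that followed the ipc keyword and records, in rs_start, which services the new service may talk to. A sketch of the common handler shape is below; the list walking mirrors do_service() itself, while the actual rs_start member that would receive the names is only hinted at in a comment because it is not shown in this listing.

/* Illustrative handler only; mirrors the word-list walking used above. */
static void do_ipc(config_t *cpe, struct rs_start *rs_start)
{
	config_t *cp;

	for (cp= cpe; cp; cp= cp->next)
	{
		if ((cp->flags & CFG_SUBLIST) || (cp->flags & CFG_STRING))
		{
			fatal("do_ipc: expected word at %s:%d",
				cp->file, cp->line);
		}
		/* cp->word names one permitted IPC target; append it to the
		 * corresponding list in rs_start (field not shown here). */
	}
}
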
Example #9: proc_main()
void *proc_main(void *ptr)
{
    (void)ptr;

    info("PROC Plugin thread created with task id %d", gettid());

    if(pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, NULL) != 0)
        error("Cannot set pthread cancel type to DEFERRED.");

    if(pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL) != 0)
        error("Cannot set pthread cancel state to ENABLE.");

    // disable (by default) various interfaces that are not needed
    config_get_boolean("plugin:proc:/proc/net/dev:lo", "enabled", 0);
    config_get_boolean("plugin:proc:/proc/net/dev:fireqos_monitor", "enabled", 0);

    // when ZERO, attempt to do it
    int vdo_proc_net_dev            = !config_get_boolean("plugin:proc", "/proc/net/dev", 1);
    int vdo_proc_diskstats          = !config_get_boolean("plugin:proc", "/proc/diskstats", 1);
    int vdo_proc_net_snmp           = !config_get_boolean("plugin:proc", "/proc/net/snmp", 1);
    int vdo_proc_net_snmp6          = !config_get_boolean("plugin:proc", "/proc/net/snmp6", 1);
    int vdo_proc_net_netstat        = !config_get_boolean("plugin:proc", "/proc/net/netstat", 1);
    int vdo_proc_net_stat_conntrack = !config_get_boolean("plugin:proc", "/proc/net/stat/conntrack", 1);
    int vdo_proc_net_ip_vs_stats    = !config_get_boolean("plugin:proc", "/proc/net/ip_vs/stats", 1);
    int vdo_proc_net_stat_synproxy  = !config_get_boolean("plugin:proc", "/proc/net/stat/synproxy", 1);
    int vdo_proc_stat               = !config_get_boolean("plugin:proc", "/proc/stat", 1);
    int vdo_proc_meminfo            = !config_get_boolean("plugin:proc", "/proc/meminfo", 1);
    int vdo_proc_vmstat             = !config_get_boolean("plugin:proc", "/proc/vmstat", 1);
    int vdo_proc_net_rpc_nfs        = !config_get_boolean("plugin:proc", "/proc/net/rpc/nfs", 1);
    int vdo_proc_net_rpc_nfsd       = !config_get_boolean("plugin:proc", "/proc/net/rpc/nfsd", 1);
    int vdo_proc_sys_kernel_random_entropy_avail    = !config_get_boolean("plugin:proc", "/proc/sys/kernel/random/entropy_avail", 1);
    int vdo_proc_interrupts         = !config_get_boolean("plugin:proc", "/proc/interrupts", 1);
    int vdo_proc_softirqs           = !config_get_boolean("plugin:proc", "/proc/softirqs", 1);
    int vdo_proc_net_softnet_stat   = !config_get_boolean("plugin:proc", "/proc/net/softnet_stat", 1);
    int vdo_proc_loadavg            = !config_get_boolean("plugin:proc", "/proc/loadavg", 1);
    int vdo_ipc                     = !config_get_boolean("plugin:proc", "ipc", 1);
    int vdo_sys_kernel_mm_ksm       = !config_get_boolean("plugin:proc", "/sys/kernel/mm/ksm", 1);
    int vdo_cpu_netdata             = !config_get_boolean("plugin:proc", "netdata server resources", 1);

    // keep track of the time each module was called
    unsigned long long sutime_proc_net_dev = 0ULL;
    unsigned long long sutime_proc_diskstats = 0ULL;
    unsigned long long sutime_proc_net_snmp = 0ULL;
    unsigned long long sutime_proc_net_snmp6 = 0ULL;
    unsigned long long sutime_proc_net_netstat = 0ULL;
    unsigned long long sutime_proc_net_stat_conntrack = 0ULL;
    unsigned long long sutime_proc_net_ip_vs_stats = 0ULL;
    unsigned long long sutime_proc_net_stat_synproxy = 0ULL;
    unsigned long long sutime_proc_stat = 0ULL;
    unsigned long long sutime_proc_meminfo = 0ULL;
    unsigned long long sutime_proc_vmstat = 0ULL;
    unsigned long long sutime_proc_net_rpc_nfs = 0ULL;
    unsigned long long sutime_proc_net_rpc_nfsd = 0ULL;
    unsigned long long sutime_proc_sys_kernel_random_entropy_avail = 0ULL;
    unsigned long long sutime_proc_interrupts = 0ULL;
    unsigned long long sutime_proc_softirqs = 0ULL;
    unsigned long long sutime_proc_net_softnet_stat = 0ULL;
    unsigned long long sutime_proc_loadavg = 0ULL;
    unsigned long long sutime_ipc = 0ULL;
    unsigned long long sutime_sys_kernel_mm_ksm = 0ULL;

    unsigned long long step = rrd_update_every * 1000000ULL;
    for(;;) {
        unsigned long long now = time_usec();
        unsigned long long next = now - (now % step) + step;

        while(now < next) {
            sleep_usec(next - now);
            now = time_usec();
        }

        if(unlikely(netdata_exit)) break;

        // BEGIN -- the job to be done

        if(!vdo_sys_kernel_mm_ksm) {
            debug(D_PROCNETDEV_LOOP, "PROCNETDEV: calling do_sys_kernel_mm_ksm().");

            now = time_usec();
            vdo_sys_kernel_mm_ksm = do_sys_kernel_mm_ksm(rrd_update_every, (sutime_sys_kernel_mm_ksm > 0)?now - sutime_sys_kernel_mm_ksm:0ULL);
            sutime_sys_kernel_mm_ksm = now;
        }
        if(unlikely(netdata_exit)) break;

        if(!vdo_proc_loadavg) {
            debug(D_PROCNETDEV_LOOP, "PROCNETDEV: calling do_proc_loadavg().");
            now = time_usec();
            vdo_proc_loadavg = do_proc_loadavg(rrd_update_every, (sutime_proc_loadavg > 0)?now - sutime_proc_loadavg:0ULL);
            sutime_proc_loadavg = now;
        }
        if(unlikely(netdata_exit)) break;

        if(!vdo_ipc) {
            debug(D_PROCNETDEV_LOOP, "PROCNETDEV: calling do_ipc().");
            now = time_usec();
            vdo_ipc = do_ipc(rrd_update_every, (sutime_ipc > 0)?now - sutime_ipc:0ULL);
            sutime_ipc = now;
        }
        if(unlikely(netdata_exit)) break;

        if(!vdo_proc_interrupts) {
            debug(D_PROCNETDEV_LOOP, "PROCNETDEV: calling do_proc_interrupts().");
            now = time_usec();
            vdo_proc_interrupts = do_proc_interrupts(rrd_update_every, (sutime_proc_interrupts > 0)?now - sutime_proc_interrupts:0ULL);
            sutime_proc_interrupts = now;
        }
        if(unlikely(netdata_exit)) break;

        if(!vdo_proc_softirqs) {
            debug(D_PROCNETDEV_LOOP, "PROCNETDEV: calling do_proc_softirqs().");
            now = time_usec();
            vdo_proc_softirqs = do_proc_softirqs(rrd_update_every, (sutime_proc_softirqs > 0)?now - sutime_proc_softirqs:0ULL);
            sutime_proc_softirqs = now;
        }
        if(unlikely(netdata_exit)) break;

        if(!vdo_proc_net_softnet_stat) {
            debug(D_PROCNETDEV_LOOP, "PROCNETDEV: calling do_proc_net_softnet_stat().");
            now = time_usec();
            vdo_proc_net_softnet_stat = do_proc_net_softnet_stat(rrd_update_every, (sutime_proc_net_softnet_stat > 0)?now - sutime_proc_net_softnet_stat:0ULL);
            sutime_proc_net_softnet_stat = now;
        }
        if(unlikely(netdata_exit)) break;

        if(!vdo_proc_sys_kernel_random_entropy_avail) {
            debug(D_PROCNETDEV_LOOP, "PROCNETDEV: calling do_proc_sys_kernel_random_entropy_avail().");
            now = time_usec();
            vdo_proc_sys_kernel_random_entropy_avail = do_proc_sys_kernel_random_entropy_avail(rrd_update_every, (sutime_proc_sys_kernel_random_entropy_avail > 0)?now - sutime_proc_sys_kernel_random_entropy_avail:0ULL);
            sutime_proc_sys_kernel_random_entropy_avail = now;
        }
        if(unlikely(netdata_exit)) break;

        if(!vdo_proc_net_dev) {
            debug(D_PROCNETDEV_LOOP, "PROCNETDEV: calling do_proc_net_dev().");
            now = time_usec();
            vdo_proc_net_dev = do_proc_net_dev(rrd_update_every, (sutime_proc_net_dev > 0)?now - sutime_proc_net_dev:0ULL);
            sutime_proc_net_dev = now;
        }
        if(unlikely(netdata_exit)) break;

        if(!vdo_proc_diskstats) {
            debug(D_PROCNETDEV_LOOP, "PROCNETDEV: calling do_proc_diskstats().");
            now = time_usec();
            vdo_proc_diskstats = do_proc_diskstats(rrd_update_every, (sutime_proc_diskstats > 0)?now - sutime_proc_diskstats:0ULL);
            sutime_proc_diskstats = now;
        }
        if(unlikely(netdata_exit)) break;

        if(!vdo_proc_net_snmp) {
            debug(D_PROCNETDEV_LOOP, "PROCNETDEV: calling do_proc_net_snmp().");
            now = time_usec();
            vdo_proc_net_snmp = do_proc_net_snmp(rrd_update_every, (sutime_proc_net_snmp > 0)?now - sutime_proc_net_snmp:0ULL);
            sutime_proc_net_snmp = now;
        }
        if(unlikely(netdata_exit)) break;

        if(!vdo_proc_net_snmp6) {
            debug(D_PROCNETDEV_LOOP, "PROCNETDEV: calling do_proc_net_snmp6().");
            now = time_usec();
            vdo_proc_net_snmp6 = do_proc_net_snmp6(rrd_update_every, (sutime_proc_net_snmp6 > 0)?now - sutime_proc_net_snmp6:0ULL);
            sutime_proc_net_snmp6 = now;
        }
        if(unlikely(netdata_exit)) break;

        if(!vdo_proc_net_netstat) {
            debug(D_PROCNETDEV_LOOP, "PROCNETDEV: calling do_proc_net_netstat().");
            now = time_usec();
            vdo_proc_net_netstat = do_proc_net_netstat(rrd_update_every, (sutime_proc_net_netstat > 0)?now - sutime_proc_net_netstat:0ULL);
            sutime_proc_net_netstat = now;
        }
        if(unlikely(netdata_exit)) break;

        if(!vdo_proc_net_stat_conntrack) {
            debug(D_PROCNETDEV_LOOP, "PROCNETDEV: calling do_proc_net_stat_conntrack().");
            now = time_usec();
            vdo_proc_net_stat_conntrack = do_proc_net_stat_conntrack(rrd_update_every, (sutime_proc_net_stat_conntrack > 0)?now - sutime_proc_net_stat_conntrack:0ULL);
            sutime_proc_net_stat_conntrack = now;
        }
        if(unlikely(netdata_exit)) break;

        if(!vdo_proc_net_ip_vs_stats) {
            debug(D_PROCNETDEV_LOOP, "PROCNETDEV: calling do_proc_net_ip_vs_stats().");
            now = time_usec();
            vdo_proc_net_ip_vs_stats = do_proc_net_ip_vs_stats(rrd_update_every, (sutime_proc_net_ip_vs_stats > 0)?now - sutime_proc_net_ip_vs_stats:0ULL);
            sutime_proc_net_ip_vs_stats = now;
        }
        if(unlikely(netdata_exit)) break;

        if(!vdo_proc_net_stat_synproxy) {
            debug(D_PROCNETDEV_LOOP, "PROCNETDEV: calling do_proc_net_stat_synproxy().");
            now = time_usec();
            vdo_proc_net_stat_synproxy = do_proc_net_stat_synproxy(rrd_update_every, (sutime_proc_net_stat_synproxy > 0)?now - sutime_proc_net_stat_synproxy:0ULL);
            sutime_proc_net_stat_synproxy = now;
        }
        if(unlikely(netdata_exit)) break;

        if(!vdo_proc_stat) {
            debug(D_PROCNETDEV_LOOP, "PROCNETDEV: calling do_proc_stat().");
            now = time_usec();
            vdo_proc_stat = do_proc_stat(rrd_update_every, (sutime_proc_stat > 0)?now - sutime_proc_stat:0ULL);
            sutime_proc_stat = now;
        }
        if(unlikely(netdata_exit)) break;

        if(!vdo_proc_meminfo) {
            debug(D_PROCNETDEV_LOOP, "PROCNETDEV: calling do_proc_meminfo().");
            now = time_usec();
            vdo_proc_meminfo = do_proc_meminfo(rrd_update_every, (sutime_proc_meminfo > 0)?now - sutime_proc_meminfo:0ULL);
            sutime_proc_meminfo = now;
        }
        if(unlikely(netdata_exit)) break;

        if(!vdo_proc_vmstat) {
            debug(D_PROCNETDEV_LOOP, "PROCNETDEV: calling do_proc_vmstat().");
            now = time_usec();
            vdo_proc_vmstat = do_proc_vmstat(rrd_update_every, (sutime_proc_vmstat > 0)?now - sutime_proc_vmstat:0ULL);
            sutime_proc_vmstat = now;
        }
        if(unlikely(netdata_exit)) break;

        if(!vdo_proc_net_rpc_nfsd) {
            debug(D_PROCNETDEV_LOOP, "PROCNETDEV: calling do_proc_net_rpc_nfsd().");
            now = time_usec();
            vdo_proc_net_rpc_nfsd = do_proc_net_rpc_nfsd(rrd_update_every, (sutime_proc_net_rpc_nfsd > 0)?now - sutime_proc_net_rpc_nfsd:0ULL);
            sutime_proc_net_rpc_nfsd = now;
        }
        if(unlikely(netdata_exit)) break;

        if(!vdo_proc_net_rpc_nfs) {
            debug(D_PROCNETDEV_LOOP, "PROCNETDEV: calling do_proc_net_rpc_nfs().");
            now = time_usec();
            vdo_proc_net_rpc_nfs = do_proc_net_rpc_nfs(rrd_update_every, (sutime_proc_net_rpc_nfs > 0)?now - sutime_proc_net_rpc_nfs:0ULL);
            sutime_proc_net_rpc_nfs = now;
        }
        if(unlikely(netdata_exit)) break;

        // END -- the job is done

        // --------------------------------------------------------------------

        if(!vdo_cpu_netdata) {
            global_statistics_charts();
            registry_statistics();
        }
    }

    info("PROC thread exiting");

    pthread_exit(NULL);
    return NULL;
}
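
Every collector module in this netdata thread, do_ipc() included, follows the same contract: it is called as do_xxx(update_every, dt), where dt is the number of microseconds since its previous successful run, and a non-zero return value sets the matching vdo_xxx flag so the loop never calls it again. Below is a minimal sketch of that contract; the body is a placeholder, not netdata's implementation.

/* Placeholder body: only the signature and return convention matter here.
 * Return 0 to keep being called, non-zero to disable this module. */
int do_ipc(int update_every, unsigned long long dt) {
    (void)update_every;  /* chart granularity, in seconds */
    (void)dt;            /* microseconds since the last successful call */

    /* ... read SysV IPC semaphore/queue statistics and update the charts ... */

    return 0;
}
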
Example #10: play_hmi()
void play_hmi (void * arg)
{
	int i;
	int pos = 0x308;
	int n_chunks = 0;
	int low_dtime;
	int low_chunk;
	int csec, lcsec;
	int qid;
	int ipc_read = 0;
	int k=0;

	struct msgbuf *rcv;

	Track_info *t_info;

	con_printf(CON_DEBUG,"play_hmi\n");

	stop = 0;
	ipc_read=0;

	rcv=malloc(sizeof(long) + 16);

	rcv->mtype=1;
	rcv->mtext[0]='0';

	qid=msgget ((key_t) ('l'<<24) | ('d'<<16) | ('e'<<8) | 's', 0660);
	if(qid == -1)
	{
		return;
	}

	do
	{
		ipc_read=do_ipc(qid,rcv,0);
	}
	while(rcv->mtext[0] != 'p');

	stop=0;
	rcv->mtext[0] = '0';

	seq_init();

	n_chunks=data[0x30];

	t_info = malloc(sizeof(Track_info)*n_chunks);

	while(1)
	{

		for(i=0;i<n_chunks;i++)
		{
			t_info[i].position = pos + 12;
			t_info[i].status = PLAYING;
			t_info[i].time = get_dtime(data,&t_info[i].position);
			pos += (( (0xff & data[pos + 5]) << 8 ) + (0xff & data[pos + 4]));
		}

		lcsec = 0;

		SEQ_START_TIMER();
		do
		{
			low_chunk = -1;
			k++;
			i=0;
			do
			{
				if (t_info[i].status == PLAYING)
				  low_chunk = i;
				i++;
			}
			while((low_chunk <=0) && (i<n_chunks));

			if (low_chunk == -1)
			  break;

			low_dtime = t_info[low_chunk].time;

			for(i=1;i<n_chunks;i++)
			{
				if ((t_info[i].time < low_dtime) &&
				    (t_info[i].status == PLAYING))
				{
					low_dtime = t_info[i].time;
					low_chunk = i;
				}
			}

			if (low_dtime < 0)
			  con_printf(CON_URGENT,"Serious warning: d_time negative!!!!!!\n");

			csec = 0.86 * low_dtime;

			//flush sequencer buffer after 20 events
			if (k == 20)
			{
				ioctl(seqfd, SNDCTL_SEQ_SYNC);
				k = 0;
			}

#ifdef WANT_AWE32
			cut_trough();
#endif

			if (csec != lcsec) {
				SEQ_WAIT_TIME(csec);
			}

			lcsec = csec;

			t_info[low_chunk].status = do_track_event(data,&t_info[low_chunk].position);

			if (t_info[low_chunk].status == 3)
			{
				con_printf(CON_URGENT,"Error playing data in chunk %d\n",low_chunk);
				t_info[low_chunk].status = STOPPED;
			}

			if (t_info[low_chunk].status == PLAYING)
			  t_info[low_chunk].time += get_dtime(data,&t_info[low_chunk].position);

			//Check if the song has reached the end
			stop = t_info[0].status;
			for(i=1;i<n_chunks;i++)
			  stop &= t_info[i].status;

			if((do_ipc(qid,rcv,IPC_NOWAIT) > 0) && (rcv->mtext[0]=='p'))
			{
				n_chunks=data[0x30];
				t_info = realloc(t_info,sizeof(Track_info)*n_chunks);
				stop = 1;
				rcv->mtext[0] = '0';
				stop_all();
			}
		}
		while(!stop);
		SEQ_STOP_TIMER();
		if( stop == 2)
		{
			stop_all();
			do
			{
				ipc_read=do_ipc(qid,rcv,0);
			}
			while(rcv->mtext[0] != 'p');
			rcv->mtext[0] = '0';
			n_chunks=data[0x30];
			t_info = realloc(t_info,sizeof(Track_info)*n_chunks);
			stop = 0;
		}
		pos=0x308;
	}
	free(data);
	free(t_info);
	free(rcv);

}
Example #11: sys_ipc()
void sys_ipc(uint32_t *param1)
{
	/* TODO: Checking of recv-mask */
	tcb_t *to_thr = NULL;
	l4_thread_t to_tid = param1[REG_R0], from_tid = param1[REG_R1];
	uint32_t timeout = param1[REG_R2];

	if (to_tid == L4_NILTHREAD &&
		from_tid == L4_NILTHREAD) {
		caller->state = T_INACTIVE;
		if (timeout)
			sys_ipc_timeout(timeout);
		return;
	}

	if (to_tid != L4_NILTHREAD) {
		to_thr = thread_by_globalid(to_tid);

		if (to_tid == TID_TO_GLOBALID(THREAD_LOG)) {
			user_log(caller);
			caller->state = T_RUNNABLE;
			return;
		} else if (to_tid == TID_TO_GLOBALID(THREAD_IRQ_REQUEST)) {
			user_interrupt_config(caller);
			caller->state = T_RUNNABLE;
			return;
		} else if ((to_thr && to_thr->state == T_RECV_BLOCKED)
		           || to_tid == caller->t_globalid) {
			/* To a thread that is waiting for us, or a send to myself */
			do_ipc(caller, to_thr);
			return;
		} else if (to_thr && to_thr->state == T_INACTIVE &&
		           GLOBALID_TO_TID(to_thr->utcb->t_pager) == GLOBALID_TO_TID(caller->t_globalid)) {
			if (ipc_read_mr(caller, 0) == 0x00000005) {
				/* mr1: thread func, mr2: stack addr, mr3: stack size*/
				/* mr4: thread entry, mr5: thread args */
				/* thread start protocol */

				memptr_t sp = ipc_read_mr(caller, 2);
				size_t stack_size = ipc_read_mr(caller, 3);
				uint32_t regs[4];	/* r0, r1, r2, r3 */

				dbg_printf(DL_IPC, "IPC: %t thread start\n", to_tid);

				to_thr->stack_base = sp - stack_size;
				to_thr->stack_size = stack_size;

				regs[REG_R0] = (uint32_t)&kip;
				regs[REG_R1] = (uint32_t)to_thr->utcb;
				regs[REG_R2] = ipc_read_mr(caller, 4);
				regs[REG_R3] = ipc_read_mr(caller, 5);
				thread_init_ctx((void *) sp, (void *) ipc_read_mr(caller, 1),
				                regs, to_thr);

				caller->state = T_RUNNABLE;

				/* Start thread */
				to_thr->state = T_RUNNABLE;

				return;
			} else {
				do_ipc(caller, to_thr);
				to_thr->state = T_INACTIVE;

				return;
			}
		} else  {
			/* No waiting, block myself */
			caller->state = T_SEND_BLOCKED;
			caller->utcb->intended_receiver = to_tid;
			dbg_printf(DL_IPC, "IPC: %t sending\n", caller->t_globalid);

			if (timeout)
				sys_ipc_timeout(timeout);

			return;
		}
	}

	if (from_tid != L4_NILTHREAD) {
		tcb_t *thr = NULL;

		if (from_tid == L4_ANYTHREAD) {
			int i;
			/* Find out if there is any sending thread waiting for caller */
			for (i = 1; i < thread_count; ++i) {
				thr = thread_map[i];
				if (thr->state == T_SEND_BLOCKED &&
				    thr->utcb->intended_receiver == caller->t_globalid) {
					do_ipc(thr, caller);
					return;
				}
			}
		} else if (from_tid != TID_TO_GLOBALID(THREAD_INTERRUPT)) {
			thr = thread_by_globalid(from_tid);

			if (thr->state == T_SEND_BLOCKED &&
			    thr->utcb->intended_receiver == caller->t_globalid) {
				do_ipc(thr, caller);
				return;
			}
		}

		/* Only receive phases, simply lock myself */
		caller->state = T_RECV_BLOCKED;
		caller->ipc_from = from_tid;

		if (from_tid == TID_TO_GLOBALID(THREAD_INTERRUPT)) {
			/* Threaded interrupt is ready */
			user_interrupt_handler_update(caller);
		}

		if (timeout)
			sys_ipc_timeout(timeout);

		dbg_printf(DL_IPC, "IPC: %t receiving\n", caller->t_globalid);

		return;
	}

	caller->state = T_SEND_BLOCKED;
}
Example #12: Thread_object::sys_vcpu_resume()
PRIVATE inline
L4_msg_tag
Thread_object::sys_vcpu_resume(L4_msg_tag const &tag, Utcb *utcb)
{
  if (this != current() || !(state() & Thread_vcpu_enabled))
    return commit_result(-L4_err::EInval);

  Space *s = space();
  Vcpu_state *vcpu = vcpu_state().access(true);

  L4_obj_ref user_task = vcpu->user_task;
  if (user_task.valid())
    {
      L4_fpage::Rights task_rights = L4_fpage::Rights(0);
      Task *task = Kobject::dcast<Task*>(s->lookup_local(user_task.cap(),
                                                         &task_rights));

      if (EXPECT_FALSE(task && !(task_rights & L4_fpage::Rights::W())))
        return commit_result(-L4_err::EPerm);

      if (task != vcpu_user_space())
        vcpu_set_user_space(task);

      vcpu->user_task = L4_obj_ref();
    }
  else if (user_task.op() == L4_obj_ref::Ipc_reply)
    vcpu_set_user_space(0);

  L4_snd_item_iter snd_items(utcb, tag.words());
  int items = tag.items();
  if (vcpu_user_space())
    for (; items && snd_items.more(); --items)
      {
        if (EXPECT_FALSE(!snd_items.next()))
          break;

        Lock_guard<Lock> guard;
        if (!guard.check_and_lock(&static_cast<Task *>(vcpu_user_space())->existence_lock))
          return commit_result(-L4_err::ENoent);

        cpu_lock.clear();

        L4_snd_item_iter::Item const *const item = snd_items.get();
        L4_fpage sfp(item->d);

        Reap_list rl;
        L4_error err = fpage_map(space(), sfp,
                                 vcpu_user_space(), L4_fpage::all_spaces(),
                                 item->b, &rl);
        rl.del();

        cpu_lock.lock();

        if (EXPECT_FALSE(!err.ok()))
          return commit_error(utcb, err);
      }

  if ((vcpu->_saved_state & Vcpu_state::F_irqs)
      && (vcpu->sticky_flags & Vcpu_state::Sf_irq_pending))
    {
      assert_kdb(cpu_lock.test());
      do_ipc(L4_msg_tag(), 0, 0, true, 0,
	     L4_timeout_pair(L4_timeout::Zero, L4_timeout::Zero),
	     &vcpu->_ipc_regs, L4_fpage::Rights::FULL());

      vcpu = vcpu_state().access(true);

      if (EXPECT_TRUE(!vcpu->_ipc_regs.tag().has_error()
	              || this->utcb().access(true)->error.error() == L4_error::R_timeout))
	{
	  vcpu->_ts.set_ipc_upcall();

	  Address sp;

	  // tried to resume to user mode, so an IRQ enters from user mode
	  if (vcpu->_saved_state & Vcpu_state::F_user_mode)
            sp = vcpu->_entry_sp;
	  else
            sp = vcpu->_ts.sp();

          arch_load_vcpu_kern_state(vcpu, true);

	  LOG_TRACE("VCPU events", "vcpu", this, Vcpu_log,
	      l->type = 4;
	      l->state = vcpu->state;
	      l->ip = vcpu->_entry_ip;
	      l->sp = sp;
	      l->space = static_cast<Task*>(_space.vcpu_aware())->dbg_id();
	      );