Esempio n. 1
0
int syscall_handler(int num, struct syscall_arguments *args)
{
    interrupt_unmask();
    
    switch (num)
    {
        case ADDPROCESS:
            interrupt_mask();
                process_add((void (*)())args->arg1);
            interrupt_unmask();
        break;
        case YIELD:
            interrupt_sleepinsyscall();
        break;
        
        case UDPSEND:
            return udp_output(args->arg1, (struct sockaddr*)args->arg2, 
                (char *)args->arg3, args->arg4);
        break;
        case UDPRECV:
            return udp_recvfrom(args->arg1, (struct sockaddr*)args->arg2, 
                (char *)args->arg3, args->arg4);
        break;
        case UDPSOCKET: 
            return udp_socket();
        break;
        case UDPBIND:
            return udp_bind(args->arg1, (struct sockaddr*)args->arg2);
        break;
        case UDPCLOSE:
            return udp_close(args->arg1);
        break;
        
        case TCPCONNECT:
            return tcp_connect(args->arg1, (struct sockaddr*)args->arg2);
        break;
        
        case TCPSEND:
            return tcp_send(args->arg1, (char *)args->arg2, args->arg3);
        break;
        case TCPRECV:
            return tcp_recv(args->arg1, (char *)args->arg2, args->arg3);
        break;
        case TCPSOCKET: 
            return tcp_socket();
        break;
        case TCPBIND:
            return tcp_bind(args->arg1, (struct sockaddr*)args->arg2);
        break;
        case TCPCLOSE:
            return tcp_close(args->arg1);
        break;
        case TCPLISTEN:
            return tcp_listen(args->arg1);
        break;
    }
    
    return 0;
}
Esempio n. 2
0
/*
 * Detach whatever handler is registered for the given IRQ line.
 * Out-of-range IRQ numbers are silently ignored.  The line is masked
 * before the entry is cleared so a stale handler cannot fire mid-update.
 */
void interrupt_unregister(int irq)
{
    if (irq >= 0 && irq <= MAX_IRQ_NUMBER) {
        interrupt_mask(irq);
        interrupt_entries[irq].type = INTERRUPT_NONE;
    }
}
/*
 * AIPC interrupt service routine.
 *
 * Masks the triggering IRQ, de-asserts the CPU interrupt-pending
 * register, then drains the high-priority and low-priority AIPC CPU
 * queues.  The dequeue budget `n` starts at MAX_HANDLE_CNT and is
 * shared by both queues; it is NOT reset between iterations of the
 * outer while loop, so at most MAX_HANDLE_CNT elements are dequeued
 * per ISR invocation -- NOTE(review): confirm this budgeting (rather
 * than a per-pass reset) is intentional.
 * NOTE(review): the callback dispatch (aipc_exec_callback) is commented
 * out, so dequeued fids are currently discarded -- confirm intended.
 *
 * Returns IRQ_HANDLED after unmasking the IRQ.
 */
int
aipc_ISR( u32_t irq , void *data )
{
	int n = MAX_HANDLE_CNT;
	u32_t fid;

	interrupt_mask( irq );
	deassert( AIPC_CPU_INT_IP );	/* acknowledge the pending interrupt */

	while(1) {
		if( !aipc_int_cpu_hiq_empty() ){	//handle elements in hi queue
			for( ; n>0 ; n-- )
			{
				if( !aipc_int_cpu_hiq_empty() ){  //For hi queue	
					//Get element from hi queue
					if( OK == aipc_int_cpu_hiq_dequeue( &fid ))
						;//aipc_exec_callback( fid , data );  //Run callback
					else
						break;
				}
			}
		}
	
		if( !aipc_int_cpu_lowq_empty() ){	//handle elements in low queue
			for( ; n>0 ; n-- )
			{
				if( !aipc_int_cpu_lowq_empty() ){  //For low queue	
					//Get element from low queue
					if( OK == aipc_int_cpu_lowq_dequeue( &fid ) )
						;//aipc_exec_callback( fid , data );  //Run callback
					else
						break;
				}
			}
		}

		// If the pending bit re-asserted while we were draining, clear
		// it and make another pass; otherwise we are done.
		if( read_register( AIPC_CPU_INT_IP ) )
			deassert( AIPC_CPU_INT_IP );
		else
			break;
	}

	interrupt_unmask( irq );

	return IRQ_HANDLED;
}
Esempio n. 4
0
//
// This routine is called from an interrupt handler.
// If it is safe, we do the work immediately.
// Otherwise we queue the event which will be acted
// on at the first safe moment. Probably when we are
// about to leave the microkernel.
//
// The low bit of *thp may be set to indicate intrevent_add was called from an IO interrupt. (Yes, it's ugly.)
// use kermacros.c:  AP_INTREVENT_FROM_IO(thp), which sets the low bit, to pass the thp parm, if you're calling
// intrevent_add from IO.
// Queue (or, when safe, immediately deliver) a sigevent raised from an
// interrupt handler.  Returns sigevent_exe()'s status on immediate
// delivery, 0 when the event was queued, or -1 when no INTREVENT was
// available and intrevent_error() could not reclaim any.
int intrevent_add_attr
intrevent_add(const struct sigevent *evp, THREAD *thp, INTERRUPT *isr) {
	INTREVENT		*irp;
	int		 		prio;
	struct sigevent	ev_copy;

	// Make events sent from an IO interrupt critical, but don't clobber
	// the user's sigevent: work on a local copy instead.
	if(((uintptr_t)(thp)) & AP_INTREVENT_FROM_IO_FLAG) {
		// Strip the low-bit "from IO" tag to recover the real THREAD pointer.
		thp = (THREAD*) ((uintptr_t)(thp) & ~AP_INTREVENT_FROM_IO_FLAG);
		if(intrs_aps_critical) {
			ev_copy = *evp;
			SIGEV_MAKE_CRITICAL(&ev_copy);
			evp = &ev_copy;
		}
	};

#if !defined(VARIANT_smp)	/* PDB: condition is true */
	//
	// If we were in user mode before, we can deliver the event right away.
	// We can't, however, allocate any memory. We might need to allocate
	// up to two pulses - one for the actual event being delivered, and
	// one for an UNBLOCK pulse to be delivered to the server that the first
	// thread is replied blocked on. We check for 4 available entries as
	// an extra security blanket.
	//
	if((get_inkernel() == 1) && ((pulse_souls.total - pulse_souls.used) >= 4)) {
		int				status;

		// Probably safe to handle it right away (SIGEV_THREAD is a problem).
		if((status = sigevent_exe(evp, thp, 1)) >= 0) {
			return(status);
		}
	}
#endif

	// Grab a free INTREVENT under the interrupt lock.  If the free list
	// is empty, drop the lock and let intrevent_error() try to reclaim
	// some before retrying.
	for( ;; ) {
		INTR_LOCK(&intrevent_lock);

		// get a free event from the list and fill it in
		irp = intrevent_free;
		if(irp != NULL) break;	// exits the loop with intrevent_lock still held
		if(ker_verbose >= 2) {
			DebugKDBreak();
		}
		INTR_UNLOCK(&intrevent_lock);
		if(!intrevent_error(thp, isr)) {
			return(-1);
		}
	}
	intrevent_free = irp->next;

	irp->thread = thp;
	irp->event = *evp;

	// Keep track of the maximum queued event priority for pre-emption.
	if(--num_pev_free <= num_pev_trigger) {
		if(drain_active) {
			int		level =	isr->level;

			// We're trying to drain the queue, don't let this interrupt
			// happen again until we're finished
			INTR_UNLOCK(&intrevent_lock);
			(void) interrupt_mask(level, NULL);
			INTR_LOCK(&intrevent_lock);
			interrupt_level[level].config |= INTERNAL_CONFIG_FLAG_MASKED;
			if(drain_last_mask < level) drain_last_mask = level;
		}
		// If we are running low on free pending event structures, try to
		// preempt sooner to drain the queue....
		prio = NUM_PRI;
	} else if(nopreempt) {
		prio = 0;
	} else if(SIGEV_GET_TYPE(evp) == SIGEV_PULSE && evp->sigev_priority >= 0) {
		prio = evp->sigev_priority;
	} else {
		// No explicit pulse priority: inherit the target thread's priority.
		prio = thp->priority;
	}

	if(queued_event_priority < prio) {
		queued_event_priority = prio;
	}

    // now put this event at the end of pending list
	irp->next = NULL;
	*intrevent_tail = irp;
	intrevent_tail = &irp->next;
    INTR_UNLOCK(&intrevent_lock);

	return(0);
}
Esempio n. 5
0
/*
 * We've run out of intrevent structures. This means that some process
 * has messed up its ISR. We'll find the errant process and kill it
 * without mercy.
 *
 * This code is actually bogus, since it doesn't really solve the
 * problem - the messed up ISR may not be adding any events to the
 * intrevent_pending queue, so it won't be found by scanning the
 * list. We'd need to keep track of active interrupts and find the
 * deepest nesting one that keeps on asserting when we re-enable interrupts
 * in the kernel interrupt exit processing, but I can't see any way
 * of doing that without slowing down the normal interrupt handling code,
 * which we don't want to do. It's also got race conditions - think
 * about what happens if another nested interrupt goes off while in
 * here and the code is re-entrantly started. Things to think about...
 *
 *  Beginnings of an idea.
 *  In intrevent_add(), when get down to a critical number of
 *  free INTREVENT's (~20), turn on a flag. In the interrupt()
 *  processing loop, if that flag is on, mask any interrupt that
 *  occurs and set a flag on the INTERRUPT structure saying that
 *  it's been masked. Eventually the problem interrupt will be masked
 *  and forward progress will be made. Once we get into intrevent_drain(),
 *  and have drained all the pending events, check the global flag. If
 *  it's on, scan the INTERRUPT structures looking for ones that have
 *  been masked by the interrupt() loop. For each, clear a state flag,
 *  unmask the level and then set the state flag. In the interrupt() loop,
 *  more code counts the number of times it gets entered. If we get above
 *  a predetermined number (~100) without seeing the state flag gets set,
 *  we assume that this is the permanently asserted interrupt and
 *  remask it. All the processes with ISR's attached to that interrupt
 *  need to be killed. Where this has problems is with SMP, since the
 *  interrupt() loop may be handled by a different CPU than the one
 *  that's doing intrevent_drain().

 */
/*
 * Out of INTREVENT structures: find the process with the most pending
 * interrupt events and, if it clearly dominates, cancel its events and
 * queue a SIGKILL for it.  Returns 1 when events were reclaimed (caller
 * should retry), 0 when no single process looked guilty enough.
 */
static int
intrevent_error(THREAD *thp, INTERRUPT *isr) {
	INTREVENT	*curr_intr;
	INTREVENT	**owner;
	PROCESS		*curr_proc;
	PROCESS		*high_intrs_proc;
	pid_t		curr_pid;
	int			high_intrs_count;
	INTRLEVEL	*intr_level;
	unsigned	intr_vector;
	INTREVENT	*killer;

	/*
	 * First, run thru the pending interrupt list and 'mark' each process
	 * with the number of pending events.
	 */
	INTR_LOCK(&intrevent_lock);
	for(curr_intr = intrevent_pending; curr_intr != NULL; curr_intr = curr_intr->next) {
		curr_intr->thread->process->pending_interrupts++;
	}
	INTR_UNLOCK(&intrevent_lock);

	/*
	 * Walk the process table and find the process with the most pending
	 * interrupts. Zero the list behind us (so we don't have to do another
	 * pass later).
	 */
	high_intrs_proc = NULL;
	high_intrs_count = 0;
	// Start at pid 2: presumably pids 0/1 are reserved -- TODO confirm.
	for(curr_pid = 2; curr_pid < process_vector.nentries; ++curr_pid) {
		if(VECP(curr_proc, &process_vector, curr_pid)) {
			if(curr_proc->pending_interrupts > high_intrs_count) {
				high_intrs_count = curr_proc->pending_interrupts;
				high_intrs_proc = curr_proc;
			}
			curr_proc->pending_interrupts = 0;
		}
	}

	intr_level = &interrupt_level[isr->level];
	intr_vector = intr_level->info->vector_base + isr->level - intr_level->level_base;
#define MIN_INTRS_BAD 10
	if(high_intrs_count < MIN_INTRS_BAD) {
		/* There wasn't enough pending interrupts on any particular process to justify
		 * canceling them.
		 */
		char	*name = thp->process->debug_name;

		if(name == NULL) name = "";
		kprintf("Out of interrupt events! (vector=%u process=%u [%s])\n",
				intr_vector, thp->process->pid, name);

		if(ker_verbose >= 2) {
		/* debug assist information when an out-of-interrupt-events condition occurs */
			unsigned  i;
			INTREVENT * irplocal = intrevent_pending;
#if defined(__GNUC__)
/* this enum order safe init is not supported by the WATCOM compiler */
	#define ARRAY_EL(e)	[(e)] =
#else
	#define ARRAY_EL(e)
#endif
			// Per-sigev-type format strings, indexed by SIGEV_GET_TYPE().
			static const char * const fmt[] =
			{
				ARRAY_EL(SIGEV_NONE) "t-%02u:  SIGEV_NONE(%u) -> %u (%s)  (--/--/--/--)\n",
				ARRAY_EL(SIGEV_SIGNAL) "t-%02u:  SIGEV_SIGNAL(%u) -> %u (%s)  (0x%x/0x%x/--/--)\n",
				ARRAY_EL(SIGEV_SIGNAL_CODE) "t-%02u:  SIGEV_SIGNAL_CODE(%u) -> %u (%s)  (0x%x/0x%x/0x%x/--)\n",
				ARRAY_EL(SIGEV_SIGNAL_THREAD) "t-%02u:  SIGEV_SIGNAL_THREAD(%u) -> %u (%s)  (0x%x/0x%x/0x%x/--)\n",
				ARRAY_EL(SIGEV_PULSE) "t-%02u:  SIGEV_PULSE(%u) -> %u (%s)  (0x%x/0x%x/0x%x/0x%x)\n",
				ARRAY_EL(SIGEV_UNBLOCK) "t-%02u:  SIGEV_UNBLOCK(%u) -> %u (%s)  (--/--/--/--)\n",
				ARRAY_EL(SIGEV_INTR) "t-%02u:  SIGEV_INTR(%u) -> %u (%s)  (--/--/--/--)\n",
				ARRAY_EL(SIGEV_THREAD) "t-%02u:  SIGEV_THREAD(%u) -> %u (%s)  (--/--/--/--)\n",
			};
			kprintf("Last %u:   event  ->   pid  (signo/val/code/pri)\n", 2 * MIN_INTRS_BAD);
			// NOTE(review): this walk follows irplocal->next 2*MIN_INTRS_BAD
			// times with no NULL check -- it assumes the pending list is at
			// least that long when the pool is exhausted; confirm.
			for (i=0; i<(2 * MIN_INTRS_BAD); i++, irplocal=irplocal->next)
				kprintf(fmt[SIGEV_GET_TYPE(&irplocal->event) % NUM_ELTS(fmt)],
							i+1, SIGEV_GET_TYPE(&irplocal->event),
							(irplocal->thread && irplocal->thread->process) ? irplocal->thread->process->pid : 0,
							(irplocal->thread && irplocal->thread->process) ? irplocal->thread->process->debug_name : "?",
							irplocal->event.sigev_signo, irplocal->event.sigev_value.sival_int,
							irplocal->event.sigev_code, irplocal->event.sigev_priority);
		}
		return 0;
	}


	/*
	 * Cancel all interrupts pending for that process.
	 */
	killer = NULL;
	INTR_LOCK(&intrevent_lock);
	owner = &intrevent_pending;
	// Unlink every pending event owned by the guilty process.  The first
	// one unlinked is kept aside ("killer") to be recycled as the SIGKILL
	// event; the rest go back on the free list.
	for( ;; ) {
		curr_intr = *owner;
		if(curr_intr == NULL) break;
		if(curr_intr->thread->process == high_intrs_proc) {
			*owner = curr_intr->next;
			// Removing the tail element: pull the tail pointer back.
			if(intrevent_tail == &curr_intr->next) {
				intrevent_tail = owner;
			}
			if(killer == NULL) {
				killer = curr_intr;
			} else {
				curr_intr->next = intrevent_free;
				intrevent_free = curr_intr;
				++num_pev_free;
			}
		} else {
			owner = &curr_intr->next;
		}
	}

	if((killer == NULL) || (high_intrs_proc == NULL)) crash();
	killer->thread = high_intrs_proc->valid_thp;
	// Use a unique code that people can recognize
	SIGEV_SIGNAL_CODE_INIT(&killer->event, SIGKILL, 1, SI_IRQ);
	// Push the SIGKILL event at the head so it is delivered first.
	killer->next = intrevent_pending;
	intrevent_pending = killer;

	INTR_UNLOCK(&intrevent_lock);

	if(high_intrs_proc == thp->process) {
		if(intr_vector != SYSPAGE_ENTRY(qtime)->intr) {
			// The current interrupt came from the failed process. Mask it.
			//NYI: There should be a tracevent for this....
			(void) interrupt_mask(isr->level, isr);
		}
	}

	// Tell intrevent_add() to try again, we've freed stuff up.
	return 1;
}
Esempio n. 6
0
File: clock.c Progetto: sgh/aos
/* Stop clock tick delivery by masking the timer interrupt line. */
void disable_clock(void) {
	interrupt_mask(TIMER0_IRQ);
}