Example #1
int sys_DebugPrint( const char *fmt, char **pzArgs )
{
    if ( SysBase )
    {
        Thread_s *psThread = CURRENT_THREAD;
        char String[512];
        char zBuffer[1024];
        uint32 nFlg;

        nFlg = cli();
        if ( NULL != psThread )
        {
            Process_s *psProc;

            if ( ( psProc = CURRENT_PROC ) )
            {
                sprintf( String, "%d:%s::%s : %s", get_processor_id(), psProc->tc_zName, psThread->tr_zName, fmt );
            }
            else
            {
                sprintf( String, "%d:%s : %s", get_processor_id(), psThread->tr_zName, fmt );
            }
        }
        else
        {
            sprintf( String, "%d : %s", get_processor_id(), fmt );
        }
        put_cpu_flags( nFlg );
        sprintf( zBuffer, String, pzArgs[0], pzArgs[1], pzArgs[2], pzArgs[3], pzArgs[4], pzArgs[5], pzArgs[6], pzArgs[7], pzArgs[8], pzArgs[9], pzArgs[10], pzArgs[11], pzArgs[12] );

        debug_write( zBuffer, strlen( zBuffer ) );
    }
    return ( 0 );
}
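A minimal sketch of how a caller might feed sys_DebugPrint(): the argument vector has to cover all thirteen slots the kernel side reads, even when the format string only consumes a few. The names apArgs, nErrorCode and pzDeviceName below are hypothetical, not taken from the snippet:

/* hypothetical call site: the format only consumes two arguments, but the
 * array still has 13 entries because sys_DebugPrint() reads pzArgs[0..12]
 * unconditionally; the unused slots stay zero and are ignored by sprintf() */
char *apArgs[13] = { ( char * )nErrorCode, pzDeviceName };

sys_DebugPrint( "init failed with %d on %s\n", apArgs );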
Example #2
static int do_call_v86( Virtual86Struct_s * psState, SysCallRegs_s * psCallRegs )
{
	Virtual86State_s sState;
	Thread_s *psThread = CURRENT_THREAD;
	int nFlags;


	nFlags = cli();	/* Will be reset when we exit to v86 mode */

	atomic_inc( &psThread->tr_nInV86 );
	while ( get_processor_id() != g_nBootCPU )
	{
		printk( "do_call_v86() wrong CPU (%d), will schedule\n", get_processor_id() );
		Schedule();
	}

      //printk( "Enter v86\n" );

	memcpy( &sState.regs, &psState->regs, sizeof( sState.regs ) );

	sState.regs32 = psCallRegs;	// (SysCallRegs_s*) &psState;
	sState.psRegs16 = &psState->regs;

	if ( NULL == g_psFirstV86State )
	{
		unprotect_dos_mem();
	}
	sState.psNext = g_psFirstV86State;
	g_psFirstV86State = &sState;

	sState.regs.__null_ds = 0;
	sState.regs.__null_es = 0;
	sState.regs.__null_fs = 0;
	sState.regs.__null_gs = 0;

	sState.regs.eflags = sState.regs32->eflags | EFLG_VM;
	sState.regs.eflags &= ~EFLG_IOPL;

	sState.regs32->eax = 0;

	sState.pSavedStack = psThread->tr_pESP0;
	psThread->tr_pESP0 = ( void * )&sState.VM86_TSS_ESP0;

	g_asProcessorDescs[get_processor_id()].pi_sTSS.esp0 = psThread->tr_pESP0;
	
	__asm__ __volatile__( "movl %0,%%esp\n\t" "jmp ret_from_sys_call":	/* no outputs */
		:"r"( &sState.regs ) );

	/* we never return here */
	return ( 0 );
}
Example #3
static inline jboolean remote_unblock(ThreadDesc * cpuState)
{
	int result;
	sched_dprintf(DEBUG_2, "CPU%d: unblocking Thread:%p on remote CPU %d\n", get_processor_id(), cpuState,
		      cpuState->curCpuId);
	smp_call_function(cpuState->curCpuId, _cpuManager_unblock, cpuState, 1, &result);
	return (jboolean) result;
}
Example #4
static inline void return_to_32bit( Virtual86Regs_s * regs16, int retval )
{
	Thread_s *psThread = CURRENT_THREAD;
	SysCallRegs_s *psCallerRegs;
	int nGS;

	cli();		/* Will be reset when we return to caller */

	nGS = g_asProcessorDescs[get_processor_id()].pi_nGS;
	__asm__ __volatile__( "mov %0,%%gs"::"r"( nGS ) );

	set_gdt_desc_base( nGS, ( uint32 )psThread->tr_pThreadData );
	
	__asm__ __volatile__( "movl %0,%%gs\n\t" :	/* no outputs */
		:"r"( nGS ) );

	if ( NULL == g_psFirstV86State )
	{
		printk( "ERROR : return_to_32bit() called while g_psFirstV86State == NULL! Propably game over :(\n" );
		return;
	}

	memcpy( g_psFirstV86State->psRegs16, regs16, sizeof( *regs16 ) );

	g_psFirstV86State->regs32->eax = retval;
	psCallerRegs = g_psFirstV86State->regs32;

	psThread->tr_pESP0 = g_psFirstV86State->pSavedStack;
	g_asProcessorDescs[get_processor_id()].pi_sTSS.esp0 = psThread->tr_pESP0;


	g_psFirstV86State = g_psFirstV86State->psNext;

	kassertw( get_processor_id() == g_nBootCPU );

	if ( NULL == g_psFirstV86State )
	{
		protect_dos_mem();
	}
	atomic_dec( &psThread->tr_nInV86 );
	psCallerRegs->eax = retval;

	__asm__ __volatile__( "movl %0,%%esp\n\t" "jmp exit_from_sys_call"::"r"( psCallerRegs ) );
}
Example #5
File: lapic.c Project: mczero80/jx
void APIC_error_interrupt(void)	// called from "error_apic"
{
	unsigned long v, v1;

	v = apic_read(APIC_ESR);
	apic_write(APIC_ESR, 0);
	v1 = apic_read(APIC_ESR);
	ack_APIC_irq();

	/* Here is what the APIC error bits mean:
	   0: Send CS error
	   1: Receive CS error
	   2: Send accept error
	   3: Receive accept error
	   4: Reserved
	   5: Send illegal vector
	   6: Received illegal vector
	   7: Illegal register address
	 */
	printf("APIC error: %02lx(%02lx): \"", v, v1);
	if (v1 & 0x01)
		printf("Send CS error");
	if (v1 & 0x02)
		printf("Receive CS error");
	if (v1 & 0x04)
		printf("Send accept error");
	if (v1 & 0x08)
		printf("Receive accept error");
	if (v1 & 0x10)
		printf("Reserved");
	if (v1 & 0x20)
		printf("Send illegal vector");
	if (v1 & 0x40)
		printf("Received illegal vector");
	if (v1 & 0x80)
		printf("Illegal register addres");
	printf("\" on CPU%d\n", get_processor_id());
	sys_panic("APIC error on CPU%d\n", get_processor_id());
}
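The bit-by-bit decode above could equally be driven by a small lookup table; a sketch under that assumption (the array and helper names are hypothetical):

/* same mapping as the comment block and if-chain above, one string per ESR bit */
static const char *apic_esr_names[8] = {
	"Send CS error", "Receive CS error",
	"Send accept error", "Receive accept error",
	"Reserved", "Send illegal vector",
	"Received illegal vector", "Illegal register address",
};

static void print_apic_esr(unsigned long v1)
{
	int i;

	for (i = 0; i < 8; i++)
		if (v1 & (1UL << i))
			printf("%s ", apic_esr_names[i]);
}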
Example #6
int printk( const char *fmt, ... )
{
#ifdef _ENABLE_PRINTK
    Thread_s *psThread = CURRENT_THREAD;
    char String[512];
    char zBuffer[1024];
    int nFlg;


    if ( SysBase == NULL )
    {
        return ( 0 );
    }
    nFlg = cli();

    if ( NULL != psThread )
    {
        Process_s *psProc;

        if ( ( psProc = CURRENT_PROC ) )
        {
            sprintf( String, "%d:%s::%s : %s", get_processor_id(), psProc->tc_zName, psThread->tr_zName, fmt );
        }
        else
        {
            sprintf( String, "%s : %s", psThread->tr_zName, fmt );
        }
    }
    else
    {
        sprintf( String, "%d : %s", get_processor_id(), fmt );
    }
    put_cpu_flags( nFlg );
    sprintf( zBuffer, String, ( ( uint32 * )( &fmt ) )[1], ( ( uint32 * )( &fmt ) )[2], ( ( uint32 * )( &fmt ) )[3], ( ( uint32 * )( &fmt ) )[4], ( ( uint32 * )( &fmt ) )[5], ( ( uint32 * )( &fmt ) )[6], ( ( uint32 * )( &fmt ) )[7], ( ( uint32 * )( &fmt ) )[8], ( ( uint32 * )( &fmt ) )[9], ( ( uint32 * )( &fmt ) )[10], ( ( uint32 * )( &fmt ) )[11], ( ( uint32 * )( &fmt ) )[12] );

    debug_write( zBuffer, strlen( zBuffer ) );
#endif
    return ( 0 );
}
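The argument fetch above indexes memory relative to &fmt, which only works when the variadic arguments are laid out contiguously after fmt on the stack (a cdecl-style convention). A sketch of the portable equivalent built on va_list; printk_portable() is a hypothetical name, while debug_write() comes from the snippet above:

#include <stdarg.h>

/* portable variant: let vsnprintf() walk the variadic arguments instead of
 * assuming they can be reached by indexing memory after &fmt */
int printk_portable( const char *fmt, ... )
{
    char zBuffer[1024];
    va_list hArgs;

    va_start( hArgs, fmt );
    vsnprintf( zBuffer, sizeof( zBuffer ), fmt, hArgs );
    va_end( hArgs );

    debug_write( zBuffer, strlen( zBuffer ) );
    return ( 0 );
}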
Example #7
File: lapic.c Project: mczero80/jx
void enable_local_APIC(void)
{
	unsigned long value;

	// Clear the logical destination ID
	value = apic_read(APIC_LDR);
	value &= 0x00ffffff;	// Dest = 0
	apic_write(APIC_LDR, value);

	/* Set Task Priority to 'accept all' */
	value = apic_read(APIC_TASKPRI);
	value &= 0xffffff00;
	apic_write(APIC_TASKPRI, value);

	// bring the APIC into flat delivery mode.
	value = apic_read(APIC_DFR);
	value |= 0xf0000000;	// bits 28-31 = 1111 -> flat mode
	apic_write(APIC_DFR, value);

	/* now enable APIC */
	value = apic_read(APIC_SPIV);
	value |= (1 << 8);	/* Enable APIC (bit==1) */
//      value &= ~(1<<9);               /* Enable focus processor (bit==0) */
	value |= (1 << 9);	/* Disable focus processor (bit==1) */
	value |= SPURIOUS_APIC_VECTOR;	/* Set spurious IRQ vector to 0xff */
	apic_write(APIC_SPIV, value);

	/* setup LVTERR on integrated APICs */
	if ((lapic_version[get_processor_id()] & 0xF0) != APIC_VER_82489DX) {	/* !82489DX */
		unsigned int value, maxlvt;
		maxlvt = GET_APIC_MAXLVT(apic_read(APIC_VERSION));

		if (maxlvt > 3)
			apic_write(APIC_ESR, 0);
		value = apic_read(APIC_ESR);
		/*smp_debug_printf(SMP_DEBUG_ALL, "ESR value before enabling vector: %08lx\n", value); */

		value = ERROR_APIC_VECTOR;
		apic_write(APIC_LVERR, value);

		/* clear errors after enabling vector */
		if (maxlvt > 3)
			apic_write(APIC_ESR, 0);
		value = apic_read(APIC_ESR);
		/*smp_debug_printf(SMP_DEBUG_ALL, "ESR value after enabling vector: %08lx\n", value); */
	} else
		/*smp_debug_printf(SMP_DEBUG_ALL, "No ESR for 82489DX.\n"); */
		;
}
Example #8
/* needs only one parameter */
static inline jboolean _cpuManager_unblock(ThreadDesc * cpuState)
{
	jboolean ret;
#ifdef KERNEL
#  ifdef SMP
	if (cpuState->curCpuId != get_processor_id())
		return remote_unblock(cpuState);
	else
#  endif
	{
		//jint* base = (u4_t*)&cpuState-2;
		DISABLE_IRQ;
		/*printf("CPU%d: unblock %p\n",get_processor_id(), cpuState); */
		if (cpuState->state != STATE_BLOCKEDUSER) {
#ifdef DEBUG
/*
			printf("CPU%d: CPUManager::unblock: Thread %p is in state %d (%s)\n", get_processor_id(), cpuState,
			       cpuState->state, get_state(cpuState));
*/
#endif
			//printStackTrace("STACK: ", curthr(), base);
			ret = JNI_FALSE;
		} else {
			threadunblock(cpuState);
			ret = JNI_TRUE;
		}
		RESTORE_IRQ;
		return ret;
	}
#else
	DISABLE_IRQ;
	if (cpuState->state == STATE_BLOCKEDUSER) {
		locked_threadunblock(cpuState);
		ret = JNI_TRUE;
	} else {
#ifdef DEBUG
/*
		printf("CPUManager::unblock: Thread %p is in state %d (%s)\n", cpuState, cpuState->state, get_state(cpuState));
*/
#endif
		ret = JNI_FALSE;
	}

	RESTORE_IRQ;
	return ret;

#endif
}
Example #9
int reflect_irq_to_realmode( SysCallRegs_s * psCallRegs, int num )
{
	pgd_t *pPgd = pgd_offset( g_psKernelSeg, 0 );
	pte_t *pPte = pte_offset( pPgd, 0 );
	Virtual86Struct_s sRegs;
	uint32 *pIntVects = NULL;	/* real-mode IVT lives at linear address 0 */
	uint32 *pnStack;
	uint32 nFlags;

	num &= 0xff;


	memset( &sRegs, 0, sizeof( sRegs ) );

	nFlags = cli();
	kassertw( get_processor_id() == g_nBootCPU );


	// We need access to the first page to read the IVT
	PTE_VALUE( *pPte ) |= PTE_PRESENT;
	flush_tlb_page( 0 );


	sRegs.regs.eip = pIntVects[num] & 0xffff;
	sRegs.regs.cs = pIntVects[num] >> 16;


	pnStack = ( uint32 * )( ( v86Stack_seg << 4 ) + v86Stack_off );
	pnStack[0] = 0xffffffff;

	sRegs.regs.esp = v86Stack_off;
	sRegs.regs.ss = v86Stack_seg;
	v86Stack_off -= V86_STACK_SIZE;
	put_cpu_flags( nFlags );

	call_v86( &sRegs );
//  do_call_v86( &sRegs, psCallRegs );

	v86Stack_off += V86_STACK_SIZE;

	return ( 0 );
}
Example #10
/* Shut down the current CPU */
void __cpu_disable(void)
{
    unsigned int cpu = get_processor_id();

    local_irq_disable();
    gic_disable_cpu();
    /* Allow any queued timer interrupts to get serviced */
    local_irq_enable();
    mdelay(1);
    local_irq_disable();

    /* It's now safe to remove this processor from the online map */
    cpumask_clear_cpu(cpu, &cpu_online_map);

    if ( cpu_disable_scheduler(cpu) )
        BUG();
    smp_mb();

    /* Return to caller; eventually the IPI mechanism will unwind and the 
     * scheduler will drop to the idle loop, which will call stop_cpu(). */
}
Example #11
void set_processor_device(struct device *device)
{
	if (device == NULL)
		return;

	int processors_count = get_processors_count();	//returns at least 1;
	struct device *processors[processors_count];

	int i;
	for (i = 0; i < processors_count; i++) {
		processors[i] = new_device("Processor");
		add_info(processors[i], get_processor_family(i));
		add_info(processors[i], get_processor_version(i));
		add_info(processors[i], get_processor_socket_designation(i));
		add_info(processors[i], get_processor_manufacturer(i));
		add_info(processors[i], get_processor_id(i));
		add_info(processors[i], get_processor_voltage(i));
		add_info(processors[i], get_processor_external_clock(i));
		set_child(device, processors[i]);
	}

	set_caches(processors, processors_count);
	set_cores(processors, processors_count);
}
Example #12
/**
 * wrapper for the AS stateless relay script function.
 *
 */
static int w_as_relay_sl(struct sip_msg *msg, char *as_name, char *foo)
{
   as_msg_p my_as_ev=0;
   int ret=0,len;
   char *buffer=0,processor_id;
   struct as_entry *as;

   as=(struct as_entry *)as_name;

   if(as->type==AS_TYPE){
      if((processor_id=get_processor_id(&msg->rcv,&(as->u.as)))<0){
	 LM_ERR("no processor found for packet with dst port:%d\n",msg->rcv.dst_port);
	 goto error;
      }
   }else if (as->type==CLUSTER_TYPE) {
      LM_ERR("clustering not fully implemented\n");
      goto error;
   }else{
      LM_ERR("unknown type of as\n");
      goto error;
   }

   LM_DBG("as found ! (%.*s) processor id = %d\n",as->name.len,as->name.s,processor_id);
   if(!(buffer=create_as_event_sl(msg,processor_id,&len,0))){
      LM_ERR("unable to create event code\n");
      goto error;
   }
   if(!(my_as_ev=shm_malloc(sizeof(as_msg_t))))
      goto error;
   my_as_ev->msg = buffer;
   my_as_ev->as = as;
   my_as_ev->type = SL_REQ_IN;
   my_as_ev->len = len;
   my_as_ev->transaction=seas_f.tmb.t_gett(); /*does not refcount*/
   if(use_stats)
      as_relay_stat(seas_f.tmb.t_gett());
again:
   ret=write(write_pipe,&my_as_ev,sizeof(as_msg_p));
   if(ret==-1){
      if(errno==EINTR)
	 goto again;
      else if(errno==EPIPE){
	 LM_ERR("SEAS Event Dispatcher has closed the pipe. Invalidating it !\n");
	 return -2;
	 /** TODO handle this correctly !!!*/
      }
   }
   //this shouldn't be here, because it will remove the transaction from memory, but
   //if the transaction isn't unref'ed it will be released anyway at t_unref if kr (killreason)==0
   // a wait timer will be put to run with WT_TIME_OUT (5 seconds, within which the AS should respond)
   // this is a bug !!! I think this is why we lose calls at high load !!
   //t_release(msg, 0, 0);
   seas_f.tmb.t_setkr(REQ_FWDED);

   ret=0;
   return ret;
error:
   if(my_as_ev)
      shm_free(my_as_ev);
   if(buffer)
      shm_free(buffer);
   return ret;
}
Example #13
/**
 * wrapper for the AS transaction-stateful relay script function.
 *
 */
static int w_as_relay_t(struct sip_msg *msg, char *entry, char *foo)
{
   as_msg_p my_as_ev;
   int new_tran,ret=0,len;
   char *buffer,processor_id;
   struct cell *mycel;
   struct as_entry *as;
   static str msg100={"Your call is important to us",sizeof("Your call is important to us")-1};
   static str msg500={"Server Internal Error!",sizeof("Server Internal Error!")-1};

   buffer=(char*)0;
   my_as_ev=(as_msg_p)0;

   /**
    * returns <0 on error
    * 1 if (new transaction was created) or if (ACK for locally replied 200 with totag) or if (ACK for code>=300)
    * 0 if it was a retransmission
    */
   new_tran = seas_f.tmb.t_newtran(msg);
   if(new_tran<0) {
      ret = new_tran;
      goto done;
   }
   /*retransmission: script processing should be stopped*/
   if (new_tran==0 && !(msg->REQ_METHOD==METHOD_ACK)){
      ret = 0;
      goto done;
   }
   /*new transaction created, let's pass it to an APP SERVER*/
   if (msg->REQ_METHOD==METHOD_INVITE )
   {
      LM_DBG("new INVITE\n");
      if(!seas_f.tmb.t_reply(msg,100,&msg100)){
	 LM_DBG("t_reply (100)\n");
	 goto error;
      }
   }
   as=(struct as_entry *)entry;
   if(!as->connected){
      LM_ERR("app server %.*s not connected\n",as->name.len,as->name.s);
      goto error;
   }
   if(as->type==AS_TYPE){
      if((processor_id=get_processor_id(&msg->rcv,&(as->u.as)))<0){
	 LM_ERR("no processor found for packet with dst port:%d\n",msg->rcv.dst_port);
	 goto error;
      }
   }else if(as->type==CLUSTER_TYPE){
      LM_ERR("clustering not fully implemented\n");
      return 0;
   }else{
      LM_ERR("unknown type of as (neither cluster nor as)\n");
      return -1;
   }
   LM_DBG("as found ! (%.*s) processor id = %d\n",as->name.len,as->name.s,processor_id);
   if(new_tran==1 && msg->REQ_METHOD==METHOD_ACK){
      /* core should forward statelessly (says t_newtran)*/
      LM_DBG("forwarding statelessly !!!\n");
      if(!(buffer=create_as_event_sl(msg,processor_id,&len,0))){
	 LM_ERR("create_as_event_sl() unable to create event code\n");
	 goto error;
      }
   }else if(!(buffer=create_as_event_t(seas_f.tmb.t_gett(),msg,processor_id,&len,0))){
      LM_ERR("unable to create event code\n");
      goto error;
   }
   if(!(my_as_ev=shm_malloc(sizeof(as_msg_t)))){
      LM_ERR("Out of shared mem!\n");
      goto error;
   }
   my_as_ev->msg = buffer;
   my_as_ev->as = as;
   my_as_ev->type = T_REQ_IN;
   my_as_ev->len = len;
   my_as_ev->transaction=seas_f.tmb.t_gett(); /*does not refcount*/
   if(use_stats && new_tran>0)
      as_relay_stat(seas_f.tmb.t_gett());
again:
   ret=write(write_pipe,&my_as_ev,sizeof(as_msg_p));
   if(ret==-1){
      if(errno==EINTR)
	 goto again;
      else if(errno==EPIPE){
	 LM_ERR("SEAS Event Dispatcher has closed the pipe. Invalidating it !\n");
	 goto error;
	 /** TODO handle this correctly !!!*/
      }
   }
   seas_f.tmb.t_setkr(REQ_FWDED);
   ret=0;
done:
   return ret;
error:
   mycel=seas_f.tmb.t_gett();
   if(mycel && mycel!=T_UNDEFINED){
      if(!seas_f.tmb.t_reply(msg,500,&msg500)){
	 LM_ERR("t_reply (500)\n");
      }
   }
   if(my_as_ev)
      shm_free(my_as_ev);
   if(buffer)
      shm_free(buffer);
   return ret;
}
Example #14
int sys_realint( int num, struct RMREGS *rm )
{
	pgd_t *pPgd = pgd_offset( g_psKernelSeg, 0 );
	pte_t *pPte = pte_offset( pPgd, 0 );
	Thread_s *psThread = CURRENT_THREAD;
	Virtual86Struct_s sRegs;
	uint32 *pIntVects = NULL;	/* real-mode IVT lives at linear address 0 */
	uint32 *pnStack;
	uint32 nFlags;
	
	sRegs.regs.eax = rm->EAX;
	sRegs.regs.orig_eax = rm->EAX;
	sRegs.regs.ebx = rm->EBX;
	sRegs.regs.ecx = rm->ECX;
	sRegs.regs.edx = rm->EDX;
	sRegs.regs.edi = rm->EDI;
	sRegs.regs.esi = rm->ESI;
	sRegs.regs.ebp = rm->EBP;
	sRegs.regs.eflags = rm->flags;
	sRegs.regs.ds = rm->DS;
	sRegs.regs.es = rm->ES;
	sRegs.regs.fs = rm->FS;
	sRegs.regs.gs = rm->GS;


	nFlags = cli();
	// We need access to the first page to read the IVT
	PTE_VALUE( *pPte ) |= PTE_PRESENT;
	flush_tlb_page( 0 );

	sRegs.regs.eip = pIntVects[num] & 0xffff;
	sRegs.regs.cs = pIntVects[num] >> 16;

	//printk( "sys_realint(%d) -> %04x:%04lx\n", num, sRegs.regs.cs, sRegs.regs.eip );

	atomic_inc( &psThread->tr_nInV86 );

	kassertw( atomic_read( &psThread->tr_nInV86 ) == 1 );

	while ( get_processor_id() != g_nBootCPU )
	{
//    printk( "sys_call_v86() wrong CPU (%d), will schedule\n", get_processor_id() );
		Schedule();
	}

	pnStack = ( uint32 * )( ( v86Stack_seg << 4 ) + v86Stack_off );
	pnStack[0] = 0xffffffff;
	sRegs.regs.esp = v86Stack_off;
	sRegs.regs.ss = v86Stack_seg;
	v86Stack_off -= V86_STACK_SIZE;

	put_cpu_flags( nFlags );

	call_v86( &sRegs );

	v86Stack_off += V86_STACK_SIZE;

	atomic_dec( &psThread->tr_nInV86 );


	rm->EAX = sRegs.regs.eax;
	rm->EBX = sRegs.regs.ebx;
	rm->ECX = sRegs.regs.ecx;
	rm->EDX = sRegs.regs.edx;
	rm->EDI = sRegs.regs.edi;
	rm->ESI = sRegs.regs.esi;
	rm->EBP = sRegs.regs.ebp;
	rm->flags = sRegs.regs.eflags;
	rm->DS = sRegs.regs.ds;
	rm->ES = sRegs.regs.es;
	rm->FS = sRegs.regs.fs;
	rm->GS = sRegs.regs.gs;

	return ( 0 );
}
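A hedged usage sketch of sys_realint(), issuing a real-mode BIOS call. The register field names come from the RMREGS accesses visible above; the buffer segment/offset values and the VBE call are assumptions used only for illustration:

/* hypothetical caller: ask the VESA BIOS for controller info via INT 0x10 */
struct RMREGS sRM;

memset( &sRM, 0, sizeof( sRM ) );
sRM.EAX = 0x4f00;        /* VBE function 0x00: get controller information */
sRM.ES  = nBufSeg;       /* real-mode segment of a low-memory buffer (assumed) */
sRM.EDI = nBufOff;       /* offset of that buffer (assumed) */

sys_realint( 0x10, &sRM );

if ( ( sRM.EAX & 0xffff ) != 0x004f )
	printk( "VBE call failed\n" );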