// =============================================================================
// sxr_GetNewTaskId
// -----------------------------------------------------------------------------
/// Peek at the task Id that the next allocation would hand out.
/// The free-list head is sampled under critical section so a concurrent
/// allocation cannot return a torn value.
/// @return Id currently at the head of the free-task list.
// =============================================================================
u8 sxr_GetNewTaskId ()
{
    u32 ScStatus = sxr_EnterSc ();
    u8  FreeId   = sxr_Task.IdxFree;
    sxr_ExitSc (ScStatus);
    return FreeId;
}
// =============================================================================
// CheckSempInEventApi
// -----------------------------------------------------------------------------
/// Tell whether the given semaphore is registered in the event-API semaphore
/// table. The table is only scanned while at least one network event is in
/// process; the whole lookup runs inside a critical section.
/// @param semp Semaphore Id to look up.
/// @return TRUE when the semaphore is present, FALSE otherwise.
// =============================================================================
unsigned char CheckSempInEventApi(unsigned char semp)
{
    unsigned char present = FALSE;
    unsigned int scStatus;
    int idx;

    scStatus = sxr_EnterSc();
    if (eventApiNetInProcess > 0)
    {
        for (idx = 0; idx < MAX_SEM; idx++)
        {
            if (sempArray[idx] == semp)
            {
                present = TRUE;
                break;
            }
        }
    }
    sxr_ExitSc(scStatus);
    return present;
}
// =============================================================================
// CheckAndRemoveSempInEventApi
// -----------------------------------------------------------------------------
/// Look the semaphore up in the event-API table and, when found, clear its
/// slot and decrement the in-process counter. The lookup only runs while at
/// least one network event is in process; all table updates happen inside a
/// critical section.
/// @param semp Semaphore Id to remove.
/// @return TRUE when the semaphore was found and removed, FALSE otherwise.
// =============================================================================
unsigned char CheckAndRemoveSempInEventApi(unsigned char semp)
{
    unsigned char removed = FALSE;
    unsigned int scStatus;
    int idx;

    gcj_TraceOut(0,"CheckAndRemoveSempInEventApi semp =%d",semp);
    scStatus = sxr_EnterSc();
    if (eventApiNetInProcess > 0)
    {
        for (idx = 0; idx < MAX_SEM; idx++)
        {
            if (sempArray[idx] == semp)
            {
                sempArray[idx] = 0;
                eventApiNetInProcess--;
                removed = TRUE;
                break;
            }
        }
    }
    sxr_ExitSc(scStatus);
    return removed;
}
void gcj_net_reset_mailId(unsigned char mailBoxId) { #if 1 _SOCKET_T *found=NULL; unsigned int status = sxr_EnterSc(); if (socketHead.next != NULL && socketHead.next->thisReadSem == mailBoxId) { found = socketHead.next; socketHead.next = found->next; } else { _SOCKET_T *tmp = socketHead.next; _SOCKET_T *prev=NULL; while (tmp) { if (tmp->thisReadSem == mailBoxId) { found = tmp; prev->next = tmp->next; break; } prev = tmp; tmp = tmp->next; } } sxr_ExitSc(status); if (found) { gcj_TraceOut(0,"gcj_net_reset_mailId found=0x%x,mailBoxId=%d,socketId = %d,socketHead.nex=0x%x\n",found,mailBoxId,found->socketId,socketHead.next); CFW_SetTCPIPCallBackEx (NULL,found->socketId); CFW_TcpipSocketClose(found->socketId); if (CheckAndRemoveSempInEventApi(found->thisReadSem)) { //gcjEventApi_NetRet = DEBUG_SIGN; //sxr_ReleaseSemaphore(netResultSem); gcj_TraceOut(0,"CheckAndRemoveSempInEventApi remove sem =%d%x\n",found->thisReadSem); } free(found); //if (socketHead.next==NULL) // gcj_net_reset(); } #else #ifdef USE_EVENT_API if (CheckAndRemoveSempInEventApi(mailBoxId)) { gcj_TraceOut(0,"CheckAndRemoveSempInEventApi remove sem =%d%x\n",mailBoxId); } #endif #endif }
// ============================================================================= // sxr_StartScheduling // ----------------------------------------------------------------------------- /// Activation of the first eligible task. // ============================================================================= void sxr_StartScheduling (void) { u32 Status = sxr_EnterSc (); if (sxr_Task.Active == SXR_NO_TASK) { SXS_RAISE ((_SXR|TABORT|TDB,TSTR("No elligible task\n",0x06c30004))); } sxr_Task.ScheduleDisable = 0; sxr_Task.Ctx [sxr_Task.Active].State = SXR_ACTIVE_TSK; sxr_TaskFirst (&sxr_Task.Ctx [sxr_Task.Active]); sxr_ExitSc (Status); }
// ============================================================================= // sxr_FreeMutex // ----------------------------------------------------------------------------- /// Free a previously allocated mutex. /// @param Id mutex Id. // ============================================================================= void sxr_FreeMutex (u8 Id) { // ensure mutex is allocated SX_MUTX_ASSERT(sxr_Mutex.Queue [Id].Next == SXR_MUTEX_ALLOCATED, "sxr_FreeMutex mutex is not allocated!"); // ensure mutex is released SX_MUTX_ASSERT(sxr_Mutex.Queue [Id].Count == 0, "sxr_FreeMutex while still taken!"); // free the semaphore sxr_FreeSemaphore(sxr_Mutex.Queue [Id].SemaId); u32 Status = sxr_EnterSc (); // queue management sxr_Mutex.Queue [Id].Next = sxr_Mutex.IdxFree; sxr_Mutex.IdxFree = Id; sxr_Mutex.Nb--; sxr_ExitSc (Status); }
void removeSempInEventApi(unsigned semp) { unsigned int status; int i; //gcj_TraceOut(0,"removeSempInEventApi semp =%d",semp); status = sxr_EnterSc(); if (QueueEventInputMbox!=0 && QueueEventInputMbox==semp) QueueEventTaskInEventApi=0; for (i=0;i<MAX_SEM;i++) { if (sempArray[i] == semp) { sempArray[i]=0; break; } } if (i==MAX_SEM) abort(); eventApiNetInProcess--; sxr_ExitSc(status); }
// ============================================================================= // sxr_FreeTask // ----------------------------------------------------------------------------- /// Free a task. /// @param Id Task Id. // ============================================================================= void sxr_FreeTask (u8 Id) { if (!(sxr_Task.Ctx [Id].State & (SXR_ALLOCATED_TSK | SXR_STOPPED_TSK))) { sxs_Raise (_SXR|TABORT|TDB|TNB_ARG(2),TSTR("Bad state for task release 0x%x (%i)\n",0x06c30003), sxr_Task.Ctx [Id].State, Id); } sxr_Task.Ctx [Id].Id = SXR_NO_TASK; sxr_Task.Ctx [Id].State = SXR_FREE_TSK; sxr_HFree (sxr_Task.Ctx [Id].StackTop); sxr_Task.Ctx [Id].StackTop = NIL; u32 Status = sxr_EnterSc (); sxr_Task.Ctx [Id].Free = sxr_Task.IdxFree; sxr_Task.IdxFree = Id; sxr_Task.Load--; sxr_ExitSc (Status); }
// ============================================================================= // sxr_NewTask // ----------------------------------------------------------------------------- /// Allocate a new task. /// @param Desc Pointer onto the task context static descriptor. /// @return Task Id. // ============================================================================= u8 sxr_NewTask (sxr_TaskDesc_t const *Desc) { u32 Status = sxr_EnterSc (); u8 Id = sxr_Task.IdxFree; if (Id == SXR_NO_TASK) { sxs_Raise (_SXR|TABORT|TDB|TNB_ARG(1),TSTR("Too many tasks %i\n",0x06c30001), sxr_Task.Load); } sxr_Task.IdxFree = sxr_Task.Ctx [Id].Free; sxr_Task.Load++; sxr_ExitSc (Status); sxr_Task.Ctx [Id].Id = Id; sxr_Task.Ctx [Id].State = SXR_ALLOCATED_TSK; sxr_Task.Ctx [Id].Next = SXR_NO_TASK; sxr_Task.Ctx [Id].StackTop = (u32 *)_sxr_HMalloc ((u16)(SXR_SET_STACK(Desc -> StackSize) << 2), SXR_TK_STCK_HEAP); if (sxr_Task.Ctx [Id].StackTop == NIL) { SXS_RAISE ((_SXR|TABORT|TDB,TSTR("No more memory\n",0x06c30002))); } sxr_Task.Ctx [Id].Desc = Desc; if ((sxr_Task.Idle == SXR_NO_TASK) || (sxr_Task.Ctx [sxr_Task.Idle].Desc -> Priority < sxr_Task.Ctx [Id].Desc -> Priority)) { sxr_Task.Idle = Id; } return Id; }
// ============================================================================= // sxr_NewMutex // ----------------------------------------------------------------------------- /// provide a free mutex. /// @return mutex Id. // ============================================================================= u8 sxr_NewMutex (void) { u32 Status = sxr_EnterSc (); // queue management u8 Id = sxr_Mutex.IdxFree; if (Id == SXR_NO_MUTEX) { SXS_RAISE ((_SXR|TABORT|TDB,TSTR("No more free mutex\n",0x06bf0011))); } sxr_Mutex.Nb++; sxr_Mutex.IdxFree = sxr_Mutex.Queue [Id].Next; sxr_ExitSc (Status); sxr_Mutex.Queue [Id].Next = SXR_MUTEX_ALLOCATED; // create the underlying semaphore sxr_Mutex.Queue [Id].SemaId = sxr_NewSemaphore (1); return Id; }
// =============================================================================
// sxr_NewCluster
// -----------------------------------------------------------------------------
/// This function is used to create a new cluster. The cluster will be composed
/// of NbUnit of Size bytes.
/// @param Size of units
/// @param NbCluster number of units
// =============================================================================
void sxr_NewCluster (u16 Size, u8 NbCluster)
{
#ifdef __SXR_CLUSTER__
    u32 i;
    sxr_MemHead_t *Header;
    // Per-unit size rounded up to 4-byte alignment: payload + header + one
    // trailing guard byte (the +1).
    u16 AlignSize = ( (Size + 3 + 1 + sizeof (sxr_MemHead_t)) & ~3 );
#else
    // Heap-backed variant: payload + heap header + 8 extra bytes, aligned.
    u16 AlignSize = ( (Size + 3 + 8 + sizeof (sxr_HMemHead_t)) & ~3 );
#endif
    // Reserve a pool slot under critical section.
    u32 Status = sxr_EnterSc ();
    if (sxr_NbCluster >= SXR_NB_MAX_POOL)
    {
        SXS_RAISE ((_SXR|TABORT|TDB,TSTR("Too many clusters.\n",0x06bc0001)));
    }
    u8 clusterIdx = sxr_NbCluster;
    sxr_NbCluster++;
    sxr_ExitSc (Status);
    // FIXME The malloc getting memory for the cluster
    // cast the size on 16 bits (We don't know why).
    // Thus a cluster cannot be bigger than 64kB, taking
    // headers into account.
    if ((AlignSize * NbCluster) >= (1 << 16))
    {
        SXS_RAISE ((_SXR|TABORT|TNB_ARG(2)|TDB,TSTR("Total cluster > 64 k Size %i Nb %i.\n",0x06bc0002), Size, NbCluster));
    }
    // Record the pool geometry and grab the backing memory in one piece.
    sxr_Cluster [clusterIdx].Size = Size;
    sxr_Cluster [clusterIdx].AlignSize = AlignSize;
    sxr_Cluster [clusterIdx].NbCluster = NbCluster;
    sxr_Cluster [clusterIdx].Add = sxr_HMalloc ((u16) ( AlignSize * NbCluster));
    sxr_Cluster [clusterIdx].FirstCluster = 0;
    if ( sxr_Cluster [clusterIdx].Add == NIL )
    {
        SXS_RAISE ((_SXR|TABORT|TDB,TSTR("Out of memory for cluster creation.\n",0x06bc0003)));
    }
    memset ( (u8 *)sxr_Cluster [clusterIdx].Add, 0, ( AlignSize * NbCluster));
#ifdef __SXR_CLUSTER__
    // Thread every unit onto the pool free list (Next = i+1), seal each
    // header with a checksum and place a guard byte at the end of each unit
    // (used to detect overflows).
    for ( i=0; i<NbCluster; i++ )
    {
        Header = (sxr_MemHead_t *)(&((u8 *)sxr_Cluster [clusterIdx].Add) [i*AlignSize]);
        Header -> CallerAdd = SXR_POOL_FREE;
        Header -> Size = 0;
        Header -> Link = 0;
        Header -> Index = i;
        Header -> Next = (i+1);
        Header -> PIdx = clusterIdx;
        sxr_SetCheckSum (&Header -> CheckSum);
        ((u8 *)sxr_Cluster [clusterIdx].Add) [(i+1)*AlignSize - 1] = SXR_MEM_PATTERN;
#ifdef __SXR_DEBUG__
        // Debug builds pre-fill the payload so use of uninitialized pool
        // memory is recognizable.
        memset (((u8 *)Header) + sizeof (sxr_MemHead_t), SXR_MEM_PATTERN, Size);
#endif
    }
    // Terminate the free list at the last unit and refresh its checksum.
    Header -> Next = NIL_IDX;
    sxr_SetCheckSum (&Header -> CheckSum);
#else
    // Heap-backed variant: turn the whole area into a dedicated heap.
    _sxr_NewHeap (sxr_Cluster [clusterIdx].Add, AlignSize * NbCluster, (PAL_NB_RAM_AREA + SXR_NB_HEAP_USER) + clusterIdx);
#endif
}