/**
 * Puts the work item to the global pool or deallocates it. Also deallocates
 * the events associated with the work item. NOTE: this code is designed
 * to be efficient, not compact.
 */
STATIC void WKQ_ReleaseWorkItem(WorkQueueModule * module, WorkItem * w)
{
    Bool locked = False;
    ASSERT(module->initcount > 0);
    ASSERT(!w->submitQ.queue);
    ASSERT(!w->itemsQ.queue);

    /* deallocate waiters */
    while (w->waiters) {
        Waiter * waiter = w->waiters;
        Waiter * next = waiter->next;
        if (module->nwait < module->maxwait) {
            if (locked) {
                WKQ_WaiterToPool(module, waiter);
                waiter = NULL;
            } else {
                locked = MUTEX_Lock(&module->mutex);
                if (module->nwait < module->maxwait) {
                    WKQ_WaiterToPool(module, waiter);
                    waiter = NULL;
                }
            }
        }
        if (waiter) {
            EVENT_Destroy(&waiter->event);
            MEM_Free(waiter);
        }
        w->waiters = next;
    }

    if (QUEUE_Size(&module->itempool) < module->maxitems) {
        if (locked) {
            w->flags = WKI_DETACHED;
            QUEUE_InsertTail(&module->itempool, &w->itemsQ);
        } else {
            locked = MUTEX_Lock(&module->mutex);
            if (QUEUE_Size(&module->itempool) < module->maxitems) {
                w->flags = WKI_DETACHED;
                QUEUE_InsertTail(&module->itempool, &w->itemsQ);
            } else {
                MEM_Free(w);
            }
        }
    } else {
        MEM_Free(w);
    }

    if (locked) {
        MUTEX_Unlock(&module->mutex);
    }
}
/**
 * Stops the worker thread(s).
 */
void WKQ_Stop(WorkQueue * q, Bool canWait)
{
    int nthreads = 0;
    if (MUTEX_Lock(&q->mutex)) {
        if (q->flags & WKQ_ACTIVE) {
            q->flags &= ~WKQ_ACTIVE;
            EVENT_Set(&q->event);
        }
        if (canWait && q->nthreads) {
            ASSERT(!(q->flags & WKQ_DEAD));
            nthreads = q->nthreads;
            q->nthreads = 0;
        }
        MUTEX_Unlock(&q->mutex);
    }
    if (canWait) {
        if (nthreads > 0) {
            int i, flags = WKQ_DEAD;
            for (i=0; i<nthreads; i++) {
                if (THREAD_IsSelf(q->threads[i])) {
                    /* We can handle this, but it's a bit awkward and
                     * usually unnecessary. Let's issue a warning in
                     * the debug build, just in case */
                    TRACE("WARNING! WKQ_Stop called on its own thread\n");
                    /* Setting the WKQ_KILLME flag will cause WKQ_Thread to
                     * kill the associated WorkQueue. Obviously, we have
                     * to set this flag after all other threads have
                     * terminated or they will all try to do the same */
                    flags |= WKQ_KILLME;
                    /* it's a bit weird that a thread is detaching itself
                     * but it seems to work */
                    THREAD_Detach(q->threads[i]);
                } else {
                    THREAD_Join(q->threads[i]);
                }
            }
            ASSERT(!(q->flags & WKQ_DEAD));
            MUTEX_Lock(&q->mutex);
            q->flags |= flags;
            EVENT_Set(&q->stopEvent);
            MUTEX_Unlock(&q->mutex);
        } else {
            WKQ_Wait(q);
        }
    }
}
/**
 * Submits an asynchronous XRPC call. Doesn't wait until the call completes.
 * This resolves the deadlock between Ecmt Gateway and emulator.exe
 */
STATIC void GWENG_SubmitAsyncCall(MidpSession* midp, XRpcString method,
    XRpcContainer* params)
{
    if (params) {
        AsyncXRpcEntry* a = MEM_New(AsyncXRpcEntry);
        if (a) {
            memset(a, 0, sizeof(*a));
            a->method = method;
            a->params = params;
            MUTEX_Lock(&midp->xrpcMutex);
            QUEUE_InsertTail(&midp->xrpcQueue, &a->entry);
            MUTEX_Unlock(&midp->xrpcMutex);
            if (!WKI_Submit(midp->xrpcWorkItem)) {
                /*
                 * The work item is busy processing pending calls. It
                 * could be that the GWENG_AsyncXRpc callback has already
                 * exited the loop but hasn't returned yet. In that case,
                 * this asynchronous call would remain in the queue until
                 * we submit the next one. That's not good. Try to "kick"
                 * it with another work item.
                 */
                WKQ_InvokeLater(midp->xrpcWorkThread, GWENG_AsyncXRpc, midp);
            }
        } else {
            XRPC_FreeContainer(params);
        }
    }
}
/** * "session finished" callback */ STATIC void GWENG_MidpSessionClose(void * context, XRpcSession * session) { Iterator* i; EcmtGateway* gw = context; XRpcSession* xrpcSession = XRPC_GetCurrentSession(gw->xrpc); MUTEX_Lock(&gw->mutex); i = HASH_Values(&gw->ecmtSessionMap); if (i) { while (ITR_HasNext(i)) { MidpSession* midp = ITR_Next(i); if (midp->key.xrpcSession == xrpcSession) { char p[ECMT_MIDP_DEBUG_CLOSE_SIZE]; TRACE1("GW: terminating session 0x%08lx\n",midp->sid); GWENG_MidpFillHeader(p,midp->sid,ECMT_MIDP_DEBUG_OPCODE_CLOSE); GWENG_QueueAdd(gw->handsetQueue,KECMT_MIDP_DEBUG_PLUGIN_UID,p, ECMT_MIDP_DEBUG_CLOSE_SIZE); ITR_Remove(i); GWENG_MidpFree(gw, midp); } } ITR_Delete(i); } MUTEX_Unlock(&gw->mutex); }
bool sThreadData::IsDone()
{
    bool buf;
    MUTEX_Lock();
    buf = done;
    MUTEX_UnLock();
    return buf;
}
/**
 * Returns a wait context from the pool, or allocates a new one.
 */
STATIC Waiter * WKQ_GetWaiter(WorkQueueModule * module)
{
    Waiter * waiter = NULL;
    ASSERT(module->initcount > 0);
    if (module->waitpool) {
        MUTEX_Lock(&module->mutex);
        if (module->waitpool) {
            waiter = module->waitpool;
            module->waitpool = waiter->next;
            waiter->next = NULL;
            module->nwait--;
            ASSERT(module->nwait >= 0);
            ASSERT(module->nwait || !module->waitpool);
        }
        MUTEX_Unlock(&module->mutex);
    }
    if (!waiter) {
        waiter = MEM_New(Waiter);
        if (waiter) {
            if (!EVENT_Init(&waiter->event)) {
                MEM_Free(waiter);
                waiter = NULL;
            }
        }
    }
    if (waiter) {
        EVENT_Reset(&waiter->event);
    }
    return waiter;
}
/** * "open" method handler */ STATIC XRpcElement* GWENG_MidpOpen(void* ctx, const XRpcContainer* param) { /* decode parameters */ const XRpcIntElement* sidParam = XRPC_GetIntElementByName(param, ECMTGW_SEI_SEND_SID_PARAM); if (sidParam) { EcmtGateway* gw = ctx; MidpSession* midp; MUTEX_Lock(&gw->mutex); TRACE2("GW: MidpOpen(%08x.%08x)\n", XRPC_GetCurrentSession(gw->xrpc), XRPC_GetInt(sidParam)); midp = GWENG_MidpCreateSession(gw,XRPC_GetInt(sidParam)); if (midp) { char p[ECMT_MIDP_DEBUG_OPEN_SIZE]; GWENG_MidpFillHeader(p, midp->sid, ECMT_MIDP_DEBUG_OPCODE_OPEN); GWENG_QueueAdd(gw->handsetQueue,KECMT_MIDP_DEBUG_PLUGIN_UID,p, ECMT_MIDP_DEBUG_OPEN_SIZE); } MUTEX_Unlock(&gw->mutex); } else { TRACE("GW: open without cid!\n"); } return NULL; }
std::string sThreadData::Src()
{
    std::string buf;
    MUTEX_Lock();
    buf = script;
    MUTEX_UnLock();
    return buf;
}
/**
 * ECMT message filter
 */
Bool GWENG_MidpFilter(EcmtReader* reader, int uid, const void* data,
    int datalen)
{
    if (uid == KECMT_MIDP_DEBUG_GATEWAY_UID) {
        ASSERT(datalen >= ECMT_MIDP_DEBUG_HEADER_SIZE);
        if (datalen >= ECMT_MIDP_DEBUG_HEADER_SIZE) {
            MidpSession* midp;
            EcmtGateway* gw = reader->gw;
            I32u sid;
            I8u opcode = GWENG_MidpParseHeader(data, &sid);
            MUTEX_Lock(&gw->mutex);
            midp = HASH_Get(&gw->ecmtSessionMap, (HashKey)sid);
            if (midp) {
                GWENG_MidpHandleEcmtPacket(gw, midp, data, datalen);
            } else {
                TRACE1("GW: invalid SID 0x%08x\n", sid);
            }
            MUTEX_Unlock(&gw->mutex);
        }
        return False;
    } else {
        return True;
    }
}
/**
 * Returns the next pseudorandom, uniformly distributed 32-bit integer
 * value from this random number generator's sequence.
 */
I32s RANDOM_NextI32(Random * r)
{
    Bool unlock = BoolValue(r->syn && MUTEX_Lock(&r->mutex));
    I32s next = NEXT_RANDOM(r,32);
    if (unlock) MUTEX_Unlock(&r->mutex);
    return next;
}
/**
 * Actually submits the XRPC calls from the work thread.
 */
STATIC void GWENG_AsyncXRpc(WorkItem * w, void* arg)
{
    QEntry* e;
    MidpSession* session = arg;
    MUTEX_Lock(&session->xrpcMutex);
    while ((e = QUEUE_RemoveHead(&session->xrpcQueue)) != NULL) {
        AsyncXRpcEntry* a = QCAST(e,AsyncXRpcEntry,entry);
        /* don't hold the mutex across the XRPC call */
        MUTEX_Unlock(&session->xrpcMutex);
        XRPC_Notify(XRPC_GetClient(session->key.xrpcSession),
            ECMTGW_SEI_PROTOCOL, a->method,
            XRPC_ContainerToElement(a->params));
        XRPC_FreeContainer(a->params);
        MEM_Free(a);
        MUTEX_Lock(&session->xrpcMutex);
    }
    MUTEX_Unlock(&session->xrpcMutex);
}
/**
 * Acquires the critical section.
 */
Bool CS_Lock(CritSect * cs)
{
    if (MUTEX_IsLocked(&cs->mutex) || MUTEX_Lock(&cs->mutex)) {
        cs->count++;
        return True;
    }
    return False;
}
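/*
 * Hypothetical usage sketch (not part of the original source), assuming
 * MUTEX_IsLocked reports ownership by the calling thread: CS_Lock is then
 * re-entrant, and nested calls from the owning thread only bump cs->count.
 * CS_Unlock is an assumed counterpart (not shown in this section) that
 * would decrement the count and release the mutex when it reaches zero.
 */
STATIC void CS_Example(CritSect * cs)
{
    if (CS_Lock(cs)) {          /* first lock acquires the mutex */
        if (CS_Lock(cs)) {      /* nested lock just increments count */
            /* ... critical work ... */
            CS_Unlock(cs);      /* assumed: count 2 -> 1, mutex kept */
        }
        CS_Unlock(cs);          /* assumed: count 1 -> 0, mutex released */
    }
}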
/** * "connect" method handler */ STATIC XRpcElement* GWENG_MidpConnect(void* ctx, const XRpcContainer* param) { /* decode parameters */ const XRpcIntElement* sidParam = XRPC_GetIntElementByName(param, ECMTGW_SEI_CONNECT_SID_PARAM); const XRpcIntElement* cidParam = XRPC_GetIntElementByName(param, ECMTGW_SEI_CONNECT_CID_PARAM); const XRpcShortElement* portParam = XRPC_GetShortElementByName(param, ECMTGW_SEI_CONNECT_PORT_PARAM); if (sidParam && cidParam && portParam) { MidpSession* midp; EcmtGateway* gw = ctx; I32u cid = XRPC_GetInt(cidParam); Port port = XRPC_GetShort(portParam); MidpSessionKey key; key.xrpcSid = XRPC_GetInt(sidParam); key.xrpcSession = XRPC_GetCurrentSession(gw->xrpc); TRACE4("GW: MidpConnect(%08x.%08x.%u, port %hu)\n", key.xrpcSession, key.xrpcSid, cid, port); MUTEX_Lock(&gw->mutex); midp = HASH_Get(&gw->midpSessionMap,&key); if (midp) { MidpConnection* conn = MEM_New(MidpConnection); if (conn) { memset(conn, 0, sizeof(*conn)); conn->connId = cid; if (HASH_Put(&midp->connMap, (HashKey)cid, conn)) { char pkt[ECMT_MIDP_DEBUG_CONNECT_SIZE]; GWENG_MidpFillHeader(pkt,midp->sid,ECMT_MIDP_DEBUG_OPCODE_CONNECT); *((I32u*)(pkt+ECMT_MIDP_DEBUG_CONNECT_CID_OFFSET)) = htonl(cid); *((I16u*)(pkt+ECMT_MIDP_DEBUG_CONNECT_PORT_OFFSET)) = htons(port); GWENG_QueueAdd(gw->handsetQueue, KECMT_MIDP_DEBUG_PLUGIN_UID, pkt, ECMT_MIDP_DEBUG_CONNECT_SIZE); MUTEX_Unlock(&gw->mutex); return NULL; } MEM_Free(conn); } GWENG_MidpResetConn(gw, midp, cid, False, True); } else { TRACE3("GW: unexpected MIDP connect (%08x.%08x.%u)\n", key.xrpcSession, key.xrpcSid, cid); } MUTEX_Unlock(&gw->mutex); } return NULL; }
EXPORT_FUNC
TT_Error TT_Use_Stream( TT_Stream org_stream, TT_Stream* stream )
{
    /* the mutex intentionally stays locked until the matching
     * TT_Done_Stream releases it */
    MUTEX_Lock( files.lock );                 /* lock file mutex    */
    *stream = org_stream;
    files.stream = STREAM2REC( org_stream );  /* set current stream */
    return TT_Err_Ok;
}
/**
 * Returns the seed that can be used to restart the current pseudorandom
 * number sequence from its current point.
 */
Seed RANDOM_GetState(const Random * r)
{
    Seed state = (Seed)0;
    if (r->rng.rng_state) {
        Bool unlock = BoolValue(r->syn && MUTEX_Lock((Mutex*)(&r->mutex)));
        state = (*(r->rng.rng_state))(r->ctx);
        if (unlock) MUTEX_Unlock((Mutex*)(&r->mutex));
    }
    return state;
}
/** * "send" method handler */ STATIC XRpcElement* GWENG_MidpSend(void* ctx, const XRpcContainer* param) { /* decode parameters */ const XRpcIntElement* sidParam = XRPC_GetIntElementByName(param, ECMTGW_SEI_SEND_SID_PARAM); const XRpcIntElement* cidParam = XRPC_GetIntElementByName(param, ECMTGW_SEI_SEND_CID_PARAM); const XRpcBinaryElement* dataParam = XRPC_GetBinaryElementByName(param, ECMTGW_SEI_SEND_DATA_PARAM); if (sidParam && cidParam && dataParam) { MidpSession* midp; EcmtGateway* gw = ctx; size_t size = XRPC_GetBinaryDataSize(dataParam); I32u cid = XRPC_GetInt(cidParam); const XRpcByte* data = XRPC_GetBinaryData(dataParam); MidpSessionKey key; key.xrpcSid = XRPC_GetInt(sidParam); key.xrpcSession = XRPC_GetCurrentSession(gw->xrpc); TRACE4("GW: MidpSend(%08x.%08x.%u, %d bytes)\n", key.xrpcSession, key.xrpcSid, cid, size); DEBUG_ONLY(PRINT_Dump(DEBUG_Trace,data,size,0)); MUTEX_Lock(&gw->mutex); midp = HASH_Get(&gw->midpSessionMap,&key); if (midp) { MidpConnection* conn = HASH_Get(&midp->connMap,(HashKey)cid); if (conn) { char h[ECMT_MIDP_DEBUG_SEND_DATA_OFFSET]; I32u seq = conn->outCount++; GWENG_MidpFillHeader(h,midp->sid,ECMT_MIDP_DEBUG_OPCODE_SEND); *((I32u*)(h+ECMT_MIDP_DEBUG_SEND_CID_OFFSET)) = htonl(cid); *((I32u*)(h+ECMT_MIDP_DEBUG_SEND_SEQ_OFFSET)) = htonl(seq); GWENG_QueueAdd2(gw->handsetQueue, KECMT_MIDP_DEBUG_PLUGIN_UID, h, ECMT_MIDP_DEBUG_SEND_DATA_OFFSET, data, size); } else { TRACE1("GW: invalid conn id %u\n",cid); GWENG_MidpResetConn(gw, midp, cid, False, True); } } else { TRACE2("GW: unexpected MIDP send (%08x.%08x)\n", key.xrpcSession, key.xrpcSid); } MUTEX_Unlock(&gw->mutex); } return NULL; }
/**
 * Returns a pseudorandom, uniformly distributed 32-bit integer value
 * between 0 (inclusive) and the specified value (exclusive), drawn
 * from this random number generator's sequence.
 */
I32s RANDOM_NextInt32(Random * r, int n)
{
    Bool unlock = BoolValue(r->syn && MUTEX_Lock(&r->mutex));
    I32s bits, val;
    ASSERT(n > 0);
    if ((n & -n) == n) {  /* i.e., n is a power of 2 */
        I64s next = (I64s)NEXT_RANDOM(r,31);
        if (unlock) MUTEX_Unlock(&r->mutex);
        return (I32s)((n * next) >> 31);
    }
    /* reject values from the incomplete top range to keep the
     * distribution uniform (the standard java.util.Random nextInt
     * algorithm that this generator mimics) */
    do {
        bits = NEXT_RANDOM(r,31);
        val = bits % n;
    } while (bits - val + (n-1) < 0);
    if (unlock) MUTEX_Unlock(&r->mutex);
    return val;
}
/**
 * Sets the seed of this random number generator.
 */
Seed RANDOM_SetSeed(Random * r, Seed s)
{
    Bool unlock = BoolValue(r->syn && MUTEX_Lock(&r->mutex));
    r->seed = s;
    (*(r->rng.rng_seed))(r->ctx, s);
#ifndef __KERNEL__
    r->haveNextGaussian = False;
#endif /* __KERNEL__ */
    if (unlock) MUTEX_Unlock(&r->mutex);
    return s;
}
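/*
 * Hypothetical usage sketch (RANDOM_Example is not part of the original
 * source; it only combines the functions above). Per the RANDOM_GetState
 * documentation, the returned seed restarts the sequence from its current
 * point when fed back through RANDOM_SetSeed.
 */
STATIC void RANDOM_Example(Random * r)
{
    Seed mark;
    I32s first, again;
    RANDOM_SetSeed(r, (Seed)42);      /* deterministic starting point */
    mark = RANDOM_GetState(r);        /* remember this point */
    first = RANDOM_NextInt32(r, 10);  /* uniform in [0,10) */
    RANDOM_SetSeed(r, mark);          /* rewind to the saved point */
    again = RANDOM_NextInt32(r, 10);
    ASSERT(first == again);           /* the sequence replays */
}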
/**
 * Switches the work item into the "detached" state. A detached work
 * item is not waitable; the caller no longer owns the work item.
 */
void WKI_Detach(WorkItem * w)
{
    WorkQueue * q = WKI_GetQueue(w);
    ASSERT(!(w->flags & WKI_DETACHED));
    MUTEX_Lock(&q->mutex);
    w->flags |= WKI_DETACHED;
    if (!w->submitQ.queue && !(w->flags & WKI_CALL)) {
        QUEUE_RemoveEntry(&w->itemsQ);
        WKQ_ReleaseWorkItem(&WKQ, w);
    }
    MUTEX_Unlock(&q->mutex);
}
/**
 * Returns a work item from the pool, allocating a new one if needed.
 */
STATIC WorkItem * WKQ_GetWorkItem(WorkQueueModule * mod, WorkQueue * q,
    WorkProc cb, WorkProc2 cb2, void * p1, void * p2)
{
    WorkItem * w = NULL;
    ASSERT(mod->initcount > 0);
    /* can't use QUEUE_IsEmpty without synchronization */
    if (mod->itempool.size) {
        MUTEX_Lock(&mod->mutex);
        if (!QUEUE_IsEmpty(&mod->itempool)) {
            w = QCAST(QUEUE_RemoveHead(&mod->itempool),WorkItem,itemsQ);
            w->flags = 0;
        }
        MUTEX_Unlock(&mod->mutex);
    }
    if (!w) {
        w = MEM_New(WorkItem);
        if (w) {
            memset(w, 0, sizeof(*w));
        }
    }
    if (w) {
        if (MUTEX_Lock(&q->mutex)) {
            w->proc = cb;
            w->proc2 = cb2;
            w->param = p1;
            w->param2 = p2;
            QUEUE_InsertTail(&q->items, &w->itemsQ);
            MUTEX_Unlock(&q->mutex);
            return w;
        }
        MEM_Free(w);
    }
    return NULL;
}
EXPORT_FUNC
TT_Error TT_Use_Stream( TT_Stream org_stream, TT_Stream* stream )
{
    MUTEX_Lock( files.lock );                 /* lock file mutex    */
    *stream = org_stream;                     /* copy the stream    */
    files.stream = STREAM2REC( org_stream );  /* set current stream */
    Stream_Activate( files.stream );
    return TT_Err_Ok;
}
/**
 * Converts an exclusive lock to a non-exclusive one without releasing it.
 * The caller must own the lock exclusively.
 */
Bool RWLOCK_DropWrite(RWLock * lock)
{
    Bool success = False;
    RWEntry * entry;
    MUTEX_Lock(&lock->mutex);
    entry = RWLOCK_FindEntry(lock);

    /* current thread must have the lock */
    ASSERT(entry);
    if (entry) {
        /* and it must be the write lock */
        ASSERT(entry->write > 0);
        if (entry->write > 0) {
            QEntry * e;
            RWLockWaiter * shareWaiter = NULL;
            RWLockWaiter * exclusiveWaiter = NULL;

            /* convert write lock to read lock */
            entry->read += entry->write;
            entry->write = 0;

            /* lock is no longer owned exclusively */
            ASSERT(lock->flags & RWLOCK_FLAG_EXCLUSIVE_LOCK);
            lock->flags &= ~RWLOCK_FLAG_EXCLUSIVE_LOCK;

            /*
             * wake up the shared waiters, but only if no exclusive
             * waiter is ahead of them in the line
             */
            e = QUEUE_First(&lock->shareWaiters);
            if (e) shareWaiter = QCAST(e,RWLockWaiter,entry);
            e = QUEUE_First(&lock->exclusiveWaiters);
            if (e) exclusiveWaiter = QCAST(e,RWLockWaiter,entry);
            if (shareWaiter && (!exclusiveWaiter ||
                shareWaiter->index < exclusiveWaiter->index)) {
                EVENT_Set(shareWaiter->event);
            }

            /* success */
            success = True;
        }
    }
    MUTEX_Unlock(&lock->mutex);
    return success;
}
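/*
 * Hypothetical usage sketch (not from the original source): demonstrates
 * downgrading a write lock to a read lock without ever leaving the lock
 * unowned, so readers blocked behind the writer can proceed while this
 * thread keeps read access. RWLOCK_WriteLock and RWLOCK_ReadUnlock are
 * assumed names for the acquire/release counterparts, which are not part
 * of this section.
 */
STATIC void RWLOCK_DowngradeExample(RWLock * lock)
{
    if (RWLOCK_WriteLock(lock)) {    /* assumed acquire call */
        /* ... mutate the shared data exclusively ... */
        if (RWLOCK_DropWrite(lock)) {
            /* now holding a read lock; other readers may run too */
            /* ... read the data ... */
        }
        RWLOCK_ReadUnlock(lock);     /* assumed release call */
    }
}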
/**
 * Finds a port by name. Optionally adds a reference to the returned port.
 * The search is case insensitive.
 */
EcmtGatewayPort* GWENG_PortByName(EcmtGatewayTransport* t, Str name, Bool ref)
{
    QEntry* e;
    EcmtGatewayPort* port = NULL;
    MUTEX_Lock(&t->mutex);
    for (e = QUEUE_First(&t->ports); e; e = QUEUE_Next(e)) {
        if (!StrCaseCmp(name, QCAST(e,EcmtGatewayPort,entry)->name)) {
            port = QCAST(e,EcmtGatewayPort,entry);
            if (ref) GWENG_PortAddRef(port);
            break;
        }
    }
    MUTEX_Unlock(&t->mutex);
    return port;
}
/**
 * Cancels the work item. Returns True if the work item has been removed
 * from the queue before being called, False in any other case. Unblocks
 * the waiters.
 */
Bool WKI_Cancel(WorkItem * w)
{
    Bool canceled = False;
    WorkQueue * q = WKI_GetQueue(w);
    ASSERT(!(w->flags & WKI_DETACHED));
    if (MUTEX_Lock(&q->mutex)) {
        if (QUEUE_RemoveEntry(&w->submitQ)) {
            canceled = True;
            w->flags |= WKI_CANCELED;
            WKI_Signal(w);
        }
        MUTEX_Unlock(&q->mutex);
    }
    return canceled;
}
/**
 * Puts the waiter (and its event) back to the pool or deallocates it.
 */
STATIC void WKQ_ReleaseWaiter(WorkQueueModule * module, Waiter * waiter)
{
    ASSERT(module->initcount > 0);
    if (module->nwait < module->maxwait) {
        MUTEX_Lock(&module->mutex);
        if (module->nwait < module->maxwait) {
            WKQ_WaiterToPool(module, waiter);
            waiter = NULL;
        }
        MUTEX_Unlock(&module->mutex);
    }
    if (waiter) {
        EVENT_Destroy(&waiter->event);
        MEM_Free(waiter);
    }
}
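/*
 * The pool helpers above (WKQ_GetWaiter, WKQ_ReleaseWaiter, and the work
 * item pool functions) all share one idiom: a cheap unsynchronized test
 * skips the mutex in the common case, then the authoritative re-test runs
 * under the lock, because the first test may have raced with another
 * thread. A minimal generic sketch of the pattern follows; Pool and Item
 * are hypothetical illustration types, not from this code base.
 */
typedef struct _Item { struct _Item * next; } Item;
typedef struct _Pool { Mutex mutex; Item * first; int count, max; } Pool;

STATIC void POOL_PutExample(Pool * pool, Item * item)
{
    Bool pooled = False;
    if (pool->count < pool->max) {      /* optimistic, unsynchronized */
        MUTEX_Lock(&pool->mutex);
        if (pool->count < pool->max) {  /* authoritative, under the lock */
            item->next = pool->first;
            pool->first = item;
            pool->count++;
            pooled = True;
        }
        MUTEX_Unlock(&pool->mutex);
    }
    if (!pooled) {
        MEM_Free(item);                 /* pool is full, just free it */
    }
}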
/**
 * Attaches a waiter to the work item if the work item is waitable.
 * The caller is responsible for deallocating this waiter.
 */
STATIC Waiter * WKI_AttachWaiter(WorkItem * w)
{
    Waiter * waiter = NULL;
    WorkQueue * q = WKI_GetQueue(w);
    ASSERT(!(w->flags & WKI_DETACHED));

    /* quick check without synchronization */
    if (w->submitQ.queue || (w->flags & WKI_CALL)) {

        /* We avoid calling WKQ_GetWaiter under the mutex because in the
         * NT kernel mode environment this results in KeInitializeEvent
         * being called at IRQL DISPATCH_LEVEL. According to the Windows
         * DDK documentation, the callers of KeInitializeEvent must be
         * running at IRQL PASSIVE_LEVEL. I personally think it's a
         * mistake in the documentation, because KeInitializeEvent does
         * not do anything that would require the current thread to wait
         * or to access pageable memory, unless the event is allocated
         * from paged pool or the KeInitializeEvent code itself resides
         * in a pageable code segment (which does not seem to be the
         * case). Anyway, I decided to play it safe and follow the
         * documentation. */
        waiter = WKQ_GetWaiter(&WKQ);
        if (waiter) {
            Bool waitable = False;
            MUTEX_Lock(&q->mutex);
            /* the same check, this time under synchronization */
            if (w->submitQ.queue || (w->flags & WKI_CALL)) {
                waiter->next = w->waiters;
                w->waiters = waiter;
                waitable = True;
            }
            MUTEX_Unlock(&q->mutex);
            if (!waitable) {
                /* Something must have changed while we were allocating
                 * the waiter. Return it to the pool. In real life, this
                 * almost never happens. */
                WKQ_ReleaseWaiter(&WKQ, waiter);
                waiter = NULL;
            }
        }
    }
    return waiter;
}
/**
 * Cancels all pending work items in the work queue.
 */
void WKQ_Cancel(WorkQueue * q)
{
    if (MUTEX_Lock(&q->mutex)) {
        QEntry * e;
        while ((e = QUEUE_RemoveHead(&q->submit)) != NULL) {
            WorkItem * w = QCAST(e,WorkItem,submitQ);
            w->flags |= WKI_CANCELED;
            if (w->flags & WKI_DETACHED) {
                ASSERT(!w->waiters);
                QUEUE_RemoveEntry(&w->itemsQ);
                WKQ_ReleaseWorkItem(&WKQ, w);
            } else {
                WKI_Signal(w);
            }
        }
        MUTEX_Unlock(&q->mutex);
    }
}
/**
 * Sets the idle timeout function for the work queue. If the cb parameter
 * is NULL, idle timeouts are disabled and the other parameters (ms, param)
 * are ignored. The timeout must be positive.
 *
 * NOTE: currently, the old callback may still be invoked (but no more than
 * once) after this function has returned.
 */
void WKQ_SetIdle(WorkQueue * q, long ms, IdleProc cb, void * param)
{
    if (MUTEX_Lock(&q->mutex)) {
        if (cb) {
            ASSERT(ms > 0);
            q->idleTimeout = MAX(ms,1);
            q->idleProc = cb;
            q->idleParam = param;
        } else {
            q->idleTimeout = 0;
            q->idleProc = NULL;
            q->idleParam = NULL;
        }
        /* notify the worker thread */
        EVENT_Set(&q->event);
        MUTEX_Unlock(&q->mutex);
    }
}
/**
 * Submits a work item to the specified work queue. Re-submitting the same
 * work item before it has been executed just moves it to the tail of the
 * work queue. It does NOT schedule it to run twice.
 */
Bool WKI_Submit(WorkItem * w)
{
    WorkQueue * q = WKI_GetQueue(w);
    ASSERT(q);
    ASSERT(!(w->flags & WKI_DETACHED));
    if (q) {
        if (MUTEX_Lock(&q->mutex)) {
            if (q->flags & WKQ_ACTIVE) {
                w->flags &= ~(WKI_DONE | WKI_CANCELED);
                QUEUE_RemoveEntry(&w->submitQ);
                QUEUE_InsertTail(&q->submit, &w->submitQ);
                EVENT_Set(&q->event);
                MUTEX_Unlock(&q->mutex);
                return True;
            }
            MUTEX_Unlock(&q->mutex);
            /* fall through and return False */
        }
    }
    return False;
}
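/*
 * Hypothetical usage sketch (not from the original source), combining only
 * functions shown in this section to illustrate the documented semantics:
 * a second WKI_Submit before the item runs only moves it to the tail of
 * the queue, and a successful WKI_Cancel means the callback never fired.
 */
STATIC void WKI_SubmitExample(WorkItem * w)
{
    if (WKI_Submit(w)) {
        WKI_Submit(w);          /* no-op apart from moving w to the tail */
        if (WKI_Cancel(w)) {
            /* removed from the queue before the callback ran;
             * any waiters have been unblocked */
        }
    }
}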
/** * "close" method handler */ STATIC XRpcElement* GWENG_MidpClose(void* ctx, const XRpcContainer* param) { /* decode parameters */ const XRpcIntElement* sidParam = XRPC_GetIntElementByName(param, ECMTGW_SEI_SEND_SID_PARAM); if (sidParam) { EcmtGateway* gw = ctx; MidpSession* midp; MidpSessionKey key; key.xrpcSid = XRPC_GetInt(sidParam); key.xrpcSession = XRPC_GetCurrentSession(gw->xrpc); MUTEX_Lock(&gw->mutex); TRACE2("GW: MidpClose(%08x.%08x)\n", key.xrpcSession, key.xrpcSid); midp = HASH_Get(&gw->midpSessionMap,&key); if (midp) { char p[ECMT_MIDP_DEBUG_CLOSE_SIZE]; GWENG_MidpFillHeader(p, midp->sid, ECMT_MIDP_DEBUG_OPCODE_CLOSE); GWENG_QueueAdd(gw->handsetQueue,KECMT_MIDP_DEBUG_PLUGIN_UID,p, ECMT_MIDP_DEBUG_CLOSE_SIZE); TRACE1("GW: closing session 0x%08lx (via XRPC)\n",midp->sid); GWENG_MidpFree(gw, midp); } else { TRACE("GW: XRPC close for non-existent session\n"); } MUTEX_Unlock(&gw->mutex); } else { TRACE("GW: close without cid!\n"); } return NULL; }