pid_t uwsgi_lock_fast_check(struct uwsgi_lock_item *uli) { if (OSSpinLockTry((OSSpinLock *) uli->lock_ptr)) { OSSpinLockUnlock((OSSpinLock *) uli->lock_ptr); return 0; } return uli->pid; }
/*
    Try to attain a lock. Do not block! Returns true if the lock was attained.
 */
PUBLIC bool mprTrySpinLock(MprSpin *lock)
{
    int     rc;

    if (lock == 0) return 0;

#if USE_MPR_LOCK
    /*
        BUG FIX: the result of mprTryLock() was previously discarded, leaving
        "rc" uninitialized when it was read below (undefined behavior).
        mprTryLock returns true when the lock was attained, so failure maps to
        a non-zero rc like the other branches.
     */
    rc = !mprTryLock(&lock->cs);
#elif MACOSX
    rc = !OSSpinLockTry(&lock->cs);
#elif ME_UNIX_LIKE && ME_COMPILER_HAS_SPINLOCK
    rc = pthread_spin_trylock(&lock->cs) != 0;
#elif ME_UNIX_LIKE
    rc = pthread_mutex_trylock(&lock->cs) != 0;
#elif ME_WIN_LIKE
    rc = (lock->freed) ? 0 : (TryEnterCriticalSection(&lock->cs) == 0);
#elif VXWORKS
    rc = semTake(lock->cs, NO_WAIT) != OK;
#endif
#if ME_DEBUG && COSTLY
    if (rc == 0) {
        /* We now own the lock: record ownership for debug diagnostics. */
        assert(lock->owner != mprGetCurrentOsThread());
        lock->owner = mprGetCurrentOsThread();
    }
#endif
    /* rc == 0 means the lock was attained. */
    return (rc) ? 0 : 1;
}
// Attempt a non-blocking acquisition of the native spinlock.
// BUG FIX: OSSpinLockTry returns true when the lock WAS acquired, but the
// previous code returned cLockBusy on success and cLocked on failure —
// exactly inverted. Returns cLocked when this call obtained the lock,
// cLockBusy when another owner holds it.
Code try_lock() {
    if (OSSpinLockTry(&native_)) {
        return cLocked;
    }
    return cLockBusy;
}
/* Busy-wait until the global spinlock is obtained, then close the
 * io_connect_t handle passed in through arg. */
void go(void* arg) {
    while (!OSSpinLockTry(&lock)) {
        /* spin until the non-blocking try succeeds */
    }
    close_it(*(io_connect_t*)arg);
}
/* Worker loop: forever, spin until the global lock is acquired, then
 * issue the IOKit call. The lock is presumably released elsewhere —
 * TODO confirm against the owning thread. */
void* thread_func(void* arg){
    for (;;) {
        while (!OSSpinLockTry(&lock)) {
            /* spin */
        }
        make_iokit_call();
    }
    return NULL;
}
void* thread_func(void* arg){ int got_it = 0; while (!got_it) { got_it = OSSpinLockTry(&lock); } // usleep(1); make_iokit_call(); return NULL; }
void go(void* arg){ int got_it = 0; while (!got_it) { got_it = OSSpinLockTry(&lock); } //usleep(1); unmap_user_memory(gl_context, handle); printf("called unmap from thread\n"); }
/* Non-blocking mutex acquisition with POSIX-style result codes.
 * Returns 0 when the lock was acquired, EBUSY when it is already held. */
int tci_mutex_trylock(tci_mutex_t* mutex) {
    return OSSpinLockTry(mutex) ? 0 : EBUSY;
}
void DefSpinLock::Leave() { #if defined(_WIN32) || defined(_WIN64) // windows #else // if lock possible.. exactly this state is not locked. if (OSSpinLockTry(&m_key)) return; OSSpinLockUnlock(&m_key); #endif }
/* Drain every peer's shared-memory receive buffer that has pending messages.
 * The buffer's lock is taken with a NON-blocking attempt (spinlock, semaphore
 * or fence handshake, chosen at compile time); a buffer whose lock cannot be
 * taken is simply skipped until the next poll.
 * NOTE: braces opened inside one #if branch are closed after the #endif, so
 * all three lock variants share the same else/close structure. */
inline void emptyAllRecvBufs(){
    int i;
    for(i=0;i<pxshmContext->nodesize;i++){
        if(i != pxshmContext->noderank){
            sharedBufData *recvBuf = &(pxshmContext->recvBufs[i]);
            if(recvBuf->header->count > 0){
#if PXSHM_STATS
                pxshmContext->lockRecvCount++;
#endif
#if PXSHM_OSSPINLOCK
                if(! OSSpinLockTry(&recvBuf->header->lock)){
#elif PXSHM_LOCK
                if(sem_trywait(recvBuf->mutex) < 0){
#elif PXSHM_FENCE
                /* Fence variant: announce the receiver, yield the turn to the
                 * sender, then back off if the sender is active. */
                recvBuf->header->flagReceiver = 1;
                recvBuf->header->turn = SENDER;
                CmiMemoryReadFence(0,0);
                CmiMemoryWriteFence(0,0);
                //if((recvBuf->header->flagSender && recvBuf->header->turn == SENDER)){
                if((recvBuf->header->flagSender)){
                    recvBuf->header->flagReceiver = 0;
#endif
                }else{
                    /* Lock (or fence handshake) obtained: consume the buffer,
                     * then release. */
                    MACHSTATE1(3,"emptyRecvBuf to be called for rank %d",i);
                    emptyRecvBuf(recvBuf);
#if PXSHM_OSSPINLOCK
                    OSSpinLockUnlock(&recvBuf->header->lock);
#elif PXSHM_LOCK
                    sem_post(recvBuf->mutex);
#elif PXSHM_FENCE
                    CmiMemoryReadFence(0,0);
                    CmiMemoryWriteFence(0,0);
                    recvBuf->header->flagReceiver = 0;
#endif
                }
            }
        }
    }
};

/* Flush every send queue with pending entries into its peer's shared send
 * buffer, again taking each buffer's lock non-blockingly. With SENDQ_LIST the
 * queues form an intrusive singly-linked list (indices, -1 = end, -2 = not on
 * the list) walked from sendQ_head_index; drained queues are unlinked.
 * Without SENDQ_LIST every peer rank is scanned. */
inline void flushAllSendQs(){
    int i;
#if SENDQ_LIST
    int index_prev = -1;
    i = sendQ_head_index;
    while (i!= -1) {
        PxshmSendQ *sendQ = pxshmContext->sendQs[i];
        CmiAssert(i != pxshmContext->noderank);
        if(sendQ->numEntries > 0){
#else
    for(i=0;i<pxshmContext->nodesize;i++) {
        if (i == pxshmContext->noderank) continue;
        PxshmSendQ *sendQ = pxshmContext->sendQs[i];
        if(sendQ->numEntries > 0) {
#endif
#if PXSHM_OSSPINLOCK
            if(OSSpinLockTry(&pxshmContext->sendBufs[i].header->lock)){
#elif PXSHM_LOCK
            if(sem_trywait(pxshmContext->sendBufs[i].mutex) >= 0){
#elif PXSHM_FENCE
            /* Fence variant: announce the sender, yield the turn to the
             * receiver, proceed only if the receiver is not mid-drain. */
            pxshmContext->sendBufs[i].header->flagSender = 1;
            pxshmContext->sendBufs[i].header->turn = RECEIVER;
            CmiMemoryReadFence(0,0);
            CmiMemoryWriteFence(0,0);
            if(!(pxshmContext->sendBufs[i].header->flagReceiver &&
                 pxshmContext->sendBufs[i].header->turn == RECEIVER)){
#endif
                MACHSTATE1(3,"flushSendQ %d",i);
                flushSendQ(sendQ);
#if PXSHM_OSSPINLOCK
                OSSpinLockUnlock(&pxshmContext->sendBufs[i].header->lock);
#elif PXSHM_LOCK
                sem_post(pxshmContext->sendBufs[i].mutex);
#elif PXSHM_FENCE
                CmiMemoryReadFence(0,0);
                CmiMemoryWriteFence(0,0);
                pxshmContext->sendBufs[i].header->flagSender = 0;
#endif
            }else{
#if PXSHM_FENCE
                /* Lost the handshake: withdraw the sender announcement. */
                pxshmContext->sendBufs[i].header->flagSender = 0;
#endif
            }
        }
#if SENDQ_LIST
        /* Unlink fully-drained queues from the pending list; -2 marks a
         * queue as not enqueued. */
        if (sendQ->numEntries == 0) {
            if (index_prev != -1)
                pxshmContext->sendQs[index_prev]->next = sendQ->next;
            else
                sendQ_head_index = sendQ->next;
            i = sendQ->next;
            sendQ->next = -2;
        } else {
            index_prev = i;
            i = sendQ->next;
        }
#endif
    }
};

/* Copy every message out of one shared receive buffer, hand each copy to the
 * upper layer, and reset the buffer's count/bytes fields. Caller must hold
 * the buffer's lock. */
void emptyRecvBuf(sharedBufData *recvBuf){
    int numMessages = recvBuf->header->count;
    int i=0;
    char *ptr=recvBuf->data;
    for(i=0;i<numMessages;i++){
        int size;
        /* NOTE(review): these locals are unused here and the inner 'i'
         * shadows the loop index — left untouched. */
        int rank, srcpe, seqno, magic, i;
        unsigned int broot;
        char *msg = ptr;
        char *newMsg;
        size = CMI_MSG_SIZE(msg);
        /* Copy out of shared memory before handing the message upward. */
        newMsg = (char *)CmiAlloc(size);
        memcpy(newMsg,msg,size);
        handleOneRecvedMsg(size, newMsg);
        ptr += size;
        /* NOTE(review): format string has four conversions but three
         * arguments — debug-only macro, left as-is. */
        MACHSTATE3(3,"message of size %d recvd ends at ptr-data %d total bytes %d bytes %d",size,ptr-recvBuf->data,recvBuf->header->bytes);
    }
#if 1
    /* Consistency check: the messages walked must cover exactly the byte
     * count the sender recorded. */
    if(ptr - recvBuf->data != recvBuf->header->bytes){
        CmiPrintf("[%d] ptr - recvBuf->data %d recvBuf->header->bytes %d numMessages %d \n",_Cmi_mynode, ptr - recvBuf->data, recvBuf->header->bytes,numMessages);
    }
#endif
    CmiAssert(ptr - recvBuf->data == recvBuf->header->bytes);
    recvBuf->header->count=0;
    recvBuf->header->bytes=0;
}

/**************************
 *sendQ helper functions *
 ****************/

/* Initialize a send queue as an empty circular buffer of 'size' slots for
 * peer 'rank'. */
void initSendQ(PxshmSendQ *q,int size, int rank){
    q->data = (OutgoingMsgRec *)calloc(size, sizeof(OutgoingMsgRec));
    q->size = size;
    q->numEntries = 0;
    q->begin = 0;
    q->end = 0;
    q->rank = rank;
#if SENDQ_LIST
    q->next = -2;
#endif
}

/* Append a message record to the circular send queue, doubling the backing
 * array when full (the full case implies begin == end, so the wrapped
 * contents are copied out in two segments). */
void pushSendQ(PxshmSendQ *q, char *msg, int size, int *refcount){
    if(q->numEntries == q->size){
        //need to resize
        OutgoingMsgRec *oldData = q->data;
        int newSize = q->size<<1;
        q->data = (OutgoingMsgRec *)calloc(newSize, sizeof(OutgoingMsgRec));
        //copy head to the beginning of the new array
        CmiAssert(q->begin == q->end);
        CmiAssert(q->begin < q->size);
        memcpy(&(q->data[0]),&(oldData[q->begin]),sizeof(OutgoingMsgRec)*(q->size - q->begin));
        if(q->end!=0){
            memcpy(&(q->data[(q->size - q->begin)]),&(oldData[0]),sizeof(OutgoingMsgRec)*(q->end));
        }
        free(oldData);
        q->begin = 0;
        q->end = q->size;
        q->size = newSize;
    }
    OutgoingMsgRec *omg = &q->data[q->end];
    omg->size = size;
    omg->data = msg;
    omg->refcount = refcount;
    (q->end)++;
    if(q->end >= q->size){
        q->end -= q->size;
    }
    q->numEntries++;
}

/* Pop the oldest record from the circular send queue; NULL when empty.
 * The returned pointer aliases the queue's storage — valid until the slot
 * is overwritten by a later push. */
OutgoingMsgRec * popSendQ(PxshmSendQ *q){
    OutgoingMsgRec * ret;
    if(0 == q->numEntries){
        return NULL;
    }
    ret = &q->data[q->begin];
    (q->begin)++;
    if(q->begin >= q->size){
        q->begin -= q->size;
    }
    q->numEntries--;
    return ret;
}
/* Send a message to another on-node process through the shared-memory send
 * buffer for dstnode. The buffer lock is tried non-blockingly: on failure the
 * message is queued (with a reference retained) for a later flush; on success
 * any queued messages are flushed first so ordering is preserved.
 * refcount is incremented for every retention this layer keeps. */
void CmiSendMessagePxshm(char *msg, int size, int dstnode, int *refcount)
{
#if PXSHM_STATS
    double _startSendTime = CmiWallTimer();
#endif

    LrtsPrepareEnvelope(msg, size);

    int dstRank = PxshmRank(dstnode);
    MEMDEBUG(CmiMemoryCheck());

/*
    MACHSTATE4(3,"Send Msg Pxshm ogm %p size %d dst %d dstRank %d",ogm,ogm->size,ogm->dst,dstRank);
    MACHSTATE4(3,"Send Msg Pxshm ogm %p size %d dst %d dstRank %d",ogm,ogm->size,ogm->dst,dstRank);
*/

    CmiAssert(dstRank >=0 && dstRank != pxshmContext->noderank);

    sharedBufData *dstBuf = &(pxshmContext->sendBufs[dstRank]);
    PxshmSendQ *sendQ = pxshmContext->sendQs[dstRank];

#if PXSHM_OSSPINLOCK
    if(! OSSpinLockTry(&dstBuf->header->lock)){
#elif PXSHM_LOCK
    if(sem_trywait(dstBuf->mutex) < 0){
#elif PXSHM_FENCE
    /* Fence variant: announce the sender, yield the turn, back off if the
     * receiver is active. */
    dstBuf->header->flagSender = 1;
    dstBuf->header->turn = RECEIVER;
    CmiMemoryReadFence(0,0);
    CmiMemoryWriteFence(0,0);
    //if(dstBuf->header->flagReceiver && dstBuf->header->turn == RECEIVER){
    if(dstBuf->header->flagReceiver){
        dstBuf->header->flagSender = 0;
#endif
        /**failed to get the lock insert into q and retain the message*/
#if SENDQ_LIST
        /* First entry for this rank: link its queue onto the pending list. */
        if (sendQ->numEntries == 0 && sendQ->next == -2) {
            sendQ->next = sendQ_head_index;
            sendQ_head_index = dstRank;
        }
#endif
        pushSendQ(pxshmContext->sendQs[dstRank], msg, size, refcount);
        (*refcount)++;
        MEMDEBUG(CmiMemoryCheck());
        return;
    }else{
        /***
         * We got the lock for this buffer
         * first write all the messages in the sendQ and then write this guy
         * */
        if(pxshmContext->sendQs[dstRank]->numEntries == 0){
            // send message user event
            int ret = sendMessage(msg,size,refcount,dstBuf,pxshmContext->sendQs[dstRank]);
#if SENDQ_LIST
            /* sendMessage may have queued the message (buffer full):
             * link the queue onto the pending list. */
            if (sendQ->numEntries > 0 && sendQ->next == -2) {
                sendQ->next = sendQ_head_index;
                sendQ_head_index = dstRank;
            }
#endif
            MACHSTATE(3,"Pxshm Send succeeded immediately");
        }else{
            (*refcount)+=2;/*this message should not get deleted when the queue is flushed*/
            pushSendQ(pxshmContext->sendQs[dstRank],msg,size,refcount);
            // MACHSTATE3(3,"Pxshm ogm %p pushed to sendQ length %d refcount %d",ogm,pxshmContext->sendQs[dstRank]->numEntries,ogm->refcount);
            int sent = flushSendQ(sendQ);
            (*refcount)--; /*if it has been sent, can be deleted by caller, if not will be deleted when queue is flushed*/
            MACHSTATE1(3,"Pxshm flushSendQ sent %d messages",sent);
        }
        /* unlock the recvbuffer*/
#if PXSHM_OSSPINLOCK
        OSSpinLockUnlock(&dstBuf->header->lock);
#elif PXSHM_LOCK
        sem_post(dstBuf->mutex);
#elif PXSHM_FENCE
        CmiMemoryReadFence(0,0);
        CmiMemoryWriteFence(0,0);
        dstBuf->header->flagSender = 0;
#endif
    }
#if PXSHM_STATS
    pxshmContext->sendCount ++;
    pxshmContext->sendTime += (CmiWallTimer()-_startSendTime);
#endif
    MEMDEBUG(CmiMemoryCheck());
};

inline void emptyAllRecvBufs();
inline void flushAllSendQs();

/**********
 * Extract all the messages from the recvBuffers you can
 * Flush all sendQs
 * ***/
inline void CommunicationServerPxshm(){
#if PXSHM_STATS
    double _startCommServerTime =CmiWallTimer();
#endif
    MEMDEBUG(CmiMemoryCheck());
    emptyAllRecvBufs();
    flushAllSendQs();
#if PXSHM_STATS
    pxshmContext->commServerTime += (CmiWallTimer()-_startCommServerTime);
#endif
    MEMDEBUG(CmiMemoryCheck());
};

/* Idle-state hook: poll the shared-memory channels while the scheduler
 * remains idle. */
static void CmiNotifyStillIdlePxshm(CmiIdleState *s){
    CommunicationServerPxshm();
}

/* Idle-state hook invoked when the scheduler first becomes idle; delegates
 * to the generic still-idle notification. */
static void CmiNotifyBeginIdlePxshm(CmiIdleState *s)
{
    CmiNotifyStillIdle(s);
}
// Non-blocking acquisition attempt; true when this call obtained the lock.
bool try_lock() {
    const bool acquired = OSSpinLockTry(&spin);
    return acquired;
}
// Attempt to take the spinlock without blocking.
// Returns true if the lock was acquired by this call.
bool SpinLockMac::trylock()
{
    const bool got = OSSpinLockTry(&_spinlock);
    return got;
}