// Non-blocking acquire for this hierarchy-checked mutex.  The hierarchy
// check runs before the attempt (presumably throwing on a violation --
// confirm against check_for_hierarchy_violation's definition); on success
// the recorded hierarchy value is updated.  Returns true iff the internal
// mutex was acquired.
bool try_lock()
{
    check_for_hierarchy_violation ();
    const bool acquired = internal_mutex.try_lock();
    if (acquired)
        update_hierarchy_value ();
    return acquired;
}
// Deliberately faulty fixture for the clang copy-paste-clone checker: the
// loop acquires 'm1' but releases 'm2'.  Do not "fix" the mismatch -- it is
// the behavior under test -- and do not edit or translate the
// 'expected-warning' comment; 'clang -verify' matches it verbatim against
// the diagnostic emitted on that line.
void faultyBusyIncrement() {
  while (true) {
    if (m1.try_lock()) {
      ++i;
      m2.unlock(); // expected-warning{{Potential copy-paste error; did you really mean to use 'm2' here?}}
      if (i > 1000) {
        return;
      }
    }
  }
}
// Reference ("clean") twin of faultyBusyIncrement(): acquires and releases
// the same mutex 'm1'.  The clone checker points at this site as the
// similar code; 'clang -verify' matches the 'expected-note' comment
// verbatim, so it must stay byte-identical.
void busyIncrement() {
  while (true) {
    if (m1.try_lock()) {
      ++i;
      m1.unlock(); // expected-note{{Similar code using 'm1' here}}
      if (i > 1000) {
        return;
      }
    }
  }
}
// Second copy of busyIncrement() carrying a differently-worded checker
// note.  NOTE(review): this duplicates the definition appearing earlier in
// this view; presumably the two come from separate lit test files that
// were concatenated here -- confirm.  The 'expected-note' comment is
// matched verbatim by 'clang -verify'; keep it byte-identical.
void busyIncrement() {
  while (true) {
    if (m1.try_lock()) {
      ++i;
      m1.unlock(); // expected-note{{suggestion is based on the usage of this variable in a similar piece of code}}
      if (i > 1000) {
        return;
      }
    }
  }
}
// Faulty fixture variant (same shape as the earlier one, alternative
// diagnostic wording): locks 'm1' but unlocks 'm2'.  The mismatch is the
// behavior under test -- do not "fix" it, and do not edit the
// 'expected-warning' comment, which 'clang -verify' matches verbatim.
void faultyBusyIncrement() {
  while (true) {
    if (m1.try_lock()) {
      ++i;
      m2.unlock(); // expected-warning{{suspicious code clone detected; did you mean to use 'm1'?}}
      if (i > 1000) {
        return;
      }
    }
  }
}
// Reader side of a readers/writers scheme: 'mtx' guards the reader counter
// 'cl'; the last reader out (cl == 0) releases the writer mutex 'db' after
// file_read().
// NOTE(review): 'db.try_lock()' discards its return value and there is no
// 'cl == 1' guard -- presumably the first reader was meant to take 'db'
// unconditionally with db.lock(); confirm against the writer side before
// changing anything.
// NOTE(review): kept verbatim as a single collapsed line: the leading
// '//while(1) {' comments out the remainder, and the trailing '} }' does
// not brace-balance, so the original multi-line layout cannot be safely
// reconstructed from this view.
void lector() { //while(1) { mtx.lock(); cl++; db.try_lock(); mtx.unlock(); file_read(); mtx.lock(); cl--; if(cl == 0) db.unlock(); mtx.unlock(); } }
// Snapshot of the split counter: the global base value plus every
// per-thread counter, summed while holding gblcnt_mutex.  Returns 0 when
// the mutex is currently held elsewhere (note: the caller cannot tell that
// apart from a true count of 0).
ul read_count() {
    if ( !gblcnt_mutex.try_lock() ) {
        return 0;   // contended -- give up rather than block
    }
    ul total = globalcount;
    for ( auto &c : counter ) {
        total += (ul)c;
    }
    gblcnt_mutex.unlock();
    return total;
}
// Add 'delta' to this thread's counter in a limit-counter scheme
// (presumably the perfbook-style "approximate limit counter" -- confirm).
// Fastpath: consume local headroom (*countermax - *counter) with no lock.
// Slowpath: under gblcnt_mutex, fold the local counter into the global
// state, check the global limit, apply the delta globally, and rebalance
// headroom back to this thread.
// Returns false when the addition would exceed the global limit, or when
// gblcnt_mutex is contended (try_lock failure is reported as failure).
bool add_count ( ul delta, atomic<int> *counter, ul *countermax ) {
    if ( (*countermax - *counter) >= delta ) { // Fastpath
        *counter += delta;
        return true;
    }
    // Slowpath: non-blocking lock attempt -- a contended call simply fails.
    int lock = gblcnt_mutex.try_lock();
    if ( lock ) {
        globalize_count( counter, countermax );
        if ( globalcountmax - globalcount - globalreserve < (ul)delta ) {
            // Not enough global headroom for this delta.
            gblcnt_mutex.unlock();
            return false;
        }
        globalcount += (ul)delta; // Slowpath
        balance_count( counter, countermax );
        gblcnt_mutex.unlock();
        return true;
    } else {
        return false;
    }
}
void Sloong::SloongWallUS::ProcessEventList(int id, queue<RECVINFO>* pList, mutex& oLock, int sock, int nPriorityLevel, CLuaPacket* pUserInfo, CEpollEx* pEpoll, CMsgProc* pMsgProc) { // if not empty while (!pList->empty()) { if (oLock.try_lock() == false) { continue; } unique_lock<mutex> readLoc(oLock, std::adopt_lock); // recheck is not empty when lock done. if (pList->empty()) { oLock.unlock(); continue; } RECVINFO msg = pList->front(); pList->pop(); oLock.unlock(); ProcessEvent(id, &msg, sock, nPriorityLevel, pUserInfo, pEpoll,pMsgProc); } }
// Worker body for a hand-rolled parallel quicksort with a shared task pool.
//   arr     : array being sorted (shared by all worker threads).
//   callCnt : recursion-depth counter; past NESTING, subranges are pushed
//             to the shared pool (taskArr, guarded by mtx) instead of
//             recursed into.
//   st / ed : explicit subrange for a direct recursive call, or -1/-1 when
//             the worker should pull ranges from taskArr.
// Shared state defined elsewhere: taskArr + mtx (task pool),
// cpuFinishCount (outstanding-work counter; the loop exits when it drops
// to 0 or below), NESTING, swap0(), struct v { start; end; }, Sleep().
// FIX: both task allocations used malloc(sizeof(struct v*)) -- a pointer's
// worth of bytes -- so writing v0->start / v0->end overflowed the heap.
// They now allocate sizeof(struct v), and free(task) is re-enabled (the
// original crash in free() was fallout from that same undersized malloc).
// NOTE(review): cpuFinishCount and one taskArr.push_back still run with
// their guarding mutex calls commented out ("//mtx2.lock()" etc.), so data
// races remain; left as in the original.
void parallelQuickSortWithPool1(vector<int> &arr, int callCnt, int st = -1, int ed = -1) {
	// Per-thread instrumentation via __declspec(thread) did not work on the
	// author's toolchain, so it stays disabled.
	//static __declspec (thread) int regressionCnt = 0;
	//regressionCnt++;
	callCnt++;
	bool loop = true;
	while (loop) {
		// No outstanding work anywhere -- leave the worker loop.
		if (cpuFinishCount <= 0) {
			break;
		}
		// Either we were handed an explicit range (st != -1) or we try to
		// grab the pool lock to pull one.
		if (st != -1 || mtx.try_lock() > 0) {
			int start, end;
			if (st != -1) {
				start = st;
				end = ed;
				loop = false; // a direct call sorts exactly one range, then returns
			}
			else {
				if (taskArr.size() == 0) {
					mtx.unlock();
					break; // pool empty: must break here, not continue
				}
				auto task = taskArr[taskArr.size() - 1];
				taskArr.pop_back();
				mtx.unlock();
				start = task->start;
				end = task->end;
				callCnt = 0;
				// Safe now that tasks are allocated with sizeof(struct v);
				// fixes the leak the original left by commenting this out.
				free(task);
			}
			// Partition around pivot arr[start]; j ends one past the last
			// element <= pivot.
			int a = arr[start];
			int j = start + 1;
			for (int i = j; i <= end; i++) {
				if (arr[i] <= a) {
					if (i != j) swap0(arr, i, j++);
					else j++;
				}
			}
			if (j > start + 1) {
				swap0(arr, start, j - 1);
			}
			bool has2 = j > start + 2 && j < end; // both halves non-trivial?
			// Left half: elements before the pivot's final position.
			if (j > start + 2) {
				cpuFinishCount++;
				int nextstart = start;
				int nextend = j - 2;
				// Skip elements equal to the pivot (deduplication).
				while (arr[nextend] == arr[j - 1] && nextend > start)
					nextend--;
				if (nextstart != nextend) {
					if (has2 || callCnt > NESTING) {
						// FIX: was malloc(sizeof(struct v*)) -- heap overflow.
						auto v0 = (struct v *)malloc(sizeof(struct v));
						v0->start = nextstart;
						v0->end = nextend;
						//mtx.lock();
						taskArr.push_back(move(v0));
						//mtx.unlock();
					}
					else {
						parallelQuickSortWithPool1(arr, callCnt, nextstart, nextend);
					}
				}
			}
			// Right half: elements after the pivot's final position.
			if (j < end) {
				cpuFinishCount++;
				int nextstart = j;
				int nextend = end;
				while (arr[nextstart] == arr[j - 1] && nextstart < nextend)
					nextstart++;
				if (nextstart != nextend) {
					if (callCnt > NESTING) {
						// FIX: same sizeof correction as above.
						auto v0 = (struct v *)malloc(sizeof(struct v));
						v0->start = nextstart;
						v0->end = nextend;
						//mtx.lock();
						taskArr.push_back(v0);
						//mtx.unlock();
					}
					else
						parallelQuickSortWithPool1(arr, callCnt, nextstart, nextend);
				}
			}
			cpuFinishCount--;
			// Reset so the next iteration pulls from taskArr; without this
			// the already-sorted [st, ed] range would be re-sorted and
			// cpuFinishCount decremented again, corrupting the exit count.
			st = -1;
			ed = -1;
		}
		Sleep(0);
	}
}
// Non-blocking lock attempt exposed through a C-style int return:
// 1 when m_mutex was acquired, 0 otherwise.
int ExtTryLock()
{
    if (m_mutex.try_lock())
    {
        return 1;
    }
    return 0;
}
// Non-blocking acquire.  Delegates to an internal overload (not visible in
// this view), tagging any reported error with the "mutex::try_lock"
// function name.  NOTE(review): with the default 'throws' sentinel the
// error is presumably raised as an exception rather than returned through
// 'ec' (HPX-style error_code convention) -- confirm against the overload's
// definition.
bool try_lock(error_code& ec = throws)
{
    return try_lock("mutex::try_lock", ec);
}