AuthzDocumentsUpdateGuard::~AuthzDocumentsUpdateGuard() { if (_lockedForUpdate) { unlock(); } }
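/*
 * The destructor above releases the update lock only when it is still held.
 * A minimal self-contained sketch of the same conditional-RAII pattern;
 * UpdateGuard and its members are hypothetical names, not the API above.
 */
#include <mutex>

class UpdateGuard {
public:
    explicit UpdateGuard(std::mutex& m) : _mutex(m), _locked(false) {}
    ~UpdateGuard() { if (_locked) unlock(); }        // mirrors the guard above

    void lock()   { _mutex.lock();   _locked = true;  }
    void unlock() { _mutex.unlock(); _locked = false; }

private:
    std::mutex& _mutex;
    bool _locked;                                    // tracks whether the destructor must release
};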
int MaClient::sendCore(char *method, char *requestUrl, char *postData, int postLen) { char abuf[MPR_HTTP_MAX_PASS * 2], encDetails[MPR_HTTP_MAX_PASS * 2]; char *host; int port, len, rc, nbytes; mprAssert(requestUrl && *requestUrl); lock(); reset(); mprLog(3, tMod, "sendCore: %s %s\n", method, requestUrl); this->method = mprStrdup(method); timestamp = mprGetTime(0); if (timeoutPeriod < 0) { timeoutPeriod = MPR_HTTP_CLIENT_TIMEOUT; } if (timeoutPeriod > 0) { if (!mprGetDebugMode()) { timer = new MprTimer(MPR_HTTP_TIMER_PERIOD, timeoutWrapper, (void *) this); } } if (*requestUrl == '/') { url.parse(requestUrl); host = (proxyHost) ? proxyHost : defaultHost; port = (proxyHost) ? proxyPort : defaultPort; } else { url.parse(requestUrl); host = (proxyHost) ? proxyHost : url.host; port = (proxyHost) ? proxyPort : url.port; } if (sock) { if (port != currentPort || strcmp(host, currentHost) != 0) { // // This request is for a different host or port. Must close socket. // sock->close(0); sock->dispose(); sock = 0; } } if (sock == 0) { sock = new MprSocket(); mprLog(3, tMod, "Opening new socket on: %s:%d\n", host, port); rc = sock->openClient(host, port, MPR_SOCKET_NODELAY); if (rc < 0) { mprLog(MPR_ERROR, tMod, "Can't open socket on %s:%d, %d\n", host, port, rc); unlock(); sock->dispose(); sock = 0; return rc; } sock->setBufSize(-1, MPR_HTTP_CLIENT_BUFSIZE); currentHost = mprStrdup(host); currentPort = port; } else { mprLog(3, tMod, "Reusing Keep-Alive socket on: %s:%d\n", host, port); } // // Remove this flush when pipelining is supported // inBuf->flush(); fd = sock->getFd(); if (proxyHost && *proxyHost) { if (url.query && *url.query) { outBuf->putFmt("%s http://%s:%d%s?%s HTTP/1.1\r\n", method, proxyHost, proxyPort, url.uri, url.query); } else { outBuf->putFmt("%s http://%s:%d%s HTTP/1.1\r\n", method, proxyHost, proxyPort, url.uri); } } else { if (url.query && *url.query) { outBuf->putFmt("%s %s?%s HTTP/1.1\r\n", method, url.uri, url.query); } else { outBuf->putFmt("%s %s HTTP/1.1\r\n", method, url.uri); } } if (serverAuthType) { if (strcmp(serverAuthType, "basic") == 0) { mprSprintf(abuf, sizeof(abuf), "%s:%s", user, password); maEncode64(encDetails, sizeof(encDetails), abuf); outBuf->putFmt("Authorization: %s %s\r\n", serverAuthType, encDetails); #if BLD_FEATURE_DIGEST } else if (strcmp(serverAuthType, "digest") == 0) { char a1Buf[256], a2Buf[256], digestBuf[256]; char *ha1, *ha2, *digest, *qop; authNc++; if (secret == 0) { if (createSecret() < 0) { mprLog(MPR_ERROR, tMod, "Can't create secret\n"); unlock(); return MPR_ERR_CANT_INITIALIZE; } } mprFree(authCnonce); maCalcNonce(&authCnonce, secret, 0, realm); mprSprintf(a1Buf, sizeof(a1Buf), "%s:%s:%s", user, realm, password); ha1 = maMD5(a1Buf); mprSprintf(a2Buf, sizeof(a2Buf), "%s:%s", method, url.uri); ha2 = maMD5(a2Buf); qop = (serverQop) ? serverQop : (char*) ""; if (mprStrCmpAnyCase(qop, "auth") == 0) { mprSprintf(digestBuf, sizeof(digestBuf), "%s:%s:%08x:%s:%s:%s", ha1, serverNonce, authNc, authCnonce, qop, ha2); } else if (mprStrCmpAnyCase(qop, "auth-int") == 0) { mprSprintf(digestBuf, sizeof(digestBuf), "%s:%s:%08x:%s:%s:%s", ha1, serverNonce, authNc, authCnonce, qop, ha2); } else { qop = ""; mprSprintf(digestBuf, sizeof(digestBuf), "%s:%s:%s", ha1, serverNonce, ha2); } mprFree(ha1); mprFree(ha2); digest = maMD5(digestBuf); if (*qop == '\0') { outBuf->putFmt("Authorization: Digest " "username=\"%s\", realm=\"%s\", nonce=\"%s\", " "uri=\"%s\", response=\"%s\"\r\n", user, realm, serverNonce, url.uri, digest); } else if (strcmp(qop, "auth") == 0) { outBuf->putFmt("Authorization: Digest " "username=\"%s\", realm=\"%s\", domain=\"%s\", " "algorithm=\"MD5\", qop=\"%s\", cnonce=\"%s\", " "nc=\"%08x\", nonce=\"%s\", opaque=\"%s\", " "stale=\"FALSE\", uri=\"%s\", response=\"%s\"\r\n", user, realm, serverDomain, serverQop, authCnonce, authNc, serverNonce, serverOpaque, url.uri, digest); } else if (strcmp(qop, "auth-int") == 0) { /* qop="auth-int" is not supported: no Authorization header is emitted */ ; } mprFree(digest); #endif // BLD_FEATURE_DIGEST } } outBuf->putFmt("Host: %s\r\n", host); outBuf->putFmt("User-Agent: %s\r\n", MPR_HTTP_CLIENT_NAME); if (userFlags & MPR_HTTP_KEEP_ALIVE) { outBuf->putFmt("Connection: Keep-Alive\r\n"); } else { outBuf->putFmt("Connection: close\r\n"); } if (postLen > 0) { outBuf->putFmt("Content-Length: %d\r\n", postLen); } if (postData) { outBuf->putFmt("Content-Type: application/x-www-form-urlencoded\r\n"); } if (userHeaders) { outBuf->put(userHeaders); } outBuf->put("\r\n"); outBuf->addNull(); // // Flush to the socket with any post data. Writes can fail because the // server prematurely closes a keep-alive connection. // len = outBuf->getLength(); if ((rc = sock->write(outBuf->getStart(), len)) != len) { flags |= MPR_HTTP_TERMINATED; unlock(); mprLog(MPR_ERROR, tMod, "Can't write to socket on %s:%d, %d\n", host, port, rc); return rc; } #if BLD_DEBUG mprLog(3, MPR_RAW, tMod, "Request >>>>\n%s\n", outBuf->getStart()); #endif if (postData) { sock->setBlockingMode(1); for (len = 0; len < postLen; ) { nbytes = postLen - len; rc = sock->write(&postData[len], nbytes); #if BLD_DEBUG mprLog(3, MPR_RAW, tMod, "POST DATA %s\n", &postData[len]); #endif if (rc < 0) { unlock(); mprLog(MPR_ERROR, tMod, "Can't write post data to socket on %s:%d, %d\n", host, port, rc); flags |= MPR_HTTP_TERMINATED; sock->dispose(); sock = 0; return rc; } len += rc; } sock->setBlockingMode(0); } sock->setCallback(readEventWrapper, (void*) this, 0, MPR_READABLE); // // If no callback, then we must block // if (callback == 0) { unlock(); while (state != MPR_HTTP_CLIENT_DONE) { // // If multithreaded and the events thread is not yet running, // we still want to work. // #if BLD_FEATURE_MULTITHREAD if (mprGetMpr()->isRunningEventsThread() && mprGetMpr()->poolService->getMaxPoolThreads() > 0) { completeCond->waitForCond(250); } else #endif mprGetMpr()->serviceEvents(1, 100); } } else { unlock(); } return 0; }
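/*
 * The Authorization header above follows RFC 2617. A compact sketch of the
 * same response computation; md5hex() is an assumed helper returning the
 * 32-character hex MD5 of its input (maMD5 plays that role in the code above).
 */
#include <string>

std::string md5hex(const std::string& s);            // assumed: hex MD5 digest

std::string digestResponse(const std::string& user, const std::string& realm,
                           const std::string& password, const std::string& method,
                           const std::string& uri, const std::string& nonce,
                           const std::string& nc, const std::string& cnonce,
                           const std::string& qop)
{
    std::string ha1 = md5hex(user + ":" + realm + ":" + password);
    std::string ha2 = md5hex(method + ":" + uri);
    if (qop.empty()) {
        // pre-qop (RFC 2069) form, matching the *qop == '\0' branch above
        return md5hex(ha1 + ":" + nonce + ":" + ha2);
    }
    return md5hex(ha1 + ":" + nonce + ":" + nc + ":" + cnonce + ":" + qop + ":" + ha2);
}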
bool LockerImpl::saveLockStateAndUnlock(Locker::LockSnapshot* stateOut) { // Clear out whatever is in stateOut. stateOut->locks.clear(); stateOut->globalMode = MODE_NONE; stateOut->globalRecursiveCount = 0; // First, we look at the global lock. There is special handling for this (as the flush // lock goes along with it) so we store it separately from the more pedestrian locks. LockRequest* globalRequest = _find(resourceIdGlobal); if (NULL == globalRequest) { // If there's no global lock there isn't really anything to do. invariant(_requests.empty()); return false; } // If the global lock has been acquired more than once, we're probably somewhere in a // DBDirectClient call. It's not safe to release and reacquire locks -- the context using // the DBDirectClient is probably not prepared for lock release. if (globalRequest->recursiveCount > 1) { return false; } // The global lock has been acquired just once. invariant(1 == globalRequest->recursiveCount); stateOut->globalMode = globalRequest->mode; stateOut->globalRecursiveCount = globalRequest->recursiveCount; // Flush lock state is inferred from the global state so we don't bother to store it. // Next, the non-global locks. for (LockRequestsMap::const_iterator it = _requests.begin(); it != _requests.end(); it++) { const ResourceId& resId = it->first; const LockRequest* request = it->second; // This is handled separately from normal locks as mentioned above. if (resourceIdGlobal == resId) { continue; } // This is an internal lock that is obtained when the global lock is locked. if (resourceIdMMAPV1Flush == resId) { continue; } // We don't support saving and restoring document-level locks. invariant(RESOURCE_DATABASE == resId.getType() || RESOURCE_COLLECTION == resId.getType()); // And, stuff the info into the out parameter. Locker::LockSnapshot::OneLock info; info.resourceId = resId; info.mode = request->mode; info.recursiveCount = request->recursiveCount; stateOut->locks.push_back(info); } // Sort locks from coarsest to finest. They'll later be acquired in this order. std::sort(stateOut->locks.begin(), stateOut->locks.end(), SortByGranularity()); // Unlock everything. // Step 1: Unlock all requests that are not-flush and not-global. for (size_t i = 0; i < stateOut->locks.size(); ++i) { for (size_t j = 0; j < stateOut->locks[i].recursiveCount; ++j) { unlock(stateOut->locks[i].resourceId); } } // Step 2: Unlock the global lock. for (size_t i = 0; i < stateOut->globalRecursiveCount; ++i) { unlock(resourceIdGlobal); } // Step 3: Unlock flush. It's only acquired on the first global lock acquisition // so we only unlock it once. invariant(unlock(resourceIdMMAPV1Flush)); return true; }
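/*
 * The snapshot is sorted coarsest-to-finest so that a later restore can
 * reacquire in the same order. A hedged sketch of what the matching restore
 * loop could look like, reusing the snippet's types; the real restoreLockState
 * may differ in detail (recursiveCount handling is omitted here).
 */
void restoreLockStateSketch(LockerImpl& locker, const Locker::LockSnapshot& state) {
    // The global lock (and with it the flush lock) comes back first...
    locker.lockGlobal(state.globalMode);
    // ...then the saved database/collection locks, coarsest to finest.
    for (size_t i = 0; i < state.locks.size(); ++i) {
        locker.lock(state.locks[i].resourceId, state.locks[i].mode);
    }
}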
/* * generic single channel send/recv * if the bool pointer is nil, * then the full exchange will * occur. if pres is not nil, * then the protocol will not * sleep but return if it could * not complete. * * sleep can wake up with g->param == nil * when a channel involved in the sleep has * been closed. it is easiest to loop and re-run * the operation; we'll see that it's now closed. */ void runtime·chansend(ChanType *t, Hchan *c, byte *ep, bool *pres, void *pc) { SudoG *sg; SudoG mysg; G* gp; int64 t0; if(c == nil) { USED(t); if(pres != nil) { *pres = false; return; } runtime·park(nil, nil, "chan send (nil chan)"); return; // not reached } if(runtime·gcwaiting) runtime·gosched(); if(debug) { runtime·printf("chansend: chan=%p; elem=", c); c->elemalg->print(c->elemsize, ep); runtime·prints("\n"); } t0 = 0; mysg.releasetime = 0; if(runtime·blockprofilerate > 0) { t0 = runtime·cputicks(); mysg.releasetime = -1; } runtime·lock(c); // TODO(dvyukov): add similar instrumentation to select. if(raceenabled) runtime·racereadpc(c, pc, runtime·chansend); if(c->closed) goto closed; if(c->dataqsiz > 0) goto asynch; sg = dequeue(&c->recvq); if(sg != nil) { if(raceenabled) racesync(c, sg); runtime·unlock(c); gp = sg->g; gp->param = sg; if(sg->elem != nil) c->elemalg->copy(c->elemsize, sg->elem, ep); if(sg->releasetime) sg->releasetime = runtime·cputicks(); runtime·ready(gp); if(pres != nil) *pres = true; return; } if(pres != nil) { runtime·unlock(c); *pres = false; return; } mysg.elem = ep; mysg.g = g; mysg.selgen = NOSELGEN; g->param = nil; enqueue(&c->sendq, &mysg); runtime·park(runtime·unlock, c, "chan send"); if(g->param == nil) { runtime·lock(c); if(!c->closed) runtime·throw("chansend: spurious wakeup"); goto closed; } if(mysg.releasetime > 0) runtime·blockevent(mysg.releasetime - t0, 2); return; asynch: if(c->closed) goto closed; if(c->qcount >= c->dataqsiz) { if(pres != nil) { runtime·unlock(c); *pres = false; return; } mysg.g = g; mysg.elem = nil; mysg.selgen = NOSELGEN; enqueue(&c->sendq, &mysg); runtime·park(runtime·unlock, c, "chan send"); runtime·lock(c); goto asynch; } if(raceenabled) runtime·racerelease(chanbuf(c, c->sendx)); c->elemalg->copy(c->elemsize, chanbuf(c, c->sendx), ep); if(++c->sendx == c->dataqsiz) c->sendx = 0; c->qcount++; sg = dequeue(&c->recvq); if(sg != nil) { gp = sg->g; runtime·unlock(c); if(sg->releasetime) sg->releasetime = runtime·cputicks(); runtime·ready(gp); } else runtime·unlock(c); if(pres != nil) *pres = true; if(mysg.releasetime > 0) runtime·blockevent(mysg.releasetime - t0, 2); return; closed: runtime·unlock(c); runtime·panicstring("send on closed channel"); }
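/*
 * The comment block above describes two send modes: with pres == nil the
 * caller sleeps until the exchange completes; otherwise the send returns false
 * instead of blocking. A self-contained C++ sketch of that contract for a
 * buffered channel (hypothetical class, not the runtime's implementation;
 * close/panic semantics omitted):
 */
#include <condition_variable>
#include <deque>
#include <mutex>

template <typename T>
class Chan {
public:
    explicit Chan(size_t cap) : _cap(cap) {}

    bool trySend(const T& v) {                  // pres != nil path: never sleeps
        std::lock_guard<std::mutex> g(_m);
        if (_q.size() >= _cap) return false;
        _q.push_back(v);
        _nonEmpty.notify_one();
        return true;
    }

    void send(const T& v) {                     // pres == nil path: full exchange
        std::unique_lock<std::mutex> g(_m);
        _nonFull.wait(g, [&] { return _q.size() < _cap; });
        _q.push_back(v);
        _nonEmpty.notify_one();
    }

    T recv() {
        std::unique_lock<std::mutex> g(_m);
        _nonEmpty.wait(g, [&] { return !_q.empty(); });
        T v = _q.front();
        _q.pop_front();
        _nonFull.notify_one();
        return v;
    }

private:
    size_t _cap;
    std::mutex _m;
    std::condition_variable _nonEmpty, _nonFull;
    std::deque<T> _q;
};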
void MultiRobotsWorld::glDraw() { static const GLfloat white[]={0.8f,0.8f,0.8f,1.0f}, gray[]={0.2f,0.2f,0.2f,1.0}; glPushMatrix(); glTranslatef(0.5*lattice->gridScale[0],0.5*lattice->gridScale[1],0.5*lattice->gridScale[2]); // glTranslatef(0.5*lattice->gridScale[0],0.5*lattice->gridScale[1],0); glDisable(GL_TEXTURE_2D); vector <GlBlock*>::iterator ic=tabGlBlocks.begin(); lock(); while (ic!=tabGlBlocks.end()) { ((MultiRobotsGlBlock*)(*ic))->glDraw(objBlock); ic++; } unlock(); glPopMatrix(); glMaterialfv(GL_FRONT,GL_AMBIENT,gray); glMaterialfv(GL_FRONT,GL_DIFFUSE,white); glMaterialfv(GL_FRONT,GL_SPECULAR,gray); glMaterialf(GL_FRONT,GL_SHININESS,40.0); glPushMatrix(); enableTexture(true); glBindTexture(GL_TEXTURE_2D,idTextureWall); glScalef(lattice->gridSize[0]*lattice->gridScale[0], lattice->gridSize[1]*lattice->gridScale[1], lattice->gridSize[2]*lattice->gridScale[2]); glBegin(GL_QUADS); // bottom glNormal3f(0,0,1.0f); glTexCoord2f(0,0); glVertex3f(0.0f,0.0f,0.0f); glTexCoord2f(lattice->gridSize[0]/4.0f,0); glVertex3f(1.0f,0.0f,0.0f); glTexCoord2f(lattice->gridSize[0]/4.0f,lattice->gridSize[1]/4.0f); glVertex3f(1.0,1.0,0.0f); glTexCoord2f(0,lattice->gridSize[1]/4.0f); glVertex3f(0.0,1.0,0.0f); // top glNormal3f(0,0,-1.0f); glTexCoord2f(0,0); glVertex3f(0.0f,0.0f,1.0f); glTexCoord2f(0,lattice->gridSize[1]/4.0f); glVertex3f(0.0,1.0,1.0f); glTexCoord2f(lattice->gridSize[0]/4.0f,lattice->gridSize[1]/4.0f); glVertex3f(1.0,1.0,1.0f); glTexCoord2f(lattice->gridSize[0]/4.0f,0); glVertex3f(1.0f,0.0f,1.0f); // left glNormal3f(1.0,0,0); glTexCoord2f(0,0); glVertex3f(0.0f,0.0f,0.0f); glTexCoord2f(lattice->gridSize[1]/4.0f,0); glVertex3f(0.0f,1.0f,0.0f); glTexCoord2f(lattice->gridSize[1]/4.0f,lattice->gridSize[2]/4.0f); glVertex3f(0.0,1.0,1.0f); glTexCoord2f(0,lattice->gridSize[2]/4.0f); glVertex3f(0.0,0.0,1.0f); // right glNormal3f(-1.0,0,0); glTexCoord2f(0,0); glVertex3f(1.0f,0.0f,0.0f); glTexCoord2f(0,lattice->gridSize[2]/4.0f); glVertex3f(1.0,0.0,1.0f); glTexCoord2f(lattice->gridSize[1]/4.0f,lattice->gridSize[2]/4.0f); glVertex3f(1.0,1.0,1.0f); glTexCoord2f(lattice->gridSize[1]/4.0f,0); glVertex3f(1.0f,1.0f,0.0f); // back glNormal3f(0,-1.0,0); glTexCoord2f(0,0); glVertex3f(0.0f,1.0f,0.0f); glTexCoord2f(lattice->gridSize[0]/4.0f,0); glVertex3f(1.0f,1.0f,0.0f); glTexCoord2f(lattice->gridSize[0]/4.0f,lattice->gridSize[2]/4.0f); glVertex3f(1.0f,1.0,1.0f); glTexCoord2f(0,lattice->gridSize[2]/4.0f); glVertex3f(0.0,1.0,1.0f); // front glNormal3f(0,1.0,0); glTexCoord2f(0,0); glVertex3f(0.0f,0.0f,0.0f); glTexCoord2f(0,lattice->gridSize[2]/4.0f); glVertex3f(0.0,0.0,1.0f); glTexCoord2f(lattice->gridSize[0]/4.0f,lattice->gridSize[2]/4.0f); glVertex3f(1.0f,0.0,1.0f); glTexCoord2f(lattice->gridSize[0]/4.0f,0); glVertex3f(1.0f,0.0f,0.0f); glEnd(); glPopMatrix(); // draw the axes objRepere->glDraw(); }
int UavcanNode::ioctl(file *filp, int cmd, unsigned long arg) { int ret = OK; lock(); switch (cmd) { case PWM_SERVO_ARM: arm_actuators(true); break; case PWM_SERVO_SET_ARM_OK: case PWM_SERVO_CLEAR_ARM_OK: case PWM_SERVO_SET_FORCE_SAFETY_OFF: // these are no-ops, as no safety switch break; case PWM_SERVO_DISARM: arm_actuators(false); break; case MIXERIOCGETOUTPUTCOUNT: *(unsigned *)arg = _output_count; break; case MIXERIOCRESET: if (_mixers != nullptr) { delete _mixers; _mixers = nullptr; _groups_required = 0; } break; case MIXERIOCLOADBUF: { const char *buf = (const char *)arg; unsigned buflen = strnlen(buf, 1024); if (_mixers == nullptr) { _mixers = new MixerGroup(control_callback, (uintptr_t)_controls); } if (_mixers == nullptr) { _groups_required = 0; ret = -ENOMEM; } else { ret = _mixers->load_from_buf(buf, buflen); if (ret != 0) { warnx("mixer load failed with %d", ret); delete _mixers; _mixers = nullptr; _groups_required = 0; ret = -EINVAL; } else { _mixers->groups_required(_groups_required); } } break; } default: ret = -ENOTTY; break; } unlock(); if (ret == -ENOTTY) { ret = CDev::ioctl(filp, cmd, arg); } return ret; }
nsresult DOMStorageDBThread::InsertDBOp(DOMStorageDBThread::DBOperation* aOperation) { MonitorAutoLock monitor(mThreadObserver->GetMonitor()); // Sentinel so we don't forget to delete the operation when we exit early. nsAutoPtr<DOMStorageDBThread::DBOperation> opScope(aOperation); if (mStopIOThread) { // Thread use after shutdown was demanded. MOZ_ASSERT(false); return NS_ERROR_NOT_INITIALIZED; } if (NS_FAILED(mStatus)) { MonitorAutoUnlock unlock(mThreadObserver->GetMonitor()); aOperation->Finalize(mStatus); return mStatus; } switch (aOperation->Type()) { case DBOperation::opPreload: case DBOperation::opPreloadUrgent: if (mPendingTasks.IsScopeUpdatePending(aOperation->Scope())) { // If there is a pending update operation for the scope, first do the flush // before we preload the cache. This may happen in an extremely rare case // when a child process throws away its cache before flush on the parent // has finished. If we preloaded the cache as a priority operation // before the pending flush, we would get inconsistent cache content. mFlushImmediately = true; } else if (mPendingTasks.IsScopeClearPending(aOperation->Scope())) { // The scope is scheduled to be cleared, so just quickly load as empty. // We need to do this to prevent load of the DB data before the scope has // actually been cleared from the database. Preloads are processed // immediately before update and clear operations on the database that // are flushed periodically in batches. MonitorAutoUnlock unlock(mThreadObserver->GetMonitor()); aOperation->Finalize(NS_OK); return NS_OK; } // NO BREAK case DBOperation::opGetUsage: if (aOperation->Type() == DBOperation::opPreloadUrgent) { SetHigherPriority(); // Dropped back after urgent preload execution mPreloads.InsertElementAt(0, aOperation); } else { mPreloads.AppendElement(aOperation); } // DB operation adopted, don't delete it. opScope.forget(); // Immediately start executing this. monitor.Notify(); break; default: // Update operations are first collected, coalesced and then flushed // after a short time. mPendingTasks.Add(aOperation); // DB operation adopted, don't delete it. opScope.forget(); ScheduleFlush(); break; } return NS_OK; }
bool epoll_wait_call::_wait(int timeout) { int i, ready_fds, fd; bool cq_ready = false; __log_func("calling os epoll: %d", m_epfd); if (timeout) { lock(); if (m_epfd_info->m_ready_fds.empty()) { m_epfd_info->going_to_sleep(); } else { timeout = 0; } unlock(); } if (m_sigmask) { ready_fds = orig_os_api.epoll_pwait(m_epfd, m_p_ready_events, m_maxevents, timeout, m_sigmask); } else { ready_fds = orig_os_api.epoll_wait(m_epfd, m_p_ready_events, m_maxevents, timeout); } if (timeout) { lock(); m_epfd_info->return_from_sleep(); unlock(); } if (ready_fds < 0) { vma_throw_object(io_mux_call::io_error); } // convert the returned events to user events and mark offloaded fds m_n_all_ready_fds = 0; for (i = 0; i < ready_fds; ++i) { fd = m_p_ready_events[i].data.fd; // wakeup event if(m_epfd_info->is_wakeup_fd(fd)) { lock(); m_epfd_info->remove_wakeup_fd(); unlock(); continue; } // If it's CQ if (m_epfd_info->is_cq_fd(m_p_ready_events[i].data.u64)) { cq_ready = true; continue; } if ((m_p_ready_events[i].events & EPOLLIN)) { socket_fd_api* temp_sock_fd_api = fd_collection_get_sockfd(fd); if (temp_sock_fd_api) { // Instructing the socket to sample the OS immediately to prevent hitting EAGAIN on recvfrom(), // after iomux returned a shadow fd as ready (only for non-blocking sockets) temp_sock_fd_api->set_immediate_os_sample(); } } // Copy event bits and data m_events[m_n_all_ready_fds].events = m_p_ready_events[i].events; if (!m_epfd_info->get_data_by_fd(fd, &m_events[m_n_all_ready_fds].data)) { continue; } ++m_n_all_ready_fds; } return cq_ready; }
int epoll_wait_call::get_current_events() { if (m_epfd_info->m_ready_fds.empty()) { return m_n_all_ready_fds; } vector<socket_fd_api *> socket_fd_vec; lock(); int i,r,w; i = r = w = m_n_all_ready_fds; socket_fd_api *p_socket_object; epoll_fd_rec fd_rec; ep_ready_fd_map_t::iterator iter = m_epfd_info->m_ready_fds.begin(); while (iter != m_epfd_info->m_ready_fds.end() && i < m_maxevents) { ep_ready_fd_map_t::iterator iter_cpy = iter; // for protection needs ++iter; p_socket_object = fd_collection_get_sockfd(iter_cpy->first); if (p_socket_object) { if(!m_epfd_info->get_fd_rec_by_fd(iter_cpy->first, fd_rec)) continue; m_events[i].events = 0; //initialize bool got_event = false; //epoll_wait will always wait for EPOLLERR and EPOLLHUP; it is not necessary to set it in events. uint32_t mutual_events = iter_cpy->second & (fd_rec.events | EPOLLERR | EPOLLHUP); //EPOLLHUP & EPOLLOUT are mutually exclusive. see poll man pages. epoll adapt poll behavior. if ((mutual_events & EPOLLHUP) && (mutual_events & EPOLLOUT)) { mutual_events &= ~EPOLLOUT; } if (mutual_events & EPOLLIN) { if (handle_epoll_event(p_socket_object->is_readable(NULL), EPOLLIN, iter_cpy, fd_rec, i)) { r++; got_event = true; } mutual_events &= ~EPOLLIN; } if (mutual_events & EPOLLOUT) { if (handle_epoll_event(p_socket_object->is_writeable(), EPOLLOUT, iter_cpy, fd_rec, i)) { w++; got_event = true; } mutual_events &= ~EPOLLOUT; } if (mutual_events) { if (handle_epoll_event(true, mutual_events, iter_cpy, fd_rec, i)) { got_event = true; } } if (got_event) { socket_fd_vec.push_back(p_socket_object); ++i; } } else { m_epfd_info->m_ready_fds.erase(iter_cpy); } } int ready_rfds = r - m_n_all_ready_fds; //MNY: not only rfds, different counters for read/write ? int ready_wfds = w - m_n_all_ready_fds; m_n_ready_rfds += ready_rfds; m_n_ready_wfds += ready_wfds; m_p_stats->n_iomux_rx_ready += ready_rfds; unlock(); /* * for checking ring migration we need a socket context. * in epoll we separate the rings from the sockets, so only here we access the sockets. * therefore, it is most convenient to check it here. * we need to move the ring migration to the epfd, going over the registered sockets, * when polling the rings was not fruitful. * this will be more similar to the behavior of select/poll. * see RM task 212058 */ for (unsigned int j = 0; j < socket_fd_vec.size(); j++) { socket_fd_vec[j]->consider_rings_migration(); } return (i); }
void JunctionMap::insert_junction(char strand, int chr, int start, int end, bool consensus, const char* intron_string, int junction_qual, const char *read_id, int coverage = 1) { lock() ; //Sorted list by donor positions first and then acceptor positions Junction j; if (coverage<0) // annotation coverage = anno_pseudo_coverage ; if (junction_qual<0) // annotation junction_qual = anno_pseudo_coverage ; // init junction j, in most cases we use this junction j.start = start; j.end = end; j.coverage = coverage; j.strand = strand; j.consensus = consensus ; j.intron_string = intron_string; j.read_id = read_id ; j.junction_qual = junction_qual ; //fprintf(stdout,"%c %i %i %i\n",strand, chr, start, end); if (junctionlist_by_start[chr].empty()) { junctionlist_by_start[chr].push_back(j); junctionlist_by_end[chr].push_back(j); unlock() ; return; } std::deque<Junction>::iterator it_s = my_lower_bound_by_start(junctionlist_by_start[chr].begin(), junctionlist_by_start[chr].end(), start) ; std::deque<Junction>::iterator it_e = my_lower_bound_by_end(junctionlist_by_end[chr].begin(), junctionlist_by_end[chr].end(), end) ; // first handle list sorted by end bool handled = false; for (; it_e != junctionlist_by_end[chr].end(); it_e++) { if (end < (*it_e).end) { junctionlist_by_end[chr].insert(it_e, j); handled = true; break; } if (end == (*it_e).end) { if (start < (*it_e).start) { junctionlist_by_end[chr].insert(it_e, j); handled = true; break; } if (start == (*it_e).start) { if (strand == (*it_e).strand) { if ((*it_e).consensus != consensus) { fprintf(stderr, "ERROR: consensus mismatch:\n%s:\t%i-%i %c %i %i %i\n%s:\t%i-%i %c %i %i %i\n", (*it_e).read_id.c_str(), (*it_e).start, (*it_e).end, (*it_e).strand, (*it_e).coverage, (*it_e).consensus, (*it_e).junction_qual, read_id, start, end, strand, coverage, consensus, junction_qual) ; if (!consensus) // try to handle this case (*it_e).consensus=false ; } if (junction_qual > (*it_e).junction_qual) { (*it_e).junction_qual = junction_qual ; (*it_e).read_id = read_id ; } if ((*it_e).coverage!=0 && coverage!=0) (*it_e).coverage += coverage; else (*it_e).coverage = 0; handled = true; break ; } if (strand == '+') { junctionlist_by_end[chr].insert(it_e, j); handled = true; break; } } } continue; } if (!handled) junctionlist_by_end[chr].push_back(j); // handle list sorted by start for (; it_s !=junctionlist_by_start[chr].end(); it_s++) { if (start < (*it_s).start) { junctionlist_by_start[chr].insert(it_s, j); unlock() ; return; } if (start == (*it_s).start) { if (end < (*it_s).end) { junctionlist_by_start[chr].insert(it_s, j); unlock() ; return; } if (end == (*it_s).end) { if (strand == (*it_s).strand) { if ((*it_s).consensus != consensus) { fprintf(stderr, "ERROR: consensus mismatch:\n%s:\t%i-%i %c %i %i %i\n%s:\t%i-%i %c %i %i %i\n", (*it_s).read_id.c_str(), (*it_s).start, (*it_s).end, (*it_s).strand, (*it_s).coverage, (*it_s).consensus, (*it_s).junction_qual, read_id, start, end, strand, coverage, consensus, junction_qual) ; //assert(0) ; // this should not happen -> please report this bug and try commenting out the assertion if (!consensus) // try to handle this case (*it_s).consensus = false ; } if (junction_qual > (*it_s).junction_qual) { (*it_s).junction_qual = junction_qual ; (*it_s).read_id = read_id ; } if ((*it_s).coverage!=0 && coverage!=0) (*it_s).coverage += coverage; else (*it_s).coverage = 0; unlock() ; return; } if (strand == '+') { junctionlist_by_start[chr].insert(it_s, j); unlock() ; return; } } } continue; } junctionlist_by_start[chr].push_back(j); unlock() ; return ; }
void unlock_hash(int i) { unlock(&n_table->entries[i].mutex); }
void JunctionMap::filter_junctions(int min_coverage, int min_junction_qual, int filter_by_map, const GenomeMaps & genomemaps, int verbosity) { lock() ; if (verbosity>0) { fprintf(stdout, "Filtering junctions, requiring\n* %i as minimum confirmation count\n", min_coverage) ; if (min_junction_qual>0) fprintf(stdout, "* requiring minimum junction quality of %i (usually distance to border)\n", min_junction_qual) ; if (filter_by_map>=0) fprintf(stdout, "* requiring junction next to mapped read or annotated exon with distance at most %i bp\n", filter_by_map) ; } int total=0, used_nonconsensus=0, used_consensus=0, filtered_consensus=0, filtered_nonconsensus=0 ; int N=0, T=0 ; for (unsigned int chr=0; chr < genome->nrChromosomes(); chr++) { assert(junctionlist_by_start[chr].size() == junctionlist_by_end[chr].size()) ; if (junctionlist_by_start[chr].empty()) continue; // create copies of the two lists std::deque<Junction>::iterator it_s = junctionlist_by_start[chr].begin(); std::deque<Junction>::iterator it_e = junctionlist_by_end[chr].begin(); std::deque<Junction> list_s ; std::deque<Junction> list_e ; while (!junctionlist_by_start[chr].empty() and it_s != junctionlist_by_start[chr].end()) { list_s.push_back(*it_s) ; it_s++ ; } junctionlist_by_start[chr].clear() ; while (!junctionlist_by_end[chr].empty() and it_e != junctionlist_by_end[chr].end()) { list_e.push_back(*it_e) ; it_e++ ; } junctionlist_by_end[chr].clear() ; // filter junction list sorted by end // do all the counting for the filter step on list_s, junctions are anyway the same it_e = list_e.begin(); while (!list_e.empty() and it_e != list_e.end()) { assert((*it_e).coverage>=0); bool take = true ; if ((*it_e).junction_qual<min_junction_qual) take = false ; if ((*it_e).coverage<min_coverage) take = false ; if (((*it_e).coverage < 2*min_coverage || ((*it_e).junction_qual<30)) && min_junction_qual!=0 && (!(*it_e).consensus)) take = false ; if (take && filter_by_map>=0) { bool map=false ; for (int p=-filter_by_map; p<=filter_by_map && !map; p++) { if ((*it_e).start+p>=0 && (*it_e).start+p<(int)genome->chromosome(chr).length()) map |= genomemaps.CHR_MAP(genome->chromosome(chr), (*it_e).start+p) ; if ((*it_e).end+p>=0 && (*it_e).end+p<(int)genome->chromosome(chr).length()) map |= genomemaps.CHR_MAP(genome->chromosome(chr), (*it_e).end+p) ; } if (!map) take=false ; } if (take) { junctionlist_by_end[chr].push_back(*it_e) ; } it_e++; } // filter junction list sorted by start // do all the counting here it_s = list_s.begin(); while (!list_s.empty() and it_s != list_s.end()) { assert((*it_s).coverage>=0); bool take = true ; if ((*it_s).junction_qual<min_junction_qual) take = false ; if ((*it_s).coverage<min_coverage) take = false ; if (((*it_s).coverage < 2*min_coverage || ((*it_s).junction_qual<30)) && min_junction_qual!=0 && (!(*it_s).consensus)) take = false ; if (take && filter_by_map>=0) { bool map=false ; for (int p=-filter_by_map; p<=filter_by_map && !map; p++) { if ((*it_s).start+p>=0 && (*it_s).start+p<(int)genome->chromosome(chr).length()) map |= genomemaps.CHR_MAP(genome->chromosome(chr), (*it_s).start+p) ; if ((*it_s).end+p>=0 && (*it_s).end+p<(int)genome->chromosome(chr).length()) map |= genomemaps.CHR_MAP(genome->chromosome(chr), (*it_s).end+p) ; } if (!map) take=false ; } if (!take) { if ((*it_s).consensus) filtered_consensus++ ; else filtered_nonconsensus++ ; } else { if ((*it_s).consensus) used_consensus++ ; else used_nonconsensus++ ; junctionlist_by_start[chr].push_back(*it_s) ; } it_s++; } int n=filtered_consensus+filtered_nonconsensus+used_consensus+used_nonconsensus ; int t=used_consensus+used_nonconsensus ; if (verbosity>0) fprintf(stdout, "%s: analyzed %i junctions, accepted %i junctions (%2.1f%%)\n", genome->chromosome(chr).desc(), n, t, n>0 ? 100.0*t/n : 0.0) ; total+=junctionlist_by_start[chr].size(); N+=n ; T+=t ; } unlock() ; if (verbosity>0) fprintf(stdout, "All: analyzed %i junctions, accepted %i junctions (%2.1f%%)\n", N, T, N>0 ? 100.0*T/N : 0.0) ; fprintf(stdout,"Number of junctions in database (min support=%i): %i/%i consensus, %i/%i nonconsensus, %i total\n", min_coverage, used_consensus, used_consensus+filtered_consensus, used_nonconsensus, used_nonconsensus+filtered_nonconsensus, total); }
void create_sysfs_files() { lock(sysfs_lock); device_create_bin_file(); unlock(sysfs_lock); }
Texture::LockedPixels::~LockedPixels() { unlock(); }
void JSLock::unlock(ExecState* exec) { unlock(exec->globalData().isSharedInstance() ? LockForReal : SilenceAssertionsOnly); }
DWORD BuildCubesVert::runThread() { int i = cubeIndex; Eigen::Vector3f center = Eigen::Vector3f( (*mCubes)[i].X + (*mCubes)[i].Width/2.f, (*mCubes)[i].Y + (*mCubes)[i].Height/2.f, (*mCubes)[i].Z + (*mCubes)[i].Depth/2.f); int inside = 0; bool addFace[6]; memset(addFace, true, 6); Eigen::Vector3f points[6]; points[0] = center + Eigen::Vector3f((*mCubes)[i].Width, 0.f, 0.f); points[1] = center - Eigen::Vector3f((*mCubes)[i].Width, 0.f, 0.f); points[2] = center + Eigen::Vector3f(0.f, (*mCubes)[i].Height, 0.f); points[3] = center - Eigen::Vector3f(0.f, (*mCubes)[i].Height, 0.f); points[4] = center + Eigen::Vector3f(0.f, 0.f, (*mCubes)[i].Depth); points[5] = center - Eigen::Vector3f(0.f, 0.f, (*mCubes)[i].Depth); for (unsigned int p = 0; p < 6; p++) { kdtreeNode target; target.xyz[0] = points[p].x(); target.xyz[1] = points[p].y(); target.xyz[2] = points[p].z(); std::pair<treeType::const_iterator,double> found = kdCubes->find_nearest(target); int k = found.first->index; //add the min/max vertex to the list Eigen::Vector3f t_min = Eigen::Vector3f( (*mCubes)[k].X, (*mCubes)[k].Y, (*mCubes)[k].Z); Eigen::Vector3f t_max = Eigen::Vector3f( (*mCubes)[k].X + (*mCubes)[k].Width, (*mCubes)[k].Y + (*mCubes)[k].Height, (*mCubes)[k].Z + (*mCubes)[k].Depth); for (unsigned int p = 0; p < 6; p++) { if (points[p].x() > t_min.x() && points[p].y() > t_min.y() && points[p].z() > t_min.z() && points[p].x() < t_max.x() && points[p].y() < t_max.y() && points[p].z() < t_max.z()) { addFace[p] = false; inside++; } } } /* for (unsigned int k = 0; k < (*mCubes).size(); k++) { //add the min/max vertex to the list Eigen::Vector3f t_min = Eigen::Vector3f( (*mCubes)[k].X, (*mCubes)[k].Y, (*mCubes)[k].Z); Eigen::Vector3f t_max = Eigen::Vector3f( (*mCubes)[k].X + (*mCubes)[k].Width, (*mCubes)[k].Y + (*mCubes)[k].Height, (*mCubes)[k].Z + (*mCubes)[k].Depth); for (unsigned int p = 0; p < 6; p++) { if (points[p].x() > t_min.x() && points[p].y() > t_min.y() && points[p].z() > t_min.z() && points[p].x() < t_max.x() && points[p].y() < t_max.y() && points[p].z() < t_max.z()) { addFace[p] = false; inside++; } } if (inside >= 6) break; } */ if (inside < 6) { lock(); //add the min/max vertex to the list Eigen::Vector3f min = Eigen::Vector3f( (*mCubes)[i].X, (*mCubes)[i].Y, (*mCubes)[i].Z); Eigen::Vector3f max = Eigen::Vector3f( (*mCubes)[i].X + (*mCubes)[i].Width, (*mCubes)[i].Y + (*mCubes)[i].Height, (*mCubes)[i].Z + (*mCubes)[i].Depth); // Create Vertices float3 verts[8] = { float3(min.x(), min.y(), max.z()), float3(max.x(), min.y(), max.z()), float3(max.x(), max.y(), max.z()), float3(min.x(), max.y(), max.z()), float3(min.x(), min.y(), min.z()), float3(max.x(), min.y(), min.z()), float3(max.x(), max.y(), min.z()), float3(min.x(), max.y(), min.z()), }; // Add Indices unsigned int startIndex = mVertices->size(); // Right if (addFace[0]) { for (unsigned int index = 0; index < 6; index++) { mIndices->push_back(VolumeIndicesRight[index] + startIndex); } } // Left if (addFace[1]) { for (unsigned int index = 0; index < 6; index++) { mIndices->push_back(VolumeIndicesLeft[index] + startIndex); } } // Back if (addFace[2]) { for (unsigned int index = 0; index < 6; index++) { mIndices->push_back(VolumeIndicesBack[index] + startIndex); } } // Front if (addFace[3]) { for (unsigned int index = 0; index < 6; index++) { mIndices->push_back(VolumeIndicesFront[index] + startIndex); } } // Top if (addFace[4]) { for (unsigned int index = 0; index < 6; index++) { mIndices->push_back(VolumeIndicesTop[index] + startIndex); } } // Bottom if (addFace[5]) { for (unsigned int index = 0; index < 6; index++) { mIndices->push_back(VolumeIndicesBottom[index] + startIndex); } } // Add Vertices for (unsigned int index = 0; index < 8; index++) { mVertices->push_back(VertexPositionColor(verts[index], verts[index])); } unlock(); } return 0; }
sampleFrameA * Mixer::renderNextBuffer() { MicroTimer timer; static song::playPos last_metro_pos = -1; FxMixer * fxm = engine::fxMixer(); song::playPos p = engine::getSong()->getPlayPos( song::Mode_PlayPattern ); if( engine::getSong()->playMode() == song::Mode_PlayPattern && engine::getPianoRoll()->isRecording() == true && p != last_metro_pos && p.getTicks() % (DefaultTicksPerTact / 4 ) == 0 ) { addPlayHandle( new samplePlayHandle( "misc/metronome01.ogg" ) ); last_metro_pos = p; } lockInputFrames(); // swap buffer m_inputBufferWrite = ( m_inputBufferWrite + 1 ) % 2; m_inputBufferRead = ( m_inputBufferRead + 1 ) % 2; // clear new write buffer m_inputBufferFrames[ m_inputBufferWrite ] = 0; unlockInputFrames(); // now we have to make sure no other thread does anything bad // while we're acting... lock(); // remove all play-handles that have to be deleted and delete // them if they still exist... // maybe this algorithm could be optimized... ConstPlayHandleList::Iterator it_rem = m_playHandlesToRemove.begin(); while( it_rem != m_playHandlesToRemove.end() ) { PlayHandleList::Iterator it = qFind( m_playHandles.begin(), m_playHandles.end(), *it_rem ); if( it != m_playHandles.end() ) { delete *it; m_playHandles.erase( it ); } it_rem = m_playHandlesToRemove.erase( it_rem ); } // rotate buffers m_writeBuffer = ( m_writeBuffer + 1 ) % m_poolDepth; m_readBuffer = ( m_readBuffer + 1 ) % m_poolDepth; m_writeBuf = m_bufferPool[m_writeBuffer]; m_readBuf = m_bufferPool[m_readBuffer]; // clear last audio-buffer clearAudioBuffer( m_writeBuf, m_framesPerPeriod ); // prepare master mix (clear internal buffers etc.) fxm->prepareMasterMix(); // create play-handles for new notes, samples etc. engine::getSong()->processNextBuffer(); // STAGE 1: run and render all play handles MixerWorkerThread::fillJobQueue<PlayHandleList>( m_playHandles ); MixerWorkerThread::startAndWaitForJobs(); // remove all play handles which are done for( PlayHandleList::Iterator it = m_playHandles.begin(); it != m_playHandles.end(); ) { if( ( *it )->affinityMatters() && ( *it )->affinity() != QThread::currentThread() ) { ++it; continue; } if( ( *it )->done() ) { delete *it; it = m_playHandles.erase( it ); } else { ++it; } } // STAGE 2: process effects of all instrument- and sampletracks MixerWorkerThread::fillJobQueue<QVector<AudioPort *> >( m_audioPorts ); MixerWorkerThread::startAndWaitForJobs(); // STAGE 3: do master mix in FX mixer fxm->masterMix( m_writeBuf ); unlock(); emit nextAudioBuffer(); // and trigger LFOs EnvelopeAndLfoParameters::instances()->trigger(); Controller::triggerFrameCounter(); const float new_cpu_load = timer.elapsed() / 10000.0f * processingSampleRate() / m_framesPerPeriod; m_cpuLoad = tLimit( (int) ( new_cpu_load * 0.1f + m_cpuLoad * 0.9f ), 0, 100 ); return m_readBuf; }
/* * This routine runs a command and waits for its completion. Stdout and stderr output are returned in *out and *err * respectively. The function returns the exit status of the command. * Valid flags are: * MPR_CMD_NEW_SESSION Create a new session on Unix * MPR_CMD_SHOW Show the commands window on Windows * MPR_CMD_IN Connect to stdin */ int mprRunCmdV(MprCmd *cmd, int argc, char **argv, char **out, char **err, int flags) { int rc, status; if (err) { *err = 0; flags |= MPR_CMD_ERR; } else { flags &= ~MPR_CMD_ERR; } if (out) { *out = 0; flags |= MPR_CMD_OUT; } else { flags &= ~MPR_CMD_OUT; } if (flags & MPR_CMD_OUT) { mprFree(cmd->stdoutBuf); cmd->stdoutBuf = mprCreateBuf(cmd, MPR_BUFSIZE, -1); } if (flags & MPR_CMD_ERR) { mprFree(cmd->stderrBuf); cmd->stderrBuf = mprCreateBuf(cmd, MPR_BUFSIZE, -1); } mprSetCmdCallback(cmd, cmdCallback, NULL); lock(cmd); rc = mprStartCmd(cmd, argc, argv, NULL, flags); /* * Close the pipe connected to the client's stdin */ if (cmd->files[MPR_CMD_STDIN].fd >= 0) { mprCloseCmdFd(cmd, MPR_CMD_STDIN); } if (rc < 0) { if (err) { if (rc == MPR_ERR_CANT_ACCESS) { *err = mprAsprintf(cmd, -1, "Can't access command %s", cmd->program); } else if (rc == MPR_ERR_CANT_OPEN) { *err = mprAsprintf(cmd, -1, "Can't open standard I/O for command %s", cmd->program); } else if (rc == MPR_ERR_CANT_CREATE) { *err = mprAsprintf(cmd, -1, "Can't create process for %s", cmd->program); } } unlock(cmd); return rc; } if (cmd->flags & MPR_CMD_DETACH) { unlock(cmd); return 0; } unlock(cmd); if (mprWaitForCmd(cmd, -1) < 0) { return MPR_ERR_NOT_READY; } lock(cmd); if (mprGetCmdExitStatus(cmd, &status) < 0) { unlock(cmd); return MPR_ERR; } if (err && flags & MPR_CMD_ERR) { mprAddNullToBuf(cmd->stderrBuf); *err = mprGetBufStart(cmd->stderrBuf); } if (out && flags & MPR_CMD_OUT) { mprAddNullToBuf(cmd->stdoutBuf); *out = mprGetBufStart(cmd->stdoutBuf); } unlock(cmd); return status; }
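/*
 * A hedged usage sketch for the routine above; mprCreateCmd is assumed to be
 * the matching constructor in this API, and the argv shape is illustrative.
 */
void runListing(MprCtx ctx)
{
    MprCmd  *cmd;
    char    *out, *err;
    char    *argv[] = { "ls", "-l", NULL };
    int     status;

    cmd = mprCreateCmd(ctx);                        /* assumed constructor */
    status = mprRunCmdV(cmd, 2, argv, &out, &err, 0);
    if (status != 0 && err) {
        /* err points into cmd->stderrBuf, per the routine above */
        mprLog(ctx, 2, "command failed (%d): %s", status, err);
    }
    mprFree(cmd);
}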
/************ serial ports initialization ***************/ int sinit() { int i; struct stty *t; /* initialize stty[] and serial ports */ for (i = 0; i < NR_STTY; i++){ prints("sinit:"); printi(i); t = &stty[i]; /* initialize data structures and pointers */ if (i==0) t->port = 0x3F8; /* COM1 base address */ else t->port = 0x2F8; /* COM2 base address */ t->inchars.value = t->inlines.value = 0; t->inlines.queue = t->inchars.queue = 0; t->mutex.value = 1; t->mutex.queue = 0; t->outspace.value = OUTBUFLEN; t->outspace.queue = 0; t->inhead = t->intail = 0; t->ehead = t->etail = t->e_count = 0; t->outhead =t->outtail = t->tx_on = 0; // initialize control chars; NOT used in MTX but show how anyway t->ison = t->echo = 1; /* is on and echoing */ t->erase = '\b'; t->kill = '@'; t->intr = (char)0177; /* del */ t->quit = (char)034; /* control-\ */ t->x_on = (char)021; /* control-Q */ t->x_off = (char)023; /* control-S */ t->eof = (char)004; /* control-D */ lock(); // CLI; no interrupts out_byte(t->port+MCR, 0x09); /* IRQ4 on, DTR on */ out_byte(t->port+IER, 0x00); /* disable serial port interrupts */ out_byte(t->port+LCR, 0x80); /* ready to use 3f9,3f8 as divisor */ out_byte(t->port+DIVH, 0x00); out_byte(t->port+DIVL, 12); /* divisor = 12 ===> 9600 baud */ /******** term 9600 /dev/ttyS0: 8 bits/char, no parity *************/ out_byte(t->port+LCR, 0x03); /******************************************************************* Writing to 3fc ModemControl tells modem : DTR, then RTS ==> let modem respond as a DCE. Here we must let the (crossed) cable tell the TVI terminal that the "DCE" has DSR and CTS. So we turn the port's DTR and RTS on. ********************************************************************/ out_byte(t->port+MCR, 0x0B); /* 1011 ==> IRQ4, RTS, DTR on */ out_byte(t->port+IER, 0x01); /* Enable Rx interrupt, Tx off */ unlock(); enable_irq(4-i); // COM1: IRQ4; COM2: IRQ3 /* show greeting message */ //USE bputc() to PRINT MESSAGE ON THE SERIAL PORT: serial port # ready } return 0; }
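/*
 * The divisor latch values above follow from the 16550's 115200 Hz reference
 * clock: divisor = 115200 / baud, so DIVL=12, DIVH=0 selects 9600 baud.
 * A one-line sketch of that relation:
 */
unsigned uart_divisor(unsigned baud)
{
    return 115200u / baud;      /* 9600 -> 12, 38400 -> 3, 115200 -> 1 */
}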
int WaylandNativeWindow::dequeueBuffer(BaseNativeWindowBuffer **buffer, int *fenceFd){ HYBRIS_TRACE_BEGIN("wayland-platform", "dequeueBuffer", ""); WaylandNativeWindowBuffer *wnb=NULL; TRACE("%p", buffer); lock(); readQueue(false); HYBRIS_TRACE_BEGIN("wayland-platform", "dequeueBuffer_wait_for_buffer", ""); HYBRIS_TRACE_COUNTER("wayland-platform", "m_freeBufs", "%i", m_freeBufs); while (m_freeBufs==0) { HYBRIS_TRACE_COUNTER("wayland-platform", "m_freeBufs", "%i", m_freeBufs); readQueue(true); } std::list<WaylandNativeWindowBuffer *>::iterator it = m_bufList.begin(); for (; it != m_bufList.end(); it++) { if ((*it)->busy) continue; if ((*it)->youngest == 1) continue; break; } if (it==m_bufList.end()) { HYBRIS_TRACE_BEGIN("wayland-platform", "dequeueBuffer_worst_case_scenario", ""); HYBRIS_TRACE_END("wayland-platform", "dequeueBuffer_worst_case_scenario", ""); it = m_bufList.begin(); for (; it != m_bufList.end() && (*it)->busy; it++) {} } if (it==m_bufList.end()) { unlock(); HYBRIS_TRACE_BEGIN("wayland-platform", "dequeueBuffer_no_free_buffers", ""); HYBRIS_TRACE_END("wayland-platform", "dequeueBuffer_no_free_buffers", ""); TRACE("%p: no free buffers", buffer); return NO_ERROR; } wnb = *it; assert(wnb!=NULL); HYBRIS_TRACE_END("wayland-platform", "dequeueBuffer_wait_for_buffer", ""); /* If the buffer doesn't match the window anymore, re-allocate */ if (wnb->width != m_window->width || wnb->height != m_window->height || wnb->format != m_format || wnb->usage != m_usage) { TRACE("wnb:%p,win:%p %i,%i %i,%i x%x,x%x x%x,x%x", wnb,m_window, wnb->width,m_window->width, wnb->height,m_window->height, wnb->format,m_format, wnb->usage,m_usage); destroyBuffer(wnb); m_bufList.erase(it); wnb = addBuffer(); } wnb->busy = 1; *buffer = wnb; queue.push_back(wnb); --m_freeBufs; HYBRIS_TRACE_COUNTER("wayland-platform", "m_freeBufs", "%i", m_freeBufs); HYBRIS_TRACE_BEGIN("wayland-platform", "dequeueBuffer_gotBuffer", "-%p", wnb); HYBRIS_TRACE_END("wayland-platform", "dequeueBuffer_gotBuffer", "-%p", wnb); HYBRIS_TRACE_END("wayland-platform", "dequeueBuffer_wait_for_buffer", ""); unlock(); return NO_ERROR; }
void DOMStorageDBThread::ThreadFunc() { nsresult rv = InitDatabase(); MonitorAutoLock lockMonitor(mThreadObserver->GetMonitor()); if (NS_FAILED(rv)) { mStatus = rv; mStopIOThread = true; return; } // Create an nsIThread for the current PRThread, so we can observe runnables // dispatched to it. nsCOMPtr<nsIThread> thread = NS_GetCurrentThread(); nsCOMPtr<nsIThreadInternal> threadInternal = do_QueryInterface(thread); MOZ_ASSERT(threadInternal); // Should always succeed. threadInternal->SetObserver(mThreadObserver); while (MOZ_LIKELY(!mStopIOThread || mPreloads.Length() || mPendingTasks.HasTasks() || mThreadObserver->HasPendingEvents())) { // Process xpcom events first. while (MOZ_UNLIKELY(mThreadObserver->HasPendingEvents())) { mThreadObserver->ClearPendingEvents(); MonitorAutoUnlock unlock(mThreadObserver->GetMonitor()); bool processedEvent; do { rv = thread->ProcessNextEvent(false, &processedEvent); } while (NS_SUCCEEDED(rv) && processedEvent); } if (MOZ_UNLIKELY(TimeUntilFlush() == 0)) { // Flush time is up or flush has been forced, do it now. UnscheduleFlush(); if (mPendingTasks.Prepare()) { { MonitorAutoUnlock unlockMonitor(mThreadObserver->GetMonitor()); rv = mPendingTasks.Execute(this); } if (!mPendingTasks.Finalize(rv)) { mStatus = rv; NS_WARNING("localStorage DB access broken"); } } NotifyFlushCompletion(); } else if (MOZ_LIKELY(mPreloads.Length())) { nsAutoPtr<DBOperation> op(mPreloads[0]); mPreloads.RemoveElementAt(0); { MonitorAutoUnlock unlockMonitor(mThreadObserver->GetMonitor()); op->PerformAndFinalize(this); } if (op->Type() == DBOperation::opPreloadUrgent) { SetDefaultPriority(); // urgent preload unscheduled } } else if (MOZ_UNLIKELY(!mStopIOThread)) { lockMonitor.Wait(TimeUntilFlush()); } } // thread loop mStatus = ShutdownDatabase(); if (threadInternal) { threadInternal->SetObserver(nullptr); } }
KDLockFile::Private::~Private() { unlock(); }
void runtime·chanrecv(ChanType *t, Hchan* c, byte *ep, bool *selected, bool *received) { SudoG *sg; SudoG mysg; G *gp; int64 t0; if(runtime·gcwaiting) runtime·gosched(); if(debug) runtime·printf("chanrecv: chan=%p\n", c); if(c == nil) { USED(t); if(selected != nil) { *selected = false; return; } runtime·park(nil, nil, "chan receive (nil chan)"); return; // not reached } t0 = 0; mysg.releasetime = 0; if(runtime·blockprofilerate > 0) { t0 = runtime·cputicks(); mysg.releasetime = -1; } runtime·lock(c); if(c->dataqsiz > 0) goto asynch; if(c->closed) goto closed; sg = dequeue(&c->sendq); if(sg != nil) { if(raceenabled) racesync(c, sg); runtime·unlock(c); if(ep != nil) c->elemalg->copy(c->elemsize, ep, sg->elem); gp = sg->g; gp->param = sg; if(sg->releasetime) sg->releasetime = runtime·cputicks(); runtime·ready(gp); if(selected != nil) *selected = true; if(received != nil) *received = true; return; } if(selected != nil) { runtime·unlock(c); *selected = false; return; } mysg.elem = ep; mysg.g = g; mysg.selgen = NOSELGEN; g->param = nil; enqueue(&c->recvq, &mysg); runtime·park(runtime·unlock, c, "chan receive"); if(g->param == nil) { runtime·lock(c); if(!c->closed) runtime·throw("chanrecv: spurious wakeup"); goto closed; } if(received != nil) *received = true; if(mysg.releasetime > 0) runtime·blockevent(mysg.releasetime - t0, 2); return; asynch: if(c->qcount <= 0) { if(c->closed) goto closed; if(selected != nil) { runtime·unlock(c); *selected = false; if(received != nil) *received = false; return; } mysg.g = g; mysg.elem = nil; mysg.selgen = NOSELGEN; enqueue(&c->recvq, &mysg); runtime·park(runtime·unlock, c, "chan receive"); runtime·lock(c); goto asynch; } if(raceenabled) runtime·raceacquire(chanbuf(c, c->recvx)); if(ep != nil) c->elemalg->copy(c->elemsize, ep, chanbuf(c, c->recvx)); c->elemalg->copy(c->elemsize, chanbuf(c, c->recvx), nil); if(++c->recvx == c->dataqsiz) c->recvx = 0; c->qcount--; sg = dequeue(&c->sendq); if(sg != nil) { gp = sg->g; runtime·unlock(c); if(sg->releasetime) sg->releasetime = runtime·cputicks(); runtime·ready(gp); } else runtime·unlock(c); if(selected != nil) *selected = true; if(received != nil) *received = true; if(mysg.releasetime > 0) runtime·blockevent(mysg.releasetime - t0, 2); return; closed: if(ep != nil) c->elemalg->copy(c->elemsize, ep, nil); if(selected != nil) *selected = true; if(received != nil) *received = false; if(raceenabled) runtime·raceacquire(c); runtime·unlock(c); if(mysg.releasetime > 0) runtime·blockevent(mysg.releasetime - t0, 2); }
UniLock::~UniLock() { unlock(); }
~ScopedLocker() { unlock(); }
/* * The host timer performs maintenance activities and fires once per second while there are active requests. * When multi-threaded, the host timer runs as an event off the service thread. Because we lock the host here, * connections cannot be deleted while we are modifying the list. */ static void hostTimer(MaHost *host, MprEvent *event) { Mpr *mpr; MaStage *stage; MaConn *conn; MprModule *module; MaHttp *http; int next, count; mprAssert(event); http = host->server->http; /* * Locking ensures connections won't be deleted */ lock(host); updateCurrentDate(host); mprLog(host, 8, "hostTimer: %d active connections", mprGetListCount(host->connections)); /* * Check for any expired connections */ for (count = 0, next = 0; (conn = mprGetNextItem(host->connections, &next)) != 0; count++) { /* * Workaround for a GCC bug when comparing two 64bit numerics directly. Need a temporary. */ int64 diff = conn->expire - host->now; if (diff < 0 && !mprGetDebugMode(host)) { conn->keepAliveCount = 0; if (!conn->disconnected) { if (conn->request) { mprLog(host, 6, "Open request timed out due to inactivity: %s", conn->request->url); } else { mprLog(host, 6, "Idle connection timed out"); } conn->disconnected = 1; mprDisconnectSocket(conn->sock); } } } /* Check for unloadable modules - they must be idle */ mpr = mprGetMpr(http); if (mprGetListCount(host->connections) == 0) { for (next = 0; (module = mprGetNextItem(mpr->moduleService->modules, &next)) != 0; ) { if (module->timeout) { if (module->lastActivity + module->timeout < host->now) { if ((stage = maLookupStage(http, module->name)) != 0) { mprLog(host, 2, "Unloading inactive module %s", module->name); if (stage->match) { mprError(host, "Can't unload modules with match routines"); module->timeout = 0; } else { maUnloadModule(http, module->name); stage->flags |= MA_STAGE_UNLOADED; } } else { maUnloadModule(http, module->name); } } else { count++; } } } } if (count == 0) { mprFree(event); host->timer = 0; } unlock(host); }
int MaClient::sendRequest(char *host, int port, MprBuf* hdrBuf, char *postData, int postLen) { int len, rc; lock(); reset(); mprLog(3, tMod, "sendRequest: %s:%d\n", host, port); timestamp = mprGetTime(0); if (timeoutPeriod < 0) { timeoutPeriod = MPR_HTTP_CLIENT_TIMEOUT; } if (timeoutPeriod > 0) { if (!mprGetDebugMode()) { timer = new MprTimer(MPR_HTTP_TIMER_PERIOD, timeoutWrapper, (void *) this); } } if (sock == 0) { sock = new MprSocket(); mprLog(3, tMod, "Opening new socket on: %s:%d\n", host, port); rc = sock->openClient(host, port, MPR_SOCKET_NODELAY); if (rc < 0) { mprLog(MPR_ERROR, tMod, "Can't open socket on %s:%d, %d\n", host, port, rc); unlock(); sock->dispose(); sock = 0; return rc; } sock->setBufSize(-1, MPR_HTTP_CLIENT_BUFSIZE); } else { mprLog(3, tMod, "Reusing Keep-Alive socket on: %s:%d\n", host, port); } // // Remove this flush when pipelining is supported // inBuf->flush(); fd = sock->getFd(); // // Flush to the socket with any post data. Writes can fail because the // server prematurely closes a keep-alive connection. // len = hdrBuf->getLength(); if ((rc = sock->write(hdrBuf->getStart(), len)) != len) { flags |= MPR_HTTP_TERMINATED; unlock(); mprLog(MPR_ERROR, tMod, "Can't write to socket on %s:%d, %d\n", host, port, rc); return rc; } hdrBuf->addNull(); if (postData) { sock->setBlockingMode(1); if ((rc = sock->write(postData, postLen)) != postLen) { flags |= MPR_HTTP_TERMINATED; unlock(); mprLog(MPR_ERROR, tMod, "Can't write post data to socket on %s:%d, %d\n", host, port, rc); return rc; } sock->setBlockingMode(0); } sock->setCallback(readEventWrapper, (void*) this, 0, MPR_READABLE); // // If no callback, then we must block // if (callback == 0) { unlock(); while (state != MPR_HTTP_CLIENT_DONE) { // // If multithreaded and the events thread is not yet running, // we still want to work. // #if BLD_FEATURE_MULTITHREAD if (mprGetMpr()->isRunningEventsThread()) { completeCond->waitForCond(250); } else #endif mprGetMpr()->serviceEvents(1, 100); } } else { unlock(); } return 0; }
/* * See locking note for maAddConn */ void maRemoveConn(MaHost *host, MaConn *conn) { lock(host); mprRemoveItem(host->connections, conn); unlock(host); }
void CacheEntry::BackgroundOp(uint32_t aOperations, bool aForceAsync) { mLock.AssertCurrentThreadOwns(); if (!CacheStorageService::IsOnManagementThread() || aForceAsync) { if (mBackgroundOperations.Set(aOperations)) CacheStorageService::Self()->Dispatch(this); LOG(("CacheEntry::BackgroundOp this=%p dispatch of %x", this, aOperations)); return; } { mozilla::MutexAutoUnlock unlock(mLock); MOZ_ASSERT(CacheStorageService::IsOnManagementThread()); if (aOperations & Ops::FRECENCYUPDATE) { ++mUseCount; #ifndef M_LN2 #define M_LN2 0.69314718055994530942 #endif // Half-life is dynamic, in seconds. static double half_life = CacheObserver::HalfLifeSeconds(); // Must convert the per-second rate to per-microsecond since PR_Now() gives usecs. static double const decay = (M_LN2 / half_life) / static_cast<double>(PR_USEC_PER_SEC); double now_decay = static_cast<double>(PR_Now()) * decay; if (mFrecency == 0) { mFrecency = now_decay; } else { // TODO: when C++11 enabled, use std::log1p(n) which is equal to log(n + 1) but // more precise. mFrecency = log(exp(mFrecency - now_decay) + 1) + now_decay; } LOG(("CacheEntry FRECENCYUPDATE [this=%p, frecency=%1.10f]", this, mFrecency)); // Because CacheFile::Set*() are not thread-safe to use (uses WeakReference that // is not thread-safe) we must post to the main thread... nsRefPtr<nsRunnableMethod<CacheEntry> > event = NS_NewRunnableMethodWithArg<double>(this, &CacheEntry::StoreFrecency, mFrecency); NS_DispatchToMainThread(event); } if (aOperations & Ops::REGISTER) { LOG(("CacheEntry REGISTER [this=%p]", this)); CacheStorageService::Self()->RegisterEntry(this); } if (aOperations & Ops::UNREGISTER) { LOG(("CacheEntry UNREGISTER [this=%p]", this)); CacheStorageService::Self()->UnregisterEntry(this); } } // unlock if (aOperations & Ops::CALLBACKS) { LOG(("CacheEntry CALLBACKS (invoke) [this=%p]", this)); InvokeCallbacks(); } }
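/*
 * The frecency update above works in the log domain: each hit adds 1 to
 * exp(f - now*decay), so older hits lose half their weight every half-life.
 * A self-contained sketch of the same math, using seconds instead of
 * PR_Now() microseconds:
 */
#include <cmath>

#ifndef M_LN2
#define M_LN2 0.69314718055994530942
#endif

double updateFrecency(double frecency, double nowSeconds, double halfLifeSeconds)
{
    const double decay = M_LN2 / halfLifeSeconds;   // per-second decay rate
    const double nowDecay = nowSeconds * decay;
    if (frecency == 0.0) {
        return nowDecay;                            // first hit
    }
    // log(exp(old) + exp(nowDecay)) rewritten relative to nowDecay,
    // exactly as in BackgroundOp() above
    return std::log(std::exp(frecency - nowDecay) + 1.0) + nowDecay;
}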
void QWSWindowSurface::endPaint(const QRegion &) { unlock(); }