void LsapiConn::onTimer()
{
    if ( m_respState && !m_reqReceived
        && ( DateTime::s_curTime - m_lReqSentTime >= 3 ) )
    {
        LOG_NOTICE(( getLogger(),
            "[%s] No request delivery notification has been received from "
            "LSAPI application, possible dead lock.",
            getLogId() ));
        if ( ((LsapiWorker *)getWorker())->getConfig().getSelfManaged() )
            getWorker()->addNewProcess();
        else
            connError( ETIMEDOUT );
        return;
    }
/*
    if ( m_lLastRespRecvTime )
    {
        long tm = time( NULL );
        long delta = tm - m_lLastRespRecvTime;
        if (( delta > getWorker()->getTimeout() )&&( m_iRespBodyRecv ))
        {
            if ( m_pChunkIS )
                LOG_INFO(( getLogger(),
                    "[%s] Timeout, partial chunk encoded body received,"
                    " received: %d, chunk len: %d, remain: %d!",
                    getLogId(), m_iRespBodyRecv, m_pChunkIS->getChunkLen(),
                    m_pChunkIS->getChunkRemain() ));
            else
                LOG_INFO(( getLogger(),
                    "[%s] Timeout, partial response body received,"
                    " body len: %d, received: %d!",
                    getLogId(), m_iRespBodySize, m_iRespBodyRecv ));
            setState( CLOSING );
            if ( getConnector() )
                getConnector()->endResponse( 0, 0 );
            return;
        }
        else if (( m_pChunkIS )&&( delta > 2 ))
        {
            if ((!m_pChunkIS->getChunkLen())&&( getConnector() ))
            {
                LOG_INFO(( getLogger(),
                    "[%s] Missing trailing CRLF in Chunked Encoding!",
                    getLogId() ));
                setState( CLOSING );
                getConnector()->endResponse( 0, 0 );
                return;
            }
        }
    }
*/
    ExtConn::onTimer();
}
void ProxyConn::onTimer()
{
//    if (!( getEvents() & POLLIN ))
//    {
//        LS_WARN( this, "Oops! POLLIN is turned off for this proxy connection,"
//                 " turn it on, this should never happen!!!!");
//        continueRead();
//    }
//    if (( m_iTotalPending > 0 )&& !( getEvents() & POLLOUT ))
//    {
//        LS_WARN( this, "Oops! POLLOUT is turned off while there is pending data,"
//                 " turn it on, this should never happen!!!!");
//        continueWrite();
//    }
    if (m_lLastRespRecvTime)
    {
        long tm = time(NULL);
        long delta = tm - m_lLastRespRecvTime;
        if ((delta > getWorker()->getTimeout()) && (m_iRespBodyRecv))
        {
            if (m_pChunkIS)
            {
                LS_INFO(this, "Timeout, partial chunk encoded body received,"
                        " received: %d, chunk len: %d, remain: %d!",
                        m_iRespBodyRecv, m_pChunkIS->getChunkLen(),
                        m_pChunkIS->getChunkRemain());
            }
            else
                LS_INFO(this, "Timeout, partial response body received,"
                        " body len: %d, received: %d!",
                        m_iRespBodySize, m_iRespBodyRecv);
            setState(ABORT);
            getConnector()->endResponse(0, 0);
            return;
        }
        else if ((m_pChunkIS) && (!m_pChunkIS->getChunkLen()) && (delta > 1))
        {
            if (getConnector())
            {
                LS_DBG_L(this, "Missing trailing CRLF in Chunked Encoding,"
                         " remain: %d!", m_pChunkIS->getChunkRemain());
//                const char * p = m_pChunkIS->getLastBytes();
//                LS_INFO(this,
//                        "Last 8 bytes are: %#x %#x %#x %#x %#x %#x %#x %#x",
//                        p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7]);
//                HttpReq * pReq = getConnector()->getHttpSession()->getReq();
//                pReq->dumpHeader();
                setState(CLOSING);
                getConnector()->endResponse(0, 0);
                return;
            }
        }
    }
    ExtConn::onTimer();
}
int main(int argc, char* argv[])
{
    // Create the worker pool
    initWorkers();
    sem_t workPool;
    sem_init(&workPool, 0, THREAD_WORKERS);

    // Create the socket
    int sockfd, set = 1;
    if ((sockfd = socket(AF_INET, SOCK_STREAM, 0)) == -1)
    {
        std::cout << "Unable to create a socket!" << std::endl;
        return 1;
    }
    setsockopt(sockfd, SOL_SOCKET, SO_REUSEADDR, &set, sizeof(set));

    // Initialize the socket address structure
    struct sockaddr_in address;
    bzero(&address, sizeof(address));
    address.sin_family = AF_INET;
    address.sin_addr.s_addr = INADDR_ANY;
    address.sin_port = htons(SERVER_PORT);

    // Bind the socket address to the socket
    if (bind(sockfd, (struct sockaddr *) &address, sizeof(address)) == -1)
    {
        std::cout << "Cannot bind to the socket!" << std::endl;
        return 1;
    }

    // Convert the socket to a listening socket
    if (listen(sockfd, CONNECT_QUEUE) == -1)
    {
        std::cout << "Unable to make the socket a listening socket!" << std::endl;
        return 1;
    }

    // Loop indefinitely to accept connections
    while (true)
    {
        int conn;
        struct sockaddr_in client;
        socklen_t clientLen = sizeof(client);
        bzero(&client, clientLen);
        if ((conn = accept(sockfd, (struct sockaddr *) &client, &clientLen)) == -1)
        {
            std::cout << "Unable to accept any connections!" << std::endl;
            return 1;
        }
        //fcntl(conn, F_SETFL, fcntl(conn, F_GETFL) | O_NONBLOCK);

        sem_wait(&workPool);
        pthread_t thread = getWorker();
        // Note: &conn points at a stack variable that the next accept() call
        // overwrites; see the hand-off sketch after this function.
        pthread_create(&thread, NULL, threadHelper, &conn);
        sem_post(&workPool);
    }

    // Should never get here
    sem_destroy(&workPool);
    pthread_exit(NULL);
    return 1;
}
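// A minimal sketch, not part of the original program: the accept loop above
// passes &conn, a pointer to a loop-local variable that the next accept()
// overwrites, so the worker thread may read a different descriptor than the
// one it was given. One common fix is to hand each thread its own
// heap-allocated copy of the descriptor. The helper name threadHelperOwned
// and its ownership convention are assumptions made for illustration only.
#include <cstddef>
#include <pthread.h>
#include <unistd.h>

static void *threadHelperOwned(void *arg)
{
    int fd = *static_cast<int *>(arg);   // copy the descriptor
    delete static_cast<int *>(arg);      // free the hand-off object
    // ... serve the request on fd ...
    close(fd);
    return NULL;
}

// Inside the accept loop, the pthread_create() call would then become:
//     int *connCopy = new int(conn);
//     pthread_create(&thread, NULL, threadHelperOwned, connCopy);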
bool RequestWorkerPool::sendRequest(DataRequest * req)
{
    RequestWorker * tmp = getWorker(); //HttpWorkerPool::getWorker(); //new HttpRequestWorker(request);
    tmp->setInputrequest(req);
    tmp->setIsBeingUsed(true);
    tmp->execute();
    return true;
}
void CTP_Executor::Core()
{
    std::queue<CTP_Task*> task_queue;
    CTP_Task* task = NULL;
    CTP_Worker* worker = NULL;

    // Run until shutdown is requested.
    while (!m_isShutdown)
    {
        m_queueLock.mtLock();
        if (m_blockQueue.empty())
        {
            m_queueLock.mtWait();
        }
        if (m_blockQueue.empty())
        {
            m_queueLock.mtUnlock();
            continue;
        }
        // Move the pending tasks to a local queue so the lock is held briefly.
        while (!m_blockQueue.empty())
        {
            task_queue.push(m_blockQueue.front());
            m_blockQueue.pop();
        }
        m_queueLock.mtUnlock();

        // Dispatch the drained tasks outside the lock.
        while (!task_queue.empty())
        {
            task = task_queue.front();
            task_queue.pop();
            worker = getWorker();
            worker->setTask(task);
        }
    }

    // Drain any tasks still queued when shutdown is requested.
    m_queueLock.mtLock();
    while (!m_blockQueue.empty())
    {
        task = m_blockQueue.front();
        m_blockQueue.pop();
        worker = getWorker();
        worker->setTask(task);
    }
    m_queueLock.mtUnlock();
}
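// A minimal sketch, not part of the original code: the same drain-and-dispatch
// loop as CTP_Executor::Core expressed with standard C++11 primitives instead
// of the custom mtLock/mtWait wrappers. The class and member names here are
// placeholders chosen for illustration.
#include <condition_variable>
#include <functional>
#include <mutex>
#include <queue>

class DrainingExecutor
{
public:
    using Task = std::function<void()>;

    void submit(Task t)
    {
        {
            std::lock_guard<std::mutex> lk(m_lock);
            m_pending.push(std::move(t));
        }
        m_cv.notify_one();
    }

    void shutdown()
    {
        {
            std::lock_guard<std::mutex> lk(m_lock);
            m_stop = true;
        }
        m_cv.notify_all();
    }

    // Wait for work, move it to a local queue under the lock, run it outside
    // the lock, and keep draining until shutdown is requested and the queue
    // is empty, mirroring the structure of CTP_Executor::Core above.
    void run()
    {
        while (true)
        {
            std::queue<Task> local;
            {
                std::unique_lock<std::mutex> lk(m_lock);
                m_cv.wait(lk, [this] { return m_stop || !m_pending.empty(); });
                if (m_stop && m_pending.empty())
                    break;
                std::swap(local, m_pending);
            }
            while (!local.empty())
            {
                local.front()();
                local.pop();
            }
        }
    }

private:
    std::mutex m_lock;
    std::condition_variable m_cv;
    std::queue<Task> m_pending;
    bool m_stop = false;
};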
int LsapiConn::close()
{
    ExtConn::close();
    if ( m_pid > 0 )
    {
        ((LsapiWorker *)getWorker())->moveToStopList( m_pid );
        m_pid = -1;
    }
    return 0;
}
int JConn::processPacketHeader( unsigned char * &p )
{
    if (( *p != AJP_RESP_PREFIX_B1 )||( *(p+1) != AJP_RESP_PREFIX_B2 ))
    {
        LOG_ERR(( getLogger(), "[%s] Invalid AJP response signature %x%x",
                  getLogId(), (int) *p, (int) *(p+1) ));
        return -1;
    }
    p += 2;
    m_curPacketSize = getInt( p );
    if ( m_curPacketSize > AJP_MAX_PKT_BODY_SIZE )
    {
        LOG_ERR(( getLogger(), "[%s] packet size is too large - %d",
                  getLogId(), m_curPacketSize ));
        return -1;
    }
    m_packetType = *p++;
    m_packetLeft = m_curPacketSize - 1;
    switch( m_packetType )
    {
    case AJP13_RESP_BODY_CHUNK:
        m_iPacketState = CHUNK_LEN;
        break;
    case AJP13_RESP_HEADERS:
        m_iPacketState = STATUS_CODE;
        break;
    case AJP13_END_RESP:
        if ( *p != 1 )
        {
            if ( D_ENABLED( DL_LESS ) )
                LOG_D(( getLogger(),
                        "[%s] close connection required by servlet engine %s ",
                        getLogId(), getWorker()->getURL() ));
            setState( CLOSING );
        }
        p++;
        if ( getConnector() )
        {
            incReqProcessed();
            if ( getState() == ABORT )
                setState( PROCESSING );
            setInProcess( 0 );
            getConnector()->endResponse( 0, 0 );
        }
        break;
    case AJP13_MORE_REQ_BODY:
    default:
        break;
    }
    return 0;
}
void ProxyConn::init(int fd, Multiplexer *pMplx)
{
    EdStream::init(fd, pMplx, POLLIN | POLLOUT | POLLHUP | POLLERR);
    reset();
    m_iSsl = ((ProxyWorker *)getWorker())->getConfig().getSsl();
    if ((m_iSsl) && (m_ssl.getSSL()))
        m_ssl.release();
    m_lReqBeginTime = time(NULL);

    // Increase the number of successful requests to avoid max connections reduction.
    incReqProcessed();
}
int JConn::buildReqHeader()
{
    m_pReqHeaderEnd = m_buf + 4;
    int ret = JkAjp13::buildReq( getConnector()->getHttpConn(), m_pReqHeaderEnd,
                                 &m_buf[ AJP_MAX_PACKET_SIZE ] );
    if ( ret == -1 )
        return -1;
    m_pBufEnd = m_pReqHeaderEnd;
    ret = JkAjp13::buildWorkerHeader( (JWorker *)getWorker(), m_pBufEnd,
                                      &m_buf[ AJP_MAX_PACKET_SIZE ] );
    if ( ret == -1 )
        return -1;
    JkAjp13::buildAjpHeader( m_buf, m_pBufEnd - m_buf - 4 );
    return 0;
}
bool PipelineManager::removeWorker(int id)
{
    Worker* worker = getWorker(id);
    if (!worker) {
        return false;
    }
    worker->stop();
    delete workers[id];
    workers.erase(id);
    return true;
}
int LsapiConn::connect( Multiplexer * pMplx )
{
    LsapiWorker * pWorker = (LsapiWorker *)getWorker();
    if ( pWorker->selfManaged() )
        return ExtConn::connect( pMplx );
    int fds[2];
    errno = ECONNRESET;
    if ( socketpair( AF_UNIX, SOCK_STREAM, 0, fds ) == -1 )
    {
        LOG_ERR(( "[LsapiConn::connect()] socketpair() failed!" ));
        return -1;
    }
    fcntl( fds[0], F_SETFD, FD_CLOEXEC );
    setReqProcessed( 0 );
    setToStop( 0 );
    //if ( pApp->getCurInstances() >= pApp->getConfig().getInstances() )
    //    return -1;
    m_pid = LocalWorker::workerExec( pWorker->getConfig(), fds[1] );
    ::close( fds[1] );
    if ( m_pid == -1 )
    {
        ::close( fds[0] );
        return -1;
    }
    else
    {
        if ( D_ENABLED( DL_LESS ) )
            LOG_D(( "[%s] add child process pid: %d",
                    pWorker->getName(), m_pid ));
        PidRegistry::add( m_pid, pWorker, 0 );
    }
    ::fcntl( fds[0], F_SETFL, HttpGlobals::getMultiplexer()->getFLTag() );
    init( fds[0], pMplx );

    // Increase the number of successful requests to avoid max connections reduction.
    incReqProcessed();
    setState( PROCESSING );
    onWrite();
    return 1;
}
bool PipelineManager::deletePath(Path* path)
{
    std::vector<int> pathFilters = path->getFilters();
    int orgFilterId = path->getOriginFilterID();
    int dstFilterId = path->getDestinationFilterID();
    Worker *worker = NULL;

    if (filters.count(orgFilterId) <= 0 || filters.count(dstFilterId) <= 0) {
        return false;
    }

    for (auto it : pathFilters) {
        if (filters.count(it) <= 0) {
            return false;
        }
    }

    if (!filters[orgFilterId]->disconnectWriter(path->getOrgWriterID())) {
        utils::errorMsg("Error disconnecting path head!");
        return false;
    }

    if (!filters[dstFilterId]->disconnectReader(path->getDstReaderID())) {
        utils::errorMsg("Error disconnecting path tail!");
        return false;
    }

    for (auto it : pathFilters) {
        worker = getWorker(filters[it]->getWorkerId());
        if (worker) {
            worker->removeProcessor(it);
        }
        delete filters[it];
        filters.erase(it);
    }

    delete path;
    return true;
}
void DServerCmdWrite::run()
{
#if DS_CMD_WRITE_DEBUG
    cout << "DServerCmdWrite::run" << getIndexStr() << endl;
#endif
    if (!checkWorkerAvailable())
        return;

    if (!startWorkerStreamCommunication())
    {
        setErrorText("Could not initiate communication!");
        return;
    }

    int algID, typID;
    arrayalgebra::extractIds(getWorker()->getTType(), algID, typID);
    TypeConstructor* tc = am->GetTC(algID, typID);

    while (nextIndex())
    {
        const int curIdx = getIndex();
        string master_port = int2Str(1800 + curIdx);
        string sendCmd = "let r" + getWorker()->getName() + int2Str(curIdx)
                         + " = " + "receiveD("
                         + getWorker()->getMasterHostIP_()
                         + ",p" + master_port + ")";
#if DS_CMD_WRITE_DEBUG
        cout << "Sending:" << sendCmd << endl;
#endif
        // The sendD-operator on the worker is started
        if (!sendSecondoCmdToWorkerSOS(sendCmd, true))
        {
            string errMsg;
            if (hasCmdError())
                errMsg = getCmdErrorText();
            else
                errMsg = "Could not write data from worker!";
            setErrorText(errMsg);
            waitForSecondoResultThreaded();
            return;
        }

        // Open communication with receiveD - TypeMap
        DServerCmdCallBackCommunication callBack(getWorker()->getMasterHostIP(),
                                                 master_port);
        callBack.startSocket();
        callBack.startSocketCommunication();

        // Send TYPE to receiveD - TypeMap
        if (!callBack.sendTextToCallBack("TYPE", getWorker()->getTTypeStr()))
        {
            waitForSecondoResultThreaded();
            return;
        }

        // Await the "CLOSE" tag
        if (!callBack.getTagFromCallBack("CLOSE"))
        {
            waitForSecondoResultThreaded();
            return;
        }

        // Stop communication with receiveD - TypeMap; type mapping has finished
        callBack.closeSocketCommunication();

        // The callback connection from the value mapping is opened and stored
        if (!(callBack.startSocketCommunication()))
        {
            setErrorText(callBack.getErrorText());
            waitForSecondoResultThreaded();
            return;
        }

        // Send TYPE to receiveD
        if (!callBack.sendTextToCallBack("TYPE", getWorker()->getTTypeStr()))
        {
            setErrorText(callBack.getErrorText());
            waitForSecondoResultThreaded();
            return;
        }
        if (!callBack.getTagFromCallBack("GOTTYPE"))
        {
            setErrorText(callBack.getErrorText());
            waitForSecondoResultThreaded();
            return;
        }

        // The element is converted into a binary stream of data
        SmiRecordFile recF(false, 0);
        SmiRecord rec;
        SmiRecordId recID;
        Cmd_Mutex.acquire();
        recF.Open("send");
        recF.AppendRecord(recID, rec);
        size_t size = 0;
        am->SaveObj(algID, typID, rec, size, getWorker()->getTType(),
                    (*(getInElements()))[curIdx]);
        char* buffer = new char[size];
        rec.Read(buffer, size, 0);
        //rec.Truncate(3);
        recF.DeleteRecord(recID);
        recF.Close();
        Cmd_Mutex.release();

        // Size of the binary data is sent
        if (!callBack.sendTextToCallBack("SIZE", size))
        {
            waitForSecondoResultThreaded();
            return;
        }
#if DS_CMD_WRITE_DEBUG
        cout << "Send Size:" << size << endl;
#endif
        // The actual data are sent
        if (!callBack.Write(buffer, size))
        {
            waitForSecondoResultThreaded();
            return;
        }
        delete [] buffer;

        Attribute* a;
        if (tc->NumOfFLOBs() > 0)
            a = static_cast<Attribute*>((am->Cast(algID, typID))
                    (((*(getInElements()))[curIdx]).addr));

        // FLOBs are sent to the worker
        for (int i = 0; i < tc->NumOfFLOBs(); i++)
        {
            // Send FLOB tag as info
            if (!callBack.sendTagToCallBack("FLOB"))
            {
                waitForSecondoResultThreaded();
                return;
            }
            Flob* f = a->GetFLOB(i);

            // The FLOB is converted to binary data
            SmiSize si = f->getSize();
            int n_blocks = si / 1024 + 1;
            char* buf = new char[n_blocks * 1024];
            memset(buf, 0, 1024 * n_blocks);
            f->read(buf, si, 0);
#if DS_CMD_WRITE_DEBUG
            cout << "Send Flob - Size:" << si << endl;
#endif
            // Size of the binary data is sent
            if (!callBack.sendTextToCallBack("FLOBSIZE", si))
            {
                waitForSecondoResultThreaded();
                return;
            }
            // FLOB data is sent
            for (int j = 0; j < n_blocks; j++)
                callBack.Write(buf + j * 1024, 1024);
            delete [] buf;

            // Await the FLOB acknowledgement ("GOTFLOB" or "ERROR")
            bool noErr = true;
            if (!callBack.getTagFromCallBackTF("GOTFLOB", "ERROR", noErr))
            {
                waitForSecondoResultThreaded();
                return;
            }
            if (!noErr)
            {
                waitForSecondoResultThreaded();
                return;
            }
        } // for (FLOBs)

        if (!callBack.sendTagToCallBack("CLOSE"))
        {
            waitForSecondoResultThreaded();
            return;
        }
        if (!callBack.getTagFromCallBack("FINISH"))
        {
            waitForSecondoResultThreaded();
            return;
        }
        callBack.closeCallBackCommunication();

        if (!waitForSecondoResultThreaded())
        {
            string errMsg;
            if (hasCmdError())
                errMsg = getCmdErrorText();
            else
                errMsg = "Could not write data to worker!";
            setErrorText(errMsg);
        }
    } // while (nextIndex())

    if (!closeWorkerStreamCommunication())
    {
        setErrorText("Could not stop communication!");
        return;
    }
#if DS_CMD_WRITE_DEBUG
    cout << "DServerCmdWrite::run DONE" << endl;
#endif
} // run()
int CgidConn::buildSSIExecHeader()
{
    static unsigned int s_id = 0;
    HttpSession *pSession = getConnector()->getHttpSession();
    HttpReq * pReq = pSession->getReq();
    const char * pReal;
    const AutoStr2 * psChroot;
    const char * pChroot;
    int ret;
    uid_t uid;
    gid_t gid;
    pReal = pReq->getRealPath()->c_str();
    ret = pReq->getUGidChroot( &uid, &gid, &psChroot );
    if ( ret )
        return ret;
//    if ( D_ENABLED( DL_LESS ) )
//        LOG_D(( getLogger(),
//                "[%s] UID: %d, GID: %d",
//                getLogId(), pHeader->m_uid, pHeader->m_gid ));
    if ( psChroot )
    {
//        if ( D_ENABLED( DL_LESS ) )
//            LOG_D(( getLogger(),
//                    "[%s] chroot: %s, real path: %s",
//                    getLogId(), pChroot->c_str(), pReal ));
        pChroot = psChroot->c_str();
        ret = psChroot->len();
    }
    else
    {
        pChroot = NULL;
        ret = 0;
    }
    char achBuf[4096];
    memccpy( achBuf, pReal, 0, 4096 );
    char * argv[256];
    char ** p;
    char * pDir;
    SUExec::buildArgv( achBuf, &pDir, argv, 256 );
    if ( pDir )
        *(argv[0]-1) = '/';
    else
        pDir = argv[0];
    int priority = ((CgidWorker *)getWorker())->getConfig().getPriority();
    m_req.buildReqHeader( uid, gid, priority, pChroot, ret, pDir,
                          strlen( pDir ),
                          ((CgidWorker *)getWorker())->getConfig().getRLimits() );
    p = &argv[1];
    while( *p )
    {
        m_req.appendArgv( *p, strlen( *p ) );
        ++p;
    }
    m_req.appendArgv( NULL, 0 );
    HttpCgiTool::buildEnv( &m_req, pSession );
    m_req.finalize( s_id++,
                    ((CgidWorker *)getWorker())->getConfig().getSecret(),
                    LSCGID_TYPE_CGI );
    return 0;
}
int CgidConn::buildReqHeader()
{
    static unsigned int s_id = 0;
    HttpSession *pSession = getConnector()->getHttpSession();
    HttpReq * pReq = pSession->getReq();
    const char * pQueryString = pReq->getQueryString();
    const char * pQsEnd = pReq->getQueryString() + pReq->getQueryStringLen();
    const char * pReal;
    const AutoStr2 * psChroot;
    const AutoStr2 * realPath = pReq->getRealPath();
    const char * pChroot;
    int ret;
    uid_t uid;
    gid_t gid;
    pReal = realPath->c_str();
    ret = pReq->getUGidChroot( &uid, &gid, &psChroot );
    if ( ret )
        return ret;
//    if ( D_ENABLED( DL_LESS ) )
//        LOG_D(( getLogger(),
//                "[%s] UID: %d, GID: %d",
//                getLogId(), pHeader->m_uid, pHeader->m_gid ));
    if ( psChroot )
    {
//        if ( D_ENABLED( DL_LESS ) )
//            LOG_D(( getLogger(),
//                    "[%s] chroot: %s, real path: %s",
//                    getLogId(), pChroot->c_str(), pReal ));
        pChroot = psChroot->c_str();
        ret = psChroot->len();
    }
    else
    {
        pChroot = NULL;
        ret = 0;
    }
    int priority = ((CgidWorker *)getWorker())->getConfig().getPriority();
    m_req.buildReqHeader( uid, gid, priority, pChroot, ret, pReal,
                          pReq->getRealPath()->len(),
                          ((CgidWorker *)getWorker())->getConfig().getRLimits() );
    if ( *pQueryString
        && ( memchr( pQueryString, '=', pQsEnd - pQueryString ) == NULL ))
    {
        char * pPlus;
        do
        {
            pPlus = (char*)memchr( pQueryString, '+', pQsEnd - pQueryString );
            if ( pPlus != pQueryString )
            {
                int len;
                if ( pPlus )
                    len = pPlus - pQueryString;
                else
                    len = pQsEnd - pQueryString;
                m_req.appendArgv( pQueryString, len );
            }
            if ( pPlus )
                pQueryString = pPlus + 1;
        } while( pPlus );
    }
    m_req.appendArgv( NULL, 0 );
    HttpCgiTool::buildEnv( &m_req, pSession );
    m_req.finalize( s_id++,
                    ((CgidWorker *)getWorker())->getConfig().getSecret(),
                    LSCGID_TYPE_CGI );
    return 0;
}
void receiveMessages ()
{
    cleanBuffers ();

    do
    {
        if (! atLeastOneActiveThread ())
        {
            waitMessage ();
        }

        int src, tag;

        while (probeMessage (src, tag))
        {
            receiveMessage (src, tag);
            initMessage ();

            switch (tag)
            {
            case RUNNER_STOP_TAG:
                unpackTerminationOfRunner ();
                break;

            case SYNCHRONIZE_REQ_TAG:
                unpackSynchronRequest ();
                break;

            case SYNCHRONIZED_TAG:
            {
                RUNNER_ID runner_id;
                unpack (runner_id);
                COOP_ID coop_id;
                unpack (coop_id);
                getCooperative (coop_id) -> notifySynchronized ();
                break;
            }

            case COOP_TAG:
                COOP_ID coop_id;
                unpack (coop_id);
                getCooperative (coop_id) -> unpack ();
                getCooperative (coop_id) -> notifyReceiving ();
                break;

            case SCHED_REQUEST_TAG:
                unpackResourceRequest ();
                break;

            case SCHED_RESULT_TAG:
            {
                /* Unpacking the resource */
                SERVICE_ID serv_id;
                unpack (serv_id);
                Service * serv = getService (serv_id);
                int dest;
                unpack (dest);
                WORKER_ID worker_id;
                unpack (worker_id);

                /* Going back ... */
                initMessage ();
                pack (worker_id);
                pack (serv_id);
                serv -> packData ();
                serv -> notifySendingData ();
                sendMessage (dest, TASK_DATA_TAG);
                break;
            }

            case TASK_DATA_TAG:
            {
                WORKER_ID worker_id;
                unpack (worker_id);
                Worker * worker = getWorker (worker_id);
                worker -> setSource (src);
                worker -> unpackData ();
                worker -> wakeUp ();
                break;
            }

            case TASK_RESULT_TAG:
            {
                SERVICE_ID serv_id;
                unpack (serv_id);
                Service * serv = getService (serv_id);
                serv -> unpackResult ();
                break;
            }

            case TASK_DONE_TAG:
                unpackTaskDone ();
                break;

            default:
                ;
            };
        }
    }
    while ( ! atLeastOneActiveThread ()
            && atLeastOneActiveRunner () /*&& ! allResourcesFree ()*/ );
}
WorkflowProcessItem * GTUtilsWorkflowDesigner::addElement(HI::GUITestOpStatus &os,
                                                          const QString &algName,
                                                          bool exactMatch)
{
    addAlgorithm(os, algName, exactMatch);
    CHECK_OP(os, NULL);
    return getWorker(os, algName);
}