bool LocalNode::_connect( NodePtr node, ConnectionPtr connection )
{
    EQASSERT( connection.isValid( ));
    EQASSERT( node->getNodeID() != getNodeID( ));

    if( !node.isValid() || _state != STATE_LISTENING ||
        !connection->isConnected() || node->_state != STATE_CLOSED )
    {
        return false;
    }

    _addConnection( connection );

    // send connect packet to peer
    NodeConnectPacket packet;
    packet.requestID = registerRequest( node.get( ));
    packet.nodeID = _id;
    packet.nodeType = getType();
    connection->send( packet, serialize( ));

    bool connected = false;
    if( !waitRequest( packet.requestID, connected, 10000 /*ms*/ ))
    {
        EQWARN << "Node connection handshake timeout - peer not a Collage node?"
               << std::endl;
        return false;
    }
    if( !connected )
        return false;

    EQASSERT( node->_id != NodeID::ZERO );
    EQASSERTINFO( node->_id != _id, _id );
    EQINFO << node << " connected to " << *(Node*)this << std::endl;
    return true;
}
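Nearly every snippet in this section uses the same asynchronous request/reply idiom: registerRequest() allocates a slot, the request ID travels inside the packet, and waitRequest() blocks until the peer's reply (handled on the receive path) serves the slot. A minimal sketch of such a registry, assuming a condition-variable implementation; the class and member names are hypothetical, not Collage's actual code:

#include <chrono>
#include <condition_variable>
#include <cstdint>
#include <map>
#include <mutex>

class RequestRegistry
{
public:
    // Allocate a slot and return its ID; the ID travels in the packet.
    uint32_t registerRequest()
    {
        std::lock_guard< std::mutex > lock( _mutex );
        const uint32_t id = _nextID++;
        _pending[ id ]; // default-construct the slot
        return id;
    }

    // Block until serveRequest() fills the slot or the timeout expires.
    bool waitRequest( const uint32_t id, bool& result,
                      const std::chrono::milliseconds timeout )
    {
        std::unique_lock< std::mutex > lock( _mutex );
        Request& request = _pending[ id ];
        if( !_condition.wait_for( lock, timeout,
                                  [&request]{ return request.served; } ))
        {
            _pending.erase( id );
            return false; // timeout, e.g. peer is not a Collage node
        }
        result = request.result;
        _pending.erase( id );
        return true;
    }

    // Called from the receive path when the reply arrives.
    void serveRequest( const uint32_t id, const bool result )
    {
        std::lock_guard< std::mutex > lock( _mutex );
        std::map< uint32_t, Request >::iterator i = _pending.find( id );
        if( i == _pending.end( ))
            return; // request timed out and was erased
        i->second.result = result;
        i->second.served = true;
        _condition.notify_all();
    }

private:
    struct Request { bool served = false; bool result = false; };

    std::mutex _mutex;
    std::condition_variable _condition;
    std::map< uint32_t, Request > _pending;
    uint32_t _nextID = 0;
};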
bool Server::_cmdReleaseConfig( co::ICommand& command )
{
    const UUID configID = command.get< UUID >();
    const uint32_t requestID = command.get< uint32_t >();

    LBVERB << "Handle release config " << command << " config " << configID
           << std::endl;

    co::NodePtr node = command.getNode();

    Config* config = 0;
    const Configs& configs = getConfigs();
    for( Configs::const_iterator i = configs.begin();
         i != configs.end() && !config; ++i )
    {
        Config* candidate = *i;
        if( candidate->getID() == configID )
            config = candidate;
    }

    if( !config )
    {
        LBWARN << "Release request for unknown config" << std::endl;
        node->send( fabric::CMD_SERVER_RELEASE_CONFIG_REPLY ) << requestID;
        return true;
    }

    if( config->isRunning( ))
    {
        LBWARN << "Release of running configuration" << std::endl;
        config->exit(); // Make sure config is exited
    }

    const uint32_t destroyRequestID = registerRequest();
    node->send( fabric::CMD_SERVER_DESTROY_CONFIG )
        << config->getID() << destroyRequestID;
    waitRequest( destroyRequestID );

#ifdef EQUALIZER_USE_HWSD
    if( config->isAutoConfig( ))
    {
        LBASSERT( _admins.empty( ));
        config->deregister();
        config::Server::release( config );
    }
    else
#endif
    {
        ConfigRestoreVisitor restore;
        config->accept( restore );
        config->commit();
    }

    node->send( fabric::CMD_SERVER_RELEASE_CONFIG_REPLY ) << requestID;
    LBLOG( lunchbox::LOG_ANY ) << "----- Released Config -----" << std::endl;
    return true;
}
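This is the newer command API: arguments are streamed after the command constant on the sender and read back with typed get<>() calls on the receiver, so both sides must agree on the order. A hedged sketch of the reply's receiving side; the handler name is illustrative, and serveRequest() stands in for however the registered request is fulfilled:

// Hypothetical receiver for CMD_SERVER_RELEASE_CONFIG_REPLY: read the
// streamed arguments back in exactly the order they were sent.
bool Client::_cmdReleaseConfigReply( co::ICommand& command )
{
    const uint32_t requestID = command.get< uint32_t >();
    serveRequest( requestID ); // unblock the waitRequest() on this ID
    return true;
}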
void *kernel_launcher (void *data)
{
    struct kernelLauncherData *kd = (struct kernelLauncherData *)data;
    size_t *global_work = (size_t *)malloc(sizeof(size_t)*(kd->work_dim));
    cl_uint i;

    /* copy every dimension except the one being partitioned */
    for(i = 0; i < kd->work_dim; i++) {
        if (i != kd->max_index)
            global_work[i] = kd->global_work_size[i];
    }

    unsigned int one = 1;
    unsigned int zero = 0;

    /* args arg_num+1..+3: the original global sizes (1 for unused dims) */
    for(i = 0; i < 3; i++) {
        if (i < kd->work_dim)
            __real_clSetKernelArg (kd->kernel, kd->arg_num+1+i,
                                   sizeof(unsigned int),
                                   &(kd->global_work_size[i]));
        else
            __real_clSetKernelArg (kd->kernel, kd->arg_num+1+i,
                                   sizeof(unsigned int), &one);
    }

    /* args arg_num+7..+9: the number of work-groups per dimension */
    for(i = 0; i < 3; i++) {
        if (i < kd->work_dim)
            __real_clSetKernelArg (kd->kernel, kd->arg_num+7+i,
                                   sizeof(unsigned int),
                                   &(kd->num_groups[i]));
        else
            __real_clSetKernelArg (kd->kernel, kd->arg_num+7+i,
                                   sizeof(unsigned int), &one);
    }

    cl_uint j;
    size_t remain = kd->size;
    size_t step = kd->step;
    if (kd->local_work_size != NULL)
        step /= kd->local_work_size[kd->max_index];
    unsigned int off = kd->off;

    /* launch the assigned range in chunks of `step` work-groups; the final
       chunk absorbs up to 2*step-1 groups to avoid a tiny tail launch */
    while (remain > 0) {
        size_t size = remain >= 2*step ? step : remain;

        if (kd->local_work_size != NULL)
            global_work[kd->max_index] = size*kd->local_work_size[kd->max_index];
        else
            global_work[kd->max_index] = size;
        remain -= size;

        /* args arg_num+4..+6: this chunk's offset in the partitioned dim */
        for(j = 0; j < 3; j++) {
            if (j == kd->max_index)
                __real_clSetKernelArg (kd->kernel, kd->arg_num+4+j,
                                       sizeof(unsigned int), &off);
            else
                __real_clSetKernelArg (kd->kernel, kd->arg_num+4+j,
                                       sizeof(unsigned int), &zero);
        }

        sendRequest('g', kd->index, 1);
        waitRequest('g', kd->index);
        __real_clEnqueueNDRangeKernel (queue_GPUSparc[kd->index], kd->kernel,
                                       kd->work_dim, kd->global_work_offset,
                                       global_work, kd->local_work_size,
                                       0, NULL, kd->event);
        __real_clFinish (queue_GPUSparc[kd->index]);
        sendFinish('g', kd->index, 1);
        off += size;
    }

    free (global_work);
    return NULL;
}
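The `remain >= 2*step ? step : remain` guard means a launch is never smaller than `step` work-groups unless the whole assignment is: the last launch absorbs anything under 2*step. A standalone trace of that loop, with no OpenCL required:

#include <stdio.h>

int main (void)
{
    size_t remain = 25;   /* work-groups assigned to this GPU */
    size_t step = 8;      /* preferred chunk size */
    unsigned int off = 0;

    while (remain > 0) {
        size_t size = remain >= 2*step ? step : remain;
        remain -= size;
        printf ("launch: off=%u size=%zu\n", off, size);
        off += size;
    }
    /* prints chunks 8, 8, 9: the tail launch absorbs the remainder */
    return 0;
}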
void PublishToMePlugin::startWaitTimer(int msecs, const char* slot) {
    if (!m_waitTimer) {
        m_waitTimer = new QTimer(this);
        m_waitTimer->setSingleShot(true);
    }

    m_waitTimer->setInterval(msecs);
    m_waitTimer->start();
    emit waitRequest(msecs, false);
    disconnect(m_waitTimer, SIGNAL(timeout()), this, 0);
    connect(m_waitTimer, SIGNAL(timeout()), this, slot);
}
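The disconnect() with a null method pointer drops every timeout() connection from the timer to this object, so the one single-shot timer can be re-targeted at a different continuation slot per wait state. A hedged usage sketch; downloadFile() is a hypothetical slot, checkWaitTime() is the real one shown further below:

// Hypothetical call sites: one shared timer, different continuation slots.
startWaitTimer(5000, SLOT(downloadFile()));     // short wait before download
startWaitTimer(3600000, SLOT(checkWaitTime())); // hourly download-limit wait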
void run() {
    if (check) {
        mId = (tagType3->*check)(mSerivceBlockList);
    }
    if (update) {
        mId = (tagType3->*update)(mSerivceBlockList, mDataArray);
    }
    checkInvalidId();
    waitRequest();
}
LocalNode::SendToken LocalNode::acquireSendToken( NodePtr node )
{
    EQASSERT( !inCommandThread( ));
    EQASSERT( !_inReceiverThread( ));

    NodeAcquireSendTokenPacket packet;
    packet.requestID = registerRequest();
    node->send( packet );

    bool ret = false;
    if( waitRequest( packet.requestID, ret, Global::getTimeout( )))
        return node;

    EQERROR << "Timeout while acquiring send token " << packet.requestID
            << std::endl;
    return 0;
}
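A send token serializes multi-packet sends to a node so they do not interleave with other senders. A hedged usage sketch: releaseSendToken() is assumed to be the matching call in this API version, and the null return corresponds to the timeout above:

// Sketch, assuming releaseSendToken() is the matching call of this API.
co::LocalNode::SendToken token = localNode->acquireSendToken( server );
if( token ) // 0 when the request above timed out
{
    // ... perform the sends that must not interleave ...
    localNode->releaseSendToken( token );
}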
bool Server::_cmdReleaseConfig( co::Command& command )
{
    const ServerReleaseConfigPacket* packet =
        command.get<ServerReleaseConfigPacket>();
    EQINFO << "Handle release config " << packet << std::endl;

    ServerReleaseConfigReplyPacket reply( packet );
    co::NodePtr node = command.getNode();

    Config* config = 0;
    const Configs& configs = getConfigs();
    for( Configs::const_iterator i = configs.begin();
         i != configs.end() && !config; ++i )
    {
        Config* candidate = *i;
        if( candidate->getID() == packet->configID )
            config = candidate;
    }

    if( !config )
    {
        EQWARN << "Release request for unknown config" << std::endl;
        node->send( reply );
        return true;
    }

    if( config->isRunning( ))
    {
        EQWARN << "Release of running configuration" << std::endl;
        config->exit(); // Make sure config is exited
    }

    fabric::ServerDestroyConfigPacket destroyConfigPacket;
    destroyConfigPacket.requestID = registerRequest();
    destroyConfigPacket.configID = config->getID();
    node->send( destroyConfigPacket );
    waitRequest( destroyConfigPacket.requestID );

    ConfigRestoreVisitor restore;
    config->accept( restore );

    node->send( reply );
    EQLOG( co::base::LOG_ANY ) << "----- Released Config -----" << std::endl;
    return true;
}
bool LocalNode::disconnect( NodePtr node )
{
    if( !node || _state != STATE_LISTENING )
        return false;

    if( node->_state != STATE_CONNECTED )
        return true;

    EQASSERT( !inCommandThread( ));

    NodeDisconnectPacket packet;
    packet.requestID = registerRequest( node.get( ));
    send( packet );

    waitRequest( packet.requestID );
    _objectStore->removeNode( node );
    return true;
}
void RedisProxy::writeReplyFinished(Context *c)
{
    ClientPacket* packet = (ClientPacket*)c;
    m_monitor->replyClientFinished(packet);

    // Reset all per-request state so the connection can be recycled
    // for the client's next request
    packet->finishedState = ClientPacket::Unknown;
    packet->commandType = -1;
    packet->sendBuff.clear();
    packet->recvBuff.clear();
    packet->sendBytes = 0;
    packet->recvBytes = 0;
    packet->sendToRedisBytes = 0;
    packet->requestServant = NULL;
    packet->redisSocket = NULL;
    packet->recvBufferOffset = 0;
    packet->sendBufferOffset = 0;
    packet->sendParseResult.reset();
    packet->recvParseResult.reset();

    waitRequest(c);
}
void run() {
    if (readBlock) {
        mId = (tagType2->*readBlock)(mAddr);
    }
    if (writeBlock) {
        mId = (tagType2->*writeBlock)(mAddr, mDataArray);
    }
    if (selectSector) {
        mId = (tagType2->*selectSector)(mSector);
    }
    checkInvalidId();
    waitRequest();
}
void RedisProxy::writeReply(Context *c)
{
    ClientPacket* packet = (ClientPacket*)c;
    if (!packet->isRecvParseEnd()) {
        // The receive buffer still holds unparsed bytes (e.g. a pipelined
        // request): parse them before waiting for new input
        switch (packet->parseRecvBuffer()) {
        case RedisProto::ProtoError:
            closeConnection(c);
            break;
        case RedisProto::ProtoIncomplete:
            waitRequest(c);
            break;
        case RedisProto::ProtoOK:
            readRequestFinished(c);
            break;
        default:
            break;
        }
    } else {
        TcpServer::writeReply(c);
    }
}
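parseRecvBuffer() is a tri-state incremental parser: error closes the connection, incomplete hands the socket back to the reader, OK forwards the request. A minimal sketch of such a parser for a line-terminated command, with hypothetical names:

#include <cstddef>
#include <string>

// Hypothetical tri-state result, mirroring RedisProto::ProtoError /
// ProtoIncomplete / ProtoOK in the proxy above.
enum ParseResult { ParseError, ParseIncomplete, ParseOK };

// Parse one inline command terminated by "\r\n". ParseIncomplete tells the
// caller to keep the buffer and wait for more bytes (cf. waitRequest(c)).
ParseResult parseInlineCommand(const std::string& buffer, std::string& command)
{
    const std::size_t end = buffer.find("\r\n");
    if (end == std::string::npos)
        return buffer.size() > 64 * 1024 ? ParseError : ParseIncomplete;

    command = buffer.substr(0, end);
    return ParseOK;
}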
void run() {
    if (readAll) {
        mId = (tagType1->*readAll)();
    }
    if (readIdentification) {
        mId = (tagType1->*readIdentification)();
    }
    if (readByte) {
        mId = (tagType1->*readByte)(mAddr);
    }
    if (writeByte) {
        mId = (tagType1->*writeByte)(mAddr, mData, mMode);
    }
    if (readBlock) {
        mId = (tagType1->*readBlock)(mAddr);
    }
    if (writeBlock) {
        mId = (tagType1->*writeBlock)(mAddr, mDataArray, mMode);
    }
    if (readSegment) {
        mId = (tagType1->*readSegment)(mAddr);
    }
    checkInvalidId();
    waitRequest();
}
void run() {
    if (selectByName) {
        mId = (mTagType4->*selectByName)(mName);
    }
    if (selectById) {
        mId = (mTagType4->*selectById)(mFileId);
    }
    if (read) {
        mId = (mTagType4->*read)(mLength, mStartOffset);
    }
    if (write) {
        mId = (mTagType4->*write)(mDataArray, mStartOffset);
    }
    checkInvalidId();
    waitRequest();
}
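All of the run() variants above dispatch through C++ pointer-to-member-function fields: the caller stores at most one non-null operation, run() invokes it on the tag object, and the returned request ID is awaited. A self-contained sketch of the idiom; the names Tag, ReadFn, etc. are illustrative, not the original classes:

#include <iostream>

class Tag {
public:
    int readBlock(int addr) { return addr; }           // returns a request id
    int writeBlock(int addr, int data) { return addr + data; }
};

typedef int (Tag::*ReadFn)(int);         // pointer-to-member-function types
typedef int (Tag::*WriteFn)(int, int);

int main() {
    Tag tag;
    Tag *tagType = &tag;

    ReadFn readBlock = &Tag::readBlock;  // chosen by the caller; may be null
    WriteFn writeBlock = 0;

    int mId = -1;
    if (readBlock) {
        mId = (tagType->*readBlock)(4);  // same syntax as in run() above
    }
    if (writeBlock) {
        mId = (tagType->*writeBlock)(4, 2);
    }
    std::cout << "request id: " << mId << std::endl;
    return 0;
}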
cl_int decomposer_GPUSparc(cl_kernel kernel, cl_uint work_dim,
                           const size_t * global_work_offset,
                           const size_t * global_work_size,
                           const size_t * local_work_size,
                           cl_uint num_events_in_wait_list,
                           const cl_event * event_wait_list,
                           cl_event * event)
{
    cl_uint i;
    cl_int ret = CL_SUCCESS;

    map<cl_kernel, struct kernelinfo_GPUSparc>::iterator kiter;
    kiter = kernelmap_GPUSparc.find (kernel);
    if (kiter == kernelmap_GPUSparc.end()) {
        GPUSparcLog ("Cannot find kernel in clEnqueueNDRangeKernel\n");
        return CL_INVALID_KERNEL; /* do not dereference an end() iterator */
    }
    cl_kernel *kernels = kiter->second.kernel;

    size_t *local_work = (size_t *)malloc(sizeof(size_t) * work_dim);
    unsigned int *num_group =
        (unsigned int *)malloc(sizeof(unsigned int) * work_dim);
    for(i = 0; i < work_dim; i++) {
        local_work[i] = (local_work_size != NULL)? local_work_size[i] : 1;
        num_group[i] = global_work_size[i]/local_work[i];
    }

    timeval t1, t2;
    /* partition along the dimension with the most work-groups */
    cl_uint max_index = getMaxIndex (work_dim, global_work_size, local_work);
    cl_uint step = getStep (work_dim, global_work_size, local_work, max_index);

    map<cl_uint, cl_mem> args = kiter->second.args;
    map<cl_uint, cl_mem>::iterator argiter;
    map<cl_mem, struct meminfo_GPUSparc>::iterator miter;

    /* inter-kernel dependency: merge and synchronize the buffers this
       kernel uses before launching */
    GPUSparcLog ("Arg Sync\n");
    for (argiter = args.begin(); argiter != args.end(); ++argiter) {
        miter = memmap_GPUSparc.find (argiter->second);
        if (miter == memmap_GPUSparc.end()) {
            GPUSparcLog ("Cannot find args in clEnqueueNDRangeKernel\n");
            continue;
        }
        struct meminfo_GPUSparc *minfo = &(miter->second);
        if (!(miter->second.merged) && multiGPUmode)
            ret |= bufferMerger_GPUSparc (minfo, NULL);
        if (!(miter->second.synched))
            ret |= bufferSynchronizer_GPUSparc (minfo, NULL, NULL, NULL);
    }

    gettimeofday (&t1, NULL);
    cl_uint arg_index = kiter->second.arg_num;

    if (!multiGPUmode) {
        /* single-GPU mode: acquire any available GPU and launch there */
        if (migration)
            gpuid = -1;
        sendRequest('g', gpuid, 1);
        gpuid = waitRequest('g', -1);

        struct kernelLauncherData *kd = (struct kernelLauncherData *)
            malloc(sizeof(struct kernelLauncherData));
        kd->index = gpuid;
        kd->kernel = kernels[gpuid];
        kd->max_index = max_index;
        kd->work_dim = work_dim;
        kd->global_work_size = global_work_size;
        kd->global_work_offset = global_work_offset;
        kd->local_work_size = local_work_size;
        kd->step = step;
        kd->num_groups = num_group;
        kd->arg_num = arg_index;
        kd->event = NULL;
        kd->size = global_work_size[max_index]/local_work[max_index];
        kd->off = 0;
        single_kernel_launcher (kd);
    }
    else {
        /* multi-GPU mode: split the work-groups of the partitioned
           dimension across all GPUs and launch one thread per GPU */
        cl_event *tevent = (cl_event *)malloc(sizeof(cl_event) * nGPU_GPUSparc);
        struct kernelLauncherData *kd = (struct kernelLauncherData *)
            malloc(sizeof(struct kernelLauncherData)*nGPU_GPUSparc);
        int total = global_work_size[max_index]/local_work[max_index];
        int nGPU = nGPU_GPUSparc;

        for(i = 0; i < nGPU_GPUSparc; i++) {
            kd[i].index = i;
            kd[i].kernel = kernels[i];
            kd[i].work_dim = work_dim;
            kd[i].max_index = max_index;
            kd[i].global_work_size = global_work_size;
            kd[i].global_work_offset = global_work_offset;
            kd[i].local_work_size = local_work_size;
            kd[i].step = step;
            kd[i].num_groups = num_group;
            kd[i].arg_num = arg_index;
            kd[i].event = &tevent[i];

            /* balanced split: each GPU takes the ceiling of the remaining
               work over the remaining GPUs */
            int amount = CEIL(total, nGPU);
            total -= amount;
            nGPU--;
            kd[i].size = amount;
            kd[i].off = (i == 0) ? 0 : kd[i-1].off + kd[i-1].size;
        }

        pthread_t *kthread = (pthread_t *)malloc(sizeof(pthread_t) * nGPU_GPUSparc);
        for(i = 0; i < nGPU_GPUSparc; i++)
            pthread_create (&kthread[i], NULL, kernel_launcher, &kd[i]);
        for(i = 0; i < nGPU_GPUSparc; i++)
            pthread_join (kthread[i], NULL);
        free (kthread);
        free (kd);

        if (event != NULL) {
            /* keep tevent alive: the map owns it while the event is used */
            eventmap_GPUSparc.insert (
                map<cl_event, cl_event *>::value_type (tevent[0], tevent));
            *event = tevent[0];
        }
        else
            free (tevent);
    }

    gettimeofday (&t2, NULL);
    GPUSparcLog ("kernel time: %f\n", ELAPSEDTIME(t1, t2));

    /* the launch may have written the kernel's buffers: invalidate the
       merged flag and remember which GPU holds the coherent copy */
    for (argiter = args.begin(); argiter != args.end(); ++argiter) {
        miter = memmap_GPUSparc.find (argiter->second);
        struct meminfo_GPUSparc *minfo = &miter->second;
        minfo->merged = false;
        if (!multiGPUmode)
            minfo->cohered_gpu = gpuid;
    }

    if (migration)
        gpuid = -1;

    free (num_group);
    free (local_work);
    return ret;
}
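The per-GPU split above divides `total` work-groups so chunk sizes differ by at most one. A standalone check of that arithmetic; CEIL is assumed to be the usual round-up division, since the macro's definition is not shown in this section:

#include <stdio.h>

#define CEIL(a, b) (((a) + (b) - 1) / (b))   /* assumed definition */

int main (void)
{
    int total = 10;  /* work-groups along the partitioned dimension */
    int nGPU = 4;
    int off = 0;

    /* each GPU takes the ceiling of what remains over the remaining GPUs */
    for (int i = 0; nGPU > 0; i++, nGPU--) {
        int amount = CEIL(total, nGPU);
        total -= amount;
        printf ("GPU %d: off=%d size=%d\n", i, off, amount);
        off += amount;
    }
    /* prints sizes 3, 3, 2, 2 covering offsets 0..9 */
    return 0;
}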
void PublishToMePlugin::checkWaitTime() {
    QNetworkReply *reply = qobject_cast<QNetworkReply*>(sender());

    if (!reply) {
        emit error(tr("Network error"));
        return;
    }

    const QString redirect = getRedirect(reply);

    if (!redirect.isEmpty()) {
        if (FILE_REGEXP.indexIn(redirect) == 0) {
            emit downloadRequest(QNetworkRequest(redirect));
        }
        else if (m_redirects < MAX_REDIRECTS) {
            followRedirect(redirect, SLOT(checkWaitTime()));
        }
        else {
            emit error(tr("Maximum redirects reached"));
        }

        reply->deleteLater();
        return;
    }

    switch (reply->error()) {
    case QNetworkReply::NoError:
        break;
    case QNetworkReply::OperationCanceledError:
        reply->deleteLater();
        return;
    default:
        emit error(reply->attribute(QNetworkRequest::HttpReasonPhraseAttribute).toString());
        reply->deleteLater();
        return;
    }

    const QString response = QString::fromUtf8(reply->readAll());

    if (FILE_REGEXP.indexIn(response) != -1) {
        QString url = FILE_REGEXP.cap();

        if (url.startsWith("/")) {
            url.prepend(reply->url().scheme() + "://" + reply->url().authority());
        }

        emit downloadRequest(QNetworkRequest(url));
    }
    else if (response.contains("Downloading is not possible")) {
        const QTime time = QTime::fromString(response.section("Please wait", 1, 1)
                                             .section("to download", 0, 0)
                                             .trimmed(), "hh:mm:ss");

        if (time.isValid()) {
            emit waitRequest(QTime(0, 0).msecsTo(time), true);
        }
        else {
            emit error(tr("Unknown error"));
        }
    }
    else {
        QString recaptchaKey = response.section("/file/captcha.html?v=", 1, 1).section('"', 0, 0);

        if (recaptchaKey.isEmpty()) {
            emit error(tr("No captcha key found"));
        }
        else {
            recaptchaKey.prepend(QString("http://%1/file/captcha.html?v=").arg(reply->url().host()));
            emit captchaRequest(RECAPTCHA_PLUGIN_ID, CaptchaType::Image, recaptchaKey,
                                "submitCaptchaResponse");
        }
    }

    reply->deleteLater();
}
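The wait duration is obtained by parsing an hh:mm:ss string and converting it to milliseconds from midnight with QTime::msecsTo(). A standalone QtCore check of that conversion:

#include <QDebug>
#include <QTime>

int main()
{
    // "01:02:03" -> 1 h 2 min 3 s -> 3723000 ms from midnight
    const QTime time = QTime::fromString("01:02:03", "hh:mm:ss");
    qDebug() << QTime(0, 0).msecsTo(time); // 3723000
    return 0;
}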
NodePtr LocalNode::_connect( const NodeID& nodeID, NodePtr peer )
{
    EQASSERT( nodeID != NodeID::ZERO );

    NodePtr node;

    // Make sure that only one connection request based on the node identifier
    // is pending at a given time. Otherwise a node with the same id might be
    // instantiated twice in _cmdGetNodeDataReply(). The alternative to this
    // mutex is to register connecting nodes with this local node, and handle
    // all cases correctly, which is far more complex. Node connections happen
    // mostly during initialization, and are therefore not time-critical.
    base::ScopedMutex<> mutex( _connectMutex );
    {
        base::ScopedMutex< base::SpinLock > mutexNodes( _nodes );
        NodeHash::const_iterator i = _nodes->find( nodeID );
        if( i != _nodes->end( ))
            node = i->second;
    }

    if( node.isValid( ))
    {
        EQASSERT( node->isConnected( ));
        if( !node->isConnected( ))
            connect( node );
        return node->isConnected() ? node : 0;
    }

    EQINFO << "Connecting node " << nodeID << std::endl;
    EQASSERT( _id != nodeID );

    NodeGetNodeDataPacket packet;
    packet.requestID = registerRequest();
    packet.nodeID = nodeID;
    peer->send( packet );

    void* result = 0;
    waitRequest( packet.requestID, result );

    if( !result )
    {
        EQINFO << "Node " << nodeID << " not found on " << peer->getNodeID()
               << std::endl;
        return 0;
    }

    EQASSERT( dynamic_cast< Node* >( (Dispatcher*)result ));
    node = static_cast< Node* >( result );
    node->unref( CO_REFERENCED_PARAM ); // ref'd before serveRequest()

    if( node->isConnected( ))
        return node;
    if( connect( node ))
        return node;

    {
        base::ScopedMutex< base::SpinLock > mutexNodes( _nodes );
        // connect failed - maybe simultaneous connect from peer?
        NodeHash::const_iterator i = _nodes->find( nodeID );
        if( i != _nodes->end( ))
        {
            node = i->second;
            if( !node->isConnected( ))
                connect( node );
        }
    }
    return node->isConnected() ? node : 0;
}
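The lookup-connect-lookup shape above is a double-checked pattern: check the registry, perform the slow connect outside the node-map lock, then re-check the map to catch a simultaneous connect initiated by the peer. A minimal generic sketch; all names here are hypothetical:

#include <map>
#include <memory>
#include <mutex>

struct Peer { bool connected = false; };
typedef std::shared_ptr< Peer > PeerPtr;

static std::mutex registryMutex;
static std::map< int, PeerPtr > registry; // nodeID -> node

static PeerPtr find( const int id )
{
    std::lock_guard< std::mutex > lock( registryMutex );
    std::map< int, PeerPtr >::const_iterator i = registry.find( id );
    return i == registry.end() ? PeerPtr() : i->second;
}

PeerPtr getOrConnect( const int id )
{
    if( PeerPtr peer = find( id ))    // fast path: already known
        return peer;

    PeerPtr peer( new Peer );
    peer->connected = true;           // stands in for the real handshake

    std::lock_guard< std::mutex > lock( registryMutex );
    std::map< int, PeerPtr >::const_iterator i = registry.find( id );
    if( i != registry.end( ))         // simultaneous connect from the peer?
        return i->second;

    registry[ id ] = peer;
    return peer;
}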