void CTCPSocket::shutdownOutput() { bool useNewJob = false; { CLock lock(&m_mutex); // shutdown socket for writing try { ARCH->closeSocketForWrite(m_socket); } catch (XArchNetwork&) { // ignore } // shutdown buffer for writing if (m_writable) { sendEvent(getOutputShutdownEvent()); onOutputShutdown(); useNewJob = true; } } if (useNewJob) { setJob(newJob()); } }
void jobConCombiner1Archivo2BloqueYConPrimerReduceEjecutado_pedirSiguienteTarea_devuelveTareaReduce() { t_job *job = newJob(CON_COMBINER); jobAgregarArchivoMDFS(job,"/holy.txt",2); t_tarea *tarea1 = jobObtenerSiguienteTarea(job); tareaMarcarEnEjecucion(tarea1,newNodo("nodo1", "192.168.1.101", "5001"),"archivo1"); tareaMarcarFinalizada(tarea1); t_tarea *tarea2 = jobObtenerSiguienteTarea(job); tareaMarcarEnEjecucion(tarea2,newNodo("nodo2", "192.168.1.101", "5001"),"archivo2"); tareaMarcarFinalizada(tarea2); t_tarea *tarea3 = jobObtenerSiguienteTarea(job); tareaMarcarEnEjecucion(tarea3,newNodo("nodo1", "192.168.1.101", "5001"),"reduce1"); tareaMarcarFinalizada(tarea3); t_tarea *tarea4 = jobObtenerSiguienteTarea(job); CU_ASSERT_TRUE(tareaEsReduce(tarea4)); t_list *archivosRemotos = tareaReduceObtenerArchivosRemotos(tarea4); t_archivoRemoto *archivo = list_get(archivosRemotos,0); CU_ASSERT_EQUAL(list_size(archivosRemotos),1); CU_ASSERT_STRING_EQUAL(archivoRemotoObtenerNodo(archivo)->nombre,"nodo2"); CU_ASSERT_STRING_EQUAL(archivoRemotoObtenerNombreArchivo(archivo),"archivo2"); }
void jobConCombiner1Archivo1BloqueYConReduceEjecutado_pedirSiguienteTarea_devuelveReduceFinal() { t_job *job = newJob(CON_COMBINER); jobAgregarArchivoMDFS(job,"/holy.txt",1); t_tarea *map1 = jobObtenerSiguienteTarea(job); tareaMarcarEnEjecucion(map1,newNodo("nodo1", "192.168.1.101", "5001"),"archivo1"); tareaMarcarFinalizada(map1); t_tarea *reduce = jobObtenerSiguienteTarea(job); tareaMarcarEnEjecucion(reduce,newNodo("nodo1", "192.168.1.101", "5001"),"reduce1"); tareaMarcarFinalizada(reduce); t_tarea *reduceFinal = jobObtenerSiguienteTarea(job); CU_ASSERT_TRUE(tareaEsReduce(reduceFinal)); t_list *archivosRemotos = tareaReduceObtenerArchivosRemotos(reduceFinal); t_archivoRemoto *archivo = list_get(archivosRemotos,0); CU_ASSERT_EQUAL(list_size(archivosRemotos),1); CU_ASSERT_STRING_EQUAL(archivoRemotoObtenerNodo(archivo)->nombre,"nodo1"); CU_ASSERT_STRING_EQUAL(archivoRemotoObtenerNombreArchivo(archivo),"reduce1"); }
void CTCPSocket::shutdownInput() { bool useNewJob = false; { CLock lock(&m_mutex); // shutdown socket for reading try { ARCH->closeSocketForRead(m_socket); } catch (XArchNetwork&) { // ignore } // shutdown buffer for reading if (m_readable) { sendEvent(m_events->forIStream().inputShutdown()); onInputShutdown(); useNewJob = true; } } if (useNewJob) { setJob(newJob()); } }
ISocketMultiplexerJob*
SecureSocket::serviceAccept(ISocketMultiplexerJob* job, bool, bool write, bool error)
{
    Lock lock(&getMutex());

    int status = 0;
#ifdef SYSAPI_WIN32
    status = secureAccept(static_cast<int>(getSocket()->m_socket));
#elif SYSAPI_UNIX
    status = secureAccept(getSocket()->m_fd);
#endif

    if (status > 0) {
        // Handshake finished: announce the accepted client and
        // replace this job with a regular service job.
        sendEvent(m_events->forClientListener().accepted());
        return newJob();
    }
    else if (status == 0) {
        // Handshake still in progress: schedule another attempt.
        return new TSocketMultiplexerMethodJob<SecureSocket>(
            this, &SecureSocket::serviceAccept, getSocket(), isReadable(), isWritable());
    }
    // status < 0: the secure accept failed; drop the job.
    return NULL;
}
// M/M/1 workload generator to generate a stream of jobs. void generateWorkloadMM1(const double serviceTime, const double utilization, vector<Job> &jobStream){ // this->logOut << "[GEN_MM1] Generating M/M/1 workload..." << endl; double interArrival = (1 / utilization) * serviceTime; // ms double newArrival = 0; double newService = 0; double arrTime = 0; const int noOfJobs = JOB_LOG_LENGTH; random_device rd; // Random seed default_random_engine eng(rd()); // Random engine exponential_distribution<double> distriSer(1.0 / serviceTime); // Service time distribution exponential_distribution<double> distriArr(1.0 / interArrival); // Arrival time interval distribution for (int i = 0; i < noOfJobs; i++){ newService = distriSer(eng); newArrival = distriArr(eng); // Draw an arrival time interval sample arrTime = arrTime + newArrival; // Actual arrival time Job newJob(arrTime, newService, newArrival, utilization); jobStream.push_back(newJob); } }
//Public B9Edit::B9Edit(QWidget *parent, Qt::WFlags flags, QString infile) : QMainWindow(parent, flags) { ui.setupUi(this); setAcceptDrops(true); setStatusBar(0); ui.mainToolBar->setMovable(false); ui.mainToolBar->setMaximumHeight(24); ui.mainToolBar->addAction(ui.actionNew_Job); ui.mainToolBar->addAction(ui.actionOpen_Exsisting_Job_File); ui.mainToolBar->addAction(ui.actionSave_To_Job); ui.mainToolBar->addSeparator(); ui.mainToolBar->addAction(ui.actionShow_Slice_Window); pAboutBox = new aboutbox(this); pEditView = new SliceEditView(this); pEditView->pCPJ = &cPJ; pEditView->pBuilder = this; dirtied = false; continueLoading = true; if(infile == "") { newJob(); } else { openJob(infile); } }
void CTCPSocket::write(const void* buffer, UInt32 n) { bool wasEmpty; { CLock lock(&m_mutex); // must not have shutdown output if (!m_writable) { sendEvent(getOutputErrorEvent()); return; } // ignore empty writes if (n == 0) { return; } // copy data to the output buffer wasEmpty = (m_outputBuffer.getSize() == 0); m_outputBuffer.write(buffer, n); // there's data to write m_flushed = false; } // make sure we're waiting to write if (wasEmpty) { setJob(newJob()); } }
void CTCPSocket::connect(const CNetworkAddress& addr) { { CLock lock(&m_mutex); // fail on attempts to reconnect if (m_socket == NULL || m_connected) { sendConnectionFailedEvent("busy"); return; } try { if (ARCH->connectSocket(m_socket, addr.getAddress())) { sendEvent(getConnectedEvent()); onConnected(); } else { // connection is in progress m_writable = true; } } catch (XArchNetwork& e) { throw XSocketConnect(e.what()); } } setJob(newJob()); }
// Multiplexer callback while a non-blocking connect() is pending.
// Returns a fresh job once the connection succeeds or fails, or the
// same job to keep waiting.
ISocketMultiplexerJob* CTCPSocket::serviceConnecting(ISocketMultiplexerJob* job, bool, bool write, bool error)
{
    CLock lock(&m_mutex);

    // should only check for errors if error is true but checking a new
    // socket (and a socket that's connecting should be new) for errors
    // should be safe and Mac OS X appears to have a bug where a
    // non-blocking stream socket that fails to connect immediately is
    // reported by select as being writable (i.e. connected) even when
    // the connection has failed.  this is easily demonstrated on OS X
    // 10.3.4 by starting a synergy client and telling to connect to
    // another system that's not running a synergy server.  it will
    // claim to have connected then quickly disconnect (i guess because
    // read returns 0 bytes).  unfortunately, synergy attempts to
    // reconnect immediately, the process repeats and we end up
    // spinning the CPU.  luckily, OS X does set SO_ERROR on the
    // socket correctly when the connection has failed so checking for
    // errors works.  (curiously, sometimes OS X doesn't report
    // connection refused.  when that happens it at least doesn't
    // report the socket as being writable so synergy is able to time
    // out the attempt.)
    // NOTE: `error || true` is intentional — always check, per above.
    if (error || true) {
        try {
            // connection may have failed or succeeded
            ARCH->throwErrorOnSocket(m_socket);
        }
        catch (XArchNetwork& e) {
            // connect failed: report and replace this job
            sendConnectionFailedEvent(e.what().c_str());
            onDisconnected();
            return newJob();
        }
    }

    if (write) {
        // writable with no pending error means the connect succeeded
        sendEvent(getConnectedEvent());
        onConnected();
        return newJob();
    }

    // still connecting: keep waiting with the same job
    return job;
}
// Two 2-block files mapped across two nodes: once all four maps are
// finished, the scheduler must hand out one reduce per node, each
// grouping exactly the map outputs produced on that node.
void jobConCombiner2Archivos2Bloques2NodosYMapsFinalizados_pedimosSiguientesTareas_devuelve2Reduces()
{
    t_job *job = newJob(CON_COMBINER);
    jobAgregarArchivoMDFS(job,"/holy.txt",2);
    jobAgregarArchivoMDFS(job,"/holy2.txt",2);

    // run the four map tasks to completion, alternating the two nodes
    t_tarea *map1 = jobObtenerSiguienteTarea(job);
    tareaMarcarEnEjecucion(map1,newNodo("nodo1", "192.168.1.101", "5001"),"archivo1");
    tareaMarcarFinalizada(map1);
    t_tarea *map2 = jobObtenerSiguienteTarea(job);
    tareaMarcarEnEjecucion(map2,newNodo("nodo2", "192.168.1.102", "5001"),"archivo2");
    tareaMarcarFinalizada(map2);
    t_tarea *map3 = jobObtenerSiguienteTarea(job);
    tareaMarcarEnEjecucion(map3,newNodo("nodo1", "192.168.1.101", "5001"),"archivo3");
    tareaMarcarFinalizada(map3);
    t_tarea *map4 = jobObtenerSiguienteTarea(job);
    tareaMarcarEnEjecucion(map4,newNodo("nodo2", "192.168.1.102", "5001"),"archivo4");
    tareaMarcarFinalizada(map4);

    // ask for the two follow-up tasks
    t_tarea *reduce1 = jobObtenerSiguienteTarea(job);
    tareaMarcarEnEjecucion(reduce1,newNodo("nodo1", "192.168.1.101", "5001"),"reduce1");
    t_tarea *reduce2 = jobObtenerSiguienteTarea(job);
    tareaMarcarEnEjecucion(reduce2,newNodo("nodo2", "192.168.1.102", "5001"),"reduce2");

    // first reduce: must be a reduce over nodo1's outputs (archivo1, archivo3)
    CU_ASSERT_TRUE(tareaEsReduce(reduce1));
    t_list *archivosRemotosReduce1 = tareaReduceObtenerArchivosRemotos(reduce1);
    CU_ASSERT_EQUAL(list_size(archivosRemotosReduce1),2);
    t_archivoRemoto *archivo1Reduce1 = list_get(archivosRemotosReduce1,0);
    CU_ASSERT_STRING_EQUAL(archivoRemotoObtenerNodo(archivo1Reduce1)->nombre,"nodo1");
    CU_ASSERT_STRING_EQUAL(archivoRemotoObtenerNombreArchivo(archivo1Reduce1),"archivo1");
    t_archivoRemoto *archivo2Reduce1 = list_get(archivosRemotosReduce1, 1);
    CU_ASSERT_STRING_EQUAL(archivoRemotoObtenerNodo(archivo2Reduce1)->nombre,"nodo1");
    CU_ASSERT_STRING_EQUAL(archivoRemotoObtenerNombreArchivo(archivo2Reduce1),"archivo3");

    // second reduce: must be a reduce over nodo2's outputs (archivo2, archivo4)
    CU_ASSERT_TRUE(tareaEsReduce(reduce2));
    t_list *archivosRemotosReduce2 = tareaReduceObtenerArchivosRemotos(reduce2);
    CU_ASSERT_EQUAL(list_size(archivosRemotosReduce2),2);
    t_archivoRemoto *archivo1Reduce2 = list_get(archivosRemotosReduce2,0);
    CU_ASSERT_STRING_EQUAL(archivoRemotoObtenerNodo(archivo1Reduce2)->nombre,"nodo2");
    CU_ASSERT_STRING_EQUAL(archivoRemotoObtenerNombreArchivo(archivo1Reduce2),"archivo2");
    t_archivoRemoto *archivo2Reduce2 = list_get(archivosRemotosReduce2, 1);
    CU_ASSERT_STRING_EQUAL(archivoRemotoObtenerNodo(archivo2Reduce2)->nombre,"nodo2");
    CU_ASSERT_STRING_EQUAL(archivoRemotoObtenerNombreArchivo(archivo2Reduce2),"archivo4");
}
// Construct from an already-connected socket (e.g. one returned by
// accept()): initialize, run the connected handlers, and install the
// initial multiplexer job.
CTCPSocket::CTCPSocket(CArchSocket socket) : m_mutex(), m_socket(socket), m_flushed(&m_mutex, true)
{
    assert(m_socket != NULL);

    // socket starts in connected state
    init();
    onConnected();
    setJob(newJob());
}
void NewTask::on_buttonBoxWhetherOk_accepted()
{
    // Both the download URL and the file name must parse as valid
    // URLs before the task is handed off; otherwise do nothing.
    if (!QUrl(ui->lineEditUrl->text()).isValid() || !QUrl(ui->lineEditFileName->text()).isValid()) {
        return;
    }

    // propagate the dialog's settings to the downloader...
    emit setDownloadedDirectory(m_dir);
    emit setFileName(ui->lineEditFileName->text());
    emit setSaveLocation(ui->lineEditSaveLocation->text());
    emit setThreadNum(ui->spinBoxThreadNum->value());
    emit runDownloader(ui->lineEditUrl->text());
    // ...and register the new job itself
    emit newJob(ui->lineEditFileName->text(), m_dir, ui->lineEditUrl->text(), ui->spinBoxThreadNum->value());
}
// Construct from an already-connected socket: wire up the event queue
// and the multiplexer, initialize, run the connected handlers, and
// install the initial multiplexer job.
TCPSocket::TCPSocket(IEventQueue* events, SocketMultiplexer* socketMultiplexer, ArchSocket socket) : IDataSocket(events), m_events(events), m_socket(socket), m_flushed(&m_mutex, true), m_socketMultiplexer(socketMultiplexer)
{
    assert(m_socket != NULL);

    // socket starts in connected state
    init();
    onConnected();
    setJob(newJob());
}
/*
 * Handles a freshly-accepted JOB connection: reads the job's data
 * from the socket and hands it to the scheduler.  If the handshake
 * yields no job, a throwaway job is built just to notify the peer
 * that it is finished.
 */
static void atenderNuevoJob(SocketConecta *conexion) {
    loguearYMostrarEnPantalla(LOG_LEVEL_INFO, "Se establecio una nueva conexion con un JOB");
    t_job *job = protocoloJobObtenerDatosJob(conexion);
    if (job != NULL)
        planificadorAgregarJob(job);
    else {
        /* fixed: the inner variable used to shadow the outer `job`,
         * which made the failure path easy to misread */
        t_job *jobRechazado = newJob(SIN_COMBINER);
        jobRechazado->conexion = conexion;
        protocoloJobNotificarFin(jobRechazado);
    }
}
void jobCon3MapsEnEjecucion_SeBajaUnNodo_laTareaMapDeEseNodoQuedaNoEjecutada()
{
    // Job with three map tasks, each running on a different node.
    t_job *job = newJob(SIN_COMBINER);
    jobAgregarArchivoMDFS(job, "/holy.txt", 3);

    t_tarea *tareaEnNodo1 = jobObtenerSiguienteTarea(job);
    tareaMarcarEnEjecucion(tareaEnNodo1, newNodo("nodo1", "192.168.1.101", "5001"), "archivo1");
    t_tarea *tareaEnNodo2 = jobObtenerSiguienteTarea(job);
    tareaMarcarEnEjecucion(tareaEnNodo2, newNodo("nodo2", "192.168.1.102", "5001"), "archivo2");
    t_tarea *tareaEnNodo3 = jobObtenerSiguienteTarea(job);
    tareaMarcarEnEjecucion(tareaEnNodo3, newNodo("nodo3", "192.168.1.102", "5001"), "archivo3");

    // Taking nodo3 down must move its map task back to "not executed".
    jobDarDeBajaNodo(job, "nodo3");
    CU_ASSERT_TRUE(tareaEstaNoEjecutada(tareaEnNodo3));
}
void jobConCombiner1Archivo2BloqueYPrimerReduceFalla_pedirSiguienteTarea_devuelveMismoReduce()
{
    // Combiner job over a 2-block file: run both maps to completion.
    t_job *job = newJob(CON_COMBINER);
    jobAgregarArchivoMDFS(job, "/holy.txt", 2);

    t_tarea *primerMap = jobObtenerSiguienteTarea(job);
    tareaMarcarEnEjecucion(primerMap, newNodo("nodo1", "192.168.1.101", "5001"), "archivo1");
    tareaMarcarFinalizada(primerMap);
    t_tarea *segundoMap = jobObtenerSiguienteTarea(job);
    tareaMarcarEnEjecucion(segundoMap, newNodo("nodo2", "192.168.1.101", "5001"), "archivo2");
    tareaMarcarFinalizada(segundoMap);

    // Start the first reduce, then make it fail (back to not-executed).
    t_tarea *reduceFallido = jobObtenerSiguienteTarea(job);
    tareaMarcarEnEjecucion(reduceFallido, newNodo("nodo1", "192.168.1.101", "5001"), "reduce1");
    tareaMarcarNoEjecutada(reduceFallido);

    // The scheduler must hand back the very same reduce task.
    t_tarea *reintento = jobObtenerSiguienteTarea(job);
    CU_ASSERT_PTR_EQUAL(reintento, reduceFallido);
}
ISocketMultiplexerJob*
SecureSocket::serviceAccept(ISocketMultiplexerJob* job, bool, bool write, bool error)
{
    Lock lock(&getMutex());

    int status = 0;
#ifdef SYSAPI_WIN32
    status = secureAccept(static_cast<int>(getSocket()->m_socket));
#elif SYSAPI_UNIX
    status = secureAccept(getSocket()->m_fd);
#endif

    // negative status: the secure accept failed, drop the job
    if (status < 0) {
        return NULL;
    }
    // zero status: handshake not finished, keep the current job
    if (status == 0) {
        return job;
    }
    // positive status: handshake complete, switch to a fresh job
    return newJob();
}
// All the magic happen here. First do a baseline queue simulation. Then do simulations for all policies.
// Replays the observed job log (shifted to the current minute and
// rescaled to the estimated utilization), simulates the baseline
// (max-speed) policy and every candidate policy against it, and
// returns the lowest-power policy whose mean response time stays
// within the allowed slowdown of the baseline.
shared_ptr<PowerState> Server::doSleepScale(){
    shared_ptr<PowerState> bestPolicy;
    this->logOut << "[DO_SLEEPSCALE] Adjusting the arrival times..." << endl;

    // Adjusting the workload log.
    double scale = this->jobLog.getUtilization() / this->estimator->est; // Compute the scaling factor
    this->logOut << "[DO_SLEEPSCALE] Empirical utilization in the log is " << this->jobLog.getUtilization() << ". Scaling factor is " << scale << endl;

    // Treat the past observed job stream as the future job stream, with offset increased by T minutes and utilization properly scaled.
    // This means we have to adjust the past observed job stream and store it in a new vector called jobStream
    vector<Job> jobStream;
    // Shift so the first logged arrival lands at the current minute boundary.
    double offset = this->minute * 60 * 1000 - this->jobLog.getArrAt(0);
    double arrTimeNew = this->jobLog.getArrAt(0) + offset;
    this->logOut << "[DO_SLEEPSCALE] Job start at " << arrTimeNew << endl;
    double serTimeNew = this->jobLog.getSerAt(0);
    Job newJob(arrTimeNew, serTimeNew);
    jobStream.push_back(newJob);

    double arrSum = 0;  // sum of scaled inter-arrival gaps (for reporting)
    double serSum = 0;  // sum of service times (for reporting)
    for (auto i = 1; i < this->jobLog.getSize(); i++){
        // Scale the inter-arrival time and add offset.
        arrTimeNew = (this->jobLog.getArrAt(i) - this->jobLog.getArrAt(i - 1)) * scale + this->jobLog.getArrAt(i - 1) + offset;
        arrSum = arrSum + (this->jobLog.getArrAt(i) - this->jobLog.getArrAt(i - 1)) * scale;
        serTimeNew = this->jobLog.getSerAt(i);
        serSum = serSum + serTimeNew;
        Job newJob(arrTimeNew, serTimeNew);
        jobStream.push_back(newJob);
    }
    this->logOut << "[DO_SLEEPSCALE] ...Arrival time adjusted! " << "This new workload for SleepScale has utilization " << serSum / arrSum << " and first job starts at " << jobStream.at(0).arrival << endl;

    // Construct the baseline -- maximum speed
    this->logOut << "[DO_SLEEPSCALE] Running the baseline..." << endl;
    simQueue(this->allPolicy.at(0), jobStream);
    this->logOut << "[DO_SLEEPSCALE] ...Baseline successfully constructed!" << endl;

    // Compute the baseline mean response time, if the system would run at maximum speed.
    double curBaselineER = (this->ER_baseline + this->allPolicy.at(0)->ER) / (this->totalNoOfJobs_baseline + jobStream.size());
    this->logOut << "[DO_SLEEPSCALE] Current baseline ER is " << curBaselineER << ". Slowdown is " << this->slowDown << endl;

    double curPolicyER = 0;
    double curPolicyEP = MAX_NUM;  // best (lowest) power seen so far
    bestPolicy = this->allPolicy.at(1);

    // Simulate all policies
    for (int i = 1; i != this->allPolicy.size(); ++i){
        simQueue(this->allPolicy.at(i), jobStream);
        // Compute the system mean response time, if the system would run using this policy
        curPolicyER = (this->ER + this->allPolicy.at(i)->ER) / (this->totalNoOfJobs + jobStream.size());
        // Reject a policy if its performance or power do not satisfy the constraint.
        if (this->allPolicy.at(i)->EP <= curPolicyEP && curPolicyER <= this->slowDown * curBaselineER){
            bestPolicy = this->allPolicy.at(i);
            curPolicyEP = this->allPolicy.at(i)->EP;
        }
    }
    this->logOut << "[DO_SLEEPSCALE] Best policy has ER: " << (this->ER + bestPolicy->ER) / (this->totalNoOfJobs + jobStream.size()) << endl;
    this->logOut << "[DO_SLEEPSCALE] ...All policies simulated! SleepScale completes!" << endl;
    return bestPolicy;
}
// Checks out a FlexLM license for the given feature.
// Returns TRUE when the checkout succeeds and the license has not
// expired; otherwise shows a message box describing the failure and
// returns FALSE.
// Fix: removed the unreachable trailing `return TRUE;` — both
// branches of the `if(!status)` already return.
BOOL CLicenseMgr::CheckOutLicense(const char *feature)
{
    m_feature = feature;
    std::string strErr;
    int status = 0;
    int res = 0;

    setPromptForFile(1);
    status = newJob();           // initialize a licensing session
    if(!status) {
        res = checkOut(feature);
        if(!res) {
            // checked out; still reject licenses that already expired
            int days = getExpireDays(feature);
            if (days < 0) {
                ::MessageBox(NULL,"License许可文件已过期!",g_lpszMsgTitle,MB_OK|MB_ICONWARNING);
                return FALSE;
            }
            return TRUE;
        }
        else {
            // map the FlexLM error code to a user-facing message
            switch(res) {
            case -1:
                ::MessageBox(NULL,"找不到许可文件!",g_lpszMsgTitle,MB_OK|MB_ICONWARNING);
                break;
            case -2:
                ::MessageBox(NULL,"无效的许可文件格式!",g_lpszMsgTitle,MB_OK|MB_ICONWARNING);
                break;
            case -4:
                ::MessageBox(NULL,"已达到许可个数使用上限!",g_lpszMsgTitle,MB_OK|MB_ICONWARNING);
                break;
            case -8:
                ::MessageBox(NULL,"这个许可号/签名以及数据和模块不匹配!",g_lpszMsgTitle,MB_OK|MB_ICONWARNING);
                break;
            case -9:
                ::MessageBox(NULL,"系统的主机标识与许可文件中的主机标识不匹配!",g_lpszMsgTitle,MB_OK|MB_ICONWARNING);
                break;
            case -15:
                ::MessageBox(NULL,"连接不到许可服务器。\r\n可能是许可服务还没有启动,或者是使用了错误的port@host 或者许可文件,或许可文件中的TCP/IP端口号或者机器名被更改!",g_lpszMsgTitle,MB_OK|MB_ICONWARNING);
                break;
            case -34:
                ::MessageBox(NULL,"客户端和许可服务器系统之间的时钟差异太大!",g_lpszMsgTitle,MB_OK|MB_ICONWARNING);
                break;
            case -88:
                ::MessageBox(NULL,"系统时钟已经被回拨!",g_lpszMsgTitle,MB_OK|MB_ICONWARNING);
                break;
            case -97:
                ::MessageBox(NULL,"所需的供应商守护程序已关闭!",g_lpszMsgTitle,MB_OK|MB_ICONWARNING);
                break;
            default:
                // unknown code: show the library's own error string
                UTF8Tomulti(getErrorString(res),strErr);
                CString strerrcode;
                strerrcode.Format("其他异常错误!错误代码:%d\n%s", res, strErr.c_str());
                ::MessageBox(NULL,strerrcode,g_lpszMsgTitle,MB_OK|MB_ICONWARNING);
                break;
            }
            return FALSE;
        }
    }
    else {
        // session setup itself failed
        UTF8Tomulti(getErrorString(status),strErr);
        ::MessageBox(NULL,strErr.c_str(),g_lpszMsgTitle,MB_OK|MB_ICONWARNING);
        return FALSE;
    }
}
// Multiplexer callback for an established connection: flushes the
// output buffer when writable, slurps incoming data when readable,
// and translates network errors/shutdowns into stream events.
// Returns a fresh job when the socket's interest set changed, the
// same job otherwise.
ISocketMultiplexerJob* CTCPSocket::serviceConnected(ISocketMultiplexerJob* job, bool read, bool write, bool error)
{
    CLock lock(&m_mutex);

    // a pending socket error means the connection is gone
    if (error) {
        sendEvent(getDisconnectedEvent());
        onDisconnected();
        return newJob();
    }

    bool needNewJob = false;

    if (write) {
        try {
            // write data
            UInt32 n = m_outputBuffer.getSize();
            const void* buffer = m_outputBuffer.peek(n);
            n = (UInt32)ARCH->writeSocket(m_socket, buffer, n);

            // discard written data
            if (n > 0) {
                m_outputBuffer.pop(n);
                if (m_outputBuffer.getSize() == 0) {
                    // buffer drained: report flushed and stop
                    // watching for writability
                    sendEvent(getOutputFlushedEvent());
                    m_flushed = true;
                    m_flushed.broadcast();
                    needNewJob = true;
                }
            }
        }
        catch (XArchNetworkShutdown&) {
            // remote read end of stream hungup.  our output side
            // has therefore shutdown.
            onOutputShutdown();
            sendEvent(getOutputShutdownEvent());
            if (!m_readable && m_inputBuffer.getSize() == 0) {
                sendEvent(getDisconnectedEvent());
                m_connected = false;
            }
            needNewJob = true;
        }
        catch (XArchNetworkDisconnected&) {
            // stream hungup
            onDisconnected();
            sendEvent(getDisconnectedEvent());
            needNewJob = true;
        }
        catch (XArchNetwork& e) {
            // other write error
            LOG((CLOG_WARN "error writing socket: %s", e.what().c_str()));
            onDisconnected();
            sendEvent(getOutputErrorEvent());
            sendEvent(getDisconnectedEvent());
            needNewJob = true;
        }
    }

    if (read && m_readable) {
        try {
            UInt8 buffer[4096];
            size_t n = ARCH->readSocket(m_socket, buffer, sizeof(buffer));
            if (n > 0) {
                bool wasEmpty = (m_inputBuffer.getSize() == 0);

                // slurp up as much as possible
                do {
                    m_inputBuffer.write(buffer, n);
                    n = ARCH->readSocket(m_socket, buffer, sizeof(buffer));
                } while (n > 0);

                // send input ready if input buffer was empty
                if (wasEmpty) {
                    sendEvent(getInputReadyEvent());
                }
            }
            else {
                // remote write end of stream hungup.  our input side
                // has therefore shutdown but don't flush our buffer
                // since there's still data to be read.
                sendEvent(getInputShutdownEvent());
                if (!m_writable && m_inputBuffer.getSize() == 0) {
                    sendEvent(getDisconnectedEvent());
                    m_connected = false;
                }
                m_readable = false;
                needNewJob = true;
            }
        }
        catch (XArchNetworkDisconnected&) {
            // stream hungup
            sendEvent(getDisconnectedEvent());
            onDisconnected();
            needNewJob = true;
        }
        catch (XArchNetwork& e) {
            // ignore other read error
            LOG((CLOG_WARN "error reading socket: %s", e.what().c_str()));
        }
    }

    return needNewJob ? newJob() : job;
}
// All the magic happen here. First do a baseline queue simulation. Then do simulations for all policies. shared_ptr<PowerState> Server::doSleepScale(){ shared_ptr<PowerState> bestPolicy; #ifdef GEN_MM1 // If job stream simulated has to be perfect M/M/1 vector<Job> jobStream; this->logOut << "[DO_SLEEPSCALE] Generating workload in perfect M/M/1 at utilization " << this->estimator->est << endl; generateWorkloadMM1(SER_TIME, this->estimator->est, jobStream); #else // Adjust the job log such that it starts from time 0 and has utilization this->estimator->est this->logOut << "[DO_SLEEPSCALE] Adjusting the arrival times..." << endl; vector<Job> jobStream; double arrTimeNew = 0; double interArrNew = this->jobLog.getInterArrAt(0) * (this->jobLog.getUtilizationAt(0) / this->estimator->est); // Scale the inter-arrival time arrTimeNew = arrTimeNew + interArrNew; double serTimeNew = this->jobLog.getSerAt(0); Job newJob(arrTimeNew, serTimeNew, interArrNew, this->estimator->est); jobStream.push_back(newJob); double serSum = 0; // Use to track empirical utilization in the job log. for (auto i = 1; i < this->jobLog.getSize(); i++){ interArrNew = this->jobLog.getInterArrAt(i) * (this->jobLog.getUtilizationAt(i) / this->estimator->est); arrTimeNew = arrTimeNew + interArrNew; serTimeNew = this->jobLog.getSerAt(i); serSum = serSum + serTimeNew; Job newJob(arrTimeNew, serTimeNew, interArrNew, this->estimator->est); jobStream.push_back(newJob); } assert(jobStream.size() == JOB_LOG_LENGTH); this->logOut << "[DO_SLEEPSCALE] Job log adjusted! " << "This new workload for SleepScale has utilization " << serSum / arrTimeNew << " and first job starts at " << jobStream.at(0).arrival << endl; #endif double curPolicyER = 0; double curPolicyEP = MAX_NUM; bestPolicy = this->allPolicy.at(1); // this->allPolicy.at(0) is the baseline policy. DO NOT USE! 
// Simulate all policies for (int i = 1; i != this->allPolicy.size(); ++i){ simQueue(this->allPolicy.at(i), jobStream); if (this->allPolicy.at(i)->EP <= curPolicyEP && this->allPolicy.at(i)->ER <= SER_TIME * SLEEPSCALE_SLOWDOWN){ bestPolicy = this->allPolicy.at(i); curPolicyEP = this->allPolicy.at(i)->EP; } } this->logOut << "[DO_SLEEPSCALE] All policies simulated! SleepScale completes!" << endl; this->logOut << "[DO_SLEEPSCALE] The best policy is f = " << bestPolicy->freq << " and low-power state = " << bestPolicy->idle << endl; return bestPolicy; }
// Multiplexer callback for an established connection: delegates the
// actual I/O to doWrite()/doRead() and translates network errors and
// shutdowns into stream events.  The EJobResult decides whether to
// keep the current job, install a new one, or stop servicing.
ISocketMultiplexerJob* TCPSocket::serviceConnected(ISocketMultiplexerJob* job, bool read, bool write, bool error)
{
    Lock lock(&m_mutex);

    // a pending socket error means the connection is gone
    if (error) {
        sendEvent(m_events->forISocket().disconnected());
        onDisconnected();
        return newJob();
    }

    EJobResult result = kRetry;

    if (write) {
        try {
            // flush pending output
            result = doWrite();
        }
        catch (XArchNetworkShutdown&) {
            // remote read end of stream hungup.  our output side
            // has therefore shutdown.
            onOutputShutdown();
            sendEvent(m_events->forIStream().outputShutdown());
            if (!m_readable && m_inputBuffer.getSize() == 0) {
                sendEvent(m_events->forISocket().disconnected());
                m_connected = false;
            }
            result = kNew;
        }
        catch (XArchNetworkDisconnected&) {
            // stream hungup
            onDisconnected();
            sendEvent(m_events->forISocket().disconnected());
            result = kNew;
        }
        catch (XArchNetwork& e) {
            // other write error
            LOG((CLOG_WARN "error writing socket: %s", e.what()));
            onDisconnected();
            sendEvent(m_events->forIStream().outputError());
            sendEvent(m_events->forISocket().disconnected());
            result = kNew;
        }
    }

    if (read && m_readable) {
        try {
            // pull available input into the buffer
            result = doRead();
        }
        catch (XArchNetworkDisconnected&) {
            // stream hungup
            sendEvent(m_events->forISocket().disconnected());
            onDisconnected();
            result = kNew;
        }
        catch (XArchNetwork& e) {
            // ignore other read error
            LOG((CLOG_WARN "error reading socket: %s", e.what()));
        }
    }

    // kBreak -> stop servicing; kNew -> replace the job; kRetry -> keep it
    return result == kBreak ? nullptr : result == kNew ? newJob() : job;
}
/* This function generates a stream of jobs under a particular utilization newRho using BigHouse cdf input. The cdf files must be in BigHouse format. The parameter offset specifies in which minute the jobs are generated thus their arrivals are within that minute. */ void Server::generateWorkloadCDF(const vector<double> &ser_prob, const vector<double> &ser_sample, const vector<double> &arr_prob, const vector<double> &arr_sample, const int &offset, const double &newRho){ this->logOut << "[GEN_CDF] Generating workload from CDFs." << endl; // Do inverse transform sampling random_device rd; // Random seed default_random_engine eng(rd()); // Random engine uniform_real_distribution<double> genUniform(0.0, 1.0); // Generate uniform distribution double newServiceProb = 0; // A sample from the uniform distribution double newInterArrivalProb = 0; // A sample from the uniform distribution double newService; // A sample from service time CDF double newInterArrival; // A sample from inter-arrival time CDF double localSumService = 0; // Keep track of the sum of service times. double localSumInterArrival = 0; // Keep track of the arrival time. double totalJobCreated = 0; vector<double> newServiceVector; vector<double> newInterArrVector; // First generate 200 jobs to estimate the empirical utilization // This part is done repeatedly -- bad. for (int i = 0; i < 200; i++){ newServiceProb = genUniform(eng); // Sample uniform distribution newInterArrivalProb = genUniform(eng); /* Finding the corresponding value via look-up is done in linear way. It should be done in binary search. 
*/ // Then find the corresponding service time value via lookup for (unsigned int i = 0; i < ser_prob.size() && ser_prob.at(i) < newServiceProb; i++) { int j = min(i + 1, ser_sample.size() - 1); newService = (ser_sample.at(i) + ser_sample.at(j)) / 2; } localSumService = localSumService + newService; newServiceVector.push_back(newService); // Then find the corresponding inter-arrival time value via lookup for (unsigned int i = 0; i < arr_prob.size() && arr_prob.at(i) < newInterArrivalProb; i++) { int j = min(i + 1, ser_sample.size() - 1); newInterArrival = (arr_sample.at(i) + arr_sample.at(j)) / 2; } localSumInterArrival = localSumInterArrival + newInterArrival; newInterArrVector.push_back(newInterArrival); } // Compute empirical utilization and the scale double scale = (localSumService / localSumInterArrival) / newRho; // Now for these jobs, push back into the jobStream with the scale until this minute is filled up localSumInterArrival = 0; // Reset localSumService = 0; // Reset int i = 0; while (i < newInterArrVector.size() && localSumInterArrival + newInterArrVector.at(i) * scale < 60 * 1000){ totalJobCreated++; Job newJob(offset * 60 * 1000 + localSumInterArrival + newInterArrVector.at(i) * scale, newServiceVector.at(i), newInterArrVector.at(i) * scale, newRho); // Has to enforce offset minute this->jobQueue.push_back(newJob); // Push into job queue that server is going to run on. this->jobLog.insertNewJob(newJob); // Push into job log that SleepScale is going to simulate on. localSumService = localSumService + newServiceVector.at(i); localSumInterArrival = localSumInterArrival + newInterArrVector.at(i) * scale; i++; } // If this minute is not filled up. Generate more jobs newServiceProb = genUniform(eng); // Sample uniform distribution newInterArrivalProb = genUniform(eng); // This is lazy! Should do a binary search! 
for (unsigned int i = 0; i < ser_prob.size() && ser_prob.at(i) < newServiceProb; i++) { int j = min(i + 1, ser_sample.size() - 1); newService = (ser_sample.at(i) + ser_sample.at(j)) / 2; } for (unsigned int i = 0; i < arr_prob.size() && arr_prob.at(i) < newInterArrivalProb; i++) { int j = min(i + 1, arr_sample.size() - 1); newInterArrival = (arr_sample.at(i) + arr_sample.at(j)) / 2; } while (localSumInterArrival + newInterArrival * scale < 60 * 1000){ totalJobCreated++; Job newJob(offset * 60 * 1000 + localSumInterArrival + newInterArrival * scale, newService, newInterArrival * scale, newRho); // Has to enforce offset minute this->jobQueue.push_back(newJob); this->jobLog.insertNewJob(newJob); localSumInterArrival = localSumInterArrival + newInterArrival * scale; localSumService = localSumService + newService; newServiceProb = genUniform(eng); // Sample uniform distribution newInterArrivalProb = genUniform(eng); // This is lazy! Should do a binary search! for (unsigned int i = 0; i < ser_prob.size() && ser_prob.at(i) < newServiceProb; i++) { int j = min(i + 1, ser_sample.size() - 1); newService = (ser_sample.at(i) + ser_sample.at(j)) / 2; } for (unsigned int i = 0; i < arr_prob.size() && arr_prob.at(i) < newInterArrivalProb; i++) { int j = min(i + 1, arr_sample.size() - 1); newInterArrival = (arr_sample.at(i) + arr_sample.at(j)) / 2; } } this->logOut << "[GEN_CDF] Workload generated successfully! Total number of jobs generated: " << totalJobCreated << ". Empirical utilization for this minute is " << localSumService / localSumInterArrival << ". Mean service time is " << localSumService / totalJobCreated << endl; }