Example #1
void EventLoop::Impl::processTasks()
{
    TaskQueue tq;
    std::unique_lock<LockType> ul(task_mutex_);
    task_queue_.swap(tq); // drain the shared queue in one O(1) swap

    while (auto node = tq.front_node()) {
        tq.pop_front();
        auto &task_slot = node->element();
        task_slot.state = TaskSlot::State::RUNNING;
        ul.unlock();
        { // execute the task without holding task_mutex_
            LockGuard g(task_run_mutex_);
            if (task_slot.state != TaskSlot::State::INACTIVE) { // skip if cancelled
                task_slot();
                task_slot.state = TaskSlot::State::INACTIVE;
            }
        }
        ul.lock();
        if (task_slot.token) {
            task_slot.token->removeTaskNode(node);
        }
    }
}
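
The pattern worth noting here is the swap-under-lock drain: the consumer exchanges the shared task queue for an empty local one, so producers only contend for the brief swap rather than for the whole drain, and each task runs with the queue lock released. A minimal sketch of the same idea, assuming a plain std::deque of std::function tasks instead of the intrusive TaskQueue/TaskSlot types above (SimpleLoop is a hypothetical name):

#include <deque>
#include <functional>
#include <mutex>

class SimpleLoop {
public:
    void post(std::function<void()> task) {
        std::lock_guard<std::mutex> g(mutex_);
        queue_.push_back(std::move(task));
    }

    // Drain everything queued so far; tasks posted while we run
    // land in the fresh queue and are picked up on the next call.
    void processTasks() {
        std::deque<std::function<void()>> local;
        {
            std::lock_guard<std::mutex> g(mutex_);
            queue_.swap(local); // O(1); the lock is held only for the swap
        }
        for (auto &task : local) {
            task(); // runs without holding mutex_
        }
    }

private:
    std::mutex mutex_;
    std::deque<std::function<void()>> queue_;
};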
Example #2
// Pack the queued jobs into multiple packages (max_tasks_per_package jobs,
// e.g. 2000, per package) and send them to the ready queue of the server
// that requested to steal tasks.
void* migrateTasks(void *args) {

	Worker *worker = (Worker*) args;
	int index;
	while (ON) {
		while (migratev.any()) {
			pthread_mutex_lock(&mq_lock);
			// re-check under the lock: another thread may have drained migratev
			if (migratev.any()) {
				index = migratev.pop();
			} else {
				pthread_mutex_unlock(&mq_lock);
				continue;
			}
			if (index < 0 || index >= worker->num_nodes) {
				// ignore destination indices that fall outside the cluster
				pthread_mutex_unlock(&mq_lock);
				continue;
			}
			pthread_mutex_unlock(&mq_lock);
			pthread_mutex_lock(&m_lock);
			pthread_mutex_lock(&lock);
			int32_t num_tasks = rqueue.size() / 2; // migrate half of the ready queue
			if (num_tasks < 1) {
				pthread_mutex_unlock(&lock);
				pthread_mutex_unlock(&m_lock);
				continue;
			}
			try {
				mqueue.assign(rqueue.end() - num_tasks, rqueue.end());
				rqueue.erase(rqueue.end() - num_tasks, rqueue.end());
			} catch (...) {
				cout
						<< "migrateTasks: cannot allocate memory while copying tasks to migrate queue"
						<< endl;
				pthread_exit(NULL);
			}
			pthread_mutex_unlock(&lock);

			map<uint32_t, NodeList> update_map = worker->get_map(mqueue);
			int update_ret = worker->zht_update(update_map, "nodehistory",
					index);
			int num_packages = 0;
			long total_submitted = 0;

			num_tasks = mqueue.size();
			while (total_submitted != num_tasks) {
				Package package;
				string alltasks;
				package.set_virtualpath(worker->ip);
				package.set_operation(22);
				num_packages++;
				int num_tasks_this_package = max_tasks_per_package;
				int num_tasks_left = num_tasks - total_submitted;
				if (num_tasks_left < max_tasks_per_package) {
					num_tasks_this_package = num_tasks_left;
				}
				for (int j = 0; j < num_tasks_this_package; j++) {
					if (mqueue.size() < 1) {
						// queue drained early: flush the partial package, then stop
						if (j > 0) {
							total_submitted = total_submitted + j;
							package.set_realfullpath(alltasks);
							string str = package.SerializeAsString();
							pthread_mutex_lock(&msg_lock);
							int32_t ret = worker->svrclient.svrtosvr(str,
									str.length(), index);
							pthread_mutex_unlock(&msg_lock);
						}
						total_submitted = num_tasks;
						break;
					}
					try {
						alltasks.append(mqueue.front()->task_id);
						alltasks.append("\'\""); // task ID delimiter
						if (LOGGING) {
							migrate_fp << " taskid = "
									<< mqueue.front()->task_id;
						}
						delete mqueue.front();
						mqueue.pop_front();
					} catch (...) {
						cout
								<< "migrateTasks: exception occurred while processing mqueue"
								<< endl;
					}
				}
				if (total_submitted == num_tasks) {
					break;
				}
				total_submitted = total_submitted + num_tasks_this_package;
				package.set_realfullpath(alltasks);
				string str = package.SerializeAsString();
				pthread_mutex_lock(&msg_lock);
				int32_t ret = worker->svrclient.svrtosvr(str, str.length(),
						index);
				pthread_mutex_unlock(&msg_lock);
			}
			pthread_mutex_unlock(&m_lock);
		}
	}
	return NULL; // unreachable while ON is set, but completes the pthread signature
}
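
The chunking loop above is the heart of the migration path: fill each package with at most max_tasks_per_package task IDs and send a short final package for the remainder. Here is that arithmetic in isolation, as a minimal sketch with hypothetical names (send_package stands in for the SerializeAsString-plus-svrtosvr step, and the queue holds plain strings instead of TaskQueue_Item pointers):

#include <algorithm>
#include <cstdio>
#include <deque>
#include <string>

static const int max_tasks_per_package = 2000;

// Hypothetical stand-in for building a Package and calling svrclient.svrtosvr().
static void send_package(const std::string &alltasks, int count) {
	std::printf("sending package with %d tasks (%zu bytes)\n",
			count, alltasks.size());
}

static void migrate_all(std::deque<std::string> &mqueue) {
	long total_submitted = 0;
	const long num_tasks = (long) mqueue.size();
	while (total_submitted != num_tasks) {
		// the last package may carry fewer than max_tasks_per_package tasks
		int this_package = (int) std::min<long>(max_tasks_per_package,
				num_tasks - total_submitted);
		std::string alltasks;
		for (int j = 0; j < this_package; j++) {
			alltasks.append(mqueue.front());
			alltasks.append("\'\""); // same task-ID delimiter the worker uses
			mqueue.pop_front();
		}
		send_package(alltasks, this_package);
		total_submitted += this_package;
	}
}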
Example #3
// Thread that monitors the ready queue and executes tasks according to the
// number of available (idle) cores.
void* check_ready_queue(void* args) {

	Worker *worker = (Worker*) args;

	TaskQueue_Item *qi;
	while (ON) {
		while (rqueue.size() > 0) {
			pthread_mutex_lock(&lock);
			// re-check under the lock: another executor may have taken the task
			if (rqueue.size() > 0) {
				qi = rqueue.front();
				rqueue.pop_front();
			} else {
				pthread_mutex_unlock(&lock);
				continue;
			}
			pthread_mutex_unlock(&lock);

			pthread_mutex_lock(&mutex_idle);
			worker->num_idle_cores--;
			pthread_mutex_unlock(&mutex_idle);

			if (!work_exec_flag) {
				work_exec_flag = 1;
				// record that this worker has started executing its first job
				FILE *fp = fopen(file_worker_start.c_str(), "a+");
				if (fp != NULL) {
					char fbuf[100];
					memset(fbuf, 0, sizeof(fbuf));
					snprintf(fbuf, sizeof(fbuf),
							"%s:%d Got jobs..Started executing\n",
							worker->ip.c_str(), worker->selfIndex);
					fwrite(fbuf, sizeof(char), strlen(fbuf), fp);
					fflush(fp);
					fclose(fp);
				}
			}

			//cout << "task to lookup = " << qi->task_id << endl;
			string value = worker->zht_lookup(qi->task_id);
			Package recv_pkg;
			recv_pkg.ParseFromString(value); //cout << "check_ready_queue: task " << qi->task_id << " node history = " << recv_pkg.nodehistory() << endl;
			int num_vector_count, per_vector_count;
			vector<vector<string> > tokenize_string = tokenize(
					recv_pkg.realfullpath(), '\"', '\'', num_vector_count,
					per_vector_count);
			//cout << "worker " << worker->selfIndex<< " pertask processing done" << endl;
			/*cout << "task = " << qi->task_id << " notify list: ";
			 for(int l = 0; l < tokenize_string.at(1).size(); l++) {
			 cout << tokenize_string.at(1).at(l) << " ";
			 } cout << endl;*/

			stringstream duration_ss;
			try {
				duration_ss << tokenize_string.at(0).at(1);
			} catch (exception& e) {
				cout << "check_ready_queue: num_vector_count = "
						<< num_vector_count << " per_vector_count = "
						<< per_vector_count << endl;
				cout << "check_ready_queue: tokenize_string.at(0).at(1) "
						<< e.what() << endl;
				cout << "check_ready_queue: value = " << value << endl;
				exit(1);
			}
			double duration_tmp;
			duration_ss >> duration_tmp;
			// scale to whole microseconds for usleep(), rounding to nearest
			long duration = floor(duration_tmp * 1000 + 0.5);
			string client_id;
			try {
				client_id = tokenize_string.at(0).at(2);
			} catch (exception& e) {
				cout << "check_ready_queue: num_vector_count = "
						<< num_vector_count << " per_vector_count = "
						<< per_vector_count << endl;
				cout << "check_ready_queue: tokenize_string.at(0).at(2) "
						<< e.what() << endl;
				exit(1);
			}

			uint64_t sub_time;
			try {
				stringstream sub_time_ss;
				sub_time_ss << tokenize_string.at(0).at(3);
				sub_time_ss >> sub_time;
			} catch (exception& e) {
				cout << "check_ready_queue: num_vector_count = "
						<< num_vector_count << " per_vector_count = "
						<< per_vector_count << endl;
				cout << "check_ready_queue: tokenize_string.at(0).at(3) "
						<< e.what() << endl;
				exit(1);
			}

			timespec task_start_time, task_end_time;
			clock_gettime(CLOCK_REALTIME, &task_start_time);
			// simulate the task by sleeping for its recorded duration
			uint32_t exit_code = usleep(duration);
			clock_gettime(CLOCK_REALTIME, &task_end_time);

			// push completed task into complete queue
			pthread_mutex_lock(&c_lock);
			cqueue.push_back(make_pair(qi->task_id, tokenize_string.at(1)));
			pthread_mutex_unlock(&c_lock);

			// wall-clock timestamps in nanoseconds plus the elapsed interval
			uint64_t st = (uint64_t) task_start_time.tv_sec * 1000000000
					+ (uint64_t) task_start_time.tv_nsec;
			uint64_t et = (uint64_t) task_end_time.tv_sec * 1000000000
					+ (uint64_t) task_end_time.tv_nsec;
			timespec diff = timediff(task_start_time, task_end_time);

			pthread_mutex_lock(&mutex_idle);
			worker->num_idle_cores++;
			task_comp_count++;
			pthread_mutex_unlock(&mutex_idle);

			if (LOGGING) {
				string fin_str;
				stringstream out;
				out << qi->task_id << "+" << client_id << " exitcode "
						<< exit_code << " node history = "
						<< recv_pkg.nodehistory() << " Interval "
						<< diff.tv_sec << " S  " << diff.tv_nsec << " NS"
						<< " server " << worker->ip;
				fin_str = out.str();
				pthread_mutex_lock(&mutex_finish);
				fin_fp << fin_str << endl;
				pthread_mutex_unlock(&mutex_finish);
			}
			delete qi;
		}
	}
	return NULL; // unreachable while ON is set, but completes the pthread signature
}
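
The timediff() helper used above to compute the task interval is not shown in this example; a minimal sketch of one common implementation, assuming end >= start, would be:

#include <time.h>

// Hypothetical implementation of the timediff() helper: returns end - start,
// borrowing one second when the nanosecond field underflows.
timespec timediff(timespec start, timespec end) {
	timespec d;
	d.tv_sec = end.tv_sec - start.tv_sec;
	d.tv_nsec = end.tv_nsec - start.tv_nsec;
	if (d.tv_nsec < 0) {
		d.tv_sec -= 1;
		d.tv_nsec += 1000000000L;
	}
	return d;
}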