Example #1
// Thread that polls the wait queue and moves tasks whose dependencies are
// satisfied into the ready queue.
void* check_wait_queue(void* args) {
	Worker *worker = (Worker*) args;
	TaskQueue_Item *qi;
	while (ON) {
		while (wqueue.size() > 0) {
			// Note: wqueue.size() and wqueue[i] are read without holding
			// w_lock, which is racy if other threads resize wqueue concurrently.
			int size = wqueue.size();
			for (int i = 0; i < size; i++) {
				qi = wqueue[i];
				if (qi != NULL) {
					int status = worker->check_if_task_is_ready(qi->task_id);
					if (status == 0) {
						worker->move_task_to_ready_queue(&qi);
						pthread_mutex_lock(&w_lock);
						wqueue[i] = NULL;
						pthread_mutex_unlock(&w_lock);
					}
				}
			}
			// Compact the queue: erase the slots that were set to NULL above.
			pthread_mutex_lock(&w_lock);
			TaskQueue::iterator last = remove_if(wqueue.begin(), wqueue.end(),
					check);
			wqueue.erase(last, wqueue.end());
			pthread_mutex_unlock(&w_lock);
			sleep(1);
		}
	}
	return NULL; // unreachable; satisfies the pthread start-routine signature
}
Example #2
int32_t Worker::get_monitoring_info() {
	if (LOGGING) {
		log_fp << "rqueue = " << rqueue.size() << " mqueue = " << mqueue.size()
				<< " wqueue = " << wqueue.size() << endl;
	}
	return task_comp_count;
	// Alternative load metric (unused):
	// ((rqueue.size() + mqueue.size() + wqueue.size()) * 10) + num_idle_cores
}
Example #3
// Returns 1 when the task queue qu is empty, 0 otherwise.
int is_no_task()
{
    if (0 == qu.size())
    {
        return 1;
    }
    return 0;
}
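Note that is_no_task() reads qu.size() with no synchronization, so its answer is only reliable if qu is not shared or the caller already holds the queue's lock. A minimal guarded variant might look like the sketch below, assuming a hypothetical pthread_mutex_t qu_lock protecting qu (neither name appears in the original example):

int is_no_task_locked()
{
    // qu_lock is an assumed mutex guarding qu.
    pthread_mutex_lock(&qu_lock);
    int empty = (0 == qu.size()) ? 1 : 0;
    pthread_mutex_unlock(&qu_lock);
    return empty;
}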
Example #4
void fun1(void* data) {
  TaskQueue* queue = static_cast<TaskQueue*>(data);
  cout << " thread " << std::this_thread::get_id() << " run" << endl;
  cout.flush();
  std::chrono::milliseconds dura(5); // only used if the sleep below is re-enabled
  for (int i = 0; i < 10; i++) {
    Task* task = new DemonTask();
    queue->PushBack(task);
    //std::this_thread::sleep_for(dura);
  }
  cout << "f1:queue size " << queue->size() << endl;
  cout.flush();
  Task* task = queue->PopFront();
  task->Action();
  //task->Release();  // note: without this, the popped task is leaked
  cout << "f1:queue size " << queue->size() << endl;
  cout.flush();
}
Example #5
// Returns 1 if the task at the front of qu is currently running, 0 otherwise.
// Note: between the emptiness check and qu.front(), another thread could pop
// the queue unless the caller holds the queue's lock.
int is_task_processing()
{
    if (0 == qu.size())
    {
        return 0;
    }

    return (TS_RUNNING == qu.front()->ts) ? 1 : 0;
}
Example #6
void* pushMessage(void* para) 
{
    ThreadPara* threadPara = (ThreadPara*)para;
    TaskQueue* pQueue = threadPara->pQueue;
    UpdateAction nAction = threadPara->nAction;

    if (pQueue == NULL) {
        return NULL;
    }

    while (1) {
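        // Take one file at a time off the back of the shared queue under the
        // lock, then push the add/delete message outside the critical section.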
        const char *pFile = NULL;

        threadPara->lock->lock();
        if (pQueue->size() == 0) {
            threadPara->lock->unlock();
            break;
        }
        pFile = pQueue->back();
        pQueue->popBack();
        threadPara->lock->unlock();

        if (pFile == NULL) {
            break;
        }
        if (ACTION_ADD == nAction) {
            TNOTE("pushing add file %s", pFile);
            pushAddMessage(threadPara->api, pFile);
        }
        else {
            TNOTE("pushing del file %s", pFile);
            pushDelMessage(threadPara->api, pFile);
        }
        delete[] pFile;
        pFile = NULL;
    }

    return NULL;
}
Example #7
// Pack the jobs into multiple packages - 2000 jobs per package - and insert
// them into the ready queue of the server that requested to steal tasks.
//int Worker::migrateTasks(int num_tasks, ZHTClient &clientRet, int index){
void* migrateTasks(void *args) {

	Worker *worker = (Worker*) args;
	int index;
	while (ON) {
		while (migratev.any()) {
			pthread_mutex_lock(&mq_lock);
			// Re-check under the lock: another thread may have drained
			// migratev between the unlocked test above and acquiring mq_lock.
			if (migratev.any()) {
				index = migratev.pop();
			} else {
				pthread_mutex_unlock(&mq_lock);
				continue;
			}
			if (index < 0 || index >= worker->num_nodes) {
				//cout << "bad index: worker = " << worker->selfIndex << " to index = " << index << endl;
				pthread_mutex_unlock(&mq_lock);
				continue;
			}
			pthread_mutex_unlock(&mq_lock);
			// Take half of the ready queue for migration.
			pthread_mutex_lock(&m_lock);
			pthread_mutex_lock(&lock);
			int32_t num_tasks = rqueue.size() / 2;
			if (num_tasks < 1) {
				pthread_mutex_unlock(&lock);
				pthread_mutex_unlock(&m_lock);
				continue;
			}
			try {
				mqueue.assign(rqueue.end() - num_tasks, rqueue.end());
				rqueue.erase(rqueue.end() - num_tasks, rqueue.end());
			} catch (...) {
				cout << "migrateTasks: cannot allocate memory while copying "
						"tasks to migrate queue" << endl;
				// Note: exiting here leaves lock and m_lock held.
				pthread_exit(NULL);
			}
			pthread_mutex_unlock(&lock);

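			// Update each migrated task's node history in ZHT before shipping
			// the tasks to server index.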
			map<uint32_t, NodeList> update_map = worker->get_map(mqueue);
			int update_ret = worker->zht_update(update_map, "nodehistory",
					index);
			/*if(index == worker->selfIndex) {
			 cout << "ALERT: MIGRATING TO ITSELF" << endl;
			 }*/
			int num_packages = 0;
			long total_submitted = 0;

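			// Send the migrated tasks in packages of at most
			// max_tasks_per_package task ids each (operation 22).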
			num_tasks = mqueue.size();
			while (total_submitted != num_tasks) {
				Package package;
				string alltasks;
				package.set_virtualpath(worker->ip);
				package.set_operation(22);
				num_packages++;
				int num_tasks_this_package = max_tasks_per_package;
				int num_tasks_left = num_tasks - total_submitted;
				if (num_tasks_left < max_tasks_per_package) {
					num_tasks_this_package = num_tasks_left;
				}
				for (int j = 0; j < num_tasks_this_package; j++) {
					//TaskQueue_item* qi = migrate_queue->remove_element();
					if (mqueue.size() < 1) {
						if (j > 0) {
							total_submitted = total_submitted + j;
							package.set_realfullpath(alltasks);
							string str = package.SerializeAsString();
							pthread_mutex_lock(&msg_lock);
							int32_t ret = worker->svrclient.svrtosvr(str,
									str.length(), index);
							pthread_mutex_unlock(&msg_lock);
						}
						//pthread_mutex_unlock (&m_lock);
						//return total_submitted;
						//pthread_exit(NULL);
						total_submitted = num_tasks;
						break;
					}
					try {
						alltasks.append(mqueue.front()->task_id);
						alltasks.append("\'\""); // Task ID
						/*stringstream num_moves_ss;
						 num_moves_ss << (mqueue.front()->num_moves + 1);
						 alltasks.append(num_moves_ss.str());  alltasks.append("\'\""); // Number of moves*/

						if (LOGGING) {
							migrate_fp << " taskid = "
									<< mqueue.front()->task_id;
							//migrate_fp << " num moves = " << (mqueue.front()->num_moves + 1);
						}
						delete mqueue.front();
						mqueue.pop_front();
					} catch (...) {
						cout << "migrateTasks: exception occurred while "
								"processing mqueue" << endl;
					}
				}
				if (total_submitted == num_tasks) {
					break;
				}
				total_submitted = total_submitted + num_tasks_this_package;
				package.set_realfullpath(alltasks);
				string str = package.SerializeAsString(); //cout << "r1: " << total_submitted << " tasks" << endl;
				pthread_mutex_lock(&msg_lock);
				int32_t ret = worker->svrclient.svrtosvr(str, str.length(),
						index); //cout << "r1 sent" << endl;
				pthread_mutex_unlock(&msg_lock);
			}
			pthread_mutex_unlock(&m_lock);
			//cout << "matrix_server: No. of tasks sent = " << total_submitted << endl;
		}
	}
	return NULL;
}
Example #8
// Insert tasks into the queue without repeated fields
//int32_t Worker::HB_insertQ_new(NoVoHT *map, Package &package) {
void* HB_insertQ_new(void* args) {

	package_thread_args targs = *((package_thread_args*) args);
	queue<string*> *source = targs.source;
	TaskQueue *dest = targs.dest;
	pthread_mutex_t *slock = targs.slock;
	pthread_mutex_t *dlock = targs.dlock;
	Worker *worker = targs.worker;

	string *st;
	Package package;

	while (ON) {
		while (source->size() > 0) {
			pthread_mutex_lock(slock);
			if (source->size() > 0) {
				try {
					st = source->front();
					source->pop(); //cout << "recd something" << endl;
				} catch (exception& e) {
					cout << "void* HB_insertQ_new: " << e.what() << endl;
					exit(1);
				}
			} else {
				pthread_mutex_unlock(slock);
				continue;
			}
			pthread_mutex_unlock(slock);
			package.ParseFromString(*st);
			delete st;

			TaskQueue_Item *qi;

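			// Unpack the serialized task list; fields are delimited by the
			// double- and single-quote characters used when the package was built.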
			uint32_t task_recd_count = 0;
			string alltasks(package.realfullpath());
			int num_vector_count, per_vector_count;
			vector<vector<string> > tokenize_string = tokenize(alltasks, '\"',
					'\'', num_vector_count, per_vector_count);
			//cout << "num_vector_count = " << num_vector_count << " per_vector_count = " << per_vector_count << endl;
			task_recd_count = num_vector_count;
			for (int i = 0; i < num_vector_count; i++) {
				qi = new TaskQueue_Item();
				try {
					qi->task_id = tokenize_string.at(i).at(0); //cout << "insertq: qi->task_id = " << qi->task_id << endl;
				} catch (exception& e) {
					cout << "void* HB_insertQ_new: (tokenize_string.at(i).at(0)) "
							<< e.what() << endl;
					exit(1);
				}
				/*stringstream num_moves_ss;
				 try {
				 num_moves_ss << tokenize_string.at(i).at(1);
				 }
				 catch (exception& e) {
				 cout << "void* HB_insertQ_new: (tokenize_string.at(i).at(0)) " << " " << e.what() << endl;
				 exit(1);
				 }
				 num_moves_ss >> qi->num_moves;*/

				if (LOGGING) {
					task_fp << " taskid = " << qi->task_id;
					//task_fp << " num moves = " << qi->num_moves;
				}

				pthread_mutex_lock(dlock);
				try {
					dest->push_back(qi);
				} catch (std::bad_alloc& exc) {
					cout << "HB_insertQ_new: cannot allocate memory while "
							"adding element to ready queue" << endl;
					pthread_exit(NULL);
				}
				pthread_mutex_unlock(dlock);
			}
			if (LOGGING) {
				log_fp << "Num tasks received = " << task_recd_count
						<< " Queue length = " << dest->size() << endl;
			}
		}
	}
	return NULL;
}
Example #9
// Number of tasks to give up to a work stealer: half the tasks in excess of the idle cores.
int32_t Worker::get_numtasks_to_steal() {
	return ((rqueue.size() - num_idle_cores) / 2);
}
Example #10
// Load metric: number of queued tasks minus the number of idle cores.
int32_t Worker::get_load_info() {
	return (rqueue.size() - num_idle_cores);
}
Example #11
// thread to monitor the ready queue and execute tasks based on the number of available cores
void* check_ready_queue(void* args) {

	Worker *worker = (Worker*) args;

	TaskQueue_Item *qi;
	while (ON) {
		while (rqueue.size() > 0) {
			pthread_mutex_lock(&lock);
			if (rqueue.size() > 0) {
				qi = rqueue.front();
				rqueue.pop_front();
			} else {
				pthread_mutex_unlock(&lock);
				continue;
			}

			pthread_mutex_unlock(&lock);

			pthread_mutex_lock(&mutex_idle);
			worker->num_idle_cores--;
			pthread_mutex_unlock(&mutex_idle);

			if (!work_exec_flag) {

				work_exec_flag = 1;

				FILE *fp = fopen(file_worker_start.c_str(), "a+");
				if (fp != NULL) {
					//fputs("fopen example", fp);
					char fbuf[100];
					memset(fbuf, 0, sizeof(fbuf));
					snprintf(fbuf, sizeof(fbuf), "%s:%d Got jobs..Started executing\n",
							worker->ip.c_str(), worker->selfIndex);
					fwrite(fbuf, sizeof(char), strlen(fbuf), fp);
					fflush(fp);
					fclose(fp);
				}

				/*worker_start << worker->ip << ":" << worker->selfIndex
				 << " Got jobs..Started excuting" << endl;*/

			}

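			// Fetch the task's metadata from ZHT and parse out its duration,
			// client id, and submission time.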
			string value = worker->zht_lookup(qi->task_id);
			Package recv_pkg;
			recv_pkg.ParseFromString(value); //cout << "check_ready_queue: task " << qi->task_id << " node history = " << recv_pkg.nodehistory() << endl;
			int num_vector_count, per_vector_count;
			vector<vector<string> > tokenize_string = tokenize(
					recv_pkg.realfullpath(), '\"', '\'', num_vector_count,
					per_vector_count);
			//cout << "worker " << worker->selfIndex<< " pertask processing done" << endl;
			/*cout << "task = " << qi->task_id << " notify list: ";
			 for(int l = 0; l < tokenize_string.at(1).size(); l++) {
			 cout << tokenize_string.at(1).at(l) << " ";
			 } cout << endl;*/

			stringstream duration_ss;
			try {
				duration_ss << tokenize_string.at(0).at(1);
			} catch (exception& e) {
				cout << "void* check_ready_queue: num_vector_count = "
						<< num_vector_count << " per_vector_count = "
						<< per_vector_count << endl;
				cout << "void* check_ready_queue: (tokenize_string.at(0).at(1)) "
						<< e.what() << endl;
				cout << "void* check_ready_queue: value = " << value << endl;
				exit(1);
			}
			double duration_tmp;
			duration_ss >> duration_tmp;
			// Convert to whole microseconds for the usleep below (the stored
			// duration appears to be in milliseconds).
			long duration = floor(duration_tmp * 1000 + 0.5);
			string client_id;
			try {
				client_id = tokenize_string.at(0).at(2);
			} catch (exception& e) {
				cout << "void* check_ready_queue: num_vector_count = "
						<< num_vector_count << " per_vector_count = "
						<< per_vector_count << endl;
				cout << "void* check_ready_queue: (tokenize_string.at(0).at(2)) "
						<< e.what() << endl;
				exit(1);
			}

			uint64_t sub_time;
			try {
				stringstream sub_time_ss;
				sub_time_ss << tokenize_string.at(0).at(3);
				sub_time_ss >> sub_time;
			} catch (exception& e) {
				cout << "void* check_ready_queue: num_vector_count = "
						<< num_vector_count << " per_vector_count = "
						<< per_vector_count << endl;
				cout << "void* check_ready_queue: (tokenize_string.at(0).at(3)) "
						<< e.what() << endl;
				exit(1);
			}

			timespec task_start_time, task_end_time;
			clock_gettime(CLOCK_REALTIME, &task_start_time);
			// Simulate the task by sleeping for its duration.
			uint32_t exit_code = usleep(duration);
			clock_gettime(CLOCK_REALTIME, &task_end_time);

			// push completed task into complete queue
			pthread_mutex_lock(&c_lock);
			cqueue.push_back(make_pair(qi->task_id, tokenize_string.at(1)));
			pthread_mutex_unlock(&c_lock);

			// append completed task
			uint64_t st = (uint64_t) task_start_time.tv_sec * 1000000000
					+ (uint64_t) task_start_time.tv_nsec;
			uint64_t et = (uint64_t) task_end_time.tv_sec * 1000000000
					+ (uint64_t) task_end_time.tv_nsec;
			timespec diff = timediff(task_start_time, task_end_time);

			pthread_mutex_lock(&mutex_idle);
			worker->num_idle_cores++;
			task_comp_count++;
			pthread_mutex_unlock(&mutex_idle);

			if (LOGGING) {
				string fin_str;
				stringstream out;
				out << qi->task_id << "+" << client_id << " exitcode " << exit_code
						<< " node history = " << recv_pkg.nodehistory()
						<< " Interval " << diff.tv_sec << " S  "
						<< diff.tv_nsec << " NS" << " server " << worker->ip;
				fin_str = out.str();
				pthread_mutex_lock(&mutex_finish);
				//fin_fp << fin_str << endl;
				pthread_mutex_unlock(&mutex_finish);
			}
			delete qi;
		}
	}
	return NULL;
}
Example #12
// thread to steal tasks if the ready queue length becomes zero
void* worksteal(void* args) {
	//cout << "entered worksteal thread" << endl;
	Worker *worker = (Worker*) args;

	// Startup barrier: wait until the startinfo file of the last member
	// (index memberList.size() - 1) is non-empty before starting to steal.

	int num = worker->svrclient.memberList.size() - 1;
	stringstream num_ss;
	num_ss << num;
	//min_lines++;
	string cmd1("cat ");
	cmd1.append(shared);
	cmd1.append("startinfo");
	cmd1.append(num_ss.str());
	cmd1.append(" | wc -l");
	string result1 = executeShell(cmd1);
	//cout << "server: minlines = " << min_lines << " cmd = " << cmd << " result = " << result << endl;
	while (atoi(result1.c_str()) < 1) {
		usleep(ws_sleep);
		result1 = executeShell(cmd1);
	}
	//cout << "worksteal started: server: " << worker->selfIndex << " minlines = " << 1 << " cmd = " << cmd1 << " result = " << result1 << endl;

	while (work_steal_signal) {
		// Spin while there are ready tasks; note this busy-wait burns a core
		// and reads rqueue.size() without holding its lock.
		while (rqueue.size() > 0) {
		}
		usleep(1000);
		// If there are no waiting ready tasks, do work stealing
		//if (worker.num_nodes > 1 && ready_queue->get_length() < 1)
		if (worker->num_nodes > 1 && rqueue.size() < 1) {
			int32_t success = worker->steal_task();
			// Keep work stealing until it succeeds
			while (success == 0) {
				failed_attempts++;
				if (failed_attempts >= fail_threshold) {
					work_steal_signal = 0;
					cout << worker->selfIndex << " stopping worksteal" << endl;
					break;
				}
				usleep(worker->poll_interval);
				success = worker->steal_task();
			}
			failed_attempts = 0;
			//cout << "Received " << success << " tasks" << endl;
		}
	}
	return NULL;
}
Example #13
int main(int argc, char **argv)
{
    const static int MAX_ACTION_LEN = 10;
    int c = 0;
    int nThreadCnt = 6;
    char pSpec[MAX_SPEC_SIZE] = {0};
    char pAction[MAX_ACTION_LEN] = {0};
    char pLog[PATH_MAX] = {0};
    char pInput[PATH_MAX] = {0};

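    // Parse options: -t spec file, -k action (add|del), -l log path,
    // -i input file, -n thread count.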
    while ((c = getopt(argc, argv, "t:l:k:i:n:h")) != -1) {
        switch(c) {
            case 't': 
                snprintf(pSpec, MAX_SPEC_SIZE, "%s", optarg);
                break;
            case 'k':
                snprintf(pAction, MAX_ACTION_LEN, "%s", optarg);
                break;
            case 'l':
                snprintf(pLog, PATH_MAX, "%s", optarg);
                break;
            case 'i':
                snprintf(pInput, PATH_MAX, "%s", optarg);
                break;
            case 'n':
                nThreadCnt = atoi(optarg);
                break;
            case 'h':
            default:
                break;
        }
    }

    if (pSpec[0] == 0 || pAction[0] == 0 || pLog[0] == 0 || pInput[0] == 0 || nThreadCnt <= 0) {
        usage(argv[0]);
        return -1;
    }

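    // Note: ThreadPara threadPara[nThreadCnt] relies on the variable-length
    // array compiler extension; std::vector<ThreadPara> would be portable.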
    ThreadPara threadPara[nThreadCnt];
    if (init(pSpec, pLog, threadPara, nThreadCnt) < 0) {
        return -1;
    }
   
    UpdateAction nAction;
    if (strcmp(pAction, "add") == 0) {
        nAction = ACTION_ADD;
    }
    else if (strcmp(pAction, "del") == 0) {
        nAction = ACTION_DELETE;
    } 
    else {
        usage(argv[0]);
        return -1;
    }

    TaskQueue queue;
    util::Mutex lock;

    if (loadQueue(pInput, queue) != 0 || queue.size() == 0) {
        TERR("load doc fail, path=%s", pInput);
        for(int i = 0; i < nThreadCnt; i++) {
            update_api_destroy(threadPara[i].api);
        }
        alog::Logger::shutdown();
        return -1;
    }

    for (int i = 0; i < nThreadCnt; i++) {
        threadPara[i].pQueue = &queue;
        threadPara[i].lock = &lock;
        threadPara[i].nAction = nAction;
        if (0 != pthread_create(&threadPara[i].nThrId, NULL, pushMessage, &threadPara[i])) {
            TERR("thread_create() failed. id=%d", i);
            for (int j = 0; j < nThreadCnt; j++) {
                update_api_destroy(threadPara[j].api);
            }
            alog::Logger::shutdown();
            return -1;
        }
    }
    for (int i = 0; i < nThreadCnt; i++) {
        if (0 != pthread_join(threadPara[i].nThrId, NULL)) {
            TERR("thread_join() failed. id=%d", i);
        }
        update_api_destroy(threadPara[i].api);
    }

    alog::Logger::shutdown();

    return 0;
}