////////////////////////////////////////////////////////////////////////////////
// Implements feedback when adding special tags to a task.
void feedback_special_tags (const Task& task, const std::string& tag)
{
  if (context.verbose ("special"))
  {
    std::string msg;
    if (tag == "nocolor")     msg = STRING_FEEDBACK_TAG_NOCOLOR;
    else if (tag == "nonag")  msg = STRING_FEEDBACK_TAG_NONAG;
    else if (tag == "nocal")  msg = STRING_FEEDBACK_TAG_NOCAL;
    else if (tag == "next")   msg = STRING_FEEDBACK_TAG_NEXT;

    if (msg.length ())
    {
      if (task.id)
        std::cout << format (msg, task.id) << "\n";
      else
        std::cout << format (msg, task.get ("uuid")) << "\n";
    }
  }
}
std::string onProjectChange (Task& task, bool scope /* = true */)
{
  std::stringstream msg;
  std::string project = task.get ("project");

  if (project != "")
  {
    if (scope)
      msg << format (STRING_HELPER_PROJECT_CHANGE, project) << " ";

    // Count pending and done tasks, for this project.
    int count_pending = 0;
    int count_done = 0;
    std::vector <Task> all = context.tdb2.all_tasks ();
    countTasks (all, project, count_pending, count_done);

    // count_done  count_pending  percentage
    // ----------  -------------  ----------
    //          0              0          0%
    //         >0              0        100%
    //          0             >0          0%
    //         >0             >0  calculated
    int percentage = 0;
    if (count_done == 0)
      percentage = 0;
    else if (count_pending == 0)
      percentage = 100;
    else
      percentage = (count_done * 100 / (count_done + count_pending));

    msg << format (STRING_HELPER_PROJECT_COMPL, project, percentage)
        << " "
        << format (STRING_HELPER_PROJECT_REM, count_pending, count_pending + count_done)
        << "\n";
  }

  return msg.str ();
}
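// A minimal sketch of the countTasks helper invoked above. Its real
// definition lives elsewhere in this codebase; the project-matching rule and
// status values assumed here are illustrative, not the actual implementation.
static void countTasks (
  const std::vector <Task>& all,
  const std::string& project,
  int& count_pending,
  int& count_done)
{
  for (const Task& t : all)
    if (t.get ("project") == project)
    {
      if (t.getStatus () == Task::pending)
        ++count_pending;
      else if (t.getStatus () == Task::completed)
        ++count_done;
    }
}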
void IoWait::CoSwitch(std::vector<FdStruct> && fdsts, int timeout_ms)
{
    Task* tk = g_Scheduler.GetCurrentTask();
    if (!tk) return ;

    uint32_t id = ++tk->GetIoWaitData().io_block_id_;
    tk->state_ = TaskState::io_block;
    tk->GetIoWaitData().wait_successful_ = 0;
    tk->GetIoWaitData().io_block_timeout_ = timeout_ms;
    tk->GetIoWaitData().io_block_timer_.reset();
    tk->GetIoWaitData().wait_fds_.swap(fdsts);
    for (auto &fdst : tk->GetIoWaitData().wait_fds_) {
        fdst.epoll_ptr.tk = tk;
        fdst.epoll_ptr.io_block_id = id;
    }

    // After the swap above, fdsts no longer holds the watched fds, so the
    // count must be taken from wait_fds_.
    DebugPrint(dbg_ioblock, "task(%s) CoSwitch id=%d, nfds=%d, timeout=%d",
            tk->DebugInfo(), id,
            (int)tk->GetIoWaitData().wait_fds_.size(), timeout_ms);

    g_Scheduler.CoYield();
}
bool Controller::modifyTask( const Task& task )
{
    qDebug() << "Controller::modifyTask: committing changes to task"
             << task.id();
    // modify the task itself:
    bool result = m_storage->modifyTask( task );
    Q_ASSERT( result );
    if ( ! result ) {
        qDebug() << "Controller::modifyTask: modifyTask failed!";
        return result;
    }

    updateSubscriptionForTask( task );
    emit taskUpdated( task );
    return result;
}
TaskQueue::TaskQueue(Task& taskType)
    : taskMaxNum(TASKMAXNUM)
{
    taskSize = taskType.GetSize();

    // Create shared memory: a small info header plus room for taskMaxNum tasks.
    size_t info_size = sizeof(int) * 2;
    size_t total_size = info_size + taskSize * taskMaxNum;
    shmid = shmget(IPC_PRIVATE, total_size, 0600 | IPC_CREAT | IPC_EXCL);
    if (shmid < 0) {
        cout << "task queue shared memory creation failed" << endl;
        exit(1);
    } else if ((shmptr = shmat(shmid, 0, 0)) == (void *)-1) {
        cout << "task queue init: error attaching shared memory" << endl;
        exit(1);
    } else {
        memset(shmptr, 0, info_size);
        if (shmdt(shmptr) != 0) {
            cout << "task queue init: error detaching shared memory" << endl;
            exit(1);
        }
    }
    shmptr = (void *)-1;

    // Semaphore init: slot 0 starts at 1 (presumably a mutex), slot 1 at 0
    // (presumably a count of queued tasks).
    if ((semid = semget(IPC_PRIVATE, 2, 0600 | IPC_CREAT | IPC_EXCL)) == -1) {
        cout << "task queue semaphore semget error" << endl;
        exit(1);
    }
    unsigned short semval[2] = {1, 0};
    if (semctl(semid, 0, SETALL, semval) == -1) {
        cout << "task queue semaphore val init error" << endl;
        exit(1);
    }
}
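// A minimal sketch of how push/pop would pair with the two semaphores
// initialised above ({1, 0}: slot 0 a mutex, slot 1 an item count). The
// function names and the ring-buffer steps are illustrative, not this
// class's actual API.
void example_push(int semid)
{
    struct sembuf lock   = {0, -1, 0};  // P on the mutex
    struct sembuf unlock = {0, +1, 0};  // V on the mutex
    struct sembuf post   = {1, +1, 0};  // V on the item count

    semop(semid, &lock, 1);
    // ... copy one task into the shared-memory buffer here ...
    semop(semid, &unlock, 1);
    semop(semid, &post, 1);             // wake a waiting consumer
}

void example_pop(int semid)
{
    struct sembuf wait   = {1, -1, 0};  // P on the item count: blocks if empty
    struct sembuf lock   = {0, -1, 0};
    struct sembuf unlock = {0, +1, 0};

    semop(semid, &wait, 1);
    semop(semid, &lock, 1);
    // ... copy one task out of the shared-memory buffer here ...
    semop(semid, &unlock, 1);
}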
void TaskEditor::setTask( const Task& task )
{
    Q_ASSERT( m_ui );
    m_task = task;

    const TaskTreeItem& taskTreeItem =
        MODEL.charmDataModel()->taskTreeItem( task.id() );
    m_ui->labelTaskName->setText(
        MODEL.charmDataModel()->fullTaskName( taskTreeItem.task() ) );
    m_ui->lineEditName->setText( task.name() );

    if( task.parent() != 0 ) {
        const TaskTreeItem& parentItem =
            MODEL.charmDataModel()->taskTreeItem( task.parent() );
        m_ui->pushButtonParent->setText( parentItem.task().name() );
    } else {
        m_ui->pushButtonParent->setText( tr( "Choose Parent Task" ) );
    }
    m_ui->checkBoxTopLevel->setChecked( task.parent() == 0 );

    QDate start = task.validFrom().date();
    if( start.isValid() ) {
        m_ui->dateEditFrom->setDate( start );
        m_ui->checkBoxFrom->setChecked( false );
    } else {
        m_ui->checkBoxFrom->setChecked( true );
        m_ui->dateEditFrom->setDate( QDate::currentDate() );
    }

    QDate end = task.validUntil().date();
    if( end.isValid() ) {
        m_ui->dateEditTo->setDate( end );
        m_ui->checkBoxUntil->setChecked( false );
    } else {
        m_ui->checkBoxUntil->setChecked( true );
        m_ui->dateEditTo->setDate( QDate::currentDate() );
    }

    checkInvariants();
}
void Server::parsingAddressesJSONToTask(Document& document, Task& task) throw(string)
{
    if (!document.IsObject()) {
        throw(string("BAD REQUEST"));
    }
    // Check HasMember first: operator[] on a missing key is undefined
    // behaviour in rapidjson.
    if (!document.HasMember("addresses") || !document["addresses"].IsArray()) {
        throw(string("BAD REQUEST"));
    }
    Value& addresses = document["addresses"];
    task.initTask(addresses.Size());
    cout << "parsingJSON, Task nr: " << task.taskNumber << endl;
    for (SizeType i = 0; i < addresses.Size(); i++) {
        if (!addresses[i].IsObject() || !addresses[i].HasMember("address")
                || !addresses[i]["address"].IsString()) {
            throw(string("BAD REQUEST"));
        }
        task.ip[i] = addresses[i]["address"].GetString();
    }
}
void ColumnTags::render (
  std::vector <std::string>& lines,
  Task& task,
  int width,
  Color& color)
{
  std::string tags = task.get (_name);
  if (tags != "")
  {
    if (_style == "indicator")
    {
      lines.push_back (
        color.colorize (
          rightJustify (context.config.get ("tag.indicator"), width)));
    }
    else if (_style == "count")
    {
      std::vector <std::string> all;
      split (all, tags, ',');
      lines.push_back (
        color.colorize (
          rightJustify ("[" + format ((int)all.size ()) + "]", width)));
    }
    else if (_style == "default" || _style == "list")
    {
      std::replace (tags.begin (), tags.end (), ',', ' ');
      std::vector <std::string> all;
      wrapText (all, tags, width, _hyphenate);

      std::vector <std::string>::iterator i;
      for (i = all.begin (); i != all.end (); ++i)
        lines.push_back (color.colorize (leftJustify (*i, width)));
    }
  }
}
int handle()
{
    if(task->manager == NULL) {
        errors << "TaskManager::TaskHandler: invalid task with unset manager" << endl;
        return -1;
    }

    {
        Mutex::ScopedLock lock(*task->mutex);
        // Bug fix: the original tested the constant TS_QUEUED itself (always
        // true); the intent is to test the task's current state.
        task->state = (task->state == Task::TS_QUEUED)
            ? Task::TS_RUNNINGQUEUED : Task::TS_RUNNING;
    }

    int ret = task->breakSignal ? -1 : task->handle();

    {
        SDL_mutexP(task->manager->mutex);
        boost::shared_ptr<Mutex> m = task->mutex;
        Mutex::ScopedLock lock(*m);
        if(task->queuedTask) {
            if(!task->manager->quitSignal) {
                Mutex::ScopedLock lock(*task->queuedTask->mutex);
                task->manager->runningTasks.insert(task->queuedTask);
                TaskHandler* handler = new TaskHandler();
                handler->task = task->queuedTask;
                task->queuedTask->state = Task::TS_WAITFORIMMSTART;
                threadPool->start(handler, task->queuedTask->name + " handler", true);
            }
            else // We were requested to quit, i.e. we should not start this queued task anymore.
                delete task->queuedTask;
        }
        task->manager->runningTasks.erase(task);
        SDL_CondSignal(task->manager->taskFinished);
        SDL_mutexV(task->manager->mutex);
        delete task;
    }
    return ret;
}
void BCLGedf::beta(const Task &t_i, const Task &t_k, fractional_t &beta_i)
{
    unsigned long n = max_jobs_contained(t_i, t_k);
    integral_t c_i, tmp;

    c_i = t_i.get_wcet();
    tmp = t_i.get_period();
    tmp *= n;

    if (tmp < t_k.get_deadline())
        // no risk of overflow
        tmp = t_k.get_deadline() - n * t_i.get_period();
    else
        // test says zero is lower limit
        tmp = 0;

    beta_i = n * c_i;
    beta_i += min(c_i, tmp);
    beta_i /= t_k.get_deadline();
}
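// For reference, the quantity computed above corresponds to the per-task
// interference term of the Bertogna-Cirinei-Lipari G-EDF test; this is a
// hedged reading of the code, not a quote from the paper:
//
//   beta_i(T_k) = ( n * C_i + min(C_i, max(0, D_k - n * T_i)) ) / D_k
//
// where n = max_jobs_contained(T_i, T_k), C_i is the WCET and T_i the period
// of the interfering task, and D_k is the deadline of the analyzed task.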
void QwwTaskPanel::insertTask(int index, QWidget * task, const QIcon & icon, const QString & label)
{
    if (!task) return;

    Task *tsk = new Task(task);
    tsk->setToggleIcon(m_toggleIcon);
    if (label.isNull())
        tsk->setName(task->windowTitle());
    else {
        tsk->setName(label);
        task->setWindowTitle(label);
    }
    if (icon.isNull()) {
        tsk->setIcon(task->windowIcon());
    } else {
        tsk->setIcon(icon);
        task->setWindowIcon(icon);
    }
    static_cast<QBoxLayout*>(m_panel->layout())->insertWidget(index, tsk);
    m_tasks.insert(index, tsk);
    if (m_tasks.count() == 1) {
        setCurrentIndex(0);
    }
    tsk->show();
}
Task* EDF_BSH::Schedule(double time)
{
    availableTaskIterator->First();
    if (availableTaskIterator->IsDone())
        return NULL; // no available task to schedule

    Task* bestTask = availableTaskIterator->CurrentItem();
    double closestDeadline = bestTask->getTarrival() + bestTask->getDeadline() - time;

    // Earliest Deadline First: pick the task whose absolute deadline is nearest.
    for (availableTaskIterator->First(); !availableTaskIterator->IsDone();
            availableTaskIterator->Next()) {
        Task* tempTask = availableTaskIterator->CurrentItem();
        double tempDeadline = tempTask->getTarrival() + tempTask->getDeadline() - time;
        if (tempDeadline < closestDeadline) {
            closestDeadline = tempDeadline;
            bestTask = tempTask;
        }
    }
    return bestTask;
}
bool Scheduler::is_started(const uint_fast8_t i)
{
    Task *pCurTask = m_functionList[i];
    uint_fast32_t time = m_pHAL->scheduler->millis() - pCurTask->get_timer();

    // Time yet to start the current emitter?
    if (time <= m_tickrateList[i] + pCurTask->get_delay()) {
        return false;
    } else {
        // Release the block for the transmitter
        pCurTask->reset();
    }

    if (pCurTask->start()) {
        // Set timer to the current time
        pCurTask->set_timer(m_pHAL->scheduler->millis());
    } else {
        return false;
    }

    return true;
}
TaskList XmlSerializationTests::tasksToTest()
{
    // set up test candidates:
    TaskList tasks;
    Task task;
    task.setName( "A task" );
    task.setId( 42 );
    task.setParent( 4711 );
    task.setSubscribed( true );
    task.setValidFrom( QDateTime::currentDateTime() );
    Task task2;
    task2.setName( "Another task" );
    task2.setId( -1 );
    task2.setParent( 1000000000 );
    task2.setSubscribed( false );
    task2.setValidUntil( QDateTime::currentDateTime() );
    Task task3; // deliberately left default-constructed
    tasks << task3 << task << task2;
    return tasks;
}
void Robot::handleTasks(float dt)
{
    Task *task = getCurrentTask();
    if (task == NULL)
        return;

    if (!task->isStarted()) {
        task->onStart(*this, dt);
        task->setStarted(true);
    }

    // onStep returns false when the task has finished; move on to the next one.
    if (task->onStep(*this, dt) == false) {
        task->onEnd(*this, dt);
        delete task;
        tasks.pop_front();
        handleTasks(dt);
    }
}
TaskPhasePtr Task_lastPhase(Task& self){ return self.lastPhase(); }
TaskPhasePtr Task_addPhase2(Task& self, const std::string& caption){ return self.addPhase(caption); }
TaskPhasePtr Task_addPhase1(Task& self, TaskPhase* phase){ return self.addPhase(phase); }
TaskPhasePtr Task_phase(Task& self, int index){ return self.phase(index); }
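// These thin wrappers give Boost.Python unambiguous, overload-free signatures
// for Task's methods. A hedged sketch of how they might be registered; the
// module name, held type, and exact def list are illustrative only:
BOOST_PYTHON_MODULE(TaskExample)
{
    python::class_<Task, TaskPtr>("Task")
        .def("phase", Task_phase)
        .def("lastPhase", Task_lastPhase)
        .def("addPhase", Task_addPhase1)   // repeated def() registers overloads
        .def("addPhase", Task_addPhase2);
}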
/** Thread method. Will wait for new tasks and run them
 * as scheduled to it. */
void ThreadPoolRunnable::run()
{
  Task * task;

  // If there are no tasks yet, wait up to m_waitSec for them to come up
  while (m_scheduler->size() == 0 && m_waitSec > 0.0)
  {
    Poco::Thread::sleep(10); // millisec
    m_waitSec -= 0.01; // Subtract ten millisec from the time left to wait.
  }

  while (m_scheduler->size() > 0)
  {
    // Request the task from the scheduler.
    // Will be NULL if not found.
    task = m_scheduler->pop(m_threadnum);

    if (task)
    {
      // Task-specific mutex if specified?
      Mutex * mutex = task->getMutex();
      if (mutex)
        mutex->lock();

      try
      {
        // Run the task (synchronously within this thread)
        task->run();
      }
      catch (std::exception &e)
      {
        // The task threw an exception!
        // This will clear out the list of tasks, allowing all threads to finish.
        m_scheduler->abort(std::runtime_error(e.what()));
      }

      // Tell the scheduler that we finished this task
      m_scheduler->finished(task, m_threadnum);

      // Report progress, if specified.
      if (m_prog)
        m_prog->report();

      // Unlock the mutex, if any.
      if (mutex)
        mutex->unlock();

      // We now delete the task to free up memory
      delete task;
    }
    else
    {
      // No appropriate task for this thread (perhaps a mutex is locked),
      // but there are more tasks. So we wait a bit before checking again.
      Poco::Thread::sleep(10); // millisec
    }
  }
  // Ran out of tasks that could be run. Thread will now exit.
}
bool operator == (const Task& left, const Task& right)
{
  // Order of task statuses is important.
  if (left.statuses().size() != right.statuses().size()) {
    return false;
  }

  for (int i = 0; i < left.statuses().size(); i++) {
    if (left.statuses().Get(i) != right.statuses().Get(i)) {
      return false;
    }
  }

  return left.name() == right.name() &&
    left.task_id() == right.task_id() &&
    left.framework_id() == right.framework_id() &&
    left.executor_id() == right.executor_id() &&
    left.slave_id() == right.slave_id() &&
    left.state() == right.state() &&
    Resources(left.resources()) == Resources(right.resources()) &&
    left.status_update_state() == right.status_update_state() &&
    left.status_update_uuid() == right.status_update_uuid() &&
    left.labels() == right.labels() &&
    left.discovery() == right.discovery();
}
void TCPListenerSocket::ProcessEvent(int /*eventBits*/)
{
    // We are executing on the same thread as every other socket,
    // so whatever you do here has to be fast.
    struct sockaddr_in addr;
#if __Win32__ || __osf__ || __sgi__ || __hpux__
    int size = sizeof(addr);
#else
    socklen_t size = sizeof(addr);
#endif
    Task* theTask = NULL;
    TCPSocket* theSocket = NULL;

    // fSocket data member of TCPSocket.
    int osSocket = accept(fFileDesc, (struct sockaddr*)&addr, &size);

    //test osSocket = -1;
    if (osSocket == -1)
    {
        // Take a look at what this error is.
        int acceptError = OSThread::GetErrno();
        if (acceptError == EAGAIN)
        {
            // If it's EAGAIN, there's nothing on the listen queue right now,
            // so modwatch and return.
            this->RequestEvent(EV_RE);
            return;
        }

        //test acceptError = ENFILE;
        //test acceptError = EINTR;
        //test acceptError = ENOENT;

        // If one of these errors is returned, we're out of file descriptors:
        // the server will be failing on sockets, logs, qtgroups and qtuser
        // auth file accesses and movie files. The server is not functional.
        if (acceptError == EMFILE || acceptError == ENFILE)
        {
#ifndef __Win32__
            QTSSModuleUtils::LogErrorStr(qtssFatalVerbosity,
                "Out of File Descriptors. Set max connections lower and check "
                "for competing usage from other processes. Exiting.");
#endif
            exit (EXIT_FAILURE);
        }
        else
        {
            char errStr[256];
            errStr[sizeof(errStr) - 1] = 0;
            qtss_snprintf(errStr, sizeof(errStr) - 1,
                "accept error = %d '%s' on socket. Clean up and continue.",
                acceptError, strerror(acceptError));
            WarnV( (acceptError == 0), errStr);

            theTask = this->GetSessionTask(&theSocket);
            if (theTask == NULL)
            {
                close(osSocket);
            }
            else
            {
                theTask->Signal(Task::kKillEvent); // just clean up the task
            }

            if (theSocket)
                theSocket->fState &= ~kConnected; // turn off connected state

            return;
        }
    }

    theTask = this->GetSessionTask(&theSocket);
    if (theTask == NULL)
    {
        // This should be a disconnect. Do an ioctl call?
        close(osSocket);
        if (theSocket)
            theSocket->fState &= ~kConnected; // turn off connected state
    }
    else
    {
        Assert(osSocket != EventContext::kInvalidFileDesc);

        // Set options on the socket.
        // We are a server; always disable the Nagle algorithm.
        int one = 1;
        int err = ::setsockopt(osSocket, IPPROTO_TCP, TCP_NODELAY, (char*)&one, sizeof(int));
        AssertV(err == 0, OSThread::GetErrno());

        err = ::setsockopt(osSocket, SOL_SOCKET, SO_KEEPALIVE, (char*)&one, sizeof(int));
        AssertV(err == 0, OSThread::GetErrno());

        int sndBufSize = 96L * 1024L;
        err = ::setsockopt(osSocket, SOL_SOCKET, SO_SNDBUF, (char*)&sndBufSize, sizeof(int));
        AssertV(err == 0, OSThread::GetErrno());

        // Set up the socket. When there is data on the socket,
        // theTask will get a kReadEvent event.
        theSocket->Set(osSocket, &addr);
        theSocket->InitNonBlocking(osSocket);
        theSocket->SetTask(theTask);
        theSocket->RequestEvent(EV_RE);

        theTask->SetThreadPicker(Task::GetBlockingTaskThreadPicker()); // the RTSP Task processing threads
    }

    if (fSleepBetweenAccepts)
    {
        // We are at our maximum supported sockets, so slow down to leave time
        // to process the active ones (we will respond with errors or service).
        // Wake up and execute again after sleeping. The timer must be reset
        // each time through.
        //qtss_printf("TCPListenerSocket slowing down\n");
        this->SetIdleTimer(kTimeBetweenAcceptsInMsec); // sleep 1 second
    }
    else
    {
        // Sleep until there is a read event outstanding (another client wants to connect).
        //qtss_printf("TCPListenerSocket normal speed\n");
        this->RequestEvent(EV_RE);
    }

    fOutOfDescriptors = false; // always false for now; we don't properly handle this elsewhere in the code
}
int main()
{
    Config *p = Config::get_instance();
    string ip, port;
    string dict_path;
    string model_path;

    // read the required settings from the config file
    p->get_file_name("server_ip", ip);
    p->get_file_name("server_port", port);
    p->get_file_name("dict_path", dict_path);
    p->get_file_name("model_path", model_path);

    // initialize the word-segmentation tool
    CppJieba::MixSegment segment(dict_path, model_path);
    cout << "Loading segment done!" << endl;

    Search search;
    vector<Document> result_vec;

    TCPSocket server(ip, port); // ip and port come from the config file
    server.tcp_server_init();   // socket(), bind(), listen(), set non-blocking

    struct epoll_event ev, events[MAX_EVENTS];
    int epollfd = epoll_create(256);
    if (epollfd == -1) {
        throw runtime_error("epoll_create");
    }

    // Register the listening descriptor. Plain EPOLLIN means level-triggered,
    // which is epoll's default mode; no extra flag exists or is needed for it.
    int listenfd = server.get_fd();
    ev.data.fd = listenfd;
    ev.events = EPOLLIN;
    epoll_ctl(epollfd, EPOLL_CTL_ADD, listenfd, &ev);

    while (true) {
        int nfds = epoll_wait(epollfd, events, MAX_EVENTS, TIMEOUT);
        if (nfds == -1) {
            throw runtime_error("epoll wait error!");
        }

        // handle every ready event
        for (int ix = 0; ix < nfds; ++ix) {
            if (events[ix].data.fd == listenfd) {
                // a new client connected on the listening socket
                int new_fd = server.tcp_accept();
                // server.set_non_blocking(new_fd); // optional: non-blocking mode
                ev.events = EPOLLIN;
                ev.data.fd = new_fd;
                epoll_ctl(epollfd, EPOLL_CTL_ADD, new_fd, &ev);
            }
            else if (events[ix].events & EPOLLIN) {
                // an established client sent data: read the query
                cout << "-------------------------" << endl;
                cout << "read..." << endl;
                int sockfd = events[ix].data.fd;
                char recv_buf[1024];
                int read_len = server.recv_message(sockfd, recv_buf, 1024);
                if (read_len == 0) {
                    // peer closed the connection; don't re-arm a closed fd
                    close(sockfd);
                    events[ix].data.fd = -1;
                    continue;
                }
                result_vec.clear();
                search.search_result(recv_buf, result_vec, segment);

                // switch this fd to write mode so the result can be sent
                ev.data.fd = sockfd;
                ev.events = EPOLLOUT;
                epoll_ctl(epollfd, EPOLL_CTL_MOD, sockfd, &ev);
            }
            else if (events[ix].events & EPOLLOUT) {
                // the socket is writable: send the search result
                int sockfd = events[ix].data.fd;
                Task task;
                task._client_fd = sockfd;
                task._send_vec = result_vec;
                cout << "--------------" << endl;
                cout << "_send_vec.size():" << task._send_vec.size() << endl;
                task.excute_task();
                task._send_vec.clear();
                result_vec.clear();

                // done writing; switch the fd back to read mode
                ev.data.fd = sockfd;
                ev.events = EPOLLIN;
                epoll_ctl(epollfd, EPOLL_CTL_MOD, sockfd, &ev);
            }
        }
    }
    close(epollfd);
    return 0;
}
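// Side note on the design choice above: if edge-triggered mode (EPOLLET) were
// wanted instead of the default level-triggered mode, the descriptor must be
// non-blocking and fully drained on every wakeup, because readiness is
// reported only once per transition. A minimal sketch of such a drain loop;
// the function name is illustrative, not part of the server class above.
static void drain_edge_triggered(int fd)
{
    char buf[1024];
    for (;;) {
        ssize_t n = recv(fd, buf, sizeof(buf), 0);
        if (n > 0)
            continue;                        // ... process the n bytes in buf, keep reading
        if (n == -1 && (errno == EAGAIN || errno == EWOULDBLOCK))
            break;                           // kernel buffer is empty: drained
        close(fd);                           // EOF or a real error
        break;
    }
}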
// Retrieves information of done tasks.
void Storage::retrieveDoneTasks(TaskList& listOfTasks)
{
    std::string userInput = DEFAULT_EMPTY;
    std::ifstream input(_doneFileName);
    int taskHeader;

    if (input.is_open()) {
        while (!input.eof()) {
            taskHeader = START_VALUE;
            getline(input, userInput);
            if (userInput == DEFAULT_EMPTY) {
                return;
            }
            Task* newTask = new Task;
            while (userInput != SEPARATOR) {
                std::string details = Parser::removeFirstWord(userInput);
                details = Parser::trim(details);
                switch (taskHeader) {
                case HEADER_DESCRIPTION: {
                    newTask->setDescription(details);
                    taskHeader++;
                    break;
                }
                case HEADER_START_DATE: {
                    taskHeader++;
                    if (details == DEFAULT_EMPTY || details == SPACE_PARAMETER) {
                        break;
                    }
                    try {
                        newTask->setStartDate(_parser.createDate(details));
                    } catch (...) {
                        delete newTask->getStartDate();
                        newTask->setStartDate(NULL);
                        _corrupted = true;
                    }
                    break;
                }
                case HEADER_END_DATE: {
                    taskHeader++;
                    if (details == DEFAULT_EMPTY || details == SPACE_PARAMETER) {
                        break;
                    }
                    try {
                        newTask->setEndDate(_parser.createDate(details));
                    } catch (...) {
                        delete newTask->getEndDate();
                        newTask->setEndDate(NULL);
                        _corrupted = true;
                    }
                    break;
                }
                case HEADER_START_TIME: {
                    taskHeader++;
                    if (details == DEFAULT_EMPTY || details == SPACE_PARAMETER) {
                        break;
                    }
                    try {
                        newTask->setStartTime(_parser.createTime(details));
                    } catch (...) {
                        delete newTask->getStartTime();
                        newTask->setStartTime(NULL);
                        _corrupted = true;
                    }
                    break;
                }
                case HEADER_END_TIME: {
                    taskHeader++;
                    if (details == DEFAULT_EMPTY || details == SPACE_PARAMETER) {
                        break;
                    }
                    try {
                        newTask->setEndTime(_parser.createTime(details));
                    } catch (...) {
                        delete newTask->getEndTime();
                        newTask->setEndTime(NULL);
                        _corrupted = true;
                    }
                    break;
                }
                case HEADER_CATEGORY: {
                    taskHeader++;
                    newTask->setCategory(details);
                    break;
                }
                case HEADER_STATUS: {
                    taskHeader++;
                    // every task in the done file is done by definition
                    newTask->setStatusAsDone();
                    break;
                }
                default: {
                    taskHeader++;
                    _corrupted = true;
                    break;
                }
                }
                if (taskHeader > COLUMN_OUT_OF_BOUND) {
                    break;
                }
                getline(input, userInput);
            }
            listOfTasks.addTaskToDoneList(*newTask);
            delete newTask; // assuming addTaskToDoneList stores a copy; otherwise this leaked
        }
    }
    input.close();
}
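// Hedged illustration of the record layout this parser implies: one
// "<header> <details>" line per field, in fixed order, with records separated
// by the SEPARATOR line. The header keywords, date/time formats, and
// separator token below are guesses for illustration only:
//
//   description  buy groceries
//   startdate    12/03/2015
//   enddate      12/03/2015
//   starttime    0900
//   endtime      1030
//   category     errands
//   status       done
//   ----------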
void Task_setPreCommand(Task& self, python::object func){ return self.setPreCommand(PyTaskFunc(func)); }
TaskCommandPtr Task_addToggleCommand2(Task& self, const std::string& caption){ return self.addToggleCommand(caption); }
TaskCommandPtr Task_addToggleCommand1(Task& self){ return self.addToggleCommand(); }
int poll(struct pollfd *fds, nfds_t nfds, int timeout)
{
    if (!poll_f) coroutine_hook_init();

    Task* tk = g_Scheduler.GetCurrentTask();
    DebugPrint(dbg_hook, "task(%s) hook poll(nfds=%d, timeout=%d). %s coroutine.",
            tk ? tk->DebugInfo() : "nil",
            (int)nfds, timeout,
            g_Scheduler.IsCoroutine() ? "In" : "Not in");

    if (!tk)
        return poll_f(fds, nfds, timeout);

    if (timeout == 0)
        return poll_f(fds, nfds, timeout);

    // --------------------------------
    // When every fd is negative, poll is equivalent to sleep.
    nfds_t negative_fd_n = 0;
    for (nfds_t i = 0; i < nfds; ++i)
        if (fds[i].fd < 0)
            ++negative_fd_n;

    if (nfds == negative_fd_n) {
        // co sleep
        g_Scheduler.SleepSwitch(timeout);
        return 0;
    }
    // --------------------------------

    // Do one non-blocking poll first, to catch errors and invalid fds.
    int res = poll_f(fds, nfds, 0);
    if (res != 0) return res;

    // create io-sentry
    IoSentryPtr io_sentry = MakeShared<IoSentry>(tk, fds, nfds);

    // add file descriptors into epoll or poll.
    bool added = false;
    for (nfds_t i = 0; i < nfds; ++i) {
        fds[i].revents = 0; // clear revents
        pollfd & pfd = io_sentry->watch_fds_[i];
        if (pfd.fd < 0)
            continue;

        FdCtxPtr fd_ctx = FdManager::getInstance().get_fd_ctx(pfd.fd);
        if (!fd_ctx || fd_ctx->closed()) {
            // bad file descriptor
            pfd.revents = POLLNVAL;
            continue;
        }

        if (!fd_ctx->add_into_reactor(pfd.events, io_sentry)) {
            // TODO: support regular-file fds
            pfd.revents = POLLNVAL;
            continue;
        }

        added = true;
    }

    if (!added) {
        errno = 0;
        return nfds;
    }

    // set timer
    if (timeout > 0)
        io_sentry->timer_ = g_Scheduler.ExpireAt(
                std::chrono::milliseconds(timeout),
                [io_sentry]{
                    g_Scheduler.GetIoWait().IOBlockTriggered(io_sentry);
                });

    // save io-sentry
    tk->io_sentry_ = io_sentry;

    // yield
    g_Scheduler.GetIoWait().CoSwitch();

    // drop the task's io_sentry_ reference
    tk->io_sentry_.reset();

    if (io_sentry->timer_) {
        g_Scheduler.CancelTimer(io_sentry->timer_);
        io_sentry->timer_.reset();
    }

    int n = 0;
    for (nfds_t i = 0; i < nfds; ++i) {
        fds[i].revents = io_sentry->watch_fds_[i].revents;
        if (fds[i].revents) ++n;
    }
    errno = 0;
    return n;
}
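// Hedged usage note: inside a coroutine this hook turns poll into a
// cooperative wait, so the common poll-as-sleep idiom no longer blocks the
// scheduler thread. `go` is this framework's coroutine launcher; treat the
// snippet as an illustration, not a test case.
//
//   go []{
//       poll(nullptr, 0, 500);  // no fds to watch: takes the SleepSwitch path, ~500 ms
//   };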
TaskCommandPtr Task_lastCommand(Task& self){ return self.lastCommand(); }
int select(int nfds, fd_set *readfds, fd_set *writefds, fd_set *exceptfds,
        struct timeval *timeout)
{
    if (!select_f) coroutine_hook_init();

    int timeout_ms = -1;
    if (timeout)
        timeout_ms = timeout->tv_sec * 1000 + timeout->tv_usec / 1000;

    Task* tk = g_Scheduler.GetCurrentTask();
    DebugPrint(dbg_hook, "task(%s) hook select(nfds=%d, rd_set=%p, wr_set=%p, er_set=%p, timeout=%d ms).",
            tk ? tk->DebugInfo() : "nil",
            (int)nfds, readfds, writefds, exceptfds, timeout_ms);

    if (!tk)
        return select_f(nfds, readfds, writefds, exceptfds, timeout);

    if (timeout_ms == 0)
        return select_f(nfds, readfds, writefds, exceptfds, timeout);

    if (!nfds) {
        g_Scheduler.SleepSwitch(timeout_ms);
        return 0;
    }

    nfds = std::min<int>(nfds, FD_SETSIZE);

    // Do one non-blocking select first, to catch errors and invalid fds.
    fd_set rfs, wfs, efs;
    FD_ZERO(&rfs); FD_ZERO(&wfs); FD_ZERO(&efs);
    if (readfds) rfs = *readfds;
    if (writefds) wfs = *writefds;
    if (exceptfds) efs = *exceptfds;
    timeval zero_tv = {0, 0};
    int n = select_f(nfds, (readfds ? &rfs : nullptr),
            (writefds ? &wfs : nullptr),
            (exceptfds ? &efs : nullptr), &zero_tv);
    if (n != 0) {
        if (readfds) *readfds = rfs;
        if (writefds) *writefds = wfs;
        if (exceptfds) *exceptfds = efs;
        return n;
    }

    // -------------------------------------
    // convert the fd_sets to pollfds, and clear all 3 fd_sets.
    std::pair<fd_set*, uint32_t> sets[3] = {
        {readfds, POLLIN},
        {writefds, POLLOUT},
        {exceptfds, 0}
    };
    std::map<int, int> pfd_map;
    for (int i = 0; i < 3; ++i) {
        fd_set* fds = sets[i].first;
        if (!fds) continue;
        int event = sets[i].second;
        for (int fd = 0; fd < nfds; ++fd) {
            if (FD_ISSET(fd, fds)) {
                pfd_map[fd] |= event;
            }
        }
        FD_ZERO(fds);
    }

    std::vector<pollfd> pfds(pfd_map.size());
    int i = 0;
    for (auto &kv : pfd_map) {
        pollfd &pfd = pfds[i++];
        pfd.fd = kv.first;
        pfd.events = kv.second;
    }
    // -------------------------------------

    // -------------------------------------
    // poll (the hooked version above, so only this coroutine blocks)
    n = poll(pfds.data(), pfds.size(), timeout_ms);
    if (n <= 0) return n;
    // -------------------------------------

    // -------------------------------------
    // Convert the results back into fd_sets. The readiness information is in
    // revents; testing events here would wrongly mark every watched fd ready.
    int ret = 0;
    for (size_t i = 0; i < pfds.size(); ++i) {
        pollfd &pfd = pfds[i];
        if (pfd.revents & POLLIN) {
            if (readfds) {
                FD_SET(pfd.fd, readfds);
                ++ret;
            }
        }
        if (pfd.revents & POLLOUT) {
            if (writefds) {
                FD_SET(pfd.fd, writefds);
                ++ret;
            }
        }
        if (pfd.revents & ~(POLLIN | POLLOUT)) {
            if (exceptfds) {
                FD_SET(pfd.fd, exceptfds);
                ++ret;
            }
        }
    }
    // -------------------------------------
    return ret;
}