void Scheduler::Yield() { Task* tk = GetLocalInfo().current_task; if (!tk) return ; Debug("yield task(%llu) state=%d", tk->id_, tk->state_); swapcontext(&tk->ctx_, &GetLocalInfo().scheduler); }
// Bind the UDP socket to the local address/port described by strAddr/port.
// Returns S_OK on success, an error HRESULT from address resolution or
// from the failed bind() otherwise.
STDMETHODIMP CBUDPSocket::Bind(BSTR strAddr, LONG port)
{
    SOCKADDR_IN sockAddr;
    const HRESULT hr = FillAddress(strAddr, port, &sockAddr);
    if (FAILED(hr))
        return hr;

    const int rc = bind(m_hSocket, (SOCKADDR*)&sockAddr, sizeof(sockAddr));
    if (rc != 0)
        return GetErrorResult();

    // Refresh the cached local endpoint after a successful bind
    // (presumably updates m_nPort — confirm against GetLocalInfo()).
    GetLocalInfo();
    return S_OK;
}
// Send the raw bytes of varData as a single UDP datagram to strAddr:port.
// Returns S_OK on success, or the failure HRESULT from address resolution,
// VARIANT access, or the socket send.
STDMETHODIMP CBUDPSocket::SendTo(BSTR strAddr, LONG port, VARIANT varData)
{
    SOCKADDR_IN sockAddr;
    HRESULT hr = FillAddress(strAddr, port, &sockAddr);
    if (FAILED(hr))
        return hr;

    // Pin the VARIANT's underlying buffer for the duration of the send.
    CBVarPtr varPtr;
    hr = varPtr.Attach(varData);
    if (FAILED(hr))
        return hr;

    const int sent = sendto(m_hSocket,
                            (const char *)varPtr.m_pData, varPtr.m_nSize, 0,
                            (SOCKADDR*)&sockAddr, sizeof(sockAddr));
    if (sent == SOCKET_ERROR)
        return GetErrorResult();

    // An unbound socket gets an ephemeral port on first send; pick up the
    // local endpoint once (presumably caches m_nPort — confirm).
    if (!m_nPort)
        GetLocalInfo();
    return S_OK;
}
// One-time (re)initialization for the replicator service: resets the
// service-global state, creates the configuration/list locks and the four
// startup/termination events, disables hard-error popups, and loads the
// local machine information.
//
// Returns NO_ERROR on success, or a Win32/NET_API_STATUS error code.
// On failure, any events created so far are closed and their globals reset
// to NULL so a failed start does not leak handles across service restarts.
DBGSTATIC NET_API_STATUS ReplInit( VOID )
{
    NET_API_STATUS NetStatus;

    // Init global variables.
    // This needs to be done because the service can be stopped and started
    // while the service .exe keeps running and the global data lives forever.
    // BUGBUG: memory leak of old values?
    ReplConfigExportList = NULL;
    ReplConfigImportList = NULL;
    ReplGlobalClientTerminateEvent = NULL;
    ReplGlobalMasterTerminateEvent = NULL;
    ReplGlobalExportStartupEvent = NULL;
    ReplGlobalImportStartupEvent = NULL;
    ReplGlobalClientThreadHandle = NULL;
    ReplGlobalMasterThreadHandle = NULL;
    ReplGlobalUninstallUicCode = 0;
    ReplGlobalIsServiceStopping = FALSE;
    ReplGlobalCheckpoint = 1;

    // Init lock for config data.
    ReplConfigLock = NetpCreateLock(
            CONFIG_DATA_LOCK_LEVEL,
            (LPTSTR) TEXT("config data") );
    NetpAssert( ReplConfigLock != NULL );

    // Init client list lock (needed by import lock/unlock APIs even if
    // import side not running).  Ditto for master list lock.
    RCGlobalClientListLock = NetpCreateLock(
            CLIENT_LIST_LOCK_LEVEL,
            (LPTSTR) TEXT("client list") );
    NetpAssert( RCGlobalClientListLock != NULL );

    RCGlobalDuplListLock = NetpCreateLock(
            DUPL_LIST_LOCK_LEVEL,
            (LPTSTR) TEXT("dupl list") );
    NetpAssert( RCGlobalDuplListLock != NULL );

    RMGlobalListLock = NetpCreateLock(
            MASTER_LIST_LOCK_LEVEL,
            (LPTSTR) TEXT("master list") );
    NetpAssert( RMGlobalListLock != NULL );
    // BUGBUG: out of memory?

    //
    // Create startup events (manual-reset, initially non-signaled).
    //
    ReplGlobalExportStartupEvent = CreateEvent(NULL, TRUE, FALSE, NULL);
    if (ReplGlobalExportStartupEvent == NULL) {
        NetStatus = GetLastError();
        goto Cleanup;
    }

    ReplGlobalImportStartupEvent = CreateEvent(NULL, TRUE, FALSE, NULL);
    if (ReplGlobalImportStartupEvent == NULL) {
        NetStatus = GetLastError();
        goto Cleanup;
    }

    //
    // Create termination events.
    //
    ReplGlobalClientTerminateEvent = CreateEvent(NULL, TRUE, FALSE, NULL);
    if (ReplGlobalClientTerminateEvent == NULL) {
        NetStatus = GetLastError();
        goto Cleanup;
    }

    ReplGlobalMasterTerminateEvent = CreateEvent(NULL, TRUE, FALSE, NULL);
    if (ReplGlobalMasterTerminateEvent == NULL) {
        NetStatus = GetLastError();
        goto Cleanup;
    }

    //
    // Disable the dreaded "net name deleted" popup (and all other hard
    // error popups).
    //
    (VOID) SetErrorMode( SEM_FAILCRITICALERRORS );

    //
    // Get local domain name, computer name, etc.
    //
    NetStatus = GetLocalInfo();
    if (NetStatus != NO_ERROR) {
        goto Cleanup;
    }

    return (NO_ERROR);

Cleanup:

    //
    // Close whichever events were already created so a failed start does
    // not leak handles (previously, a later CreateEvent/GetLocalInfo
    // failure left earlier handles open and their globals non-NULL).
    //
    if (ReplGlobalExportStartupEvent != NULL) {
        (VOID) CloseHandle( ReplGlobalExportStartupEvent );
        ReplGlobalExportStartupEvent = NULL;
    }
    if (ReplGlobalImportStartupEvent != NULL) {
        (VOID) CloseHandle( ReplGlobalImportStartupEvent );
        ReplGlobalImportStartupEvent = NULL;
    }
    if (ReplGlobalClientTerminateEvent != NULL) {
        (VOID) CloseHandle( ReplGlobalClientTerminateEvent );
        ReplGlobalClientTerminateEvent = NULL;
    }
    if (ReplGlobalMasterTerminateEvent != NULL) {
        (VOID) CloseHandle( ReplGlobalMasterTerminateEvent );
        ReplGlobalMasterTerminateEvent = NULL;
    }

    return (NetStatus);
}
// Run one scheduler pass on the calling thread:
//   1. Pop runnable tasks from run_task_ in chunks and resume each one via
//      swapcontext, then re-queue / park / delete it based on its state.
//   2. Poll epoll (1 ms timeout) and re-add any tasks whose I/O completed.
// Returns the number of coroutines actually executed this pass.
uint32_t Scheduler::Run() {
    ThreadLocalInfo& info = GetLocalInfo();
    info.current_task = NULL;
    // Snapshot the runnable count: it bounds how much work this pass does.
    uint32_t do_max_count = runnale_task_count_;
    uint32_t do_count = 0;
    Debug("Run --------------------------");

    // The number of coroutines executed per Run() must not exceed the
    // number that were runnable when the pass started, so that coroutines
    // waking up from the wait state still get a chance to run (and so the
    // epoll poll below is reached regularly).
    while (do_count < do_max_count) {
        // Chunk size: max_count/chunk_count, clamped to [1, max_chunk_size].
        uint32_t cnt = std::max((uint32_t)1,
                                std::min(do_max_count / GetOptions().chunk_count,
                                         GetOptions().max_chunk_size));
        Debug("want pop %u tasks.", cnt);
        SList<Task> slist = run_task_.pop(cnt);
        // NOTE(review): logs the requested count, not the size actually
        // popped — slist may hold fewer than cnt tasks.
        Debug("really pop %u tasks.", cnt);
        if (slist.empty()) break;

        SList<Task>::iterator it = slist.begin();
        while (it != slist.end()) {
            Task* tk = &*it;
            info.current_task = tk;
            Debug("enter task(%llu)", tk->id_);
            // Switch into the coroutine; control returns here when the
            // task yields, blocks, or finishes.
            swapcontext(&info.scheduler, &tk->ctx_);
            ++do_count;
            Debug("exit task(%llu) state=%d", tk->id_, tk->state_);
            info.current_task = NULL;
            // Dispatch on the state the task left itself in.
            switch (tk->state_) {
            case TaskState::runnable:
                // Still runnable: keep it in slist to be re-queued below.
                ++it;
                break;
            case TaskState::io_block:
            case TaskState::sync_block:
                // Blocked: move it to the wait list until it is woken
                // (e.g. by the epoll loop below).
                --runnale_task_count_;
                it = slist.erase(it);
                wait_task_.push(tk);
                break;
            case TaskState::done:
            default:
                // Finished (or unknown state): reclaim the task.
                --task_count_;
                --runnale_task_count_;
                it = slist.erase(it);
                delete tk;
                break;
            }
        }
        Debug("push %d task return to runnable list", slist.size());
        // Return the still-runnable remainder to the shared run queue.
        run_task_.push(slist);
    }

    // Poll for completed I/O; 1 ms timeout keeps the loop responsive
    // without busy-spinning when no tasks are runnable.
    static thread_local epoll_event evs[1024];
    int n = epoll_wait(epoll_fd, evs, 1024, 1);
    Debug("do_count=%u, do epoll event, n = %d", do_count, n);
    for (int i = 0; i < n; ++i) {
        // data.ptr was presumably set to the waiting Task when the fd was
        // registered — confirm against the epoll_ctl call site.
        Task* tk = (Task*)evs[i].data.ptr;
        // unlink() guards against re-adding a task twice; assumes it
        // returns true only when the task was detached — TODO confirm.
        if (tk->unlink()) AddTask(tk);
    }
    return do_count;
}
// True when the calling thread is currently executing inside a coroutine
// (i.e. a task context is active); false when running scheduler/plain code.
bool Scheduler::IsCoroutine() {
    return GetLocalInfo().current_task != nullptr;
}