void CMainDocument::HandleCompletedRPC() {
    int retval = 0;
    wxMutexError mutexErr = wxMUTEX_NO_ERROR;
    int i, n, requestIndex = -1;
    bool stillWaitingForPendingRequests = false;

    if (!m_RPCThread) return;

    if (current_rpc_request.isActive) return;

    // We can get here either via a CRPCFinishedEvent event posted
    // by the RPC thread or by a call from RequestRPC.  If we were
    // called from RequestRPC, the CRPCFinishedEvent will still be
    // on the event queue, so we get called twice.  Check for this here.
    if (current_rpc_request.which_rpc == 0) return;    // already handled by a call from RequestRPC

    // Find our completed request in the queue
    n = (int) RPC_requests.size();
    for (i = 0; i < n; ++i) {
        if (RPC_requests[i].isSameAs(current_rpc_request)) {
            requestIndex = i;
        } else {
            if (RPC_requests[i].rpcType == RPC_TYPE_WAIT_FOR_COMPLETION) {
                stillWaitingForPendingRequests = true;
            }
        }
    }

    if (!stillWaitingForPendingRequests) {
        if (m_RPCWaitDlg) {
            if (m_RPCWaitDlg->IsShown()) {
                m_RPCWaitDlg->EndModal(wxID_OK);
            }
            m_RPCWaitDlg->Destroy();
            m_RPCWaitDlg = NULL;
        }
        m_bWaitingForRPC = false;
    }

    if (requestIndex >= 0) {
        // Remove completed request from the queue
        RPC_requests.erase(RPC_requests.begin() + requestIndex);
    }

    retval = current_rpc_request.retval;

    if (current_rpc_request.completionTime) {
        *(current_rpc_request.completionTime) = wxDateTime::Now();
    }

    if (current_rpc_request.resultPtr) {
        *(current_rpc_request.resultPtr) = retval;
    }

    // Post-processing
    if (!retval) {
        if (current_rpc_request.rpcType == RPC_TYPE_ASYNC_WITH_REFRESH_AFTER) {
            m_bNeedRefresh = true;
        }

        if (current_rpc_request.rpcType == RPC_TYPE_ASYNC_WITH_UPDATE_TASKBAR_ICON_AFTER) {
            m_bNeedTaskBarRefresh = true;
        }
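        // The demand RPCs below are double-buffered: the RPC thread parsed the
        // reply into the buffer pointed to by arg1 (or arg2/arg3), and here we
        // publish it into exchangeBuf, the copy the GUI actually reads.  Vector
        // members are exchanged with swap() rather than copied, so exchangeBuf
        // receives the new data cheaply and arg1 is left holding the old data.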
        switch (current_rpc_request.which_rpc) {
        case RPC_GET_STATE:
            if (current_rpc_request.exchangeBuf && !retval) {
                CC_STATE* arg1 = (CC_STATE*)current_rpc_request.arg1;
                CC_STATE* exchangeBuf = (CC_STATE*)current_rpc_request.exchangeBuf;
                arg1->projects.swap(exchangeBuf->projects);
                arg1->apps.swap(exchangeBuf->apps);
                arg1->app_versions.swap(exchangeBuf->app_versions);
                arg1->wus.swap(exchangeBuf->wus);
                arg1->results.swap(exchangeBuf->results);
                exchangeBuf->global_prefs = arg1->global_prefs;
                exchangeBuf->version_info = arg1->version_info;
                exchangeBuf->executing_as_daemon = arg1->executing_as_daemon;
                exchangeBuf->host_info = arg1->host_info;
                exchangeBuf->time_stats = arg1->time_stats;
                exchangeBuf->have_nvidia = arg1->have_nvidia;
                exchangeBuf->have_ati = arg1->have_ati;
            }
            break;
        case RPC_GET_RESULTS:
            if (current_rpc_request.exchangeBuf && !retval) {
                RESULTS* arg1 = (RESULTS*)current_rpc_request.arg1;
                RESULTS* exchangeBuf = (RESULTS*)current_rpc_request.exchangeBuf;
                arg1->results.swap(exchangeBuf->results);
            }
            break;
        case RPC_GET_FILE_TRANSFERS:
            if (current_rpc_request.exchangeBuf && !retval) {
                FILE_TRANSFERS* arg1 = (FILE_TRANSFERS*)current_rpc_request.arg1;
                FILE_TRANSFERS* exchangeBuf = (FILE_TRANSFERS*)current_rpc_request.exchangeBuf;
                arg1->file_transfers.swap(exchangeBuf->file_transfers);
            }
            break;
        case RPC_GET_SIMPLE_GUI_INFO2:
            if (!retval) {
                retval = CopyProjectsToStateBuffer(
                    *(PROJECTS*)(current_rpc_request.arg1),
                    *(CC_STATE*)(current_rpc_request.arg2)
                );
            }
            if (current_rpc_request.exchangeBuf && !retval) {
                RESULTS* arg3 = (RESULTS*)current_rpc_request.arg3;
                RESULTS* exchangeBuf = (RESULTS*)current_rpc_request.exchangeBuf;
                arg3->results.swap(exchangeBuf->results);
            }
            break;
        case RPC_GET_PROJECT_STATUS1:
            if (!retval) {
                retval = CopyProjectsToStateBuffer(
                    *(PROJECTS*)(current_rpc_request.arg1),
                    *(CC_STATE*)(current_rpc_request.arg2)
                );
            }
            break;
        case RPC_GET_ALL_PROJECTS_LIST:
            if (current_rpc_request.exchangeBuf && !retval) {
                ALL_PROJECTS_LIST* arg1 = (ALL_PROJECTS_LIST*)current_rpc_request.arg1;
                ALL_PROJECTS_LIST* exchangeBuf = (ALL_PROJECTS_LIST*)current_rpc_request.exchangeBuf;
                arg1->projects.swap(exchangeBuf->projects);
            }
            break;
        case RPC_GET_DISK_USAGE:
            if (current_rpc_request.exchangeBuf && !retval) {
                DISK_USAGE* arg1 = (DISK_USAGE*)current_rpc_request.arg1;
                DISK_USAGE* exchangeBuf = (DISK_USAGE*)current_rpc_request.exchangeBuf;
                arg1->projects.swap(exchangeBuf->projects);
                exchangeBuf->d_total = arg1->d_total;
                exchangeBuf->d_free = arg1->d_free;
                exchangeBuf->d_boinc = arg1->d_boinc;
                exchangeBuf->d_allowed = arg1->d_allowed;
            }
            break;
        case RPC_GET_NOTICES:
            if (current_rpc_request.exchangeBuf && !retval) {
                NOTICES* arg2 = (NOTICES*)current_rpc_request.arg2;
                NOTICES* exchangeBuf = (NOTICES*)current_rpc_request.exchangeBuf;
                arg2->notices.swap(exchangeBuf->notices);
            }
            if (!retval) {
                CachedNoticeUpdate();   // Call this only when notice buffer is stable
            }
            m_bWaitingForGetNoticesRPC = false;
            break;
        case RPC_GET_MESSAGES:
            if (current_rpc_request.exchangeBuf && !retval) {
                MESSAGES* arg2 = (MESSAGES*)current_rpc_request.arg2;
                MESSAGES* exchangeBuf = (MESSAGES*)current_rpc_request.exchangeBuf;
                arg2->messages.swap(exchangeBuf->messages);
            }
            if (!retval) {
                CachedMessageUpdate();  // Call this only when message buffer is stable
            }
            break;
        case RPC_GET_HOST_INFO:
            if (current_rpc_request.exchangeBuf && !retval) {
                HOST_INFO* arg1 = (HOST_INFO*)current_rpc_request.arg1;
                HOST_INFO* exchangeBuf = (HOST_INFO*)current_rpc_request.exchangeBuf;
                *exchangeBuf = *arg1;
            }
            break;
        case RPC_GET_STATISTICS:
            if (current_rpc_request.exchangeBuf && !retval) {
                PROJECTS* arg1 = (PROJECTS*)current_rpc_request.arg1;
                PROJECTS* exchangeBuf = (PROJECTS*)current_rpc_request.exchangeBuf;
                arg1->projects.swap(exchangeBuf->projects);
            }
            break;
        case RPC_GET_CC_STATUS:
            if (current_rpc_request.exchangeBuf && !retval) {
                CC_STATUS* arg1 = (CC_STATUS*)current_rpc_request.arg1;
                CC_STATUS* exchangeBuf = (CC_STATUS*)current_rpc_request.exchangeBuf;
                *exchangeBuf = *arg1;
            }
            break;
        case RPC_ACCT_MGR_INFO:
            if (current_rpc_request.exchangeBuf && !retval) {
                ACCT_MGR_INFO* arg1 = (ACCT_MGR_INFO*)current_rpc_request.arg1;
                ACCT_MGR_INFO* exchangeBuf = (ACCT_MGR_INFO*)current_rpc_request.exchangeBuf;
                *exchangeBuf = *arg1;
            }
            break;
        default:
            // We don't support double buffering for other RPC calls
            wxASSERT(current_rpc_request.exchangeBuf == NULL);
            break;
        }
    }

    if (current_rpc_request.resultPtr) {
        // In case post-processing changed retval
        *(current_rpc_request.resultPtr) = retval;
    }

    // We must call ProcessEvent() rather than AddPendingEvent() here to
    // guarantee integrity of data when other events are handled (such as
    // Abort, Suspend/Resume, Show Graphics, Update, Detach, Reset, No
    // New Work, etc.)  Otherwise, if one of those events is pending it
    // might be processed first, and the data in the selected rows may not
    // match the data which the user selected if any rows were added or
    // deleted due to the RPC.
    // The refresh event called here adjusts the selections to fix any
    // such mismatch before other pending events are processed.
    //
    // However, the refresh code may itself request a Demand RPC, which
    // would cause undesirable recursion if we are already waiting for
    // another Demand RPC to complete.  In that case, we defer the refresh
    // until all pending Demand RPCs have been done.
    //
    if (m_bNeedRefresh && !m_bWaitingForRPC) {
        m_bNeedRefresh = false;
        // We must get the frame immediately before using it,
        // since it may have been changed by SetActiveGUI().
        CBOINCBaseFrame* pFrame = wxGetApp().GetFrame();
        if (pFrame) {
            CFrameEvent event(wxEVT_FRAME_REFRESHVIEW, pFrame);
            pFrame->GetEventHandler()->ProcessEvent(event);
        }
    }

    if (m_bNeedTaskBarRefresh && !m_bWaitingForRPC) {
        m_bNeedTaskBarRefresh = false;
        CTaskBarIcon* pTaskbar = wxGetApp().GetTaskBarIcon();
        if (pTaskbar) {
            CTaskbarEvent event(wxEVT_TASKBAR_REFRESH, pTaskbar);
            pTaskbar->ProcessEvent(event);
        }
    }

    if (current_rpc_request.rpcType == RPC_TYPE_ASYNC_WITH_REFRESH_EVENT_LOG_AFTER) {
        CDlgEventLog* eventLog = wxGetApp().GetEventLog();
        if (eventLog) {
            eventLog->OnRefresh();
        }
    }

    current_rpc_request.clear();

    // Start the next RPC request.
    // We can't start this until finished processing the previous RPC's
    // event because the two requests may write into the same buffer.
    if (RPC_requests.size() > 0) {
        // Wait for thread to unlock the mutex with m_pRPC_Thread_Condition->Wait()
        mutexErr = m_pRPC_Thread_Mutex->Lock();  // Blocks until thread unlocks the mutex
        wxASSERT(mutexErr == wxMUTEX_NO_ERROR);

        // Make sure activation is an atomic operation
        RPC_requests[0].isActive = false;
        current_rpc_request = RPC_requests[0];
        current_rpc_request.isActive = true;

        m_pRPC_Thread_Condition->Signal();  // Unblock the thread

        // m_pRPC_Thread_Condition->Wait() will Lock() the mutex upon receiving Signal(),
        // causing it to block again if we still have our lock on the mutex.
        mutexErr = m_pRPC_Thread_Mutex->Unlock();
        wxASSERT(mutexErr == wxMUTEX_NO_ERROR);
    }
}
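// RequestRPC() queues an asynchronous GUI RPC and, if no other RPC is in
// progress, hands it to the RPC thread.  For requests of type
// RPC_TYPE_WAIT_FOR_COMPLETION (demand RPCs) it then blocks the caller,
// showing a cancellable "Please Wait" dialog if the RPC takes longer than
// RPC_WAIT_DLG_DELAY.  It returns the RPC's result for demand RPCs, 0 for
// queued asynchronous requests, or -1 on error or cancellation.
//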
int CMainDocument::RequestRPC(ASYNC_RPC_REQUEST& request, bool hasPriority) {
    std::vector<ASYNC_RPC_REQUEST>::iterator iter;
    int retval = 0;
    int response = wxID_OK;
    wxMutexError mutexErr = wxMUTEX_NO_ERROR;
    long delayTimeRemaining, timeToSleep;
    bool shown = false;

    if (!m_RPCThread) return -1;

    if ((request.rpcType < RPC_TYPE_WAIT_FOR_COMPLETION) ||
        (request.rpcType >= NUM_RPC_TYPES)
    ) {
        wxASSERT(false);
        return -1;
    }

    // If we are quitting, cancel any pending RPCs
    if (request.which_rpc == RPC_QUIT) {
        if (current_rpc_request.isActive) {
            RPC_requests.erase(RPC_requests.begin()+1, RPC_requests.end());
        } else {
            RPC_requests.clear();
        }
    }

    // Check if a duplicate request is already on the queue
    for (iter = RPC_requests.begin(); iter != RPC_requests.end(); ++iter) {
        if (iter->isSameAs(request)) {
            return 0;
        }
    }

    if ((request.rpcType == RPC_TYPE_WAIT_FOR_COMPLETION) && (request.resultPtr == NULL)) {
        request.resultPtr = &retval;
    }

    if (hasPriority) {
        // We may want to set hasPriority for some user-initiated events.
        // Since the user is waiting, insert this at the head of the request queue.
        // As of 8/14/08, hasPriority is never set true, so this hasn't been tested.
        iter = RPC_requests.insert(RPC_requests.begin(), request);
    } else {
        RPC_requests.push_back(request);
    }

    // Start this RPC if no other RPC is already in progress.
    if (RPC_requests.size() == 1) {
        // Wait for thread to unlock the mutex with m_pRPC_Thread_Condition->Wait()
        mutexErr = m_pRPC_Thread_Mutex->Lock();  // Blocks until thread unlocks the mutex
        wxASSERT(mutexErr == wxMUTEX_NO_ERROR);

        // Make sure activation is an atomic operation
        request.isActive = false;
        current_rpc_request = request;
        current_rpc_request.isActive = true;

        m_pRPC_Thread_Condition->Signal();  // Unblock the thread

        // m_pRPC_Thread_Condition->Wait() will Lock() the mutex upon receiving Signal(),
        // causing it to block again if we still have our lock on the mutex.
        mutexErr = m_pRPC_Thread_Mutex->Unlock();
        wxASSERT(mutexErr == wxMUTEX_NO_ERROR);
    }

    // If this is a user-initiated event, wait for completion but show
    // a dialog allowing the user to cancel.
    if (request.rpcType == RPC_TYPE_WAIT_FOR_COMPLETION) {
        // TODO: proper handling if a second user request is received while the first is pending ??
        if (m_bWaitingForRPC) {
            wxLogMessage(wxT("Second user RPC request while another was pending"));
            wxASSERT(false);
            return -1;
        }

        // Don't show the dialog if the RPC completes before RPC_WAIT_DLG_DELAY
        // or while BOINC is minimized
        CBOINCBaseFrame* pFrame = wxGetApp().GetFrame();
        wxStopWatch Dlgdelay = wxStopWatch();
        m_RPCWaitDlg = new AsyncRPCDlg();
        m_bWaitingForRPC = true;

        // Allow RPC_WAIT_DLG_DELAY seconds for the Demand RPC to complete before
        // displaying the "Please Wait" dialog, but keep checking for completion.
        delayTimeRemaining = RPC_WAIT_DLG_DELAY;
        while (true) {
            if (delayTimeRemaining >= 0) {  // Prevent overflow if minimized for a very long time
                delayTimeRemaining = RPC_WAIT_DLG_DELAY - Dlgdelay.Time();
            }

            if (pFrame) {
                shown = pFrame->IsShown();
            } else {
                shown = false;
            }

            if (shown) {
                if (delayTimeRemaining <= 0) break;  // Display the Please Wait dialog
                timeToSleep = delayTimeRemaining;
            } else {
                // Don't show the dialog while the Manager is minimized, but do
                // process events so the user can maximize the Manager.
                //
                // NOTE: CBOINCGUIApp::FilterEvent() discards those events
                // which might cause posting of more RPC requests while
                // we are in this loop, to prevent undesirable recursion.
                // Since the Manager is minimized, we don't have to worry about
                // discarding crucial drawing or command events.
                // The filter does allow the Open Manager menu item from
                // the system tray icon and the wxEVT_RPC_FINISHED event.
                //
                timeToSleep = DELAY_WHEN_MINIMIZED;  // Allow user to maximize the Manager
                wxSafeYield(NULL, true);
            }

            // OnRPCComplete() clears m_bWaitingForRPC if the RPC completed
            if (!m_bWaitingForRPC) {
                return retval;
            }

            mutexErr = m_pRPC_Request_Mutex->Lock();
            wxASSERT(mutexErr == wxMUTEX_NO_ERROR);

            // Simulate handling of CRPCFinishedEvent but don't allow any other
            // events (so no user activity) to prevent undesirable recursion.
            // Since we don't need to filter and discard events, they remain on
            // the queue until it is safe to process them.
            // Allow the RPC thread to run while we wait for it.
            if (!current_rpc_request.isActive) {
                mutexErr = m_pRPC_Request_Mutex->Unlock();
                wxASSERT(mutexErr == wxMUTEX_NO_ERROR);

                HandleCompletedRPC();
                continue;
            }

            // Wait for the RPC thread to wake us.
            // This does the following:
            // (1) Unlocks the mutex and puts the main thread to sleep as an atomic operation.
            // (2) On Signal from the RPC thread: locks the mutex again and wakes the main thread.
            m_pRPC_Request_Condition->WaitTimeout(timeToSleep);

            mutexErr = m_pRPC_Request_Mutex->Unlock();
            wxASSERT(mutexErr == wxMUTEX_NO_ERROR);
        }

        // The Demand RPC has taken longer than RPC_WAIT_DLG_DELAY seconds and the
        // Manager is not minimized, so display the "Please Wait" dialog
        // with a Cancel button.  If the RPC does complete while the dialog
        // is up, HandleCompletedRPC() will call EndModal with wxID_OK.
        //
        // NOTE: the modal dialog permits processing of all events, but
        // CBOINCGUIApp::FilterEvent() blocks those events which might cause
        // posting of more RPC requests while in this dialog, to prevent
        // undesirable recursion.
        //
        if (m_RPCWaitDlg) {
            response = m_RPCWaitDlg->ShowModal();
            // Remember the time the dialog was closed for use by RunPeriodicRPCs()
            m_dtLasAsyncRPCDlgTime = wxDateTime::Now();
            if (response != wxID_OK) {
                // TODO: If the user presses Cancel in the Please Wait dialog but the request
                // has not yet been started, should we just remove it from the queue?
                // If we make that change, should we also add a separate menu item
                // to reset the RPC connection (or does one already exist)?

                retval = -1;
                // If the RPC continues to get data after we return to
                // our caller, it may try to write into a buffer or struct
                // which the caller has already deleted.  To prevent this,
                // we close the socket (disconnect) and kill the RPC thread.
                // This is ugly but necessary.  We must then reconnect and
                // start a new RPC thread.
                if (current_rpc_request.isActive) {
                    current_rpc_request.isActive = false;
                    rpcClient.close();
                    RPC_requests.clear();
                    current_rpc_request.clear();
                    m_bNeedRefresh = false;
                    m_bNeedTaskBarRefresh = false;

                    // We will be reconnected to the same client (if possible) by
                    // CBOINCDialUpManager::OnPoll() and CNetworkConnection::Poll().
                    m_pNetworkConnection->SetStateDisconnected();
                }

                if (response == wxID_EXIT) {
                    pFrame = wxGetApp().GetFrame();
                    wxCommandEvent evt(wxEVT_COMMAND_MENU_SELECTED, wxID_EXIT);
                    s_bSkipExitConfirmation = true;
                    pFrame->GetEventHandler()->AddPendingEvent(evt);
                }
            }

            if (m_RPCWaitDlg) {
                m_RPCWaitDlg->Destroy();
            }
            m_RPCWaitDlg = NULL;
            m_bWaitingForRPC = false;
        }
    }
    return retval;
}
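
// Illustrative sketch only: one way a caller might issue a blocking demand RPC
// through RequestRPC().  The helper name and the caller-supplied buffers are
// hypothetical; actual callers typically use long-lived CMainDocument member
// buffers as arg1 and exchangeBuf so the RPC thread never writes into freed memory.
static int ExampleDemandGetHostInfo(
    CMainDocument* pDoc, HOST_INFO& rpc_buf, HOST_INFO& cached_buf
) {
    ASYNC_RPC_REQUEST request;
    request.clear();
    request.which_rpc = RPC_GET_HOST_INFO;
    request.arg1 = &rpc_buf;            // RPC thread parses the reply into this buffer
    request.exchangeBuf = &cached_buf;  // HandleCompletedRPC() copies the reply into this
    request.rpcType = RPC_TYPE_WAIT_FOR_COMPLETION;

    // Blocks until the RPC finishes, showing a cancellable "Please Wait"
    // dialog if it takes longer than RPC_WAIT_DLG_DELAY.
    return pDoc->RequestRPC(request, false);
}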