void enqueue_result (int req_id, const char ** results, const int argc) { std::string buffer; // how is this legit??!? ZKM formatstr( buffer, "%d", req_id ); for ( int i = 0; i < argc; i++ ) { buffer += ' '; if ( results[i] == NULL ) { buffer += "NULL"; } else { for ( int j = 0; results[i][j] != '\0'; j++ ) { switch ( results[i][j] ) { case ' ': case '\\': case '\r': case '\n': buffer += '\\'; default: buffer += results[i][j]; } } } } handle_results( buffer ); }
void enqueue_result (const std::string &req_id, const char ** results, const int argc) {
    // Same as the int-id overload, but the request ID is already a string:
    // build "<req_id> <arg0> <arg1> ..." and pass it to handle_results().
    std::string line( req_id );
    for ( int idx = 0; idx < argc; idx++ ) {
        line += ' ';
        const char * arg = results[idx];
        if ( arg == NULL ) {
            line += "NULL";
            continue;
        }
        while ( *arg != '\0' ) {
            const char ch = *arg++;
            // Backslash-escape characters that would break tokenization
            // of the line on the receiving side.
            if ( ch == ' ' || ch == '\\' || ch == '\r' || ch == '\n' ) {
                line += '\\';
            }
            line += ch;
        }
    }
    handle_results( line );
}
// Enumerate the given (first, second) name pairs through the MI session and
// stream the results as a JSON list.  For each pair an asynchronous
// MI_Session_EnumerateInstances operation is started and drained via
// handle_results(); a JSON separator is emitted between any two items that
// actually produced output.
// NOTE(review): per the MI API the pair is presumably (namespace, class
// name) — confirm against the callers that populate enumItems.
// Returns MI_RESULT_OK unconditionally (result is never updated below).
MI_Result OMIInterface::Enumerate (
    std::basic_ostream<char_t, traits>& strm,
    std::vector<std::pair <char_t const*, char_t const*> >& enumItems)
{
    typedef std::pair<char_t const*, char_t const*> EnumItem_t;
    MI_Result result = MI_RESULT_OK;
    // True once some previous item wrote output, so we know a separator
    // is needed before the next one.
    bool addSeparator = false;
    strm << JSON_LIST_START;
    for (typename std::vector<EnumItem_t>::iterator pos = enumItems.begin (),
            endPos = enumItems.end ();
        pos != endPos;
        ++pos)
    {
        MI_Uint32 flags = 0;
        MI_Operation operation = MI_OPERATION_NULL;
        // Kick off the enumeration; results are consumed from `operation`
        // by handle_results() below.
        MI_Session_EnumerateInstances (
            &m_Session,
            flags,
            &m_Options,
            pos->first,
            pos->second,
            MI_FALSE,
            NULL,
            &operation);
        if (addSeparator)
        {
            strm << JSON_SEPARATOR;
        }
        // handle_results() returns the number of results written; only
        // emit a separator before the next item if this one produced any.
        addSeparator = (0 < handle_results (strm, &operation));
        // Always close the operation, even when it produced no results.
        MI_Operation_Close (&operation);
    }
    strm << JSON_LIST_END;
    return result;
}
void enqueue() { // Errors in this function can not be handled by opencl_err.hpp // because they require non-standard error handling CAF_LOG_TRACE("command::enqueue()"); this->ref(); // reference held by the OpenCL comand queue cl_event event_k; auto data_or_nullptr = [](const dim_vec& vec) { return vec.empty() ? nullptr : vec.data(); }; // OpenCL expects cl_uint (unsigned int), hence the cast cl_int err = clEnqueueNDRangeKernel( queue_.get(), actor_facade_->kernel_.get(), static_cast<cl_uint>(actor_facade_->config_.dimensions().size()), data_or_nullptr(actor_facade_->config_.offsets()), data_or_nullptr(actor_facade_->config_.dimensions()), data_or_nullptr(actor_facade_->config_.local_dimensions()), static_cast<cl_uint>(mem_in_events_.size()), (mem_in_events_.empty() ? nullptr : mem_in_events_.data()), &event_k ); if (err != CL_SUCCESS) { CAF_LOGMF(CAF_ERROR, "clEnqueueNDRangeKernel: " << get_opencl_error(err)); clReleaseEvent(event_k); this->deref(); return; } else { enqueue_read_buffers(event_k, detail::get_indices(result_buffers_)); cl_event marker; #if defined(__APPLE__) err = clEnqueueMarkerWithWaitList( queue_.get(), static_cast<cl_uint>(mem_out_events_.size()), mem_out_events_.data(), &marker ); #else err = clEnqueueMarker(queue_.get(), &marker); #endif if (err != CL_SUCCESS) { CAF_LOGMF(CAF_ERROR, "clSetEventCallback: " << get_opencl_error(err)); clReleaseEvent(marker); clReleaseEvent(event_k); this->deref(); // callback is not set return; } err = clSetEventCallback(marker, CL_COMPLETE, [](cl_event, cl_int, void* data) { auto cmd = reinterpret_cast<command*>(data); cmd->handle_results(); cmd->deref(); }, this); if (err != CL_SUCCESS) { CAF_LOGMF(CAF_ERROR, "clSetEventCallback: " << get_opencl_error(err)); clReleaseEvent(marker); clReleaseEvent(event_k); this->deref(); // callback is not set return; } err = clFlush(queue_.get()); if (err != CL_SUCCESS) { CAF_LOGMF(CAF_ERROR, "clFlush: " << get_opencl_error(err)); } 
mem_out_events_.push_back(std::move(event_k)); mem_out_events_.push_back(std::move(marker)); } }
void enqueue () { CPPA_LOG_TRACE("command::enqueue()"); this->ref(); // reference held by the OpenCL comand queue cl_int err{0}; cl_event event_k; auto data_or_nullptr = [](const dim_vec& vec) { return vec.empty() ? nullptr : vec.data(); }; err = clEnqueueNDRangeKernel(m_queue.get(), m_actor_facade->m_kernel.get(), m_actor_facade->m_global_dimensions.size(), data_or_nullptr(m_actor_facade->m_global_offsets), data_or_nullptr(m_actor_facade->m_global_dimensions), data_or_nullptr(m_actor_facade->m_local_dimensions), m_events.size(), (m_events.empty() ? nullptr : m_events.data()), &event_k); if (err != CL_SUCCESS) { CPPA_LOGMF(CPPA_ERROR, "clEnqueueNDRangeKernel: " << get_opencl_error(err)); this->deref(); // or can anything actually happen? return; } else { cl_event event_r; err = clEnqueueReadBuffer(m_queue.get(), m_arguments.back().get(), CL_FALSE, 0, sizeof(typename R::value_type) * m_result_size, m_result.data(), 1, &event_k, &event_r); if (err != CL_SUCCESS) { throw std::runtime_error("clEnqueueReadBuffer: " + get_opencl_error(err)); this->deref(); // failed to enqueue command return; } err = clSetEventCallback(event_r, CL_COMPLETE, [](cl_event, cl_int, void* data) { auto cmd = reinterpret_cast<command*>(data); cmd->handle_results(); cmd->deref(); }, this); if (err != CL_SUCCESS) { CPPA_LOGMF(CPPA_ERROR, "clSetEventCallback: " << get_opencl_error(err)); this->deref(); // callback is not set return; } err = clFlush(m_queue.get()); if (err != CL_SUCCESS) { CPPA_LOGMF(CPPA_ERROR, "clFlush: " << get_opencl_error(err)); } m_events.push_back(std::move(event_k)); m_events.push_back(std::move(event_r)); } }
void process_request(char* code_sign_key) { PLATFORM* platform; int retval; double last_rpc_time, x; struct tm *rpc_time_tm; bool ok_to_send_work = !config.dont_send_jobs; bool have_no_work = false; char buf[256]; HOST initial_host; unsigned int i; time_t t; memset(&g_reply->wreq, 0, sizeof(g_reply->wreq)); // if client has sticky files we don't need any more, tell it // do_file_delete_regex(); // if different major version of BOINC, just send a message // if (wrong_core_client_version() || unacceptable_os() || unacceptable_cpu() ) { ok_to_send_work = false; } // if no jobs reported and none to send, return without accessing DB // if (!ok_to_send_work && !g_request->results.size()) { return; } warn_user_if_core_client_upgrade_scheduled(); if (requesting_work()) { if (config.locality_scheduling || config.locality_scheduler_fraction || config.enable_assignment) { have_no_work = false; } else { lock_sema(); have_no_work = ssp->no_work(g_pid); if (have_no_work) { g_wreq->no_jobs_available = true; } unlock_sema(); } } // If: // - there's no work, // - a config flag is set, // - client isn't returning results, // - this isn't an initial RPC, // - client is requesting work // then return without accessing the DB. 
// This is an efficiency hack for when servers are overloaded // if ( have_no_work && config.nowork_skip && requesting_work() && (g_request->results.size() == 0) && (g_request->hostid != 0) ) { g_reply->insert_message("No work available", "low"); g_reply->set_delay(DELAY_NO_WORK_SKIP); if (!config.msg_to_host && !config.enable_vda) { log_messages.printf(MSG_NORMAL, "No work - skipping DB access\n"); return; } } // FROM HERE ON DON'T RETURN; "goto leave" instead // (because ssp->no_work() may have tagged an entry in the work array // with our process ID) retval = open_database(); if (retval) { send_error_message("Server can't open database", 3600); g_reply->project_is_down = true; goto leave; } retval = authenticate_user(); if (retval) goto leave; if (g_reply->user.id == 0) { log_messages.printf(MSG_CRITICAL, "No user ID!\n"); } initial_host = g_reply->host; g_reply->host.rpc_seqno = g_request->rpc_seqno; g_reply->nucleus_only = false; log_request(); // is host blacklisted? // if (g_reply->host._max_results_day == -1) { send_error_message("Not accepting requests from this host", 86400); goto leave; } if (strlen(config.sched_lockfile_dir)) { int pid_with_lock = lock_sched(); if (pid_with_lock > 0) { log_messages.printf(MSG_CRITICAL, "Another scheduler instance [PID=%d] is running for this host\n", pid_with_lock ); } else if (pid_with_lock) { log_messages.printf(MSG_CRITICAL, "Error acquiring lock for [HOST#%d]\n", g_reply->host.id ); } if (pid_with_lock) { send_error_message( "Another scheduler instance is running for this host", 60 ); goto leave; } } // in deciding whether it's a new day, // add a random factor (based on host ID) // to smooth out network traffic over the day // retval = rand(); srand(g_reply->host.id); x = drand()*86400; srand(retval); last_rpc_time = g_reply->host.rpc_time; t = (time_t)(g_reply->host.rpc_time + x); rpc_time_tm = localtime(&t); g_request->last_rpc_dayofyear = rpc_time_tm->tm_yday; t = time(0); g_reply->host.rpc_time = t; t += 
(time_t)x; rpc_time_tm = localtime(&t); g_request->current_rpc_dayofyear = rpc_time_tm->tm_yday; retval = modify_host_struct(g_reply->host); // write time stats to disk if present // if (g_request->have_time_stats_log) { write_time_stats_log(); } // look up the client's platform(s) in the DB // platform = ssp->lookup_platform(g_request->platform.name); if (platform) g_request->platforms.list.push_back(platform); // if primary platform is anonymous, ignore alternate platforms // if (strcmp(g_request->platform.name, "anonymous")) { for (i=0; i<g_request->alt_platforms.size(); i++) { platform = ssp->lookup_platform(g_request->alt_platforms[i].name); if (platform) g_request->platforms.list.push_back(platform); } } if (g_request->platforms.list.size() == 0) { sprintf(buf, "%s %s", _("This project doesn't support computers of type"), g_request->platform.name ); g_reply->insert_message(buf, "notice"); log_messages.printf(MSG_CRITICAL, "[HOST#%d] platform '%s' not found\n", g_reply->host.id, g_request->platform.name ); g_reply->set_delay(DELAY_PLATFORM_UNSUPPORTED); goto leave; } handle_global_prefs(); read_host_app_versions(); update_n_jobs_today(); handle_results(); handle_file_xfer_results(); if (config.enable_vda) { handle_vda(); } // Do this before resending lost jobs // if (bad_install_type()) { ok_to_send_work = false; } if (!requesting_work()) { ok_to_send_work = false; } send_work_setup(); if (g_request->have_other_results_list) { if (ok_to_send_work && (config.resend_lost_results || g_wreq->resend_lost_results) && !g_request->results_truncated ) { if (resend_lost_work()) { ok_to_send_work = false; } } if (config.send_result_abort) { send_result_abort(); } } if (requesting_work()) { if (!send_code_sign_key(code_sign_key)) { ok_to_send_work = false; } if (have_no_work) { if (config.debug_send) { log_messages.printf(MSG_NORMAL, "[send] No jobs in shmem cache\n" ); } } // if last RPC was within config.min_sendwork_interval, don't send work // if (!have_no_work && 
ok_to_send_work) { if (config.min_sendwork_interval) { double diff = dtime() - last_rpc_time; if (diff < config.min_sendwork_interval) { ok_to_send_work = false; log_messages.printf(MSG_NORMAL, "Not sending work - last request too recent: %f\n", diff ); sprintf(buf, "Not sending work - last request too recent: %d sec", (int)diff ); g_reply->insert_message(buf, "low"); // the 1.01 is in case client's clock // is slightly faster than ours // g_reply->set_delay(1.01*config.min_sendwork_interval); } } if (ok_to_send_work) { send_work(); } } if (g_wreq->no_jobs_available) { g_reply->insert_message("Project has no tasks available", "low"); } } handle_msgs_from_host(); if (config.msg_to_host) { handle_msgs_to_host(); } update_host_record(initial_host, g_reply->host, g_reply->user); write_host_app_versions(); leave: if (!have_no_work) { ssp->restore_work(g_pid); } }