Example #1
// Handle the reply from a scheduler
//
int CLIENT_STATE::handle_scheduler_reply(
    PROJECT* project, char* scheduler_url
) {
    SCHEDULER_REPLY sr;
    FILE* f;
    int retval;
    unsigned int i;
    bool signature_valid, update_global_prefs=false, update_project_prefs=false;
    char buf[1024], filename[256];
    std::string old_gui_urls = project->gui_urls;
    PROJECT* p2;
    vector<RESULT*> new_results;

    project->last_rpc_time = now;

    if (requested_work()) {
        had_or_requested_work = true;
    }

    get_sched_reply_filename(*project, filename, sizeof(filename));

    f = fopen(filename, "r");
    if (!f) return ERR_FOPEN;
    retval = sr.parse(f, project);
    fclose(f);
    if (retval) return retval;

    if (log_flags.sched_ops) {
        if (requested_work()) {
            sprintf(buf, ": got %d new tasks", (int)sr.results.size());
        } else {
            strcpy(buf, "");
        }
        msg_printf(project, MSG_INFO, "Scheduler request completed%s", buf);
    }
    if (log_flags.sched_op_debug) {
        if (sr.scheduler_version) {
            msg_printf(project, MSG_INFO,
                "[sched_op] Server version %d",
                sr.scheduler_version
            );
        }
    }

    // check that master URL is correct
    //
    if (strlen(sr.master_url)) {
        canonicalize_master_url(sr.master_url);
        string url1 = sr.master_url;
        string url2 = project->master_url;
        downcase_string(url1);
        downcase_string(url2);
        if (url1 != url2) {
            p2 = lookup_project(sr.master_url);
            if (p2) {
                msg_printf(project, MSG_USER_ALERT,
                    "You are attached to this project twice.  Please remove projects named %s, then add %s",
                    project->project_name,
                    sr.master_url
                );
            } else {
                msg_printf(project, MSG_INFO,
                    _("You used the wrong URL for this project.  When convenient, remove this project, then add %s"),
                    sr.master_url
                );
            }
        }
    }

    // make sure we don't already have a project of same name
    //
    bool dup_name = false;
    for (i=0; i<projects.size(); i++) {
        p2 = projects[i];
        if (project == p2) continue;
        if (!strcmp(p2->project_name, project->project_name)) {
            dup_name = true;
            break;
        }
    }
    if (dup_name) {
        msg_printf(project, MSG_INFO,
            "Already attached to a project named %s (possibly with wrong URL)",
            project->project_name
        );
        msg_printf(project, MSG_INFO,
            "Consider detaching this project, then trying again"
        );
    }

    // show messages from server
    //
    for (i=0; i<sr.messages.size(); i++) {
        USER_MESSAGE& um = sr.messages[i];
        int prio = (!strcmp(um.priority.c_str(), "notice"))?MSG_SCHEDULER_ALERT:MSG_INFO;
        string_substitute(um.message.c_str(), buf, sizeof(buf), "%", "%%");
        msg_printf(project, prio, "%s", buf);
    }

    if (log_flags.sched_op_debug && sr.request_delay) {
        msg_printf(project, MSG_INFO,
            "Project requested delay of %.0f seconds", sr.request_delay
        );
    }

    // if project is down, return error (so that we back off)
    // and don't do anything else
    //
    if (sr.project_is_down) {
        if (sr.request_delay) {
            double x = now + sr.request_delay;
            project->set_min_rpc_time(x, "project is down");
        }
        return ERR_PROJECT_DOWN;
    }

    // if the scheduler reply includes global preferences,
    // insert extra elements, write to disk, and parse
    //
    if (sr.global_prefs_xml) {
        // skip this if we have host-specific prefs
        // and we're talking to an old scheduler
        //
        if (!global_prefs.host_specific || sr.scheduler_version >= 507) {
            retval = save_global_prefs(
                sr.global_prefs_xml, project->master_url, scheduler_url
            );
            if (retval) {
                return retval;
            }
            update_global_prefs = true;
        } else {
            if (log_flags.sched_op_debug) {
                msg_printf(project, MSG_INFO,
                    "ignoring prefs from old server; we have host-specific prefs"
                );
            }
        }
    }

    // see if we have a new venue from this project
    // (this must go AFTER the above, since otherwise
    // global_prefs_source_project() is meaningless)
    //
    if (strcmp(project->host_venue, sr.host_venue)) {
        safe_strcpy(project->host_venue, sr.host_venue);
        msg_printf(project, MSG_INFO, "New computer location: %s", sr.host_venue);
        update_project_prefs = true;
        if (project == global_prefs_source_project()) {
            strcpy(main_host_venue, sr.host_venue);
            update_global_prefs = true;
        }
    }

    if (update_global_prefs) {
        read_global_prefs();
    }

    // deal with project preferences (should always be there)
    // If they've changed, write to account file,
    // then parse to get our venue, and pass to running apps
    //
    if (sr.project_prefs_xml) {
        if (strcmp(project->project_prefs.c_str(), sr.project_prefs_xml)) {
            project->project_prefs = string(sr.project_prefs_xml);
            update_project_prefs = true;
        }
    }

    // the account file has GUI URLs and project prefs.
    // rewrite if either of these has changed
    //
    if (project->gui_urls != old_gui_urls || update_project_prefs) {
        retval = project->write_account_file();
        if (retval) {
            msg_printf(project, MSG_INTERNAL_ERROR,
                "Can't write account file: %s", boincerror(retval)
            );
            return retval;
        }
    }

    if (update_project_prefs) {
        project->parse_account_file();
        if (strlen(project->host_venue)) {
            project->parse_account_file_venue();
        }
        project->parse_preferences_for_user_files();
        active_tasks.request_reread_prefs(project);
    }

    // if the scheduler reply includes a code-signing key,
    // accept it if we don't already have one from the project.
    // Otherwise verify its signature, using the key we already have.
    //

    if (sr.code_sign_key) {
        if (!strlen(project->code_sign_key)) {
            safe_strcpy(project->code_sign_key, sr.code_sign_key);
        } else {
            if (sr.code_sign_key_signature) {
                retval = check_string_signature2(
                    sr.code_sign_key, sr.code_sign_key_signature,
                    project->code_sign_key, signature_valid
                );
                if (!retval && signature_valid) {
                    safe_strcpy(project->code_sign_key, sr.code_sign_key);
                } else {
                    msg_printf(project, MSG_INTERNAL_ERROR,
                        "New code signing key doesn't validate"
                    );
                }
            } else {
                msg_printf(project, MSG_INTERNAL_ERROR,
                    "Missing code sign key signature"
                );
            }
        }
    }

    // copy new entities to client state
    //
    for (i=0; i<sr.apps.size(); i++) {
        APP* app = lookup_app(project, sr.apps[i].name);
        if (app) {
            strcpy(app->user_friendly_name, sr.apps[i].user_friendly_name);
        } else {
            app = new APP;
            *app = sr.apps[i];
            retval = link_app(project, app);
            if (retval) {
                msg_printf(project, MSG_INTERNAL_ERROR,
                    "Can't handle application %s in scheduler reply", app->name
                );
                delete app;
            } else {
                apps.push_back(app);
            }
        }
    }
    FILE_INFO* fip;
    for (i=0; i<sr.file_infos.size(); i++) {
        fip = lookup_file_info(project, sr.file_infos[i].name);
        if (fip) {
            fip->merge_info(sr.file_infos[i]);
        } else {
            fip = new FILE_INFO;
            *fip = sr.file_infos[i];
            retval = link_file_info(project, fip);
            if (retval) {
                msg_printf(project, MSG_INTERNAL_ERROR,
                    "Can't handle file %s in scheduler reply", fip->name
                );
                delete fip;
            } else {
                file_infos.push_back(fip);
            }
        }
    }
    for (i=0; i<sr.file_deletes.size(); i++) {
        fip = lookup_file_info(project, sr.file_deletes[i].c_str());
        if (fip) {
            if (log_flags.file_xfer_debug) {
                msg_printf(project, MSG_INFO,
                    "[file_xfer_debug] Got server request to delete file %s",
                    fip->name
                );
            }
            fip->sticky = false;
        }
    }
    for (i=0; i<sr.app_versions.size(); i++) {
        if (project->anonymous_platform) {
            msg_printf(project, MSG_INTERNAL_ERROR,
                "App version returned from anonymous platform project; ignoring"
            );
            continue;
        }
        APP_VERSION& avpp = sr.app_versions[i];
        if (strlen(avpp.platform) == 0) {
            strcpy(avpp.platform, get_primary_platform());
        } else {
            if (!is_supported_platform(avpp.platform)) {
                msg_printf(project, MSG_INTERNAL_ERROR,
                    "App version has unsupported platform %s", avpp.platform
                );
                continue;
            }
        }
        if (avpp.missing_coproc) {
            msg_printf(project, MSG_INTERNAL_ERROR,
                "App version uses non-existent %s GPU",
                avpp.missing_coproc_name
            );
        }
        APP* app = lookup_app(project, avpp.app_name);
        if (!app) {
            msg_printf(project, MSG_INTERNAL_ERROR,
                "Missing app %s", avpp.app_name
            );
            continue;
        }
        APP_VERSION* avp = lookup_app_version(
            app, avpp.platform, avpp.version_num, avpp.plan_class
        );
        if (avp) {
            // update performance-related info;
            // generally this shouldn't change,
            // but if it does it's better to use the new stuff
            //
            avp->avg_ncpus = avpp.avg_ncpus;
            avp->max_ncpus = avpp.max_ncpus;
            avp->flops = avpp.flops;
            strcpy(avp->cmdline, avpp.cmdline);
            avp->gpu_usage = avpp.gpu_usage;
            strlcpy(avp->api_version, avpp.api_version, sizeof(avp->api_version));
            avp->dont_throttle = avpp.dont_throttle;
            avp->needs_network = avpp.needs_network;

            // if we had download failures, clear them
            //
            avp->clear_errors();
            continue;
        }
        avp = new APP_VERSION;
        *avp = avpp;
        retval = link_app_version(project, avp);
        if (retval) {
            delete avp;
            continue;
        }
        app_versions.push_back(avp);
    }
    for (i=0; i<sr.workunits.size(); i++) {
        if (lookup_workunit(project, sr.workunits[i].name)) continue;
        WORKUNIT* wup = new WORKUNIT;
        *wup = sr.workunits[i];
        wup->project = project;
        retval = link_workunit(project, wup);
        if (retval) {
            msg_printf(project, MSG_INTERNAL_ERROR,
                "Can't handle task %s in scheduler reply", wup->name
            );
            delete wup;
            continue;
        }
        wup->clear_errors();
        workunits.push_back(wup);
    }
    double est_rsc_runtime[MAX_RSC];
    for (int j=0; j<coprocs.n_rsc; j++) {
        est_rsc_runtime[j] = 0;
    }
    for (i=0; i<sr.results.size(); i++) {
        if (lookup_result(project, sr.results[i].name)) {
            msg_printf(project, MSG_INTERNAL_ERROR,
                "Already have task %s\n", sr.results[i].name
            );
            continue;
        }
        RESULT* rp = new RESULT;
        *rp = sr.results[i];
        retval = link_result(project, rp);
        if (retval) {
            msg_printf(project, MSG_INTERNAL_ERROR,
                "Can't handle task %s in scheduler reply", rp->name
            );
            delete rp;
            continue;
        }
        if (strlen(rp->platform) == 0) {
            strcpy(rp->platform, get_primary_platform());
            rp->version_num = latest_version(rp->wup->app, rp->platform);
        }
        rp->avp = lookup_app_version(
            rp->wup->app, rp->platform, rp->version_num, rp->plan_class
        );
        if (!rp->avp) {
            msg_printf(project, MSG_INTERNAL_ERROR,
                "No app version found for app %s platform %s ver %d class %s; discarding %s",
                rp->wup->app->name, rp->platform, rp->version_num, rp->plan_class, rp->name
            );
            delete rp;
            continue;
        }
        if (rp->avp->missing_coproc) {
            msg_printf(project, MSG_INTERNAL_ERROR,
                "Missing coprocessor for task %s; aborting", rp->name
            );
            rp->abort_inactive(EXIT_MISSING_COPROC);
        } else {
            rp->set_state(RESULT_NEW, "handle_scheduler_reply");
            int rt = rp->avp->gpu_usage.rsc_type;
            if (rt > 0) {
                est_rsc_runtime[rt] += rp->estimated_runtime();
                gpus_usable = true;
                    // trigger a check of whether GPU is actually usable
            } else {
                est_rsc_runtime[0] += rp->estimated_runtime();
            }
        }
        rp->wup->version_num = rp->version_num;
        rp->received_time = now;
        new_results.push_back(rp);
        results.push_back(rp);
    }
    sort_results();

    if (log_flags.sched_op_debug) {
        if (sr.results.size()) {
            for (int j=0; j<coprocs.n_rsc; j++) {
                msg_printf(project, MSG_INFO,
                    "[sched_op] estimated total %s task duration: %.0f seconds",
                    rsc_name(j),
                    est_rsc_runtime[j]/time_stats.availability_frac(j)
                );
            }
        }
    }

    // update records for ack'ed results
    //
    for (i=0; i<sr.result_acks.size(); i++) {
        if (log_flags.sched_op_debug) {
            msg_printf(project, MSG_INFO,
                "[sched_op] handle_scheduler_reply(): got ack for task %s\n",
                sr.result_acks[i].name
            );
        }
        RESULT* rp = lookup_result(project, sr.result_acks[i].name);
        if (rp) {
            rp->got_server_ack = true;
        } else {
            msg_printf(project, MSG_INTERNAL_ERROR,
                "Got ack for task %s, but can't find it", sr.result_acks[i].name
            );
        }
    }

    // handle result abort requests
    //
    for (i=0; i<sr.result_abort.size(); i++) {
        RESULT* rp = lookup_result(project, sr.result_abort[i].name);
        if (rp) {
            ACTIVE_TASK* atp = lookup_active_task_by_result(rp);
            if (atp) {
                atp->abort_task(EXIT_ABORTED_BY_PROJECT,
                    "aborted by project - no longer usable"
                );
            } else {
                rp->abort_inactive(EXIT_ABORTED_BY_PROJECT);
            }
        } else {
            msg_printf(project, MSG_INTERNAL_ERROR,
                "Server requested abort of unknown task %s",
                sr.result_abort[i].name
            );
        }
    }
    for (i=0; i<sr.result_abort_if_not_started.size(); i++) {
        RESULT* rp = lookup_result(project, sr.result_abort_if_not_started[i].name);
        if (!rp) {
            msg_printf(project, MSG_INTERNAL_ERROR,
                "Server requested conditional abort of unknown task %s",
                sr.result_abort_if_not_started[i].name
            );
            continue;
        }
        if (rp->not_started) {
            rp->abort_inactive(EXIT_ABORTED_BY_PROJECT);
        }
    }

    // remove acked trickle files
    //
    if (sr.message_ack) {
        remove_trickle_files(project);
    }
    if (sr.send_full_workload) {
        project->send_full_workload = true;
    }
    project->dont_use_dcf = sr.dont_use_dcf;
    project->send_time_stats_log = sr.send_time_stats_log;
    project->send_job_log = sr.send_job_log;
    project->trickle_up_pending = false;

    // The project returns a hostid only if it has created a new host record.
    // In that case reset RPC seqno
    //
    if (sr.hostid) {
        if (project->hostid) {
            // if we already have a host ID for this project,
            // we must have sent it a stale seqno,
            // which usually means our state file was copied from another host.
            // So generate a new host CPID.
            //
            generate_new_host_cpid();
            msg_printf(project, MSG_INFO,
                "Generated new computer cross-project ID: %s",
                host_info.host_cpid
            );
        }
        //msg_printf(project, MSG_INFO, "Changing host ID from %d to %d", project->hostid, sr.hostid);
        project->hostid = sr.hostid;
        project->rpc_seqno = 0;
    }

#ifdef ENABLE_AUTO_UPDATE
    if (sr.auto_update.present) {
        if (!sr.auto_update.validate_and_link(project)) {
            auto_update = sr.auto_update;
        }
    }
#endif

    project->project_files = sr.project_files;
    project->link_project_files();
    project->create_project_file_symlinks();

    if (log_flags.state_debug) {
        msg_printf(project, MSG_INFO,
            "[state] handle_scheduler_reply(): State after handle_scheduler_reply():"
        );
        print_summary();
    }

    // the following must precede the backoff and request_delay checks,
    // since it overrides them
    //
    if (sr.next_rpc_delay) {
        project->next_rpc_time = now + sr.next_rpc_delay;
    } else {
        project->next_rpc_time = 0;
    }

    work_fetch.handle_reply(project, &sr, new_results);

    project->nrpc_failures = 0;
    project->min_rpc_time = 0;

    if (sr.request_delay) {
        double x = now + sr.request_delay;
        project->set_min_rpc_time(x, "requested by project");
    }

    if (sr.got_rss_feeds) {
        handle_sr_feeds(sr.sr_feeds, project);
    }

    update_trickle_up_urls(project, sr.trickle_up_urls);

    // garbage collect in case the project sent us some irrelevant FILE_INFOs;
    // avoid starting transfers for them
    //
    gstate.garbage_collect_always();

    return 0;
}
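
For orientation, here is a hypothetical call site for the function above; the wrapper name handle_rpc_completion and the surrounding control flow are assumptions for illustration, not the client's actual RPC state machine.

// Minimal sketch, assuming the scheduler RPC has already completed and the
// reply file is on disk. Only handle_scheduler_reply() comes from the code
// above; everything else here is illustrative.
void handle_rpc_completion(CLIENT_STATE& cs, PROJECT* project, char* scheduler_url) {
    int retval = cs.handle_scheduler_reply(project, scheduler_url);
    switch (retval) {
    case 0:
        // reply processed; any new tasks are now in cs.results
        break;
    case ERR_PROJECT_DOWN:
        // handle_scheduler_reply() already set the project's backoff
        break;
    default:
        msg_printf(project, MSG_INFO,
            "Couldn't handle scheduler reply: %s", boincerror(retval)
        );
        break;
    }
}
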
Example #2
// simulate trying to do an RPC;
// return true if we actually did one
//
bool CLIENT_STATE::simulate_rpc(PROJECT* p) {
    char buf[256], buf2[256];
    vector<IP_RESULT> ip_results;
    vector<RESULT*> new_results;

    bool avail;
    if (p->last_rpc_time) {
        double delta = now - p->last_rpc_time;
        avail = p->available.sample(delta);
    } else {
        avail = p->available.sample(0);
    }
    p->last_rpc_time = now;
    if (!avail) {
        sprintf(buf, "RPC to %s skipped - project down<br>", p->project_name);
        html_msg += buf;
        msg_printf(p, MSG_INFO, "RPC skipped: project down");
        gstate.scheduler_op->project_rpc_backoff(p, "project down");
        p->master_url_fetch_pending = false;
        return false;
    }

    // save request params for WORK_FETCH::handle_reply
    //
    double save_cpu_req_secs = rsc_work_fetch[0].req_secs;
    for (int i=1; i<coprocs.n_rsc; i++) {
        COPROC& cp = coprocs.coprocs[i];
        if (!strcmp(cp.type, "NVIDIA")) {
            coprocs.nvidia.req_secs = rsc_work_fetch[i].req_secs;
        }
        if (!strcmp(cp.type, "ATI")) {
            coprocs.ati.req_secs = rsc_work_fetch[i].req_secs;
        }
        if (!strcmp(cp.type, "intel_gpu")) {
            coprocs.intel_gpu.req_secs = rsc_work_fetch[i].req_secs;
        }
    }

    if (!server_uses_workload) {
        for (int i=0; i<coprocs.n_rsc; i++) {
            rsc_work_fetch[i].estimated_delay = rsc_work_fetch[i].busy_time_estimator.get_busy_time();
        }
    }

    for (unsigned int i=0; i<app_versions.size(); i++) {
        app_versions[i]->dont_use = false;
    }

    work_fetch.request_string(buf2, sizeof(buf2));
    sprintf(buf, "RPC to %s: %s<br>", p->project_name, buf2);
    html_msg += buf;

    msg_printf(p, MSG_INFO, "RPC: %s", buf2);

    handle_completed_results(p);

    if (server_uses_workload) {
        get_workload(ip_results);
    }

    bool sent_something = false;
    while (!existing_jobs_only) {
        vector<APP*> apps;
        get_apps_needing_work(p, apps);
        if (apps.empty()) break;
        RESULT* rp = new RESULT;
        WORKUNIT* wup = new WORKUNIT;
        make_job(p, wup, rp, apps);

        double et = wup->rsc_fpops_est / rp->avp->flops;
        if (server_uses_workload) {
            IP_RESULT c(rp->name, rp->report_deadline-now, et);
            if (check_candidate(c, ncpus, ip_results)) {
                ip_results.push_back(c);
            } else {
                msg_printf(p, MSG_INFO, "job for %s misses deadline sim\n", rp->app->name);
                APP_VERSION* avp = rp->avp;
                delete rp;
                delete wup;
                avp->dont_use = true;
                continue;
            }
        } else {
            double est_delay = get_estimated_delay(rp);
            if (est_delay + et > wup->app->latency_bound) {
                msg_printf(p, MSG_INFO,
                    "job for %s misses deadline approx: del %f + et %f > %f\n",
                    rp->app->name,
                    est_delay, et, wup->app->latency_bound
                );
                APP_VERSION* avp = rp->avp;
                delete rp;
                delete wup;
                avp->dont_use = true;
                continue;
            }
        }

        sent_something = true;
        rp->set_state(RESULT_FILES_DOWNLOADED, "simulate_rpc");
        results.push_back(rp);
        new_results.push_back(rp);
#if 0
        sprintf(buf, "got job %s: CPU time %.2f, deadline %s<br>",
            rp->name, rp->final_cpu_time, time_to_string(rp->report_deadline)
        );
        html_msg += buf;
#endif
        decrement_request(rp);
    }

    njobs += (int)new_results.size();
    msg_printf(0, MSG_INFO, "Got %lu tasks", new_results.size());
    sprintf(buf, "got %lu tasks<br>", new_results.size());
    html_msg += buf;

    SCHEDULER_REPLY sr;
    rsc_work_fetch[0].req_secs = save_cpu_req_secs;
    work_fetch.handle_reply(p, &sr, new_results);
    p->nrpc_failures = 0;
    p->sched_rpc_pending = 0;
    //p->min_rpc_time = now + 900;
    p->min_rpc_time = now;
    if (sent_something) {
        request_schedule_cpus("simulate_rpc");
        request_work_fetch("simulate_rpc");
    }
    sim_results.nrpcs++;
    return true;
}
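
As a rough sketch of how this might be driven, the fragment below walks the project list and simulates an RPC for each project that appears due. The readiness test is an assumption for illustration; the real simulator picks projects through its work-fetch machinery.

// Hypothetical driver, assuming gstate, PROJECT::sched_rpc_pending and
// PROJECT::min_rpc_time as used elsewhere in this file.
void simulate_pending_rpcs() {
    for (unsigned int i=0; i<gstate.projects.size(); i++) {
        PROJECT* p = gstate.projects[i];
        if (p->sched_rpc_pending || p->min_rpc_time <= gstate.now) {
            gstate.simulate_rpc(p);     // returns false if the project was down
        }
    }
}
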
Example #3
// Handle a task that has finished.
// Mark its output files as present, and delete scratch files.
// Don't delete input files because they might be shared with other WUs.
// Update state of result record.
//
int CLIENT_STATE::app_finished(ACTIVE_TASK& at) {
    RESULT* rp = at.result;
    bool had_error = false;

#ifndef SIM
    FILE_INFO* fip;
    unsigned int i;
    char path[MAXPATHLEN];
    int retval;
    double size;

    // scan the output files, check if missing or too big.
    // Don't bother doing this if result was aborted via GUI or by project
    //
    switch (rp->exit_status) {
    case EXIT_ABORTED_VIA_GUI:
    case EXIT_ABORTED_BY_PROJECT:
        break;
    default:
        for (i=0; i<rp->output_files.size(); i++) {
            FILE_REF& fref = rp->output_files[i];
            fip = fref.file_info;
            if (fip->uploaded) continue;
            get_pathname(fip, path, sizeof(path));
            retval = file_size(path, size);
            if (retval) {
                if (fref.optional) {
                    fip->upload_urls.clear();
                    continue;
                }

                // an output file is unexpectedly absent.
                //
                fip->status = retval;
                had_error = true;
                msg_printf(
                    rp->project, MSG_INFO,
                    "Output file %s for task %s absent",
                    fip->name, rp->name
                );
            } else if (size > fip->max_nbytes) {
                // Note: this is only checked when the application finishes.
                // The total disk space is checked while the application is running.
                //
                msg_printf(
                    rp->project, MSG_INFO,
                    "Output file %s for task %s exceeds size limit.",
                    fip->name, rp->name
                );
                msg_printf(
                    rp->project, MSG_INFO,
                    "File size: %f bytes.  Limit: %f bytes",
                    size, fip->max_nbytes
                );

                fip->delete_file();
                fip->status = ERR_FILE_TOO_BIG;
                had_error = true;
            } else {
                if (!fip->uploadable() && !fip->sticky) {
                    fip->delete_file();     // sets status to NOT_PRESENT
                } else {
                    retval = 0;
                    if (fip->gzip_when_done) {
                        retval = fip->gzip();
                    }
                    if (!retval) {
                        retval = md5_file(path, fip->md5_cksum, fip->nbytes);
                    }
                    if (retval) {
                        fip->status = retval;
                        had_error = true;
                    } else {
                        fip->status = FILE_PRESENT;
                    }
                }
            }
        }
    }
#endif

    if (rp->exit_status != 0) {
        had_error = true;
    }

    if (had_error) {
        switch (rp->exit_status) {
        case EXIT_ABORTED_VIA_GUI:
        case EXIT_ABORTED_BY_PROJECT:
            rp->set_state(RESULT_ABORTED, "CS::app_finished");
            break;
        default:
            rp->set_state(RESULT_COMPUTE_ERROR, "CS::app_finished");
        }
        rp->project->njobs_error++;
    } else {
#ifdef SIM
        rp->set_state(RESULT_FILES_UPLOADED, "CS::app_finished");
        rp->set_ready_to_report();
        rp->completed_time = now;
#else
        rp->set_state(RESULT_FILES_UPLOADING, "CS::app_finished");
        rp->append_log_record();
#endif
        rp->project->update_duration_correction_factor(&at);
        rp->project->njobs_success++;
    }

    double elapsed_time = now - rec_interval_start;
    work_fetch.accumulate_inst_sec(&at, elapsed_time);

    rp->project->pwf.request_if_idle_and_uploading = true;
        // set this to allow work fetch if idle instance,
        // even before upload finishes

    return 0;
}
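
To show where this might be invoked, the sketch below hands every exited task to app_finished(). The members active_tasks.active_tasks, task_state() and PROCESS_EXITED are assumptions about the surrounding client state, not part of the example above.

// Hedged sketch of a possible call site.
void handle_exited_tasks(CLIENT_STATE& cs) {
    for (unsigned int i=0; i<cs.active_tasks.active_tasks.size(); i++) {
        ACTIVE_TASK* atp = cs.active_tasks.active_tasks[i];
        if (atp->task_state() == PROCESS_EXITED) {   // assumed state check
            cs.app_finished(*atp);   // marks outputs, updates the RESULT
        }
    }
}
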
Example #4
void do_client_simulation() {
    char buf[256], buf2[256];
    int retval;
    FILE* f;

    sprintf(buf, "%s%s", infile_prefix, CONFIG_FILE);
    cc_config.defaults();
    read_config_file(true, buf);

    log_flags.init();
    sprintf(buf, "%s%s", outfile_prefix, "log_flags.xml");
    f = fopen(buf, "r");
    if (f) {
        MIOFILE mf;
        mf.init_file(f);
        XML_PARSER xp(&mf);
        xp.get_tag();   // skip open tag
        log_flags.parse(xp);
        fclose(f);
    }

    gstate.add_platform("client simulator");
    sprintf(buf, "%s%s", infile_prefix, STATE_FILE_NAME);
    if (!boinc_file_exists(buf)) {
        fprintf(stderr, "No client state file\n");
        exit(1);
    }
    retval = gstate.parse_state_file_aux(buf);
    if (retval) {
        fprintf(stderr, "state file parse error %d\n", retval);
        exit(1);
    }

    // if tasks have pending transfers, mark as completed
    //
    for (unsigned int i=0; i<gstate.results.size(); i++) {
        RESULT* rp = gstate.results[i];
        if (rp->state() < RESULT_FILES_DOWNLOADED) {
            rp->set_state(RESULT_FILES_DOWNLOADED, "init");
        } else if (rp->state() == RESULT_FILES_UPLOADING) {
            rp->set_state(RESULT_FILES_UPLOADED, "init");
        }
    }

    check_app_config(infile_prefix);
    show_app_config();
    cc_config.show();
    log_flags.show();

    sprintf(buf, "%s%s", infile_prefix, GLOBAL_PREFS_FILE_NAME);
    sprintf(buf2, "%s%s", infile_prefix, GLOBAL_PREFS_OVERRIDE_FILE);
    gstate.read_global_prefs(buf, buf2);
    fprintf(index_file,
        "<h3>Output files</h3>\n"
        "<a href=%s>Summary</a>\n"
        "<br><a href=%s>Log file</a>\n",
        SUMMARY_FNAME, LOG_FNAME
    );

    // fill in GPU device nums and OpenCL flags
    //
    for (int i=0; i<coprocs.n_rsc; i++) {
        COPROC& cp = coprocs.coprocs[i];
        for (int j=0; j<cp.count; j++) {
            cp.device_nums[j] = j;
            if (cp.have_opencl) {
                cp.instance_has_opencl[j] = true;
            }
        }
    }
    set_no_rsc_config();
    process_gpu_exclusions();

    get_app_params();
    if (!include_empty_projects) {
        cull_projects();
    }
    fprintf(summary_file, "--------------------------\n");

    int j=0;
    for (unsigned int i=0; i<gstate.projects.size(); i++) {
        gstate.projects[i]->index = j++;
    }

    clear_backoff();

    gstate.log_show_projects();
    gstate.set_ncpus();
    work_fetch.init();

    //set_initial_rec();

    rec_adjust_period = delta;

    gstate.request_work_fetch("init");
    simulate();

    sim_results.compute_figures_of_merit();

    sprintf(buf, "%s%s", outfile_prefix, RESULTS_DAT_FNAME);
    f = fopen(buf, "w");
    sim_results.print(f);
    fclose(f);
    sprintf(buf, "%s%s", outfile_prefix, RESULTS_TXT_FNAME);
    f = fopen(buf, "w");
    sim_results.print(f, true);
    fclose(f);

    fprintf(summary_file,
        "Simulation done.\n"
        "-------------------------\n"
        "Figures of merit:\n"
    );

    sim_results.print(summary_file, true);

    double cpu_time;
    boinc_calling_thread_cpu_time(cpu_time);
    fprintf(summary_file,
        "-------------------------\n"
        "Simulator CPU time: %f secs\n"
        "-------------------------\n"
        "Peak FLOPS: CPU %.2fG GPU %.2fG\n",
        cpu_time,
        cpu_peak_flops()/1e9,
        gpu_peak_flops()/1e9
    );
    print_project_results(summary_file);

    fclose(rec_file);
    make_graph("REC", "rec", 0);
}
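
A minimal, illustrative driver might look like the following; the command-line handling is an assumption, and it presumes the globals used above (infile_prefix, outfile_prefix, summary_file, index_file, rec_file) are assignable and initialized in the same program.

// Illustrative only -- not the simulator's real main().
int main(int argc, char** argv) {
    if (argc > 1) infile_prefix = argv[1];     // assumed: prefixes are plain
    if (argc > 2) outfile_prefix = argv[2];    // assignable string globals
    do_client_simulation();
    return 0;
}
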