Example #1: CLIENT_STATE::get_workload()
// convert results in progress to IP_RESULTs,
// and get an initial schedule for them
//
void CLIENT_STATE::get_workload(vector<IP_RESULT>& ip_results) {
    for (unsigned int i=0; i<results.size(); i++) {
        RESULT* rp = results[i];
        double x = rp->estimated_runtime_remaining();
        if (x == 0) continue;   // nothing left to run; skip
        IP_RESULT ipr(rp->name, rp->report_deadline-now, x);
        ip_results.push_back(ipr);
    }
    //init_ip_results(work_buf_min(), ncpus, ip_results);
    init_ip_results(0, ncpus, ip_results);
}
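
The IP_RESULT list built here feeds the client's earliest-deadline-first
(EDF) simulation, which checks whether the current workload can meet its
deadlines. As a rough illustration of that idea only, here is a minimal,
self-contained sketch of a single-processor EDF feasibility test; SimpleJob
and edf_feasible() are hypothetical stand-ins, not the real IP_RESULT
machinery.

#include <algorithm>
#include <string>
#include <vector>

// Hypothetical stand-in for IP_RESULT: a job with a relative deadline
// and an estimate of the runtime still needed.
struct SimpleJob {
    std::string name;
    double deadline;        // seconds from now
    double time_remaining;  // estimated runtime left, in seconds
};

// Single-processor EDF feasibility: run jobs in deadline order and
// report whether any of them would finish late.
bool edf_feasible(std::vector<SimpleJob> jobs) {
    std::sort(jobs.begin(), jobs.end(),
        [](const SimpleJob& a, const SimpleJob& b) {
            return a.deadline < b.deadline;
        }
    );
    double t = 0;
    for (const SimpleJob& job : jobs) {
        t += job.time_remaining;
        if (t > job.deadline) return false;  // this job misses its deadline
    }
    return true;
}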
Example #2: PROJECT::get_task_durs()
// Compute the total estimated remaining runtime of this project's tasks,
// split into not-yet-started and in-progress totals.
// Each estimate is scaled by the availability of the resource
// the task runs on, converting compute time to wall-clock time.
//
void PROJECT::get_task_durs(double& not_started_dur, double& in_progress_dur) {
    not_started_dur = 0;
    in_progress_dur = 0;
    for (unsigned int i=0; i<gstate.results.size(); i++) {
        RESULT* rp = gstate.results[i];
        if (rp->project != this) continue;
        double d = rp->estimated_runtime_remaining();
        d /= gstate.time_stats.availability_frac(rp->avp->gpu_usage.rsc_type);
        if (rp->is_not_started()) {
            not_started_dur += d;
        } else {
            in_progress_dur += d;
        }
    }
}
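
The division by availability_frac() converts estimated compute time into
expected wall-clock time: a resource that is available only half the time
needs twice the wall time. Here is a tiny sketch of the same adjustment,
with a defensive zero guard added for illustration (expected_wall_time()
is a hypothetical helper, not part of the original code):

#include <cstdio>

// Scale remaining compute time by the fraction of time the resource is
// actually available, yielding an expected wall-clock duration.
double expected_wall_time(double runtime_remaining, double availability_frac) {
    if (availability_frac <= 0) return runtime_remaining;  // defensive guard
    return runtime_remaining / availability_frac;
}

int main() {
    // 3600 s of compute on a GPU available 50% of the time:
    // expect roughly 7200 s of wall-clock time.
    printf("%.0f s\n", expected_wall_time(3600, 0.5));
    return 0;
}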
Example #3: CLIENT_STATE::make_scheduler_request()
// Write a scheduler request to a disk file,
// to be sent to a scheduling server
//
int CLIENT_STATE::make_scheduler_request(PROJECT* p) {
    char buf[1024];
    MIOFILE mf;
    unsigned int i;
    RESULT* rp;

    get_sched_request_filename(*p, buf, sizeof(buf));
    FILE* f = boinc_fopen(buf, "wb");
    if (!f) return ERR_FOPEN;

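    // this project's fraction of the total, runnable, and potentially
    // runnable resource shares; the scheduler uses these when deciding
    // how much work to send
    //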
    double trs = total_resource_share();
    double rrs = runnable_resource_share(RSC_TYPE_ANY);
    double prrs = potentially_runnable_resource_share();
    double resource_share_fraction, rrs_fraction, prrs_fraction;
    if (trs) {
        resource_share_fraction = p->resource_share / trs;
    } else {
        resource_share_fraction = 1;
    }
    if (rrs) {
        rrs_fraction = p->resource_share / rrs;
    } else {
        rrs_fraction = 1;
    }
    if (prrs) {
        prrs_fraction = p->resource_share / prrs;
    } else {
        prrs_fraction = 1;
    }

    // if hostid is zero, rpc_seqno must be zero as well
    //
    if (!p->hostid) {
        p->rpc_seqno = 0;
    }

    mf.init_file(f);
    fprintf(f,
        "<scheduler_request>\n"
        "    <authenticator>%s</authenticator>\n"
        "    <hostid>%d</hostid>\n"
        "    <rpc_seqno>%d</rpc_seqno>\n"
        "    <core_client_major_version>%d</core_client_major_version>\n"
        "    <core_client_minor_version>%d</core_client_minor_version>\n"
        "    <core_client_release>%d</core_client_release>\n"
        "    <resource_share_fraction>%f</resource_share_fraction>\n"
        "    <rrs_fraction>%f</rrs_fraction>\n"
        "    <prrs_fraction>%f</prrs_fraction>\n"
        "    <duration_correction_factor>%f</duration_correction_factor>\n"
        "    <allow_multiple_clients>%d</allow_multiple_clients>\n"
        "    <sandbox>%d</sandbox>\n",
        p->authenticator,
        p->hostid,
        p->rpc_seqno,
        core_client_version.major,
        core_client_version.minor,
        core_client_version.release,
        resource_share_fraction,
        rrs_fraction,
        prrs_fraction,
        p->duration_correction_factor,
        config.allow_multiple_clients?1:0,
        g_use_sandbox?1:0
    );
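    // per-resource work request: seconds of work wanted and number of
    // idle instances, for each processor type
    //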
    work_fetch.write_request(f, p);

    // write client capabilities
    //
    fprintf(f,
        "    <client_cap_plan_class>1</client_cap_plan_class>\n"
    );

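    // list the platforms this client can run apps for
    //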
    write_platforms(p, mf);

    if (strlen(p->code_sign_key)) {
        fprintf(f, "    <code_sign_key>\n%s\n</code_sign_key>\n", p->code_sign_key);
    }

    // send working prefs
    //
    fprintf(f, "<working_global_preferences>\n");
    global_prefs.write(mf);
    fprintf(f, "</working_global_preferences>\n");

    // send master global preferences if present and not host-specific
    //
    if (!global_prefs.host_specific && boinc_file_exists(GLOBAL_PREFS_FILE_NAME)) {
        FILE* fprefs = fopen(GLOBAL_PREFS_FILE_NAME, "r");
        if (fprefs) {
            copy_stream(fprefs, f);
            fclose(fprefs);
        }
        PROJECT* pp = lookup_project(global_prefs.source_project);
        if (pp && strlen(pp->email_hash)) {
            fprintf(f,
                "<global_prefs_source_email_hash>%s</global_prefs_source_email_hash>\n",
                pp->email_hash
            );
        }
    }

    // Of the projects with same email hash as this one,
    // send the oldest cross-project ID.
    // Use project URL as tie-breaker.
    //
    PROJECT* winner = p;
    for (i=0; i<projects.size(); i++) {
        PROJECT* project = projects[i];
        if (project == p) continue;
        if (strcmp(project->email_hash, p->email_hash)) continue;
        if (project->cpid_time < winner->cpid_time) {
            winner = project;
        } else if (project->cpid_time == winner->cpid_time) {
            if (strcmp(project->master_url, winner->master_url) < 0) {
                winner = project;
            }
        }
    }
    fprintf(f,
        "<cross_project_id>%s</cross_project_id>\n",
        winner->cross_project_id
    );

    time_stats.write(mf, true);
    net_stats.write(mf);
    if (global_prefs.daily_xfer_period_days) {
        daily_xfer_history.write_scheduler_request(
            mf, global_prefs.daily_xfer_period_days
        );
    }

    // update hardware info, and write host info
    //
    host_info.get_host_info();
    set_ncpus();
    host_info.write(mf, !config.suppress_net_info, false);

    // get and write disk usage
    //
    get_disk_usages();
    get_disk_shares();
    fprintf(f,
        "    <disk_usage>\n"
        "        <d_boinc_used_total>%f</d_boinc_used_total>\n"
        "        <d_boinc_used_project>%f</d_boinc_used_project>\n"
        "        <d_project_share>%f</d_project_share>\n"
        "    </disk_usage>\n",
        total_disk_usage, p->disk_usage, p->disk_share
    );

    // copy request values from RSC_WORK_FETCH to COPROC
    //
    int j = rsc_index(GPU_TYPE_NVIDIA);
    if (j > 0) {
        coprocs.nvidia.req_secs = rsc_work_fetch[j].req_secs;
        coprocs.nvidia.req_instances = rsc_work_fetch[j].req_instances;
        coprocs.nvidia.estimated_delay = rsc_work_fetch[j].req_secs
            ? rsc_work_fetch[j].busy_time_estimator.get_busy_time()
            : 0;
    }
    j = rsc_index(GPU_TYPE_ATI);
    if (j > 0) {
        coprocs.ati.req_secs = rsc_work_fetch[j].req_secs;
        coprocs.ati.req_instances = rsc_work_fetch[j].req_instances;
        coprocs.ati.estimated_delay = rsc_work_fetch[j].req_secs
            ? rsc_work_fetch[j].busy_time_estimator.get_busy_time()
            : 0;
    }

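    // n_rsc counts the CPU as well, so >1 means the host has coprocessors
    //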
    if (coprocs.n_rsc > 1) {
        coprocs.write_xml(mf, true);
    }

    // report completed jobs
    //
    unsigned int last_reported_index = 0;
    p->nresults_returned = 0;
    for (i=0; i<results.size(); i++) {
        rp = results[i];
        if (rp->project == p && rp->ready_to_report) {
            p->nresults_returned++;
            rp->write(mf, true);
        }
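        // honor the configured cap on tasks reported per request;
        // any remaining completed tasks go out in a later request
        //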
        if (config.max_tasks_reported
            && (p->nresults_returned >= config.max_tasks_reported)
        ) {
            last_reported_index = i;
            break;
        }
    }

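    // append any trickle-up messages queued for this project
    //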
    read_trickle_files(p, f);

    // report sticky files as needed
    //
    for (i=0; i<file_infos.size(); i++) {
        FILE_INFO* fip = file_infos[i];
        if (fip->project != p) continue;
        if (!fip->sticky) continue;
        fprintf(f,
            "    <file_info>\n"
            "        <name>%s</name>\n"
            "        <nbytes>%f</nbytes>\n"
            "        <status>%d</status>\n"
            "    </file_info>\n",
            fip->name, fip->nbytes, fip->status
        );
    }

    if (p->send_time_stats_log) {
        fprintf(f, "<time_stats_log>\n");
        time_stats.get_log_after(p->send_time_stats_log, mf);
        fprintf(f, "</time_stats_log>\n");
    }
    if (p->send_job_log) {
        fprintf(f, "<job_log>\n");
        job_log_filename(*p, buf, sizeof(buf));
        send_log_after(buf, p->send_job_log, mf);
        fprintf(f, "</job_log>\n");
    }

    // send descriptions of app versions
    //
    fprintf(f, "<app_versions>\n");
    // number app versions consecutively; <other_result> elements below
    // refer to them by this index
    //
    j = 0;
    for (i=0; i<app_versions.size(); i++) {
        APP_VERSION* avp = app_versions[i];
        if (avp->project != p) continue;
        avp->write(mf, false);
        avp->index = j++;
    }
    fprintf(f, "</app_versions>\n");

    // send descriptions of jobs in progress for this project
    //
    fprintf(f, "<other_results>\n");
    for (i=0; i<results.size(); i++) {
        rp = results[i];
        if (rp->project != p) continue;
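        // list jobs not reported above: either not ready to report,
        // or past the reporting cap
        //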
        if ((last_reported_index && (i > last_reported_index)) || !rp->ready_to_report) {
            fprintf(f,
                "    <other_result>\n"
                "        <name>%s</name>\n"
                "        <app_version>%d</app_version>\n",
                rp->name,
                rp->avp->index
            );
            // the following is for backwards compatibility w/ old schedulers
            //
            if (strlen(rp->avp->plan_class)) {
                fprintf(f,
                    "        <plan_class>%s</plan_class>\n",
                    rp->avp->plan_class
                );
            }
            fprintf(f,
                "    </other_result>\n"
            );
        }
    }
    fprintf(f, "</other_results>\n");

    // if requested by project, send summary of all in-progress results
    // (for EDF simulation by scheduler)
    //
    if (p->send_full_workload) {
        fprintf(f, "<in_progress_results>\n");
        for (i=0; i<results.size(); i++) {
            rp = results[i];
            double x = rp->estimated_runtime_remaining();
            if (x == 0) continue;
            strcpy(buf, "");
            int rt = rp->avp->gpu_usage.rsc_type;
            if (rt) {
                if (rt == rsc_index(GPU_TYPE_NVIDIA)) {
                    sprintf(buf, "        <ncudas>%f</ncudas>\n", rp->avp->gpu_usage.usage);
                } else if (rt == rsc_index(GPU_TYPE_ATI)) {
                    sprintf(buf, "        <natis>%f</natis>\n", rp->avp->gpu_usage.usage);
                }
            }
            fprintf(f,
                "    <ip_result>\n"
                "        <name>%s</name>\n"
                "        <report_deadline>%.0f</report_deadline>\n"
                "        <time_remaining>%.2f</time_remaining>\n"
                "        <avg_ncpus>%f</avg_ncpus>\n"
                "%s"
                "    </ip_result>\n",
                rp->name,
                rp->report_deadline,
                x,
                rp->avp->avg_ncpus,
                buf
            );
        }
        fprintf(f, "</in_progress_results>\n");
    }
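    // pass the client_opaque file through verbatim, if present
    //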
    FILE* cof = boinc_fopen(CLIENT_OPAQUE_FILENAME, "r");
    if (cof) {
        fprintf(f, "<client_opaque>\n<![CDATA[\n");
        copy_stream(cof, f);
        fprintf(f, "\n]]>\n</client_opaque>\n");
        fclose(cof);
    }

    fprintf(f, "</scheduler_request>\n");

    fclose(f);
    return 0;
}
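
(The request file written here is then sent to the project's scheduler over
HTTP by the client's scheduler-RPC code, and the reply is parsed and acted
on; that round trip is outside this function's scope.)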