// Append a human-readable summary of an app to summary_file:
// its job parameters on the first lines, then one entry per
// app version belonging to it (CPU-only or CPU+GPU form).
//
void show_app(APP* app) {
    fprintf(summary_file,
        " app %s\n"
        " job params: fpops_est %.0fG fpops mean %.0fG std_dev %.0fG\n"
        " latency %.2f weight %.2f",
        app->name,
        app->fpops_est/1e9,
        app->fpops.mean/1e9,
        app->fpops.std_dev/1e9,
        app->latency_bound,
        app->weight
    );
    // Finish the params line: either append a max_concurrent field
    // or just terminate it with a newline.
    if (!app->max_concurrent) {
        fprintf(summary_file, "\n");
    } else {
        fprintf(summary_file, " max_concurrent %d\n", app->max_concurrent);
    }
    // Show each app version that belongs to this app.
    for (unsigned int k=0; k<gstate.app_versions.size(); k++) {
        APP_VERSION* av = gstate.app_versions[k];
        if (av->app != app) continue;
        if (!av->gpu_usage.rsc_type) {
            // CPU-only app version
            fprintf(summary_file,
                " app version %d (%s)\n"
                " %.2f CPUs, %.0f GFLOPS\n",
                av->version_num,
                av->plan_class,
                av->avg_ncpus,
                av->flops/1e9
            );
        } else {
            // app version that also uses a coprocessor
            fprintf(summary_file,
                " app version %d (%s)\n"
                " %.2f CPUs, %.2f %s GPUs, %.0f GFLOPS\n",
                av->version_num,
                av->plan_class,
                av->avg_ncpus,
                av->gpu_usage.usage,
                rsc_name(av->gpu_usage.rsc_type),
                av->flops/1e9
            );
        }
    }
}
// Write project information to client state file or GUI RPC reply
//
// Emits a complete <project> XML element on `out`.
// gui_rpc selects between the GUI-RPC variant (GUI URLs, backoffs,
// venue, project dir) and the state-file variant (scheduler URLs,
// code-sign key, trickle-up URLs).
// Always returns 0.
//
int PROJECT::write_state(MIOFILE& out, bool gui_rpc) {
    unsigned int i;
    // buffers for the XML-escaped copies of user and team name
    char un[2048], tn[2048];
    out.printf(
        "<project>\n"
    );
    xml_escape(user_name, un, sizeof(un));
    xml_escape(team_name, tn, sizeof(tn));
    // Scalar fields first, then 14 trailing %s slots: each is either
    // an empty-element flag line or "" depending on the corresponding
    // boolean.  Argument order must match the format string exactly.
    out.printf(
        " <master_url>%s</master_url>\n"
        " <project_name>%s</project_name>\n"
        " <symstore>%s</symstore>\n"
        " <user_name>%s</user_name>\n"
        " <team_name>%s</team_name>\n"
        " <host_venue>%s</host_venue>\n"
        " <email_hash>%s</email_hash>\n"
        " <cross_project_id>%s</cross_project_id>\n"
        " <external_cpid>%s</external_cpid>\n"
        " <cpid_time>%f</cpid_time>\n"
        " <user_total_credit>%f</user_total_credit>\n"
        " <user_expavg_credit>%f</user_expavg_credit>\n"
        " <user_create_time>%f</user_create_time>\n"
        " <rpc_seqno>%d</rpc_seqno>\n"
        " <userid>%d</userid>\n"
        " <teamid>%d</teamid>\n"
        " <hostid>%d</hostid>\n"
        " <host_total_credit>%f</host_total_credit>\n"
        " <host_expavg_credit>%f</host_expavg_credit>\n"
        " <host_create_time>%f</host_create_time>\n"
        " <nrpc_failures>%d</nrpc_failures>\n"
        " <master_fetch_failures>%d</master_fetch_failures>\n"
        " <min_rpc_time>%f</min_rpc_time>\n"
        " <next_rpc_time>%f</next_rpc_time>\n"
        " <rec>%f</rec>\n"
        " <rec_time>%f</rec_time>\n"
        " <resource_share>%f</resource_share>\n"
        " <desired_disk_usage>%f</desired_disk_usage>\n"
        " <duration_correction_factor>%f</duration_correction_factor>\n"
        " <sched_rpc_pending>%d</sched_rpc_pending>\n"
        " <send_time_stats_log>%d</send_time_stats_log>\n"
        " <send_job_log>%d</send_job_log>\n"
        " <njobs_success>%d</njobs_success>\n"
        " <njobs_error>%d</njobs_error>\n"
        " <elapsed_time>%f</elapsed_time>\n"
        " <last_rpc_time>%f</last_rpc_time>\n"
        "%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
        master_url,
        project_name,
        symstore,
        un,     // escaped user_name
        tn,     // escaped team_name
        host_venue,
        email_hash,
        cross_project_id,
        external_cpid,
        cpid_time,
        user_total_credit,
        user_expavg_credit,
        user_create_time,
        rpc_seqno,
        userid,
        teamid,
        hostid,
        host_total_credit,
        host_expavg_credit,
        host_create_time,
        nrpc_failures,
        master_fetch_failures,
        min_rpc_time,
        next_rpc_time,
        pwf.rec,
        pwf.rec_time,
        resource_share,
        desired_disk_usage,
        duration_correction_factor,
        sched_rpc_pending,
        send_time_stats_log,
        send_job_log,
        njobs_success,
        njobs_error,
        elapsed_time,
        last_rpc_time,
        // the 14 optional flag elements
        anonymous_platform?" <anonymous_platform/>\n":"",
        master_url_fetch_pending?" <master_url_fetch_pending/>\n":"",
        trickle_up_pending?" <trickle_up_pending/>\n":"",
        send_full_workload?" <send_full_workload/>\n":"",
        dont_use_dcf?" <dont_use_dcf/>\n":"",
        non_cpu_intensive?" <non_cpu_intensive/>\n":"",
        verify_files_on_app_start?" <verify_files_on_app_start/>\n":"",
        suspended_via_gui?" <suspended_via_gui/>\n":"",
        dont_request_more_work?" <dont_request_more_work/>\n":"",
        detach_when_done?" <detach_when_done/>\n":"",
        ended?" <ended/>\n":"",
        attached_via_acct_mgr?" <attached_via_acct_mgr/>\n":"",
        (this == gstate.scheduler_op->cur_proj)?" <scheduler_rpc_in_progress/>\n":"",
        use_symlinks?" <use_symlinks/>\n":""
    );
    // Per-resource work-fetch state (CPU is index 0; GPUs follow).
    for (int j=0; j<coprocs.n_rsc; j++) {
        out.printf(
            " <rsc_backoff_time>\n"
            " <name>%s</name>\n"
            " <value>%f</value>\n"
            " </rsc_backoff_time>\n"
            " <rsc_backoff_interval>\n"
            " <name>%s</name>\n"
            " <value>%f</value>\n"
            " </rsc_backoff_interval>\n",
            rsc_name(j), rsc_pwf[j].backoff_time,
            rsc_name(j), rsc_pwf[j].backoff_interval
        );
        // "don't use this resource" flags, from three separate sources
        // (account manager, scheduler/apps, user preference)
        if (no_rsc_ams[j]) {
            out.printf(" <no_rsc_ams>%s</no_rsc_ams>\n", rsc_name(j));
        }
        if (no_rsc_apps[j]) {
            out.printf(" <no_rsc_apps>%s</no_rsc_apps>\n", rsc_name(j));
        }
        if (no_rsc_pref[j]) {
            out.printf(" <no_rsc_pref>%s</no_rsc_pref>\n", rsc_name(j));
        }
        // GUI RPC only: report a coproc fully excluded by config
        // (all instances excluded; j>0 skips the CPU slot)
        if (j>0 && gui_rpc && (rsc_pwf[j].ncoprocs_excluded == rsc_work_fetch[j].ninstances)) {
            out.printf(" <no_rsc_config>%s</no_rsc_config>\n", rsc_name(j));
        }
    }
    // negative ams_resource_share means "not set" — omit the element
    if (ams_resource_share >= 0) {
        out.printf(" <ams_resource_share_new>%f</ams_resource_share_new>\n",
            ams_resource_share
        );
    }
    if (gui_rpc) {
        // GUI RPC reply: include GUI URLs blob and scheduling info
        out.printf(
            "%s"
            " <sched_priority>%f</sched_priority>\n"
            " <project_files_downloaded_time>%f</project_files_downloaded_time>\n",
            gui_urls.c_str(),
            sched_priority, project_files_downloaded_time
        );
        // file-transfer backoffs are reported as time remaining, not
        // absolute times, and only if still in the future
        if (download_backoff.next_xfer_time > gstate.now) {
            out.printf(
                " <download_backoff>%f</download_backoff>\n",
                download_backoff.next_xfer_time - gstate.now
            );
        }
        if (upload_backoff.next_xfer_time > gstate.now) {
            out.printf(
                " <upload_backoff>%f</upload_backoff>\n",
                upload_backoff.next_xfer_time - gstate.now
            );
        }
        if (strlen(host_venue)) {
            out.printf(" <venue>%s</venue>\n", host_venue);
        }
        out.printf(" <project_dir>%s</project_dir>\n", project_dir_absolute());
    } else {
        // client state file: persist scheduler URLs, code-sign key,
        // and pending trickle-up destinations
        for (i=0; i<scheduler_urls.size(); i++) {
            out.printf(
                " <scheduler_url>%s</scheduler_url>\n",
                scheduler_urls[i].c_str()
            );
        }
        if (strlen(code_sign_key)) {
            out.printf(
                " <code_sign_key>\n%s\n</code_sign_key>\n",
                code_sign_key
            );
        }
        for (i=0; i<trickle_up_ops.size(); i++) {
            TRICKLE_UP_OP* t = trickle_up_ops[i];
            out.printf(
                " <trickle_up_url>%s</trickle_up_url>\n",
                t->url.c_str()
            );
        }
    }
    out.printf(
        "</project>\n"
    );
    return 0;
}