/*
 * Send the current spooling statistics to the supplied output callback.
 *
 *  sendit - callback invoked once per formatted line (msg, len, closure)
 *  arg    - opaque closure pointer passed through to sendit
 *
 * Reads the global spool_stats. The data/attribute lines are only
 * emitted when the corresponding counters are non-zero.
 *
 * BUG FIX: the "Spooling statistics:" header was formatted with Mmsg()
 * but never handed to sendit(), leaving the result dead; it is now sent.
 */
void list_spool_stats(void sendit(const char *msg, int len, void *sarg), void *arg)
{
   char ed1[30], ed2[30];
   POOL_MEM msg(PM_MESSAGE);
   int len;

   len = Mmsg(msg, _("Spooling statistics:\n"));
   sendit(msg.c_str(), len, arg);

   if (spool_stats.data_jobs || spool_stats.max_data_size) {
      len = Mmsg(msg, _("Data spooling: %u active jobs, %s bytes; %u total jobs, %s max bytes/job.\n"),
                 spool_stats.data_jobs, edit_uint64_with_commas(spool_stats.data_size, ed1),
                 spool_stats.total_data_jobs, edit_uint64_with_commas(spool_stats.max_data_size, ed2));
      sendit(msg.c_str(), len, arg);
   }

   if (spool_stats.attr_jobs || spool_stats.max_attr_size) {
      len = Mmsg(msg, _("Attr spooling: %u active jobs, %s bytes; %u total jobs, %s max bytes.\n"),
                 spool_stats.attr_jobs, edit_uint64_with_commas(spool_stats.attr_size, ed1),
                 spool_stats.total_attr_jobs, edit_uint64_with_commas(spool_stats.max_attr_size, ed2));
      sendit(msg.c_str(), len, arg);
   }
}
/*
 * Generic function which generates a restore summary message.
 * Used by:
 *    - native_restore_cleanup e.g. normal restores
 *    - ndmp_restore_cleanup e.g. NDMP restores
 *
 *  jcr      - job control record holding job statistics (jr.*, counters)
 *  msg_type - message class (e.g. M_INFO / M_ERROR) passed to Jmsg()
 *  term_msg - human-readable termination string chosen by the caller
 *
 * Emits a single multi-line job report via Jmsg(). The argument list of
 * the Jmsg() call is positional and must match the format string exactly.
 */
void generate_restore_summary(JCR *jcr, int msg_type, const char *term_msg)
{
   char sdt[MAX_TIME_LENGTH], edt[MAX_TIME_LENGTH];
   char ec1[30], ec2[30], ec3[30];
   char fd_term_msg[100], sd_term_msg[100];
   double kbps;

   bstrftimes(sdt, sizeof(sdt), jcr->jr.StartTime);
   bstrftimes(edt, sizeof(edt), jcr->jr.EndTime);
   /* Transfer rate in KB/s; guard against a zero/negative elapsed time. */
   if (jcr->jr.EndTime - jcr->jr.StartTime > 0) {
      kbps = (double)jcr->jr.JobBytes / (1000 * (jcr->jr.EndTime - jcr->jr.StartTime));
   } else {
      kbps = 0;
   }
   /* Suppress noise: rates that would print as "0.0" are reported as 0. */
   if (kbps < 0.05) {
      kbps = 0;
   }
   jobstatus_to_ascii(jcr->FDJobStatus, fd_term_msg, sizeof(fd_term_msg));
   jobstatus_to_ascii(jcr->SDJobStatus, sd_term_msg, sizeof(sd_term_msg));
   Jmsg(jcr, msg_type, 0,
        _("%s %s %s (%s):\n"
          " Build OS: %s %s %s\n"
          " JobId: %d\n"
          " Job: %s\n"
          " Restore Client: %s\n"
          " Start time: %s\n"
          " End time: %s\n"
          " Files Expected: %s\n"
          " Files Restored: %s\n"
          " Bytes Restored: %s\n"
          " Rate: %.1f KB/s\n"
          " FD Errors: %d\n"
          " FD termination status: %s\n"
          " SD termination status: %s\n"
          " Termination: %s\n\n"),
        BAREOS, my_name, VERSION, LSMDATE,
        HOST_OS, DISTNAME, DISTVER,
        jcr->jr.JobId,
        jcr->jr.Job,
        jcr->res.client->name(),
        sdt,
        edt,
        edit_uint64_with_commas((uint64_t)jcr->ExpectedFiles, ec1),
        edit_uint64_with_commas((uint64_t)jcr->jr.JobFiles, ec2),
        edit_uint64_with_commas(jcr->jr.JobBytes, ec3),
        (float)kbps,
        jcr->JobErrors,
        fd_term_msg,
        sd_term_msg,
        term_msg);
}
/*
 * Edit an integer into "human-readable" format with four or fewer
 * significant digits followed by a suffix that indicates the scale
 * factor. The buf array inherits a 27 byte minimum length
 * requirement from edit_uint64_with_commas(), although the output
 * string is limited to eight characters.
 */
char *edit_uint64_with_suffix(uint64_t val, char *buf)
{
   int commas = 0;
   char *c, mbuf[50];
   /* Suffix index == number of comma groups; "FIX ME" is a visible
    * sentinel should a value ever exceed the defined prefixes. */
   const char *suffix[] = { "", "K", "M", "G", "T", "P", "E", "Z", "Y", "FIX ME" };
   int suffixes = sizeof(suffix) / sizeof(*suffix);

   edit_uint64_with_commas(val, mbuf);

   if ((c = strchr(mbuf, ',')) != NULL) {
      commas++;
      *c++ = '.';   /* first comma becomes the decimal point */
      /* Count the remaining commas (to pick the suffix); the first of
       * them is overwritten with NUL, truncating the string after the
       * leading group and its fraction digits. */
      while ((c = strchr(c, ',')) != NULL) {
         commas++;
         *c++ = '\0';
      }
      /* Keep at most "ddd.d" of the leading digits. */
      mbuf[5] = '\0'; // drop this to get '123.456 TB' rather than '123.4 TB'
   }
   if (commas >= suffixes) commas = suffixes - 1;
   bsnprintf(buf, 27, "%s %s", mbuf, suffix[commas]);
   return buf;
}
/* Release all freed pooled memory */ void close_memory_pool() { struct abufhead *buf, *next; int count = 0; uint64_t bytes = 0; char ed1[50]; sm_check(__FILE__, __LINE__, false); P(mutex); for (int i=1; i<=PM_MAX; i++) { buf = pool_ctl[i].free_buf; while (buf) { next = buf->next; count++; bytes += sizeof_pool_memory((char *)buf); free((char *)buf); buf = next; } pool_ctl[i].free_buf = NULL; } Dmsg2(DT_MEMORY|001, "Freed mem_pool count=%d size=%s\n", count, edit_uint64_with_commas(bytes, ed1)); if (chk_dbglvl(DT_MEMORY|1)) { print_memory_pool_stats(); } V(mutex); }
/*
 * Commit the spooled attributes: measure the spool file, account the
 * size in the global spool statistics, and despool the attributes to
 * the Director (directly via blast_attr_spool_file(), or over the
 * network as a fallback).
 *
 * Returns: true  on success (or when nothing was spooled)
 *          false on fatal error (spool file is closed either way)
 */
bool commit_attribute_spool(JCR *jcr)
{
   boffset_t size;
   char ec1[30];
   char tbuf[100];

   Dmsg1(100, "Commit attributes at %s\n", bstrftimes(tbuf, sizeof(tbuf), (utime_t)time(NULL)));
   if (are_attributes_spooled(jcr)) {
      /* Seek to the end so ftello() yields the total spooled size. */
      if (fseeko(jcr->dir_bsock->m_spool_fd, 0, SEEK_END) != 0) {
         berrno be;
         Jmsg(jcr, M_FATAL, 0, _("Fseek on attributes file failed: ERR=%s\n"),
              be.bstrerror());
         goto bail_out;
      }
      size = ftello(jcr->dir_bsock->m_spool_fd);
      if (size < 0) {
         berrno be;
         /* BUG FIX: this failure comes from ftello(), not fseeko();
          * the message previously said "Fseek". */
         Jmsg(jcr, M_FATAL, 0, _("Ftell on attributes file failed: ERR=%s\n"),
              be.bstrerror());
         goto bail_out;
      }
      P(mutex);
      if (spool_stats.attr_size + size > spool_stats.max_attr_size) {
         spool_stats.max_attr_size = spool_stats.attr_size + size;
      }
      spool_stats.attr_size += size;
      V(mutex);
      set_jcr_job_status(jcr, JS_AttrDespooling);
      dir_send_job_status(jcr);
      Jmsg(jcr, M_INFO, 0, _("Sending spooled attrs to the Director. Despooling %s bytes ...\n"),
           edit_uint64_with_commas(size, ec1));
      if (!blast_attr_spool_file(jcr, size)) {
         /* Can't read spool file from director side,
          * send content over network.
          */
         jcr->dir_bsock->despool(update_attr_spool_size, size);
      }
      return close_attr_spool_file(jcr, jcr->dir_bsock);
   }
   return true;

bail_out:
   close_attr_spool_file(jcr, jcr->dir_bsock);
   return false;
}
/*
 * Release resources allocated during backup and emit the final job report.
 *
 * Flow: virtual-full jobs are delegated to vbackup_cleanup(); otherwise the
 * job end is recorded, the Job/Client/Media DB records are fetched (warnings
 * on failure), the bootstrap file is updated, a termination string is chosen
 * from jcr->JobStatus (cancelling the SD channel on error/cancel), rate and
 * compression figures are computed, and one large positional Jmsg() report
 * is produced. The Jmsg() argument list must match the format string exactly.
 *
 * NOTE(review): the default switch case fills term_code with an unbounded
 * sprintf(); verify_cleanup() uses bsnprintf(term_code, sizeof(term_code),...)
 * for the same purpose — consider the bounded form here too. Also note
 * term_msg is pointed at term_code *before* sprintf fills it, which works
 * only because term_msg is not read until after the switch.
 *
 * NOTE(review): kbps is computed against (1000.0 * RunTime), i.e. decimal
 * kilobytes per second — presumably intentional, matching the other reports.
 */
/* * Release resources allocated during backup. */ void backup_cleanup(JCR *jcr, int TermCode) { char sdt[50], edt[50], schedt[50]; char ec1[30], ec2[30], ec3[30], ec4[30], ec5[30], compress[50]; char ec6[30], ec7[30], ec8[30], elapsed[50]; char term_code[100], fd_term_msg[100], sd_term_msg[100]; const char *term_msg; int msg_type = M_INFO; MEDIA_DBR mr; CLIENT_DBR cr; double kbps, compression; utime_t RunTime; if (jcr->get_JobLevel() == L_VIRTUAL_FULL) { vbackup_cleanup(jcr, TermCode); return; } Dmsg2(100, "Enter backup_cleanup %d %c\n", TermCode, TermCode); memset(&mr, 0, sizeof(mr)); memset(&cr, 0, sizeof(cr)); update_job_end(jcr, TermCode); if (!db_get_job_record(jcr, jcr->db, &jcr->jr)) { Jmsg(jcr, M_WARNING, 0, _("Error getting Job record for Job report: ERR=%s"), db_strerror(jcr->db)); set_jcr_job_status(jcr, JS_ErrorTerminated); } bstrncpy(cr.Name, jcr->client->name(), sizeof(cr.Name)); if (!db_get_client_record(jcr, jcr->db, &cr)) { Jmsg(jcr, M_WARNING, 0, _("Error getting Client record for Job report: ERR=%s"), db_strerror(jcr->db)); } bstrncpy(mr.VolumeName, jcr->VolumeName, sizeof(mr.VolumeName)); if (!db_get_media_record(jcr, jcr->db, &mr)) { Jmsg(jcr, M_WARNING, 0, _("Error getting Media record for Volume \"%s\": ERR=%s"), mr.VolumeName, db_strerror(jcr->db)); set_jcr_job_status(jcr, JS_ErrorTerminated); } update_bootstrap_file(jcr); switch (jcr->JobStatus) { case JS_Terminated: if (jcr->JobErrors || jcr->SDErrors) { term_msg = _("Backup OK -- with warnings"); } else { term_msg = _("Backup OK"); } break; case JS_Warnings: term_msg = _("Backup OK -- with warnings"); break; case JS_FatalError: case JS_ErrorTerminated: term_msg = _("*** Backup Error ***"); msg_type = M_ERROR; /* Generate error message */ if (jcr->store_bsock) { jcr->store_bsock->signal(BNET_TERMINATE); if (jcr->SD_msg_chan) { pthread_cancel(jcr->SD_msg_chan); } } break; case JS_Canceled: term_msg = _("Backup Canceled"); if (jcr->store_bsock) { jcr->store_bsock->signal(BNET_TERMINATE); if
(jcr->SD_msg_chan) { pthread_cancel(jcr->SD_msg_chan); } } break; default: term_msg = term_code; sprintf(term_code, _("Inappropriate term code: %c\n"), jcr->JobStatus); break; } bstrftimes(schedt, sizeof(schedt), jcr->jr.SchedTime); bstrftimes(sdt, sizeof(sdt), jcr->jr.StartTime); bstrftimes(edt, sizeof(edt), jcr->jr.EndTime); RunTime = jcr->jr.EndTime - jcr->jr.StartTime; if (RunTime <= 0) { kbps = 0; } else { kbps = ((double)jcr->jr.JobBytes) / (1000.0 * (double)RunTime); } if (!db_get_job_volume_names(jcr, jcr->db, jcr->jr.JobId, &jcr->VolumeName)) { /* * Note, if the job has erred, most likely it did not write any * tape, so suppress this "error" message since in that case * it is normal. Or look at it the other way, only for a * normal exit should we complain about this error. */ if (jcr->JobStatus == JS_Terminated && jcr->jr.JobBytes) { Jmsg(jcr, M_ERROR, 0, "%s", db_strerror(jcr->db)); } jcr->VolumeName[0] = 0; /* none */ } if (jcr->ReadBytes == 0) { bstrncpy(compress, "None", sizeof(compress)); } else { compression = (double)100 - 100.0 * ((double)jcr->JobBytes / (double)jcr->ReadBytes); if (compression < 0.5) { bstrncpy(compress, "None", sizeof(compress)); } else { bsnprintf(compress, sizeof(compress), "%.1f %%", compression); } } jobstatus_to_ascii(jcr->FDJobStatus, fd_term_msg, sizeof(fd_term_msg)); jobstatus_to_ascii(jcr->SDJobStatus, sd_term_msg, sizeof(sd_term_msg)); // bmicrosleep(15, 0); /* for debugging SIGHUP */ Jmsg(jcr, msg_type, 0, _("%s %s %s (%s): %s\n" " Build OS: %s %s %s\n" " JobId: %d\n" " Job: %s\n" " Backup Level: %s%s\n" " Client: \"%s\" %s\n" " FileSet: \"%s\" %s\n" " Pool: \"%s\" (From %s)\n" " Catalog: \"%s\" (From %s)\n" " Storage: \"%s\" (From %s)\n" " Scheduled time: %s\n" " Start time: %s\n" " End time: %s\n" " Elapsed time: %s\n" " Priority: %d\n" " FD Files Written: %s\n" " SD Files Written: %s\n" " FD Bytes Written: %s (%sB)\n" " SD Bytes Written: %s (%sB)\n" " Rate: %.1f KB/s\n" " Software Compression: %s\n" " VSS: %s\n" " Encryption: %s\n" " Accurate: %s\n" " Volume name(s): %s\n" " Volume Session Id: %d\n" " Volume Session Time: %d\n" " Last Volume Bytes: %s (%sB)\n" " Non-fatal FD errors: %d\n" " SD Errors: %d\n" " FD termination status: %s\n" " SD termination status: %s\n" " Termination: %s\n\n"), BACULA, my_name, VERSION, LSMDATE, edt, HOST_OS, DISTNAME, DISTVER, jcr->jr.JobId, jcr->jr.Job, level_to_str(jcr->get_JobLevel()), jcr->since, jcr->client->name(), cr.Uname, jcr->fileset->name(), jcr->FSCreateTime, jcr->pool->name(), jcr->pool_source, jcr->catalog->name(), jcr->catalog_source, jcr->wstore->name(), jcr->wstore_source, schedt, sdt, edt, edit_utime(RunTime, elapsed, sizeof(elapsed)), jcr->JobPriority, edit_uint64_with_commas(jcr->jr.JobFiles, ec1), edit_uint64_with_commas(jcr->SDJobFiles, ec2), edit_uint64_with_commas(jcr->jr.JobBytes, ec3), edit_uint64_with_suffix(jcr->jr.JobBytes, ec4), edit_uint64_with_commas(jcr->SDJobBytes, ec5), edit_uint64_with_suffix(jcr->SDJobBytes, ec6), kbps, compress, jcr->VSS?_("yes"):_("no"), jcr->Encrypt?_("yes"):_("no"), jcr->accurate?_("yes"):_("no"), jcr->VolumeName, jcr->VolSessionId, jcr->VolSessionTime, edit_uint64_with_commas(mr.VolBytes, ec7), edit_uint64_with_suffix(mr.VolBytes, ec8), jcr->JobErrors, jcr->SDErrors, fd_term_msg, sd_term_msg, term_msg); Dmsg0(100, "Leave backup_cleanup()\n"); }
/* * Prune File records from the database. For any Job which * is older than the retention period, we unconditionally delete * all File records for that Job. This is simple enough that no * temporary tables are needed. We simply make an in memory list of * the JobIds meeting the prune conditions, then delete all File records * pointing to each of those JobIds. * * This routine assumes you want the pruning to be done. All checking * must be done before calling this routine. * * Note: client or pool can possibly be NULL (not both). */ int prune_files(UAContext *ua, CLIENTRES *client, POOLRES *pool) { struct del_ctx del; struct s_count_ctx cnt; POOL_MEM query(PM_MESSAGE); POOL_MEM sql_where(PM_MESSAGE); POOL_MEM sql_from(PM_MESSAGE); utime_t period; char ed1[50]; memset(&del, 0, sizeof(del)); if (pool && pool->FileRetention > 0) { period = pool->FileRetention; } else if (client) { period = client->FileRetention; } else { /* should specify at least pool or client */ return false; } db_lock(ua->db); /* Specify JobTDate and Pool.Name= and/or Client.Name= in the query */ if (!prune_set_filter(ua, client, pool, period, &sql_from, &sql_where)) { goto bail_out; } // edit_utime(now-period, ed1, sizeof(ed1)); // Jmsg(ua->jcr, M_INFO, 0, _("Begin pruning Jobs older than %s secs.\n"), ed1); Jmsg(ua->jcr, M_INFO, 0, _("Begin pruning Files.\n")); /* Select Jobs -- for counting */ Mmsg(query, "SELECT COUNT(1) FROM Job %s WHERE PurgedFiles=0 %s", sql_from.c_str(), sql_where.c_str()); Dmsg1(050, "select sql=%s\n", query.c_str()); cnt.count = 0; if (!db_sql_query(ua->db, query.c_str(), del_count_handler, (void *)&cnt)) { ua->error_msg("%s", db_strerror(ua->db)); Dmsg0(050, "Count failed\n"); goto bail_out; } if (cnt.count == 0) { if (ua->verbose) { ua->warning_msg(_("No Files found to prune.\n")); } goto bail_out; } if (cnt.count < MAX_DEL_LIST_LEN) { del.max_ids = cnt.count + 1; } else { del.max_ids = MAX_DEL_LIST_LEN; } del.tot_ids = 0; del.JobId = (JobId_t *)malloc(sizeof(JobId_t) 
* del.max_ids); /* Now process same set but making a delete list */ Mmsg(query, "SELECT JobId FROM Job %s WHERE PurgedFiles=0 %s", sql_from.c_str(), sql_where.c_str()); Dmsg1(050, "select sql=%s\n", query.c_str()); db_sql_query(ua->db, query.c_str(), file_delete_handler, (void *)&del); purge_files_from_job_list(ua, del); edit_uint64_with_commas(del.num_del, ed1); ua->info_msg(_("Pruned Files from %s Jobs for client %s from catalog.\n"), ed1, client->name()); bail_out: db_unlock(ua->db); if (del.JobId) { free(del.JobId); } return 1; }
/*
 * Top-level "restore" console command.
 *
 * Flow: initialize the RESTORE_CTX and its pool buffers, collect the
 * keyword=value command arguments (comment, backupformat, where, replace,
 * pluginoptions, strip_prefix/add_prefix/add_suffix/regexwhere), enforce
 * Where_ACL on where/regexwhere, open the catalog, verify at least one
 * Restore-type Job resource exists, let the user select JobIds/files
 * (optionally building the directory tree), write the BSR file, pick the
 * restore job and client names, assemble a "run job=..." command string,
 * and hand it to run_cmd(). All exits funnel through free_rx() and
 * garbage_collect_memory(); error paths use the bail_out label.
 *
 * Returns: true if the run command was submitted, false on any failure.
 *
 * NOTE(review): on success jcr->JobIds takes ownership of rx.JobIds
 * (rx.JobIds is NULLed so free_rx() does not release it) — presumably
 * freed later with the JCR; verify against free_jcr().
 *
 * NOTE(review): when strip/add prefix options are given, rx.RegexWhere is
 * pointed at the locally allocated 'regexp' buffer, which is freed before
 * run_cmd() returns the context — safe only because the built ua->cmd
 * string already contains a copy of the value.
 */
/* * Restore files */ bool restore_cmd(UAContext *ua, const char *cmd) { RESTORE_CTX rx; /* restore context */ POOL_MEM buf; JOBRES *job; int i; JCR *jcr = ua->jcr; char *escaped_bsr_name = NULL; char *escaped_where_name = NULL; char *strip_prefix, *add_prefix, *add_suffix, *regexp; strip_prefix = add_prefix = add_suffix = regexp = NULL; memset(&rx, 0, sizeof(rx)); rx.path = get_pool_memory(PM_FNAME); rx.fname = get_pool_memory(PM_FNAME); rx.JobIds = get_pool_memory(PM_FNAME); rx.JobIds[0] = 0; rx.BaseJobIds = get_pool_memory(PM_FNAME); rx.query = get_pool_memory(PM_FNAME); rx.bsr = new_bsr(); i = find_arg_with_value(ua, "comment"); if (i >= 0) { rx.comment = ua->argv[i]; if (!is_comment_legal(ua, rx.comment)) { goto bail_out; } } i = find_arg_with_value(ua, "backupformat"); if (i >= 0) { rx.backup_format = ua->argv[i]; } i = find_arg_with_value(ua, "where"); if (i >= 0) { rx.where = ua->argv[i]; } i = find_arg_with_value(ua, "replace"); if (i >= 0) { rx.replace = ua->argv[i]; } i = find_arg_with_value(ua, "pluginoptions"); if (i >= 0) { rx.plugin_options = ua->argv[i]; } i = find_arg_with_value(ua, "strip_prefix"); if (i >= 0) { strip_prefix = ua->argv[i]; } i = find_arg_with_value(ua, "add_prefix"); if (i >= 0) { add_prefix = ua->argv[i]; } i = find_arg_with_value(ua, "add_suffix"); if (i >= 0) { add_suffix = ua->argv[i]; } i = find_arg_with_value(ua, "regexwhere"); if (i >= 0) { rx.RegexWhere = ua->argv[i]; } if (strip_prefix || add_suffix || add_prefix) { int len = bregexp_get_build_where_size(strip_prefix, add_prefix, add_suffix); regexp = (char *)bmalloc(len * sizeof(char)); bregexp_build_where(regexp, len, strip_prefix, add_prefix, add_suffix); rx.RegexWhere = regexp; } /* TODO: add acl for regexwhere ?
*/ if (rx.RegexWhere) { if (!acl_access_ok(ua, Where_ACL, rx.RegexWhere, true)) { ua->error_msg(_("\"RegexWhere\" specification not authorized.\n")); goto bail_out; } } if (rx.where) { if (!acl_access_ok(ua, Where_ACL, rx.where, true)) { ua->error_msg(_("\"where\" specification not authorized.\n")); goto bail_out; } } if (!open_client_db(ua, true)) { goto bail_out; } /* Ensure there is at least one Restore Job */ LockRes(); foreach_res(job, R_JOB) { if (job->JobType == JT_RESTORE) { if (!rx.restore_job) { rx.restore_job = job; } rx.restore_jobs++; } } UnlockRes(); if (!rx.restore_jobs) { ua->error_msg(_( "No Restore Job Resource found in bareos-dir.conf.\n" "You must create at least one before running this command.\n")); goto bail_out; } /* * Request user to select JobIds or files by various different methods * last 20 jobs, where File saved, most recent backup, ... * In the end, a list of files are pumped into * add_findex() */ switch (user_select_jobids_or_files(ua, &rx)) { case 0: /* error */ goto bail_out; case 1: /* selected by jobid */ get_and_display_basejobs(ua, &rx); if (!build_directory_tree(ua, &rx)) { ua->send_msg(_("Restore not done.\n")); goto bail_out; } break; case 2: /* selected by filename, no tree needed */ break; } if (rx.bsr->JobId) { char ed1[50]; if (!complete_bsr(ua, rx.bsr)) { /* find Vol, SessId, SessTime from JobIds */ ua->error_msg(_("Unable to construct a valid BSR. Cannot continue.\n")); goto bail_out; } if (!(rx.selected_files = write_bsr_file(ua, rx))) { ua->warning_msg(_("No files selected to be restored.\n")); goto bail_out; } display_bsr_info(ua, rx); /* display vols needed, etc */ if (rx.selected_files==1) { ua->info_msg(_("\n1 file selected to be restored.\n\n")); } else { ua->info_msg(_("\n%s files selected to be restored.\n\n"), edit_uint64_with_commas(rx.selected_files, ed1)); } } else { ua->warning_msg(_("No files selected to be restored.\n")); goto bail_out; } if (rx.restore_jobs == 1) { job = rx.restore_job; } else { job = get_restore_job(ua); } if (!job) { goto bail_out; } if (!get_client_name(ua, &rx)) { goto bail_out; } if (!rx.ClientName) { ua->error_msg(_("No Client resource found!\n")); goto bail_out; } if (!get_restore_client_name(ua, rx)) { goto bail_out; } escaped_bsr_name = escape_filename(jcr->RestoreBootstrap); Mmsg(ua->cmd, "run job=\"%s\" client=\"%s\" restoreclient=\"%s\" storage=\"%s\"" " bootstrap=\"%s\" files=%u catalog=\"%s\"", job->name(), rx.ClientName, rx.RestoreClientName, rx.store?rx.store->name():"", escaped_bsr_name ? escaped_bsr_name : jcr->RestoreBootstrap, rx.selected_files, ua->catalog->name()); /* * Build run command */ if (rx.backup_format) { Mmsg(buf, " backupformat=%s", rx.backup_format); pm_strcat(ua->cmd, buf); } pm_strcpy(buf, ""); if (rx.RegexWhere) { escaped_where_name = escape_filename(rx.RegexWhere); Mmsg(buf, " regexwhere=\"%s\"", escaped_where_name ? escaped_where_name : rx.RegexWhere); } else if (rx.where) { escaped_where_name = escape_filename(rx.where); Mmsg(buf," where=\"%s\"", escaped_where_name ?
escaped_where_name : rx.where); } pm_strcat(ua->cmd, buf); if (rx.replace) { Mmsg(buf, " replace=%s", rx.replace); pm_strcat(ua->cmd, buf); } if (rx.plugin_options) { Mmsg(buf, " pluginoptions=%s", rx.plugin_options); pm_strcat(ua->cmd, buf); } if (rx.comment) { Mmsg(buf, " comment=\"%s\"", rx.comment); pm_strcat(ua->cmd, buf); } if (escaped_bsr_name != NULL) { bfree(escaped_bsr_name); } if (escaped_where_name != NULL) { bfree(escaped_where_name); } if (regexp) { bfree(regexp); } if (find_arg(ua, NT_("yes")) > 0) { pm_strcat(ua->cmd, " yes"); /* pass it on to the run command */ } Dmsg1(200, "Submitting: %s\n", ua->cmd); /* * Transfer jobids to jcr to for picking up restore objects */ jcr->JobIds = rx.JobIds; rx.JobIds = NULL; parse_ua_args(ua); run_cmd(ua, ua->cmd); free_rx(&rx); garbage_collect_memory(); /* release unused memory */ return true; bail_out: if (escaped_bsr_name != NULL) { bfree(escaped_bsr_name); } if (escaped_where_name != NULL) { bfree(escaped_where_name); } if (regexp) { bfree(regexp); } free_rx(&rx); garbage_collect_memory(); /* release unused memory */ return false; }
/*
 * Commit the spooled attributes (SD-side variant using the raw fd):
 * measure the spool file with lseek(SEEK_END), truncate it back to the
 * last valid data_end for Incomplete jobs, account the size in the
 * global spool statistics, then despool to the Director — directly via
 * blast_attr_spool_file(), or over the network as a fallback.
 *
 * Returns: true  on success (or when nothing was spooled)
 *          false on fatal error (spool file closed; JS_FatalError forced)
 *
 * NOTE(review): the "if (size < 0)" branch below is effectively dead —
 * lseek() failure (== -1) is already handled above, and ftruncate can
 * only shrink size to data_end (>= 0). Its "Fseek on attributes file
 * failed" message is also a stale copy-paste from the lseek branch.
 */
bool commit_attribute_spool(JCR *jcr) { boffset_t size, data_end; char ec1[30]; char tbuf[MAX_TIME_LENGTH]; BSOCK *dir; Dmsg1(100, "Commit attributes at %s\n", bstrftimes(tbuf, sizeof(tbuf), (utime_t)time(NULL))); if (are_attributes_spooled(jcr)) { dir = jcr->dir_bsock; if ((size = lseek(dir->m_spool_fd, 0, SEEK_END)) == -1) { berrno be; Jmsg(jcr, M_FATAL, 0, _("lseek on attributes file failed: ERR=%s\n"), be.bstrerror()); jcr->forceJobStatus(JS_FatalError); /* override any Incomplete */ goto bail_out; } if (jcr->is_JobStatus(JS_Incomplete)) { data_end = dir->get_data_end(); /* * Check and truncate to last valid data_end if necssary */ if (size > data_end) { if (ftruncate(dir->m_spool_fd, data_end) != 0) { berrno be; Jmsg(jcr, M_FATAL, 0, _("Truncate on attributes file failed: ERR=%s\n"), be.bstrerror()); jcr->forceJobStatus(JS_FatalError); /* override any Incomplete */ goto bail_out; } Dmsg2(100, "=== Attrib spool truncated from %lld to %lld\n", size, data_end); size = data_end; } } if (size < 0) { berrno be; Jmsg(jcr, M_FATAL, 0, _("Fseek on attributes file failed: ERR=%s\n"), be.bstrerror()); jcr->forceJobStatus(JS_FatalError); /* override any Incomplete */ goto bail_out; } P(mutex); if (spool_stats.attr_size + size > spool_stats.max_attr_size) { spool_stats.max_attr_size = spool_stats.attr_size + size; } spool_stats.attr_size += size; V(mutex); jcr->sendJobStatus(JS_AttrDespooling); Jmsg(jcr, M_INFO, 0, _("Sending spooled attrs to the Director. Despooling %s bytes ...\n"), edit_uint64_with_commas(size, ec1)); if (!blast_attr_spool_file(jcr, size)) { /* Can't read spool file from director side, * send content over network. */ dir->despool(update_attr_spool_size, size); } return close_attr_spool_file(jcr, dir); } return true; bail_out: close_attr_spool_file(jcr, dir); return false; }
/*
 * Write a block to the spool file
 *
 *  Returns: true on success or EOT
 *           false on hard error
 *
 * Accounting happens *before* the write: job/device spool sizes (under
 * dev->spool_mutex) and the global spool_stats (under the pool mutex)
 * are bumped by header+payload length first. If either the per-job or
 * per-device spool limit is reached, the spool is despooled to the
 * volume and — since despool_data() zeroes the counters — the sizes are
 * re-added before spooling resumes. Only then are the header and data
 * actually written.
 *
 * NOTE(review): the two mutexes are never held simultaneously, so no
 * lock-ordering issue arises here; keep it that way when editing.
 */
/* * Write a block to the spool file * * Returns: true on success or EOT * false on hard error */ bool write_block_to_spool_file(DCR *dcr) { uint32_t wlen, hlen; /* length to write */ bool despool = false; DEV_BLOCK *block = dcr->block; if (job_canceled(dcr->jcr)) { return false; } ASSERT(block->binbuf == ((uint32_t) (block->bufp - block->buf))); if (block->binbuf <= WRITE_BLKHDR_LENGTH) { /* Does block have data in it? */ return true; } hlen = sizeof(spool_hdr); wlen = block->binbuf; P(dcr->dev->spool_mutex); dcr->job_spool_size += hlen + wlen; dcr->dev->spool_size += hlen + wlen; if ((dcr->max_job_spool_size > 0 && dcr->job_spool_size >= dcr->max_job_spool_size) || (dcr->dev->max_spool_size > 0 && dcr->dev->spool_size >= dcr->dev->max_spool_size)) { despool = true; } V(dcr->dev->spool_mutex); P(mutex); spool_stats.data_size += hlen + wlen; if (spool_stats.data_size > spool_stats.max_data_size) { spool_stats.max_data_size = spool_stats.data_size; } V(mutex); if (despool) { char ec1[30], ec2[30]; if (dcr->max_job_spool_size > 0) { Jmsg(dcr->jcr, M_INFO, 0, _("User specified Job spool size reached: " "JobSpoolSize=%s MaxJobSpoolSize=%s\n"), edit_uint64_with_commas(dcr->job_spool_size, ec1), edit_uint64_with_commas(dcr->max_job_spool_size, ec2)); } else { Jmsg(dcr->jcr, M_INFO, 0, _("User specified Device spool size reached: " "DevSpoolSize=%s MaxDevSpoolSize=%s\n"), edit_uint64_with_commas(dcr->dev->spool_size, ec1), edit_uint64_with_commas(dcr->dev->max_spool_size, ec2)); } if (!despool_data(dcr, false)) { Pmsg0(000, _("Bad return from despool in write_block.\n")); return false; } /* Despooling cleared these variables so reset them */ P(dcr->dev->spool_mutex); dcr->job_spool_size += hlen + wlen; dcr->dev->spool_size += hlen + wlen; V(dcr->dev->spool_mutex); Jmsg(dcr->jcr, M_INFO, 0, _("Spooling data again ...\n")); } if (!write_spool_header(dcr)) { return false; } if (!write_spool_data(dcr)) { return false; } Dmsg2(800, "Wrote block FI=%d LI=%d\n",
block->FirstIndex, block->LastIndex); empty_block(block); return true; }
/*
 * Despool spooled data to the real volume.
 *
 * NB! This routine locks (blocks) the device; if 'commit' is true it
 * deliberately leaves it blocked for release_device() to unblock,
 * otherwise it unblocks before returning.
 *
 * Mechanism: a temporary read-side DEVICE/DCR pair (rdev/rdcr) is built
 * around the existing spool fd (self-described in the code as a kludge),
 * the write DCR's block is shared with the reader, and blocks are read
 * from the spool file and written to the device until EOT/error/cancel.
 * Afterwards the JobMedia record is written, elapsed time and transfer
 * rate are reported, the spool file is rewound and truncated for reuse,
 * global and per-device spool accounting is decremented, and the
 * temporary rdev/rdcr are torn down (jcr/dev pointers NULLed first so
 * free_dcr() does not touch them).
 *
 * Returns: true on success, false on write error or job cancellation.
 *
 * NOTE(review): despool_start/despool_elapsed use int32_t deliberately —
 * time_t width varies by platform and doesn't fit the %d format used.
 */
/* * NB! This routine locks the device, but if committing will * not unlock it. If not committing, it will be unlocked. */ static bool despool_data(DCR *dcr, bool commit) { DEVICE *rdev; DCR *rdcr; bool ok = true; DEV_BLOCK *block; JCR *jcr = dcr->jcr; int status; char ec1[50]; BSOCK *dir = jcr->dir_bsock; Dmsg0(100, "Despooling data\n"); if (jcr->dcr->job_spool_size == 0) { Jmsg(jcr, M_WARNING, 0, _("Despooling zero bytes. Your disk is probably FULL!\n")); } /* * Commit means that the job is done, so we commit, otherwise, we * are despooling because of user spool size max or some error * (e.g. filesystem full). */ if (commit) { Jmsg(jcr, M_INFO, 0, _("Committing spooled data to Volume \"%s\". Despooling %s bytes ...\n"), jcr->dcr->VolumeName, edit_uint64_with_commas(jcr->dcr->job_spool_size, ec1)); jcr->setJobStatus(JS_DataCommitting); } else { Jmsg(jcr, M_INFO, 0, _("Writing spooled data to Volume. Despooling %s bytes ...\n"), edit_uint64_with_commas(jcr->dcr->job_spool_size, ec1)); jcr->setJobStatus(JS_DataDespooling); } jcr->sendJobStatus(JS_DataDespooling); dcr->despool_wait = true; dcr->spooling = false; /* * We work with device blocked, but not locked so that other threads * e.g. reservations can lock the device structure. */ dcr->dblock(BST_DESPOOLING); dcr->despool_wait = false; dcr->despooling = true; /* * This is really quite kludgy and should be fixed some time. * We create a dev structure to read from the spool file * in rdev and rdcr.
*/ rdev = (DEVICE *)malloc(sizeof(DEVICE)); memset(rdev, 0, sizeof(DEVICE)); rdev->dev_name = get_memory(strlen(spool_name)+1); bstrncpy(rdev->dev_name, spool_name, sizeof_pool_memory(rdev->dev_name)); rdev->errmsg = get_pool_memory(PM_EMSG); *rdev->errmsg = 0; rdev->max_block_size = dcr->dev->max_block_size; rdev->min_block_size = dcr->dev->min_block_size; rdev->device = dcr->dev->device; rdcr = dcr->get_new_spooling_dcr(); setup_new_dcr_device(jcr, rdcr, rdev, NULL); rdcr->spool_fd = dcr->spool_fd; block = dcr->block; /* save block */ dcr->block = rdcr->block; /* make read and write block the same */ Dmsg1(800, "read/write block size = %d\n", block->buf_len); lseek(rdcr->spool_fd, 0, SEEK_SET); /* rewind */ #if defined(HAVE_POSIX_FADVISE) && defined(POSIX_FADV_WILLNEED)
posix_fadvise(rdcr->spool_fd, 0, 0, POSIX_FADV_WILLNEED);
#endif
/* Add run time, to get current wait time */ int32_t despool_start = time(NULL) - jcr->run_time; set_new_file_parameters(dcr); while (ok) { if (job_canceled(jcr)) { ok = false; break; } status = read_block_from_spool_file(rdcr); if (status == RB_EOT) { break; } else if (status == RB_ERROR) { ok = false; break; } ok = dcr->write_block_to_device(); if (!ok) { Jmsg2(jcr, M_FATAL, 0, _("Fatal append error on device %s: ERR=%s\n"), dcr->dev->print_name(), dcr->dev->bstrerror()); Dmsg2(000, "Fatal append error on device %s: ERR=%s\n", dcr->dev->print_name(), dcr->dev->bstrerror()); /* Force in case Incomplete set */ jcr->forceJobStatus(JS_FatalError); } Dmsg3(800, "Write block ok=%d FI=%d LI=%d\n", ok, block->FirstIndex, block->LastIndex); } /* * If this Job is incomplete, we need to backup the FileIndex * to the last correctly saved file so that the JobMedia * LastIndex is correct.
*/ if (jcr->is_JobStatus(JS_Incomplete)) { dcr->VolLastIndex = dir->get_FileIndex(); Dmsg1(100, "======= Set FI=%ld\n", dir->get_FileIndex()); } if (!dcr->dir_create_jobmedia_record(false)) { Jmsg2(jcr, M_FATAL, 0, _("Could not create JobMedia record for Volume=\"%s\" Job=%s\n"), dcr->getVolCatName(), jcr->Job); jcr->forceJobStatus(JS_FatalError); /* override any Incomplete */ } /* Set new file/block parameters for current dcr */ set_new_file_parameters(dcr); /* * Subtracting run_time give us elapsed time - wait_time since * we started despooling. Note, don't use time_t as it is 32 or 64 * bits depending on the OS and doesn't edit with %d */ int32_t despool_elapsed = time(NULL) - despool_start - jcr->run_time; if (despool_elapsed <= 0) { despool_elapsed = 1; } Jmsg(jcr, M_INFO, 0, _("Despooling elapsed time = %02d:%02d:%02d, Transfer rate = %s Bytes/second\n"), despool_elapsed / 3600, despool_elapsed % 3600 / 60, despool_elapsed % 60, edit_uint64_with_suffix(jcr->dcr->job_spool_size / despool_elapsed, ec1)); dcr->block = block; /* reset block */ lseek(rdcr->spool_fd, 0, SEEK_SET); /* rewind */ if (ftruncate(rdcr->spool_fd, 0) != 0) { berrno be; Jmsg(jcr, M_ERROR, 0, _("Ftruncate spool file failed: ERR=%s\n"), be.bstrerror()); /* Note, try continuing despite ftruncate problem */ } P(mutex); if (spool_stats.data_size < dcr->job_spool_size) { spool_stats.data_size = 0; } else { spool_stats.data_size -= dcr->job_spool_size; } V(mutex); P(dcr->dev->spool_mutex); dcr->dev->spool_size -= dcr->job_spool_size; dcr->job_spool_size = 0; /* zap size in input dcr */ V(dcr->dev->spool_mutex); free_memory(rdev->dev_name); free_pool_memory(rdev->errmsg); /* Be careful to NULL the jcr and free rdev after free_dcr() */ rdcr->jcr = NULL; rdcr->set_dev(NULL); free_dcr(rdcr); free(rdev); dcr->spooling = true; /* turn on spooling again */ dcr->despooling = false; /* * Note, if committing we leave the device blocked.
It will be removed in * release_device(); */ if (!commit) { dcr->dev->dunblock(); } jcr->sendJobStatus(JS_Running); return ok; }
/*
 * Write one DVD part from the spool to the medium.
 *
 * Skips (and deletes the spool file for) empty parts unless the device
 * is being truncated/recycled. Otherwise it builds the external
 * write-part command via edit_mount_codes(), runs it with a generous
 * timeout (fixed 16000 s for the first part — see the in-code rationale
 * — otherwise proportional to part_size), and on success bumps the part
 * counters, removes the spool file, and remounts the device so free
 * space is refreshed. On command failure the volume is marked in error
 * (unless truncating) and false is returned.
 *
 * Returns: true on success or skipped-empty-part, false on write failure.
 */
/* * Note!!!! Part numbers now begin at 1. The part number is * suppressed from the first part, which is just the Volume * name. Each subsequent part is the Volumename.partnumber. * * Write a part (Vol, Vol.2, ...) from the spool to the DVD * This routine does not update the part number, so normally, you * should call open_next_part() * * It is also called from truncate_dvd to "blank" the medium, as * well as from block.c when the DVD is full to write the last part. */ bool dvd_write_part(DCR *dcr) { DEVICE *dev = dcr->dev; POOL_MEM archive_name(PM_FNAME); /* * Don't write empty part files. * This is only useful when growisofs does not support write beyond * the 4GB boundary. * Example : * - 3.9 GB on the volume, dvd-freespace reports 0.4 GB free * - Write 0.2 GB on the volume, Bacula thinks it could still * append data, it creates a new empty part. * - dvd-freespace reports 0 GB free, as the 4GB boundary has * been crossed * - Bacula thinks he must finish to write to the device, so it * tries to write the last part (0-byte), but dvd-writepart fails... * * There is one exception: when recycling a volume, we write a blank part * file, so, then, we need to accept to write it.
*/ if (dev->part_size == 0 && !dev->truncating) { Dmsg2(29, "dvd_write_part: device is %s, won't write blank part %d\n", dev->print_name(), dev->part); /* Delete spool file */ make_spooled_dvd_filename(dev, archive_name); unlink(archive_name.c_str()); dev->set_part_spooled(false); Dmsg1(29, "========= unlink(%s)\n", archive_name.c_str()); Dsm_check(400); return true; } POOL_MEM ocmd(PM_FNAME); POOL_MEM results(PM_MESSAGE); char* icmd; int status; int timeout; char ed1[50]; dev->clear_freespace_ok(); /* need to update freespace */ Dsm_check(400); Dmsg3(29, "dvd_write_part: device is %s, part is %d, is_mounted=%d\n", dev->print_name(), dev->part, dev->is_mounted()); icmd = dev->device->write_part_command; dev->edit_mount_codes(ocmd, icmd); /* * original line follows * timeout = dev->max_open_wait + (dev->max_part_size/(1350*1024/2)); * I modified this for a longer timeout; pre-formatting, blanking and * writing can take quite a while */ /* Explanation of the timeout value, when writing the first part, * by Arno Lehmann : * 9 GB, write speed 1x: 6990 seconds (almost 2 hours...) * Overhead: 900 seconds (starting, initializing, finalizing,probably * reloading 15 minutes) * Sum: 15780. * A reasonable last-exit timeout would be 16000 seconds. Quite long - * almost 4.5 hours, but hopefully, that timeout will only ever be needed * in case of a serious emergency.
*/ if (dev->part == 1) { timeout = 16000; } else { timeout = dev->max_open_wait + (dev->part_size/(1350*1024/4)); } Dmsg2(20, "Write part: cmd=%s timeout=%d\n", ocmd.c_str(), timeout); status = run_program_full_output(ocmd.c_str(), timeout, results.addr()); Dmsg2(20, "Write part status=%d result=%s\n", status, results.c_str()); dev->blank_dvd = false; if (status != 0) { Jmsg2(dcr->jcr, M_FATAL, 0, _("Error writing part %d to the DVD: ERR=%s\n"), dev->part, results.c_str()); Mmsg1(dev->errmsg, _("Error while writing current part to the DVD: %s"), results.c_str()); Dmsg1(100, "%s\n", dev->errmsg); dev->dev_errno = EIO; if (!dev->truncating) { dcr->mark_volume_in_error(); } Dsm_check(400); return false; } Jmsg(dcr->jcr, M_INFO, 0, _("Part %d (%lld bytes) written to DVD.\n"), dev->part, dev->part_size); Dmsg3(400, "dvd_write_part: Part %d (%lld bytes) written to DVD\nResults: %s\n", dev->part, dev->part_size, results.c_str()); dev->num_dvd_parts++; /* there is now one more part on DVD */ dev->VolCatInfo.VolCatParts = dev->num_dvd_parts; dcr->VolCatInfo.VolCatParts = dev->num_dvd_parts; Dmsg1(100, "Update num_parts=%d\n", dev->num_dvd_parts); /* Delete spool file */ make_spooled_dvd_filename(dev, archive_name); unlink(archive_name.c_str()); dev->set_part_spooled(false); Dmsg1(29, "========= unlink(%s)\n", archive_name.c_str()); Dsm_check(400); /* growisofs umounted the device, so remount it (it will update the free space) */ dev->clear_mounted(); dev->mount(1); Jmsg(dcr->jcr, M_INFO, 0, _("Remaining free space %s on %s\n"), edit_uint64_with_commas(dev->free_space, ed1), dev->print_name()); Dsm_check(400); return true; }
/*
 * Release resources allocated during a verify job and emit the
 * end-of-job summary report.
 *
 *  jcr      - job control record of the verify job being cleaned up
 *  TermCode - job status code (JS_*) the job ended with; may be
 *             overridden below if a file-count mismatch is detected
 */
void verify_cleanup(JCR *jcr, int TermCode)
{
   char sdt[50], edt[50];
   char ec1[30], ec2[30];
   char term_code[100], fd_term_msg[100], sd_term_msg[100];
   const char *term_msg;
   int msg_type;
   const char *Name;

// Dmsg1(100, "Enter verify_cleanup() TermCod=%d\n", TermCode);

   Dmsg3(900, "JobLevel=%c Expected=%u JobFiles=%u\n", jcr->getJobLevel(),
         jcr->ExpectedFiles, jcr->JobFiles);
   /*
    * For catalog/data verify levels, examining fewer (or more) files than
    * expected means the verify did not complete correctly.
    */
   if ((jcr->getJobLevel() == L_VERIFY_VOLUME_TO_CATALOG ||
        jcr->getJobLevel() == L_VERIFY_DATA) &&
       jcr->ExpectedFiles != jcr->JobFiles) {
      TermCode = JS_ErrorTerminated;
   }

   update_job_end(jcr, TermCode);

   if (job_canceled(jcr)) {
      cancel_storage_daemon_job(jcr);
   }

   /* Remove the temporary bootstrap file if one was created for this job. */
   if (jcr->unlink_bsr && jcr->RestoreBootstrap) {
      unlink(jcr->RestoreBootstrap);
      jcr->unlink_bsr = false;
   }

   msg_type = M_INFO;                 /* by default INFO message */
   /* Map the termination code to a human-readable message and severity. */
   switch (TermCode) {
   case JS_Terminated:
      if (jcr->JobErrors || jcr->SDErrors) {
         term_msg = _("Verify OK -- with warnings");
      } else {
         term_msg = _("Verify OK");
      }
      break;
   case JS_FatalError:
   case JS_ErrorTerminated:
      term_msg = _("*** Verify Error ***");
      msg_type = M_ERROR;          /* Generate error message */
      break;
   case JS_Error:
      term_msg = _("Verify warnings");
      break;
   case JS_Canceled:
      term_msg = _("Verify Canceled");
      break;
   case JS_Differences:
      term_msg = _("Verify Differences");
      break;
   default:
      /* term_msg points at term_code, which is filled in right below. */
      term_msg = term_code;
      bsnprintf(term_code, sizeof(term_code),
                _("Inappropriate term code: %d %c\n"), TermCode, TermCode);
      break;
   }
   bstrftimes(sdt, sizeof(sdt), jcr->jr.StartTime);
   bstrftimes(edt, sizeof(edt), jcr->jr.EndTime);
   if (jcr->verify_job) {
      Name = jcr->verify_job->hdr.name;
   } else {
      Name = "";
   }

   jobstatus_to_ascii(jcr->FDJobStatus, fd_term_msg, sizeof(fd_term_msg));
   if (jcr->getJobLevel() == L_VERIFY_VOLUME_TO_CATALOG ||
       jcr->getJobLevel() == L_VERIFY_DATA) {
      /*
       * These levels also involve the storage daemon, so its termination
       * status and error count are included in the report.
       */
      jobstatus_to_ascii(jcr->SDJobStatus, sd_term_msg, sizeof(sd_term_msg));
      /* NOTE(review): this branch uses jcr->client->hdr.name while the else
       * branch uses jcr->client->name() — presumably equivalent; confirm. */
      Jmsg(jcr, msg_type, 0, _("%s %s %s (%s):\n"
           " Build OS: %s %s %s\n"
           " JobId: %d\n"
           " Job: %s\n"
           " FileSet: %s\n"
           " Verify Level: %s\n"
           " Client: %s\n"
           " Verify JobId: %d\n"
           " Verify Job: %s\n"
           " Start time: %s\n"
           " End time: %s\n"
           " Files Expected: %s\n"
           " Files Examined: %s\n"
           " Non-fatal FD errors: %d\n"
           " SD Errors: %d\n"
           " FD termination status: %s\n"
           " SD termination status: %s\n"
           " Termination: %s\n\n"),
           BACULA, my_name, VERSION, LSMDATE, HOST_OS, DISTNAME, DISTVER,
           jcr->jr.JobId,
           jcr->jr.Job,
           jcr->fileset->hdr.name,
           level_to_str(jcr->getJobLevel()),
           jcr->client->hdr.name,
           jcr->previous_jr.JobId,
           Name,
           sdt,
           edt,
           edit_uint64_with_commas(jcr->ExpectedFiles, ec1),
           edit_uint64_with_commas(jcr->JobFiles, ec2),
           jcr->JobErrors,
           jcr->SDErrors,
           fd_term_msg,
           sd_term_msg,
           term_msg);
   } else {
      /* FD-only verify levels: no SD status, no expected-file count. */
      Jmsg(jcr, msg_type, 0, _("%s %s %s (%s):\n"
           " Build: %s %s %s\n"
           " JobId: %d\n"
           " Job: %s\n"
           " FileSet: %s\n"
           " Verify Level: %s\n"
           " Client: %s\n"
           " Verify JobId: %d\n"
           " Verify Job: %s\n"
           " Start time: %s\n"
           " End time: %s\n"
           " Files Examined: %s\n"
           " Non-fatal FD errors: %d\n"
           " FD termination status: %s\n"
           " Termination: %s\n\n"),
           BACULA, my_name, VERSION, LSMDATE, HOST_OS, DISTNAME, DISTVER,
           jcr->jr.JobId,
           jcr->jr.Job,
           jcr->fileset->hdr.name,
           level_to_str(jcr->getJobLevel()),
           jcr->client->name(),
           jcr->previous_jr.JobId,
           Name,
           sdt,
           edt,
           edit_uint64_with_commas(jcr->JobFiles, ec1),
           jcr->JobErrors,
           fd_term_msg,
           term_msg);
   }
   Dmsg0(100, "Leave verify_cleanup()\n");
}
/*
 * Prune File records from the database. For any Job which
 * is older than the retention period, we unconditionally delete
 * all File records for that Job. This is simple enough that no
 * temporary tables are needed. We simply make an in memory list of
 * the JobIds meeting the prune conditions, then delete all File records
 * pointing to each of those JobIds.
 *
 * This routine assumes you want the pruning to be done. All checking
 * must be done before calling this routine.
 *
 *  ua     - user agent context (supplies jcr, db handle and message sinks)
 *  client - client whose FileRetention period drives the cutoff time
 *
 * Returns: 0 if the client record could not be created/located,
 *          1 otherwise (including when nothing was pruned or the
 *          count query failed — failures are reported via ua messages).
 */
int prune_files(UAContext *ua, CLIENT *client)
{
   struct del_ctx del;
   struct s_count_ctx cnt;
   POOL_MEM query(PM_MESSAGE);
   utime_t now, period;
   CLIENT_DBR cr;
   char ed1[50], ed2[50];

   /* The database stays locked for the whole prune; every exit path
    * below must go through bail_out (or unlock explicitly). */
   db_lock(ua->db);
   memset(&cr, 0, sizeof(cr));
   /* del is zeroed so del.JobId is NULL if we bail out before malloc. */
   memset(&del, 0, sizeof(del));
   bstrncpy(cr.Name, client->hdr.name, sizeof(cr.Name));
   if (!db_create_client_record(ua->jcr, ua->db, &cr)) {
      db_unlock(ua->db);
      return 0;
   }

   period = client->FileRetention;
   now = (utime_t)time(NULL);

   /* Select Jobs -- for counting */
   Mmsg(query, count_select_job, edit_int64(now - period, ed1),
        edit_int64(cr.ClientId, ed2));
   Dmsg3(050, "select now=%u period=%u sql=%s\n", (uint32_t)now,
         (uint32_t)period, query.c_str());
   cnt.count = 0;
   if (!db_sql_query(ua->db, query.c_str(), del_count_handler, (void *)&cnt)) {
      ua->error_msg("%s", db_strerror(ua->db));
      Dmsg0(050, "Count failed\n");
      goto bail_out;
   }

   if (cnt.count == 0) {
      if (ua->verbose) {
         ua->warning_msg(_("No Files found to prune.\n"));
      }
      goto bail_out;
   }

   /* Size the in-memory JobId list, capped at MAX_DEL_LIST_LEN. */
   if (cnt.count < MAX_DEL_LIST_LEN) {
      del.max_ids = cnt.count + 1;
   } else {
      del.max_ids = MAX_DEL_LIST_LEN;
   }
   del.tot_ids = 0;

   del.JobId = (JobId_t *)malloc(sizeof(JobId_t) * del.max_ids);

   /* Now process same set but making a delete list */
   Mmsg(query, select_job, edit_int64(now - period, ed1),
        edit_int64(cr.ClientId, ed2));
   db_sql_query(ua->db, query.c_str(), file_delete_handler, (void *)&del);

   /* Delete the File records belonging to every collected JobId. */
   purge_files_from_job_list(ua, del);

   edit_uint64_with_commas(del.num_del, ed1);
   ua->info_msg(_("Pruned Files from %s Jobs for client %s from catalog.\n"),
      ed1, client->name());

bail_out:
   db_unlock(ua->db);
   if (del.JobId) {
      free(del.JobId);
   }
   return 1;
}
static void list_status_header(STATUS_PKT *sp) { int len; char dt[MAX_TIME_LENGTH]; POOL_MEM msg(PM_MESSAGE); char b1[32], b2[32], b3[32], b4[32], b5[35]; #if defined(HAVE_WIN32) char buf[300]; #endif len = Mmsg(msg, _("%s Version: %s (%s) %s %s %s %s\n"), my_name, VERSION, BDATE, VSS, HOST_OS, DISTNAME, DISTVER); sendit(msg, len, sp); bstrftime_nc(dt, sizeof(dt), daemon_start_time); len = Mmsg(msg, _("Daemon started %s. Jobs: run=%d running=%d.\n"), dt, num_jobs_run, job_count()); sendit(msg, len, sp); #if defined(HAVE_WIN32) if (GetWindowsVersionString(buf, sizeof(buf))) { len = Mmsg(msg, "%s\n", buf); sendit(msg, len, sp); } if (debug_level > 0) { if (!privs) { privs = enable_backup_privileges(NULL, 1); } len = Mmsg(msg, "Priv 0x%x\n", privs); sendit(msg, len, sp); len = Mmsg(msg, "APIs=%sOPT,%sATP,%sLPV,%sCFA,%sCFW,\n", p_OpenProcessToken ? "" : "!", p_AdjustTokenPrivileges ? "" : "!", p_LookupPrivilegeValue ? "" : "!", p_CreateFileA ? "" : "!", p_CreateFileW ? "" : "!"); sendit(msg, len, sp); len = Mmsg(msg, " %sWUL,%sWMKD,%sGFAA,%sGFAW,%sGFAEA,%sGFAEW,%sSFAA,%sSFAW,%sBR,%sBW,%sSPSP,\n", p_wunlink ? "" : "!", p_wmkdir ? "" : "!", p_GetFileAttributesA ? "" : "!", p_GetFileAttributesW ? "" : "!", p_GetFileAttributesExA ? "" : "!", p_GetFileAttributesExW ? "" : "!", p_SetFileAttributesA ? "" : "!", p_SetFileAttributesW ? "" : "!", p_BackupRead ? "" : "!", p_BackupWrite ? "" : "!", p_SetProcessShutdownParameters ? "" : "!"); sendit(msg, len, sp); len = Mmsg(msg, " %sWC2MB,%sMB2WC,%sFFFA,%sFFFW,%sFNFA,%sFNFW,%sSCDA,%sSCDW,\n", p_WideCharToMultiByte ? "" : "!", p_MultiByteToWideChar ? "" : "!", p_FindFirstFileA ? "" : "!", p_FindFirstFileW ? "" : "!", p_FindNextFileA ? "" : "!", p_FindNextFileW ? "" : "!", p_SetCurrentDirectoryA ? "" : "!", p_SetCurrentDirectoryW ? "" : "!"); sendit(msg, len, sp); len = Mmsg(msg, " %sGCDA,%sGCDW,%sGVPNW,%sGVNFVMPW\n", p_GetCurrentDirectoryA ? "" : "!", p_GetCurrentDirectoryW ? "" : "!", p_GetVolumePathNameW ? 
"" : "!", p_GetVolumeNameForVolumeMountPointW ? "" : "!"); sendit(msg, len, sp); } #endif len = Mmsg(msg, _(" Heap: heap=%s smbytes=%s max_bytes=%s bufs=%s max_bufs=%s\n"), edit_uint64_with_commas((char *)sbrk(0)-(char *)start_heap, b1), edit_uint64_with_commas(sm_bytes, b2), edit_uint64_with_commas(sm_max_bytes, b3), edit_uint64_with_commas(sm_buffers, b4), edit_uint64_with_commas(sm_max_buffers, b5)); sendit(msg, len, sp); len = Mmsg(msg, _(" Sizeof: boffset_t=%d size_t=%d debug=%d trace=%d " "bwlimit=%skB/s\n"), sizeof(boffset_t), sizeof(size_t), debug_level, get_trace(), edit_uint64_with_commas(me->max_bandwidth_per_job / 1024, b1)); sendit(msg, len, sp); if (me->secure_erase_cmdline) { len = Mmsg(msg, _(" secure erase command='%s'\n"), me->secure_erase_cmdline); sendit(msg, len, sp); } len = list_fd_plugins(msg); if (len > 0) { sendit(msg, len, sp); } }
static void list_terminated_jobs(STATUS_PKT *sp) { int len; struct s_last_job *je; POOL_MEM msg(PM_MESSAGE); char level[10], dt[MAX_TIME_LENGTH], b1[30], b2[30]; if (!sp->api) { len = pm_strcpy(msg, _("\nTerminated Jobs:\n")); sendit(msg, len, sp); } if (last_jobs->size() == 0) { if (!sp->api) { len = pm_strcpy(msg, _("====\n")); sendit(msg, len, sp); } return; } lock_last_jobs_list(); if (!sp->api) { len = pm_strcpy(msg, _(" JobId Level Files Bytes Status Finished Name \n")); sendit(msg, len, sp); len = pm_strcpy(msg, _("======================================================================\n")); sendit(msg, len, sp); } foreach_dlist(je, last_jobs) { char *p; char JobName[MAX_NAME_LENGTH]; const char *termstat; bstrftime_nc(dt, sizeof(dt), je->end_time); switch (je->JobType) { case JT_ADMIN: case JT_RESTORE: bstrncpy(level, " ", sizeof(level)); break; default: bstrncpy(level, level_to_str(je->JobLevel), sizeof(level)); level[4] = 0; break; } switch (je->JobStatus) { case JS_Created: termstat = _("Created"); break; case JS_FatalError: case JS_ErrorTerminated: termstat = _("Error"); break; case JS_Differences: termstat = _("Diffs"); break; case JS_Canceled: termstat = _("Cancel"); break; case JS_Terminated: termstat = _("OK"); break; default: termstat = _("Other"); break; } bstrncpy(JobName, je->Job, sizeof(JobName)); /* * There are three periods after the Job name */ for (int i=0; i<3; i++) { if ((p=strrchr(JobName, '.')) != NULL) { *p = 0; } } if (sp->api) { len = Mmsg(msg, _("%6d\t%-6s\t%8s\t%10s\t%-7s\t%-8s\t%s\n"), je->JobId, level, edit_uint64_with_commas(je->JobFiles, b1), edit_uint64_with_suffix(je->JobBytes, b2), termstat, dt, JobName); } else { len = Mmsg(msg, _("%6d %-6s %8s %10s %-7s %-8s %s\n"), je->JobId, level, edit_uint64_with_commas(je->JobFiles, b1), edit_uint64_with_suffix(je->JobBytes, b2), termstat, dt, JobName); } sendit(msg, len, sp); }
/*
 * List all currently running jobs in plain (human-readable) form:
 * for each JCR, the job header, byte/file counters, transfer rate,
 * the file being processed and the SD socket state. Ends with a
 * "====" separator line.
 *
 *  sp - status packet describing where the output goes (via sendit())
 */
static void list_running_jobs_plain(STATUS_PKT *sp)
{
   JCR *njcr;
   int len, sec, bps;
   bool found = false;
   POOL_MEM msg(PM_MESSAGE);
   char dt[MAX_TIME_LENGTH], b1[32], b2[32], b3[32], b4[32];

   /*
    * List running jobs
    */
   Dmsg0(1000, "Begin status jcr loop.\n");
   len = Mmsg(msg, _("\nRunning Jobs:\n"));
   sendit(msg, len, sp);
   foreach_jcr(njcr) {
      bstrftime_nc(dt, sizeof(dt), njcr->start_time);
      /* JobId 0 is the Director's control connection, not a real job. */
      if (njcr->JobId == 0) {
         len = Mmsg(msg, _("Director connected at: %s\n"), dt);
      } else {
         len = Mmsg(msg, _("JobId %d Job %s is running.\n"),
                    njcr->JobId, njcr->Job);
         sendit(msg, len, sp);
#ifdef WIN32_VSS
         /* Prefix the level with "VSS " when a VSS snapshot is active. */
         len = Mmsg(msg, _(" %s%s %s Job started: %s\n"),
                    (njcr->pVSSClient && njcr->pVSSClient->IsInitialized()) ? "VSS " : "",
                    level_to_str(njcr->getJobLevel()),
                    job_type_to_str(njcr->getJobType()), dt);
#else
         len = Mmsg(msg, _(" %s %s Job started: %s\n"),
                    level_to_str(njcr->getJobLevel()),
                    job_type_to_str(njcr->getJobType()), dt);
#endif
      }
      sendit(msg, len, sp);
      if (njcr->JobId == 0) {
         continue;
      }
      /* Elapsed seconds; clamp to 1 to avoid division by zero below. */
      sec = time(NULL) - njcr->start_time;
      if (sec <= 0) {
         sec = 1;
      }
      bps = (int)(njcr->JobBytes / sec);
      len = Mmsg(msg, _(" Files=%s Bytes=%s Bytes/sec=%s Errors=%d\n"
                        " Bwlimit=%s\n"),
                 edit_uint64_with_commas(njcr->JobFiles, b1),
                 edit_uint64_with_commas(njcr->JobBytes, b2),
                 edit_uint64_with_commas(bps, b3),
                 njcr->JobErrors,
                 edit_uint64_with_commas(njcr->max_bandwidth, b4));
      sendit(msg, len, sp);
      len = Mmsg(msg, _(" Files Examined=%s\n"),
                 edit_uint64_with_commas(njcr->num_files_examined, b1));
      sendit(msg, len, sp);
      if (njcr->JobFiles > 0) {
         /* last_fname is shared with the worker thread; lock while reading. */
         njcr->lock();
         len = Mmsg(msg, _(" Processing file: %s\n"), njcr->last_fname);
         njcr->unlock();
         sendit(msg, len, sp);
      }

      found = true;
      if (njcr->store_bsock) {
         len = Mmsg(msg, " SDReadSeqNo=%" lld " fd=%d\n",
                    njcr->store_bsock->read_seqno, njcr->store_bsock->m_fd);
         sendit(msg, len, sp);
      } else {
         len = Mmsg(msg, _(" SDSocket closed.\n"));
         sendit(msg, len, sp);
      }
   }
   endeach_jcr(njcr);

   if (!found) {
      len = Mmsg(msg, _("No Jobs running.\n"));
      sendit(msg, len, sp);
   }
   len = pm_strcpy(msg, _("====\n"));
   sendit(msg, len, sp);
}
static void dump_session_label(DEV_RECORD *rec, const char *type) { int dbl; struct date_time dt; struct tm tm; SESSION_LABEL label; char ec1[30], ec2[30], ec3[30], ec4[30], ec5[30], ec6[30], ec7[30]; unser_session_label(&label, rec); dbl = debug_level; debug_level = 1; Pmsg7(-1, _("\n%s Record:\n" "JobId : %d\n" "VerNum : %d\n" "PoolName : %s\n" "PoolType : %s\n" "JobName : %s\n" "ClientName : %s\n" ""), type, label.JobId, label.VerNum, label.PoolName, label.PoolType, label.JobName, label.ClientName); if (label.VerNum >= 10) { Pmsg4(-1, _( "Job (unique name) : %s\n" "FileSet : %s\n" "JobType : %c\n" "JobLevel : %c\n" ""), label.Job, label.FileSetName, label.JobType, label.JobLevel); } if (rec->FileIndex == EOS_LABEL) { Pmsg8(-1, _( "JobFiles : %s\n" "JobBytes : %s\n" "StartBlock : %s\n" "EndBlock : %s\n" "StartFile : %s\n" "EndFile : %s\n" "JobErrors : %s\n" "JobStatus : %c\n" ""), edit_uint64_with_commas(label.JobFiles, ec1), edit_uint64_with_commas(label.JobBytes, ec2), edit_uint64_with_commas(label.StartBlock, ec3), edit_uint64_with_commas(label.EndBlock, ec4), edit_uint64_with_commas(label.StartFile, ec5), edit_uint64_with_commas(label.EndFile, ec6), edit_uint64_with_commas(label.JobErrors, ec7), label.JobStatus); } if (label.VerNum >= 11) { char dt[50]; bstrftime(dt, sizeof(dt), btime_to_utime(label.write_btime)); Pmsg1(-1, _("Date written : %s\n"), dt); } else { dt.julian_day_number = label.write_date; dt.julian_day_fraction = label.write_time; tm_decode(&dt, &tm); Pmsg5(-1, _("Date written : %04d-%02d-%02d at %02d:%02d\n"), tm.tm_year+1900, tm.tm_mon+1, tm.tm_mday, tm.tm_hour, tm.tm_min); } debug_level = dbl; }
/*
 * Dump a label record (volume, session, EOM, EOT, ...) to the console.
 * In verbose mode the full label contents are printed; otherwise a
 * one/two line summary. debug_level is forced to 1 for the duration so
 * the Pmsg output always appears, then restored.
 *
 *  dev     - device the record was read from (supplies file/block position)
 *  rec     - the record to dump; ignored if it is not a label record
 *  verbose - true for the detailed dump, false for the short summary
 */
void dump_label_record(DEVICE *dev, DEV_RECORD *rec, bool verbose)
{
   const char *type;
   int dbl;

   /* A record with no session identification is not a label; skip it. */
   if (rec->FileIndex == 0 && rec->VolSessionId == 0 && rec->VolSessionTime == 0) {
      return;
   }
   dbl = debug_level;
   debug_level = 1;
   switch (rec->FileIndex) {
   case PRE_LABEL:
      type = _("Fresh Volume");
      break;
   case VOL_LABEL:
      type = _("Volume");
      break;
   case SOS_LABEL:
      type = _("Begin Job Session");
      break;
   case EOS_LABEL:
      type = _("End Job Session");
      break;
   case EOM_LABEL:
      type = _("End of Media");
      break;
   case EOT_LABEL:
      type = _("End of Tape");
      break;
   default:
      type = _("Unknown");
      break;
   }
   if (verbose) {
      switch (rec->FileIndex) {
      case PRE_LABEL:
      case VOL_LABEL:
         unser_volume_label(dev, rec);
         dump_volume_label(dev);
         break;
      case SOS_LABEL:
         dump_session_label(rec, type);
         break;
      case EOS_LABEL:
         dump_session_label(rec, type);
         break;
      case EOM_LABEL:
         /* NOTE(review): rec->Stream is printed in the JobId=%d slot here
          * and in the default case — looks intentional for label records
          * (Stream carries the JobId there), but confirm. */
         Pmsg7(-1, _("%s Record: File:blk=%u:%u SessId=%d SessTime=%d JobId=%d DataLen=%d\n"),
               type, dev->file, dev->block_num,
               rec->VolSessionId, rec->VolSessionTime, rec->Stream, rec->data_len);
         break;
      case EOT_LABEL:
         Pmsg0(-1, _("End of physical tape.\n"));
         break;
      default:
         Pmsg7(-1, _("%s Record: File:blk=%u:%u SessId=%d SessTime=%d JobId=%d DataLen=%d\n"),
               type, dev->file, dev->block_num,
               rec->VolSessionId, rec->VolSessionTime, rec->Stream, rec->data_len);
         break;
      }
   } else {
      SESSION_LABEL label;
      char dt[50];
      switch (rec->FileIndex) {
      case SOS_LABEL:
         unser_session_label(&label, rec);
         bstrftimes(dt, sizeof(dt), btime_to_utime(label.write_btime));
         Pmsg6(-1, _("%s Record: File:blk=%u:%u SessId=%d SessTime=%d JobId=%d\n"),
               type, dev->file, dev->block_num,
               rec->VolSessionId, rec->VolSessionTime, label.JobId);
         Pmsg4(-1, _(" Job=%s Date=%s Level=%c Type=%c\n"),
               label.Job, dt, label.JobLevel, label.JobType);
         break;
      case EOS_LABEL:
         char ed1[30], ed2[30];
         unser_session_label(&label, rec);
         bstrftimes(dt, sizeof(dt), btime_to_utime(label.write_btime));
         Pmsg6(-1, _("%s Record: File:blk=%u:%u SessId=%d SessTime=%d JobId=%d\n"),
               type, dev->file, dev->block_num,
               rec->VolSessionId, rec->VolSessionTime, label.JobId);
         Pmsg7(-1, _(" Date=%s Level=%c Type=%c Files=%s Bytes=%s Errors=%d Status=%c\n"),
               dt, label.JobLevel, label.JobType,
               edit_uint64_with_commas(label.JobFiles, ed1),
               edit_uint64_with_commas(label.JobBytes, ed2),
               label.JobErrors, (char)label.JobStatus);
         break;
      case EOM_LABEL:
      case PRE_LABEL:
      case VOL_LABEL:
      default:
         /* See NOTE(review) above about rec->Stream in the JobId slot. */
         Pmsg7(-1, _("%s Record: File:blk=%u:%u SessId=%d SessTime=%d JobId=%d DataLen=%d\n"),
               type, dev->file, dev->block_num,
               rec->VolSessionId, rec->VolSessionTime, rec->Stream, rec->data_len);
         break;
      case EOT_LABEL:
         /* End-of-tape produces no output in summary mode. */
         break;
      }
   }
   debug_level = dbl;
}