/*
 * Update the Pool record in the catalog from the Pool resource settings.
 *
 * NumVols is first refreshed from the Media table so the stored count
 * reflects the volumes currently assigned to this pool.
 *
 * Returns: false on failure
 *          true on success
 */
bool db_update_pool_record(JCR *jcr, B_DB *mdb, POOL_DBR *pr)
{
   bool retval;
   char ed1[50], ed2[50], ed3[50], ed4[50], ed5[50], ed6[50];
   char esc[MAX_ESCAPE_NAME_LENGTH];

   db_lock(mdb);
   /* LabelFormat is user-supplied text, so escape it before use in SQL. */
   mdb->db_escape_string(jcr, esc, pr->LabelFormat, strlen(pr->LabelFormat));

   /*
    * Refresh the volume count for this pool.
    * NOTE(review): the count(*) result is fetched via get_sql_record_max();
    * presumably that returns the single integer result row -- confirm.
    */
   Mmsg(mdb->cmd, "SELECT count(*) from Media WHERE PoolId=%s",
        edit_int64(pr->PoolId, ed4));
   pr->NumVols = get_sql_record_max(jcr, mdb);
   Dmsg1(400, "NumVols=%d\n", pr->NumVols);

   /* ed4 still holds the edited PoolId; it is reused in the WHERE clause below. */
   Mmsg(mdb->cmd,
        "UPDATE Pool SET NumVols=%u,MaxVols=%u,UseOnce=%d,UseCatalog=%d,"
        "AcceptAnyVolume=%d,VolRetention='%s',VolUseDuration='%s',"
        "MaxVolJobs=%u,MaxVolFiles=%u,MaxVolBytes=%s,Recycle=%d,"
        "AutoPrune=%d,LabelType=%d,LabelFormat='%s',RecyclePoolId=%s,"
        "ScratchPoolId=%s,ActionOnPurge=%d WHERE PoolId=%s",
        pr->NumVols, pr->MaxVols, pr->UseOnce, pr->UseCatalog,
        pr->AcceptAnyVolume,
        edit_uint64(pr->VolRetention, ed1),
        edit_uint64(pr->VolUseDuration, ed2),
        pr->MaxVolJobs, pr->MaxVolFiles,
        edit_uint64(pr->MaxVolBytes, ed3),
        pr->Recycle, pr->AutoPrune, pr->LabelType, esc,
        edit_int64(pr->RecyclePoolId, ed5),
        edit_int64(pr->ScratchPoolId, ed6),
        pr->ActionOnPurge, ed4);
   retval = UPDATE_DB(jcr, mdb, mdb->cmd);
   db_unlock(mdb);
   return retval;
}
/*
 * Update Client record
 *
 * Returns: false on failure
 *          true on success
 */
bool db_update_client_record(JCR *jcr, B_DB *mdb, CLIENT_DBR *cr)
{
   bool retval = false;
   char ed1[50], ed2[50];
   char esc_name[MAX_ESCAPE_NAME_LENGTH];
   char esc_uname[MAX_ESCAPE_NAME_LENGTH];
   CLIENT_DBR tcr;

   db_lock(mdb);
   /*
    * Run the create on a scratch copy so cr itself is not modified.
    * NOTE(review): presumably this guarantees a Client row exists before
    * the UPDATE below -- confirm db_create_client_record() semantics.
    */
   memcpy(&tcr, cr, sizeof(tcr));
   if (!db_create_client_record(jcr, mdb, &tcr)) {
      goto bail_out;
   }

   /* Both Name and Uname are external strings; escape them for SQL. */
   mdb->db_escape_string(jcr, esc_name, cr->Name, strlen(cr->Name));
   mdb->db_escape_string(jcr, esc_uname, cr->Uname, strlen(cr->Uname));
   Mmsg(mdb->cmd,
        "UPDATE Client SET AutoPrune=%d,FileRetention=%s,JobRetention=%s,"
        "Uname='%s' WHERE Name='%s'",
        cr->AutoPrune,
        edit_uint64(cr->FileRetention, ed1),
        edit_uint64(cr->JobRetention, ed2),
        esc_uname, esc_name);
   retval = UPDATE_DB(jcr, mdb, mdb->cmd);

bail_out:
   db_unlock(mdb);
   return retval;
}
/*
 * Send the catalog information for one Volume to the Storage daemon.
 *
 * Returns the fsend() status (<=0 on error).
 * Side effects: records MediaId/VolumeName in the jcr.
 */
static int send_volume_info_to_storage_daemon(JCR *jcr, BSOCK *sd, MEDIA_DBR *mr)
{
   int status;
   char ed1[50], ed2[50], ed3[50], ed4[50], ed5[50], ed6[50];

   jcr->MediaId = mr->MediaId;
   pm_strcpy(jcr->VolumeName, mr->VolumeName);
   /*
    * The OK_media message is space-separated, so spaces inside the
    * volume name must be encoded before sending and restored after.
    */
   bash_spaces(mr->VolumeName);
   status = sd->fsend(OK_media, mr->VolumeName, mr->VolJobs, mr->VolFiles,
                      mr->VolBlocks, edit_uint64(mr->VolBytes, ed1),
                      mr->VolMounts, mr->VolErrors, mr->VolWrites,
                      edit_uint64(mr->MaxVolBytes, ed2),
                      edit_uint64(mr->VolCapacityBytes, ed3),
                      mr->VolStatus, mr->Slot, mr->MaxVolJobs, mr->MaxVolFiles,
                      mr->InChanger,
                      edit_int64(mr->VolReadTime, ed4),
                      edit_int64(mr->VolWriteTime, ed5),
                      mr->EndFile, mr->EndBlock, mr->LabelType,
                      edit_uint64(mr->MediaId, ed6),
                      mr->EncrKey, mr->MinBlocksize, mr->MaxBlocksize);
   unbash_spaces(mr->VolumeName);
   Dmsg2(100, "Vol Info for %s: %s", jcr->Job, sd->msg);
   return status;
}
/*
 * Write bsr data for a single bsr record.
 *
 * Emits one bootstrap section per selected JobMedia record of this bsr
 * to fd, and returns the total number of file indexes written (with
 * continuation records across volumes counted only once).
 */
static uint32_t write_bsr_item(RBSR *bsr, UAContext *ua, RESTORE_CTX &rx,
                               FILE *fd, bool &first, uint32_t &LastIndex)
{
   char ed1[50], ed2[50];
   uint32_t count = 0;
   uint32_t total_count = 0;
   char device[MAX_NAME_LENGTH];

   /*
    * For a given volume, loop over all the JobMedia records.
    * VolCount is the number of JobMedia records.
    */
   for (int i=0; i < bsr->VolCount; i++) {
      /* Skip JobMedia records that contain no selected file index. */
      if (!is_volume_selected(bsr->fi, bsr->VolParams[i].FirstIndex,
                              bsr->VolParams[i].LastIndex)) {
         bsr->VolParams[i].VolumeName[0] = 0;      /* zap VolumeName */
         continue;
      }
      if (!rx.store) {
         find_storage_resource(ua, rx, bsr->VolParams[i].Storage,
                               bsr->VolParams[i].MediaType);
      }
      fprintf(fd, "Storage=\"%s\"\n", bsr->VolParams[i].Storage);
      fprintf(fd, "Volume=\"%s\"\n", bsr->VolParams[i].VolumeName);
      fprintf(fd, "MediaType=\"%s\"\n", bsr->VolParams[i].MediaType);
      if (bsr->fileregex) {
         fprintf(fd, "FileRegex=%s\n", bsr->fileregex);
      }
      if (get_storage_device(device, bsr->VolParams[i].Storage)) {
         fprintf(fd, "Device=\"%s\"\n", device);
      }
      if (bsr->VolParams[i].Slot > 0) {
         fprintf(fd, "Slot=%d\n", bsr->VolParams[i].Slot);
      }
      fprintf(fd, "VolSessionId=%u\n", bsr->VolSessionId);
      fprintf(fd, "VolSessionTime=%u\n", bsr->VolSessionTime);
      fprintf(fd, "VolAddr=%s-%s\n",
              edit_uint64(bsr->VolParams[i].StartAddr, ed1),
              edit_uint64(bsr->VolParams[i].EndAddr, ed2));
      count = write_findex(bsr->fi, bsr->VolParams[i].FirstIndex,
                           bsr->VolParams[i].LastIndex, fd);
      if (count) {
         fprintf(fd, "Count=%u\n", count);
      }
      total_count += count;
      /* If the same file is present on two tapes or in two files
       * on a tape, it is a continuation, and should not be treated
       * twice in the totals.
       */
      if (!first && LastIndex == bsr->VolParams[i].FirstIndex) {
         total_count--;
      }
      first = false;
      LastIndex = bsr->VolParams[i].LastIndex;
   }
   return total_count;
}
/**
 * After writing a Volume, create the JobMedia record.
 *
 * Sends a Create_job_media message to the Director and waits for its
 * OK_create reply. With zero==true a dummy (all-zero) record is sent,
 * which avoids the volume being purged.
 *
 * Returns: false on protocol/catalog failure
 *          true on success or when no record is needed
 */
bool SD_DCR::dir_create_jobmedia_record(bool zero)
{
   BSOCK *dir = jcr->dir_bsock;
   char ed1[50];

   /*
    * If system job, do not update catalog
    */
   if (jcr->is_JobType(JT_SYSTEM)) {
      return true;
   }

   /*
    * Throw out records where FI is zero -- i.e. nothing done
    */
   if (!zero && VolFirstIndex == 0 && (StartBlock != 0 || EndBlock != 0)) {
      Dmsg0(dbglvl, "JobMedia FI=0 StartBlock!=0 record suppressed\n");
      return true;
   }

   if (!WroteVol) {
      return true;                 /* nothing written to tape */
   }

   WroteVol = false;
   if (zero) {
      /*
       * Send dummy place holder to avoid purging
       */
      dir->fsend(Create_job_media, jcr->Job, 0, 0, 0, 0, 0, 0, 0, 0,
                 edit_uint64(VolMediaId, ed1));
   } else {
      dir->fsend(Create_job_media, jcr->Job, VolFirstIndex, VolLastIndex,
                 StartFile, EndFile, StartBlock, EndBlock, Copy, Stripe,
                 edit_uint64(VolMediaId, ed1));
   }
   Dmsg1(dbglvl, ">dird %s", dir->msg);

   /* Wait for the Director to acknowledge the record. */
   if (dir->recv() <= 0) {
      Dmsg0(dbglvl, "create_jobmedia error bnet_recv\n");
      Jmsg(jcr, M_FATAL, 0, _("Error creating JobMedia record: ERR=%s\n"),
           dir->bstrerror());
      return false;
   }
   Dmsg1(dbglvl, "<dird %s", dir->msg);
   if (!bstrcmp(dir->msg, OK_create)) {
      Dmsg1(dbglvl, "Bad response from Dir: %s\n", dir->msg);
      Jmsg(jcr, M_FATAL, 0, _("Error creating JobMedia record: %s\n"), dir->msg);
      return false;
   }
   return true;
}
/*
 * Update the Job record at end of Job
 *
 *  Returns: 0 on failure
 *           1 on success
 */
int db_update_job_end_record(JCR *jcr, B_DB *mdb, JOB_DBR *jr)
{
   char dt[MAX_TIME_LENGTH];
   char rdt[MAX_TIME_LENGTH];
   time_t ttime;
   struct tm tm;
   int stat;
   char ed1[30], ed2[30], ed3[50], ed4[50];
   btime_t JobTDate;
   char PriorJobId[50];

   /*
    * Edit PriorJobId into its own buffer first: ed1 is reused further
    * down for JobBytes, which is safe only because of this copy.
    */
   if (jr->PriorJobId) {
      bstrncpy(PriorJobId, edit_int64(jr->PriorJobId, ed1), sizeof(PriorJobId));
   } else {
      bstrncpy(PriorJobId, "0", sizeof(PriorJobId));
   }

   /* Render EndTime as a catalog datetime string. */
   ttime = jr->EndTime;
   (void)localtime_r(&ttime, &tm);
   strftime(dt, sizeof(dt), "%Y-%m-%d %H:%M:%S", &tm);

   /* RealEndTime falls back to EndTime when it was never set. */
   if (jr->RealEndTime == 0) {
      jr->RealEndTime = jr->EndTime;
   }
   ttime = jr->RealEndTime;
   (void)localtime_r(&ttime, &tm);
   strftime(rdt, sizeof(rdt), "%Y-%m-%d %H:%M:%S", &tm);

   /* JobTDate is the RealEndTime expressed as a plain integer. */
   JobTDate = ttime;

   db_lock(mdb);
   Mmsg(mdb->cmd,
        "UPDATE Job SET JobStatus='%c',EndTime='%s',"
        "ClientId=%u,JobBytes=%s,ReadBytes=%s,JobFiles=%u,JobErrors=%u,VolSessionId=%u,"
        "VolSessionTime=%u,PoolId=%u,FileSetId=%u,JobTDate=%s,"
        "RealEndTime='%s',PriorJobId=%s,HasBase=%u,PurgedFiles=%u WHERE JobId=%s",
        (char)(jr->JobStatus), dt, jr->ClientId,
        edit_uint64(jr->JobBytes, ed1),
        edit_uint64(jr->ReadBytes, ed4),
        jr->JobFiles, jr->JobErrors, jr->VolSessionId, jr->VolSessionTime,
        jr->PoolId, jr->FileSetId,
        edit_uint64(JobTDate, ed2),
        rdt, PriorJobId, jr->HasBase, jr->PurgedFiles,
        edit_int64(jr->JobId, ed3));
   stat = UPDATE_DB(jcr, mdb, mdb->cmd);
   db_unlock(mdb);
   return stat;
}
/* * Utility routine for updates. * Returns: false on failure * true on success */ bool UpdateDB(const char *file, int line, JCR *jcr, B_DB *mdb, char *cmd, int nr_afr) { int num_rows; if (!sql_query(mdb, cmd)) { m_msg(file, line, &mdb->errmsg, _("update %s failed:\n%s\n"), cmd, sql_strerror(mdb)); j_msg(file, line, jcr, M_ERROR, 0, "%s", mdb->errmsg); if (verbose) { j_msg(file, line, jcr, M_INFO, 0, "%s\n", cmd); } return false; } if (nr_afr > 0) { num_rows = sql_affected_rows(mdb); if (num_rows < nr_afr) { char ed1[30]; m_msg(file, line, &mdb->errmsg, _("Update failed: affected_rows=%s for %s\n"), edit_uint64(num_rows, ed1), cmd); if (verbose) { // j_msg(file, line, jcr, M_INFO, 0, "%s\n", cmd); } return false; } } mdb->changes++; return true; }
/*
 * Wait for any device to be released, then we return, so
 * higher level code can rescan possible devices.  Since there
 * could be a job waiting for a drive to free up, we wait a maximum
 * of 1 minute then retry just in case a broadcast was lost, and
 * we return to rescan the devices.
 *
 * Returns: true  if a device has changed state
 *          false if the total wait time has expired.
 *
 * NOTE(review): ok is initialized to true and never changed, so the
 * "false on expiry" return documented above cannot actually happen in
 * this implementation -- confirm whether that is intentional.
 */
bool wait_for_device(JCR *jcr, int &retries)
{
   struct timeval tv;
   struct timezone tz;
   struct timespec timeout;
   int status = 0;
   bool ok = true;
   const int max_wait_time = 1 * 60;       /* wait 1 minute */
   char ed1[50];

   Dmsg0(dbglvl, "Enter wait_for_device\n");
   P(device_release_mutex);

   /* Each wait is ~1 minute, so every 5th retry is roughly 5 minutes. */
   if (++retries % 5 == 0) {
      /* Print message every 5 minutes */
      Jmsg(jcr, M_MOUNT, 0, _("JobId=%s, Job %s waiting to reserve a device.\n"),
           edit_uint64(jcr->JobId, ed1), jcr->Job);
   }

   /* Build the absolute timeout for pthread_cond_timedwait(). */
   gettimeofday(&tv, &tz);
   timeout.tv_nsec = tv.tv_usec * 1000;
   timeout.tv_sec = tv.tv_sec + max_wait_time;

   Dmsg0(dbglvl, "Going to wait for a device.\n");

   /* Wait required time */
   status = pthread_cond_timedwait(&wait_device_release, &device_release_mutex, &timeout);
   Dmsg1(dbglvl, "Wokeup from sleep on device status=%d\n", status);
   V(device_release_mutex);
   Dmsg1(dbglvl, "Return from wait_device ok=%d\n", ok);
   return ok;
}
/*
 * Update the Job record at start of Job
 *
 *  Returns: false on failure
 *           true on success
 */
bool db_update_job_start_record(JCR *jcr, B_DB *mdb, JOB_DBR *jr)
{
   bool ok;
   struct tm tm;
   char start_dt[MAX_TIME_LENGTH];
   char buf1[50], buf2[50], buf3[50], buf4[50], buf5[50];
   time_t start = jr->StartTime;
   btime_t JobTDate = (btime_t)start;

   /* Render the start time in the catalog's datetime format. */
   (void)localtime_r(&start, &tm);
   strftime(start_dt, sizeof(start_dt), "%Y-%m-%d %H:%M:%S", &tm);

   db_lock(mdb);
   Mmsg(mdb->cmd,
        "UPDATE Job SET JobStatus='%c',Level='%c',StartTime='%s',"
        "ClientId=%s,JobTDate=%s,PoolId=%s,FileSetId=%s WHERE JobId=%s",
        (char)(jcr->JobStatus), (char)(jr->JobLevel), start_dt,
        edit_int64(jr->ClientId, buf1),
        edit_uint64(JobTDate, buf2),
        edit_int64(jr->PoolId, buf3),
        edit_int64(jr->FileSetId, buf4),
        edit_int64(jr->JobId, buf5));
   ok = UPDATE_DB(jcr, mdb, mdb->cmd);
   mdb->changes = 0;           /* reset the change counter for this job */
   db_unlock(mdb);
   return ok;
}
/* * Utility routine to do inserts * Returns: false on failure * true on success */ bool InsertDB(const char *file, int line, JCR *jcr, B_DB *mdb, char *cmd) { int num_rows; if (!sql_query(mdb, cmd)) { m_msg(file, line, &mdb->errmsg, _("insert %s failed:\n%s\n"), cmd, sql_strerror(mdb)); j_msg(file, line, jcr, M_FATAL, 0, "%s", mdb->errmsg); if (verbose) { j_msg(file, line, jcr, M_INFO, 0, "%s\n", cmd); } return false; } num_rows = sql_affected_rows(mdb); if (num_rows != 1) { char ed1[30]; m_msg(file, line, &mdb->errmsg, _("Insertion problem: affected_rows=%s\n"), edit_uint64(num_rows, ed1)); if (verbose) { j_msg(file, line, jcr, M_INFO, 0, "%s\n", cmd); } return false; } mdb->changes++; return true; }
/*
 * Run a File daemon Job -- File daemon already authorized
 * Director sends us this command.
 *
 * Basic task here is:
 * - Read a command from the File daemon
 * - Execute it
 *
 * Sends Job_start before and Job_end (with final status/files/bytes/errors)
 * after running the commands, then an EOD signal to the Director.
 */
void run_job(JCR *jcr)
{
   BSOCK *dir = jcr->dir_bsock;
   char ec1[30];

   dir->set_jcr(jcr);
   Dmsg1(120, "Start run Job=%s\n", jcr->Job);
   dir->fsend(Job_start, jcr->Job);
   jcr->start_time = time(NULL);
   jcr->run_time = jcr->start_time;
   jcr->sendJobStatus(JS_Running);
   do_fd_commands(jcr);
   jcr->end_time = time(NULL);
   dequeue_messages(jcr);              /* send any queued messages */
   jcr->setJobStatus(JS_Terminated);
   generate_plugin_event(jcr, bsdEventJobEnd);
   dir->fsend(Job_end, jcr->Job, jcr->JobStatus, jcr->JobFiles,
              edit_uint64(jcr->JobBytes, ec1), jcr->JobErrors);
   dir->signal(BNET_EOD);              /* send EOD to Director daemon */
   free_plugins(jcr);                  /* release instantiated plugins */
}
/*
 * Update Quota Softlimit
 *
 *  Returns: false on failure
 *           true on success
 */
bool db_update_quota_softlimit(JCR *jcr, B_DB *mdb, JOB_DBR *jr)
{
   bool ok;
   char limit_str[50], client_str[50];

   db_lock(mdb);
   /* New limit is the bytes used before this job plus this job's bytes. */
   Mmsg(mdb->cmd, "UPDATE Quota SET QuotaLimit=%s WHERE ClientId='%s'",
        edit_uint64((jr->JobSumTotalBytes + jr->JobBytes), limit_str),
        edit_uint64(jr->ClientId, client_str));
   ok = UPDATE_DB(jcr, mdb, mdb->cmd);
   db_unlock(mdb);
   return ok;
}
/*
 * Dump contents of resource
 *
 * type < 0 means dump this resource only (no recursion into the
 * linked list of resources of the same type).
 */
void dump_resource(int type, RES *reshdr, void sendit(void *sock, const char *fmt, ...), void *sock)
{
   URES *res = (URES *)reshdr;
   bool recurse = true;
   char ed1[100], ed2[100];

   if (res == NULL) {
      sendit(sock, _("No %s resource defined\n"), res_to_str(type));
      return;
   }
   if (type < 0) {                    /* no recursion */
      type = - type;
      recurse = false;
   }
   switch (type) {
   case R_MONITOR:
      sendit(sock, _("Monitor: name=%s FDtimeout=%s SDtimeout=%s\n"),
             reshdr->name,
             edit_uint64(res->res_monitor.FDConnectTimeout, ed1),
             edit_uint64(res->res_monitor.SDConnectTimeout, ed2));
      break;
   case R_DIRECTOR:
      sendit(sock, _("Director: name=%s address=%s FDport=%d\n"),
             res->res_dir.hdr.name, res->res_dir.address, res->res_dir.DIRport);
      break;
   case R_CLIENT:
      sendit(sock, _("Client: name=%s address=%s FDport=%d\n"),
             res->res_client.hdr.name, res->res_client.address, res->res_client.FDport);
      break;
   case R_STORAGE:
      sendit(sock, _("Storage: name=%s address=%s SDport=%d\n"),
             res->res_store.hdr.name, res->res_store.address, res->res_store.SDport);
      break;
   case R_CONSOLE_FONT:
      sendit(sock, _("ConsoleFont: name=%s font face=%s\n"),
             reshdr->name, NPRT(res->con_font.fontface));
      break;
   default:
      sendit(sock, _("Unknown resource type %d in dump_resource.\n"), type);
      break;
   }
   /*
    * NOTE(review): res_monitor.hdr is used to walk the next pointer for
    * every type; presumably hdr is the common first member of the URES
    * union, so this is valid for all resource types -- confirm.
    */
   if (recurse && res->res_monitor.hdr.next) {
      dump_resource(type, res->res_monitor.hdr.next, sendit, sock);
   }
}
/*
 * Set the MaxVolBytes of a single Media record from a size string.
 */
static void update_volmaxbytes(UAContext *ua, char *val, MEDIA_DBR *mr)
{
   uint64_t maxbytes;
   char b1[50], b2[50];
   POOL_MEM query(PM_MESSAGE);

   /* Parse the human-readable size specification (e.g. "10 G"). */
   if (!size_to_uint64(val, strlen(val), &maxbytes)) {
      ua->error_msg(_("Invalid max. bytes specification: %s\n"), val);
      return;
   }

   Mmsg(query, "UPDATE Media SET MaxVolBytes=%s WHERE MediaId=%s",
        edit_uint64(maxbytes, b1), edit_int64(mr->MediaId, b2));
   if (!db_sql_query(ua->db, query.c_str(), NULL, NULL)) {
      ua->error_msg("%s", db_strerror(ua->db));
      return;
   }
   ua->info_msg(_("New Max bytes is: %s\n"), edit_uint64(maxbytes, b1));
}
/*
 * Update Quota record
 *
 *  Returns: false on failure
 *           true on success
 */
bool db_update_quota_gracetime(JCR *jcr, B_DB *mdb, JOB_DBR *jr)
{
   bool ok;
   char time_str[50], client_str[50];
   time_t now = time(NULL);

   db_lock(mdb);
   /* Grace period starts now for this client. */
   Mmsg(mdb->cmd, "UPDATE Quota SET GraceTime=%s WHERE ClientId='%s'",
        edit_uint64(now, time_str),
        edit_uint64(jr->ClientId, client_str));
   ok = UPDATE_DB(jcr, mdb, mdb->cmd);
   db_unlock(mdb);
   return ok;
}
/**
 * Update NDMP level mapping
 *
 * Stores the dump level for the (ClientId, FileSetId, FileSystem) tuple.
 *
 * Returns: false on failure
 *          true on success
 */
bool db_update_ndmp_level_mapping(JCR *jcr, B_DB *mdb, JOB_DBR *jr, char *filesystem, int level)
{
   bool retval;
   char ed1[50], ed2[50], ed3[50];

   db_lock(mdb);

   /*
    * Escape the filesystem name; worst case every char is escaped,
    * hence the 2 * strlen + 1 sizing of the escape buffer.
    */
   mdb->esc_name = check_pool_memory_size(mdb->esc_name, strlen(filesystem) * 2 + 1);
   db_escape_string(jcr, mdb, mdb->esc_name, filesystem, strlen(filesystem));

   Mmsg(mdb->cmd, "UPDATE NDMPLevelMap SET DumpLevel='%s' WHERE "
                  "ClientId='%s' AND FileSetId='%s' AND FileSystem='%s'",
        edit_uint64(level, ed1),
        edit_uint64(jr->ClientId, ed2),
        edit_uint64(jr->FileSetId, ed3),
        mdb->esc_name);
   retval = UPDATE_DB(jcr, mdb, mdb->cmd);
   db_unlock(mdb);
   return retval;
}
/*
 * Update Long term statistics with all jobs that were run before
 * age seconds
 *
 * Returns the number of rows affected by the fill query.
 */
int db_update_stats(JCR *jcr, B_DB *mdb, utime_t age)
{
   int rows;
   char cutoff[30];
   utime_t now = (utime_t)time(NULL);

   /* Jobs with JobTDate before this cutoff are moved into the history. */
   edit_uint64(now - age, cutoff);

   db_lock(mdb);
   Mmsg(mdb->cmd, fill_jobhisto, cutoff);
   QUERY_DB(jcr, mdb, mdb->cmd);       /* TODO: get a message ? */
   rows = sql_affected_rows(mdb);
   db_unlock(mdb);

   return rows;
}
/*
 * Reset Quota Gracetime
 *
 *  Returns: false on failure
 *           true on success
 */
bool db_reset_quota_record(JCR *jcr, B_DB *mdb, CLIENT_DBR *cr)
{
   bool ok;
   char client_str[50];

   db_lock(mdb);
   /* Clear both the grace period and the soft limit for this client. */
   Mmsg(mdb->cmd, "UPDATE Quota SET GraceTime='0', QuotaLimit='0' WHERE ClientId='%s'",
        edit_uint64(cr->ClientId, client_str));
   ok = UPDATE_DB(jcr, mdb, mdb->cmd);
   db_unlock(mdb);
   return ok;
}
/*
 * Handle the Director's "finish" command.
 *
 * Only valid for protocols that use it (NDMP); for any other protocol a
 * fatal error is raised. Returns false in all cases (do not continue the
 * Director session).
 */
bool finish_cmd(JCR *jcr)
{
   BSOCK *dir = jcr->dir_bsock;
   char ec1[30];

   /*
    * See if the Job has a certain protocol. Some protocols allow the
    * finish cmd some do not (Native backup for example does NOT)
    */
   switch (jcr->getJobProtocol()) {
   case PT_NDMP:
      Dmsg1(200, "Finish_cmd: %s", jcr->dir_bsock->msg);

      jcr->end_time = time(NULL);
      dequeue_messages(jcr);                /* send any queued messages */
      jcr->setJobStatus(JS_Terminated);

      /* Per-job-type NDMP termination handling. */
      switch (jcr->getJobType()) {
      case JT_BACKUP:
         end_of_ndmp_backup(jcr);
         break;
      case JT_RESTORE:
         end_of_ndmp_restore(jcr);
         break;
      default:
         break;
      }

      generate_plugin_event(jcr, bsdEventJobEnd);

      /* Report final status back to the Director. */
      dir->fsend(Job_end, jcr->Job, jcr->JobStatus, jcr->JobFiles,
                 edit_uint64(jcr->JobBytes, ec1), jcr->JobErrors);
      dir->signal(BNET_EOD);                /* send EOD to Director daemon */
      free_plugins(jcr);                    /* release instantiated plugins */

      Dmsg2(800, "Done jid=%d %p\n", jcr->JobId, jcr);
      return false;                         /* Continue DIR session ? */
   default:
      Dmsg1(200, "Finish_cmd: %s", jcr->dir_bsock->msg);
      Jmsg2(jcr, M_FATAL, 0, _("Hey!!!! JobId %u Job %s tries to use finish cmd while not part of protocol.\n"),
            (uint32_t)jcr->JobId, jcr->Job);
      return false;                         /* Continue DIR session ? */
   }
}
/*
 * Set the VolRetention of a single Media record from a duration string.
 */
static void update_volretention(UAContext *ua, char *val, MEDIA_DBR *mr)
{
   /* ed1 is large enough for both edit_uint64() and edit_utime() output. */
   char ed1[150], ed2[50];
   POOL_MEM query(PM_MESSAGE);

   /* Parse the duration (e.g. "30 days") into mr->VolRetention. */
   if (!duration_to_utime(val, &mr->VolRetention)) {
      ua->error_msg(_("Invalid retention period specified: %s\n"), val);
      return;
   }

   Mmsg(query, "UPDATE Media SET VolRetention=%s WHERE MediaId=%s",
        edit_uint64(mr->VolRetention, ed1), edit_int64(mr->MediaId,ed2));
   if (!db_sql_query(ua->db, query.c_str(), NULL, NULL)) {
      ua->error_msg("%s", db_strerror(ua->db));
      return;
   }
   ua->info_msg(_("New retention period is: %s\n"),
                edit_utime(mr->VolRetention, ed1, sizeof(ed1)));
}
/*
 * Cancel a job -- typically called by the UA (Console program), but may also
 *              be called by the job watchdog.
 *
 *  Returns: true  if cancel appears to be successful
 *           false on failure. Message sent to ua->jcr.
 */
bool cancel_job(UAContext *ua, JCR *jcr)
{
   char ed1[50];
   int32_t old_status = jcr->JobStatus;

   jcr->setJobStatus(JS_Canceled);

   switch (old_status) {
   /* Job has not started yet: just pull it off the scheduling queue. */
   case JS_Created:
   case JS_WaitJobRes:
   case JS_WaitClientRes:
   case JS_WaitStoreRes:
   case JS_WaitPriority:
   case JS_WaitMaxJobs:
   case JS_WaitStartTime:
      ua->info_msg(_("JobId %s, Job %s marked to be canceled.\n"),
                   edit_uint64(jcr->JobId, ed1), jcr->Job);
      jobq_remove(&job_queue, jcr);     /* attempt to remove it from queue */
      break;
   default:
      /*
       * Cancel File daemon
       */
      if (jcr->file_bsock) {
         if (!cancel_file_daemon_job(ua, jcr)) {
            return false;
         }
      }

      /*
       * Cancel Storage daemon
       */
      if (jcr->store_bsock) {
         if (!cancel_storage_daemon_job(ua, jcr)) {
            return false;
         }
      }
      break;
   }

   run_scripts(jcr, jcr->res.job->RunScripts, "AfterJob");
   return true;
}
/*
 * Check if the current position on the volume corresponds to
 * what is in the catalog.
 *
 * DVD volumes are compared by byte count, tapes by file number, disk
 * files by the seek position at EOF. On mismatch the volume is marked
 * in error and false is returned; device types not covered here pass.
 */
bool DCR::is_eod_valid()
{
   if (dev->is_dvd()) {
      char ed1[50], ed2[50];
      /* Compare catalog bytes against the physical part start + size. */
      if (dev->VolCatInfo.VolCatBytes == dev->part_start + dev->part_size) {
         Jmsg(jcr, M_INFO, 0, _("Ready to append to end of Volume \"%s\""
              " part=%d size=%s\n"), VolumeName,
              dev->part, edit_uint64(dev->VolCatInfo.VolCatBytes,ed1));
      } else {
         Jmsg(jcr, M_ERROR, 0, _("Bacula cannot write on DVD Volume \"%s\" because: "
              "The sizes do not match! Volume=%s Catalog=%s\n"),
              VolumeName,
              edit_uint64(dev->part_start + dev->part_size, ed1),
              edit_uint64(dev->VolCatInfo.VolCatBytes, ed2));
         mark_volume_in_error();
         return false;
      }
   } else if (dev->is_tape()) {
      /*
       * Check if we are positioned on the tape at the same place
       * that the database says we should be.
       */
      if (dev->VolCatInfo.VolCatFiles == dev->get_file()) {
         Jmsg(jcr, M_INFO, 0, _("Ready to append to end of Volume \"%s\" at file=%d.\n"),
              VolumeName, dev->get_file());
      } else {
         Jmsg(jcr, M_ERROR, 0, _("Bacula cannot write on tape Volume \"%s\" because:\n"
              "The number of files mismatch! Volume=%u Catalog=%u\n"),
              VolumeName, dev->get_file(), dev->VolCatInfo.VolCatFiles);
         mark_volume_in_error();
         return false;
      }
   } else if (dev->is_file()) {
      char ed1[50], ed2[50];
      boffset_t pos;
      /* Seek to EOF and compare the resulting offset with the catalog. */
      pos = dev->lseek(this, (boffset_t)0, SEEK_END);
      if (dev->VolCatInfo.VolCatBytes == (uint64_t)pos) {
         Jmsg(jcr, M_INFO, 0, _("Ready to append to end of Volume \"%s\""
              " size=%s\n"), VolumeName,
              edit_uint64(dev->VolCatInfo.VolCatBytes, ed1));
      } else {
         Jmsg(jcr, M_ERROR, 0, _("Bacula cannot write on disk Volume \"%s\" because: "
              "The sizes do not match! Volume=%s Catalog=%s\n"),
              VolumeName,
              edit_uint64(pos, ed1),
              edit_uint64(dev->VolCatInfo.VolCatBytes, ed2));
         mark_volume_in_error();
         return false;
      }
   }
   return true;
}
/* * Update the Media Record Default values from Pool * * Returns: false on failure * true on success */ bool db_update_media_defaults(JCR *jcr, B_DB *mdb, MEDIA_DBR *mr) { bool retval; char ed1[50], ed2[50], ed3[50], ed4[50], ed5[50]; char esc[MAX_ESCAPE_NAME_LENGTH]; db_lock(mdb); if (mr->VolumeName[0]) { mdb->db_escape_string(jcr, esc, mr->VolumeName, strlen(mr->VolumeName)); Mmsg(mdb->cmd, "UPDATE Media SET " "ActionOnPurge=%d,Recycle=%d,VolRetention=%s,VolUseDuration=%s," "MaxVolJobs=%u,MaxVolFiles=%u,MaxVolBytes=%s,RecyclePoolId=%s," "MinBlocksize=%d,MaxBlocksize=%d" " WHERE VolumeName='%s'", mr->ActionOnPurge, mr->Recycle,edit_uint64(mr->VolRetention, ed1), edit_uint64(mr->VolUseDuration, ed2), mr->MaxVolJobs, mr->MaxVolFiles, edit_uint64(mr->MaxVolBytes, ed3), edit_uint64(mr->RecyclePoolId, ed4), mr->MinBlocksize, mr->MaxBlocksize, esc); } else { Mmsg(mdb->cmd, "UPDATE Media SET " "ActionOnPurge=%d,Recycle=%d,VolRetention=%s,VolUseDuration=%s," "MaxVolJobs=%u,MaxVolFiles=%u,MaxVolBytes=%s,RecyclePoolId=%s," "MinBlocksize=%d,MaxBlocksize=%d" " WHERE PoolId=%s", mr->ActionOnPurge, mr->Recycle,edit_uint64(mr->VolRetention, ed1), edit_uint64(mr->VolUseDuration, ed2), mr->MaxVolJobs, mr->MaxVolFiles, edit_uint64(mr->MaxVolBytes, ed3), edit_int64(mr->RecyclePoolId, ed4), mr->MinBlocksize, mr->MaxBlocksize, edit_int64(mr->PoolId, ed5)); } Dmsg1(400, "%s\n", mdb->cmd); retval = UPDATE_DB(jcr, mdb, mdb->cmd); db_unlock(mdb); return retval; }
/*
 * Print a single file entry in "ls -l" style to stdout.
 *
 * For symlinks (FT_LNK) the mtime is suppressed, the mode is shown as
 * world-accessible, and "-> target" is appended.
 */
static void print_ls_output(char *fname, char *link, int type, struct stat *statp)
{
   char buf[2000];
   char ec1[30];
   char *p, *f;
   int n;
   /*
    * Reserve room at the end of buf for the " -> " arrow (3 chars),
    * the trailing '\n' and the terminating 0. The original bound
    * allowed p to reach buf + sizeof(buf) before the unconditional
    * tail writes, overflowing the buffer for very long names.
    */
   const char *end = buf + sizeof(buf) - 5;

   if (type == FT_LNK) {
      statp->st_mtime = 0;
      statp->st_mode |= 0777;
   }
   p = encode_mode(statp->st_mode, buf);
   n = sprintf(p, "  %2d ", (uint32_t)statp->st_nlink);
   p += n;
   n = sprintf(p, "%-4d %-4d", (int)statp->st_uid, (int)statp->st_gid);
   p += n;
   n = sprintf(p, "%10.10s ", edit_uint64(statp->st_size, ec1));
   p += n;
   if (S_ISCHR(statp->st_mode) || S_ISBLK(statp->st_mode)) {
      n = sprintf(p, "%4x ", (int)statp->st_rdev);
   } else {
      n = sprintf(p, "     ");
   }
   p += n;
   p = encode_time(statp->st_mtime, p);
   *p++ = ' ';
   /* Copy file name, leaving room for the tail writes below. */
   for (f=fname; *f && p < end; ) {
      *p++ = *f++;
   }
   if (type == FT_LNK) {
      *p++ = '-';
      *p++ = '>';
      *p++ = ' ';
      /* Copy link name */
      for (f=link; *f && p < end; ) {
         *p++ = *f++;
      }
   }
   *p++ = '\n';
   *p = 0;
   fputs(buf, stdout);
}
/*
 * Run a File daemon Job -- File daemon already authorized
 * Director sends us this command.
 *
 * Basic task here is:
 * - Read a command from the File daemon
 * - Execute it
 *
 * NOTE(review): this is the older-API variant of run_job (uses
 * set_jcr_job_status/dir_send_job_status/generate_daemon_event instead
 * of the jcr member functions and plugin events).
 */
void run_job(JCR *jcr)
{
   BSOCK *dir = jcr->dir_bsock;
   char ec1[30];

   dir->set_jcr(jcr);
   Dmsg1(120, "Start run Job=%s\n", jcr->Job);
   dir->fsend(Job_start, jcr->Job);
   jcr->start_time = time(NULL);
   jcr->run_time = jcr->start_time;
   set_jcr_job_status(jcr, JS_Running);
   dir_send_job_status(jcr);           /* update director */
   do_fd_commands(jcr);
   jcr->end_time = time(NULL);
   dequeue_messages(jcr);              /* send any queued messages */
   set_jcr_job_status(jcr, JS_Terminated);
   generate_daemon_event(jcr, "JobEnd");
   dir->fsend(Job_end, jcr->Job, jcr->JobStatus, jcr->JobFiles,
              edit_uint64(jcr->JobBytes, ec1), jcr->JobErrors);
   dir->signal(BNET_EOD);              /* send EOD to Director daemon */
   return;
}
/*
 * Wait for a specific device to be released
 * We wait a maximum of 1 minute then
 * retry just in case a broadcast was lost.
 *
 *  Returns: true  if the device has changed state
 *           false if the total wait time has expired.
 *
 * NOTE(review): ok is initialized to true and never changed, so the
 * "false on expiry" return documented above cannot actually happen in
 * this implementation -- confirm whether that is intentional.
 */
bool wait_for_device(DCR *dcr, int &retries)
{
   struct timeval tv;
   struct timezone tz;
   struct timespec timeout;
   JCR *jcr = dcr->jcr;
   DEVICE *dev = dcr->dev;
   int stat = 0;
   bool ok = true;
   const int max_wait_time = 1 * 60;   /* wait 1 minute */
   char ed1[50];

   Dmsg3(40, "Enter wait_for_device. busy=%d dcrvol=%s devvol=%s\n",
         dev->is_busy(), dcr->VolumeName, dev->getVolCatName());
   P(device_release_mutex);

   /* Each wait is ~1 minute, so every 5th retry is roughly 5 minutes. */
   if (++retries % 5 == 0) {
      /* Print message every 5 minutes */
      Jmsg(jcr, M_MOUNT, 0, _("JobId=%s, Job %s waiting device %s.\n"),
           edit_uint64(jcr->JobId, ed1), jcr->Job, dcr->dev->print_name());
   }

   /* Build the absolute timeout for pthread_cond_timedwait(). */
   gettimeofday(&tv, &tz);
   timeout.tv_nsec = tv.tv_usec * 1000;
   timeout.tv_sec = tv.tv_sec + max_wait_time;

   Dmsg0(dbglvl, "Going to wait for a device.\n");

   /* Wait required time */
   stat = pthread_cond_timedwait(&wait_device_release, &device_release_mutex, &timeout);
   Dmsg1(dbglvl, "Wokeup from sleep on device stat=%d\n", stat);
   V(device_release_mutex);
   Dmsg1(dbglvl, "Return from wait_device ok=%d\n", ok);
   return ok;
}
/*
 * Release resources allocated during backup.
 *
 * Finalizes the virtual backup job: copies SD counters into the jcr,
 * rewrites the job's time columns to the consolidated previous job's
 * values, refreshes the Job and Client records, writes the bootstrap
 * file and emits the final backup summary.
 */
void native_vbackup_cleanup(JCR *jcr, int TermCode)
{
   char ec1[30], ec2[30];
   char term_code[100];
   const char *term_msg;
   int msg_type = M_INFO;
   CLIENT_DBR cr;
   POOL_MEM query(PM_MESSAGE);

   Dmsg2(100, "Enter backup_cleanup %d %c\n", TermCode, TermCode);
   memset(&cr, 0, sizeof(cr));

   switch (jcr->JobStatus) {
   case JS_Terminated:
   case JS_Warnings:
      jcr->jr.JobLevel = L_FULL;      /* we want this to appear as a Full backup */
      break;
   default:
      break;
   }

   /* The SD did the actual work; adopt its counters. */
   jcr->JobFiles = jcr->SDJobFiles;
   jcr->JobBytes = jcr->SDJobBytes;

   if (jcr->getJobStatus() == JS_Terminated &&
       (jcr->JobErrors || jcr->SDErrors)) {
      TermCode = JS_Warnings;
   }

   update_job_end(jcr, TermCode);

   /*
    * Update final items to set them to the previous job's values
    */
   Mmsg(query, "UPDATE Job SET StartTime='%s',EndTime='%s',"
               "JobTDate=%s WHERE JobId=%s",
        jcr->previous_jr.cStartTime, jcr->previous_jr.cEndTime,
        edit_uint64(jcr->previous_jr.JobTDate, ec1),
        edit_uint64(jcr->JobId, ec2));
   db_sql_query(jcr->db, query.c_str());

   /*
    * Get the fully updated job record
    */
   if (!db_get_job_record(jcr, jcr->db, &jcr->jr)) {
      Jmsg(jcr, M_WARNING, 0, _("Error getting Job record for Job report: ERR=%s"),
           db_strerror(jcr->db));
      jcr->setJobStatus(JS_ErrorTerminated);
   }

   bstrncpy(cr.Name, jcr->res.client->name(), sizeof(cr.Name));
   if (!db_get_client_record(jcr, jcr->db, &cr)) {
      Jmsg(jcr, M_WARNING, 0, _("Error getting Client record for Job report: ERR=%s"),
           db_strerror(jcr->db));
   }

   update_bootstrap_file(jcr);

   /* Map the final job status to the human readable summary text. */
   switch (jcr->JobStatus) {
   case JS_Terminated:
      term_msg = _("Backup OK");
      break;
   case JS_Warnings:
      term_msg = _("Backup OK -- with warnings");
      break;
   case JS_FatalError:
   case JS_ErrorTerminated:
      term_msg = _("*** Backup Error ***");
      msg_type = M_ERROR;             /* Generate error message */
      if (jcr->store_bsock) {
         jcr->store_bsock->signal(BNET_TERMINATE);
         if (jcr->SD_msg_chan_started) {
            pthread_cancel(jcr->SD_msg_chan);
         }
      }
      break;
   case JS_Canceled:
      term_msg = _("Backup Canceled");
      if (jcr->store_bsock) {
         jcr->store_bsock->signal(BNET_TERMINATE);
         if (jcr->SD_msg_chan_started) {
            pthread_cancel(jcr->SD_msg_chan);
         }
      }
      break;
   default:
      /* term_code is filled by the sprintf just below before use. */
      term_msg = term_code;
      sprintf(term_code, _("Inappropriate term code: %c\n"), jcr->JobStatus);
      break;
   }

   generate_backup_summary(jcr, &cr, msg_type, term_msg);

   Dmsg0(100, "Leave vbackup_cleanup()\n");
}
/*
 * Do a virtual backup, which consolidates all previous backups into
 * a sort of synthetic Full.
 *
 *  Returns: false on failure
 *           true  on success
 */
bool do_native_vbackup(JCR *jcr)
{
   char ed1[100];
   BSOCK *sd;
   char *p;
   db_list_ctx jobids;

   /* Both a read and a write storage must have been configured. */
   if (!jcr->res.rstorage) {
      Jmsg(jcr, M_FATAL, 0, _("No storage for reading given.\n"));
      return false;
   }

   if (!jcr->res.wstorage) {
      Jmsg(jcr, M_FATAL, 0, _("No storage for writing given.\n"));
      return false;
   }

   Dmsg2(100, "rstorage=%p wstorage=%p\n", jcr->res.rstorage, jcr->res.wstorage);
   Dmsg2(100, "Read store=%s, write store=%s\n",
         ((STORERES *)jcr->res.rstorage->first())->name(),
         ((STORERES *)jcr->res.wstorage->first())->name());

   /*
    * Print Job Start message
    */
   Jmsg(jcr, M_INFO, 0, _("Start Virtual Backup JobId %s, Job=%s\n"),
        edit_uint64(jcr->JobId, ed1), jcr->Job);
   if (!jcr->accurate) {
      Jmsg(jcr, M_WARNING, 0,
           _("This Job is not an Accurate backup so is not equivalent to a Full backup.\n"));
   }

   /* Collect the JobIds of all jobs to consolidate. */
   db_accurate_get_jobids(jcr, jcr->db, &jcr->jr, &jobids);
   Dmsg1(10, "Accurate jobids=%s\n", jobids.list);
   if (jobids.count == 0) {
      Jmsg(jcr, M_FATAL, 0, _("No previous Jobs found.\n"));
      return false;
   }

   /*
    * Now we find the last job that ran and store it's info in
    * the previous_jr record. We will set our times to the
    * values from that job so that anything changed after that
    * time will be picked up on the next backup.
    */
   p = strrchr(jobids.list, ',');       /* find last jobid */
   if (p != NULL) {
      p++;
   } else {
      p = jobids.list;
   }

   memset(&jcr->previous_jr, 0, sizeof(jcr->previous_jr));
   jcr->previous_jr.JobId = str_to_int64(p);
   Dmsg1(10, "Previous JobId=%s\n", p);

   if (!db_get_job_record(jcr, jcr->db, &jcr->previous_jr)) {
      Jmsg(jcr, M_FATAL, 0, _("Error getting Job record for previous Job: ERR=%s"),
           db_strerror(jcr->db));
      return false;
   }

   if (!create_bootstrap_file(jcr, jobids.list)) {
      Jmsg(jcr, M_FATAL, 0, _("Could not get or create the FileSet record.\n"));
      return false;
   }

   /*
    * Open a message channel connection with the Storage
    * daemon. This is to let him know that our client
    * will be contacting him for a backup session.
    */
   Dmsg0(110, "Open connection with storage daemon\n");
   jcr->setJobStatus(JS_WaitSD);

   /*
    * Start conversation with Storage daemon
    */
   if (!connect_to_storage_daemon(jcr, 10, me->SDConnectTimeout, true)) {
      return false;
   }
   sd = jcr->store_bsock;

   /*
    * Now start a job with the Storage daemon
    */
   if (!start_storage_daemon_job(jcr, jcr->res.rstorage, jcr->res.wstorage,
                                 /* send_bsr */ true)) {
      return false;
   }
   Dmsg0(100, "Storage daemon connection OK\n");

   /*
    * We re-update the job start record so that the start
    * time is set after the run before job. This avoids
    * that any files created by the run before job will
    * be saved twice. They will be backed up in the current
    * job, but not in the next one unless they are changed.
    * Without this, they will be backed up in this job and
    * in the next job run because in that case, their date
    * is after the start of this run.
    */
   jcr->start_time = time(NULL);
   jcr->jr.StartTime = jcr->start_time;
   jcr->jr.JobTDate = jcr->start_time;
   jcr->setJobStatus(JS_Running);

   /*
    * Update job start record
    */
   if (!db_update_job_start_record(jcr, jcr->db, &jcr->jr)) {
      Jmsg(jcr, M_FATAL, 0, "%s", db_strerror(jcr->db));
      return false;
   }

   /*
    * Declare the job started to start the MaxRunTime check
    */
   jcr->setJobStarted();

   /*
    * Start the job prior to starting the message thread below
    * to avoid two threads from using the BSOCK structure at
    * the same time.
    */
   if (!sd->fsend("run")) {
      return false;
   }

   /*
    * Now start a Storage daemon message thread
    */
   if (!start_storage_daemon_message_thread(jcr)) {
      return false;
   }

   jcr->setJobStatus(JS_Running);

   /*
    * Pickup Job termination data
    * Note, the SD stores in jcr->JobFiles/ReadBytes/JobBytes/JobErrors
    */
   wait_for_storage_daemon_termination(jcr);
   jcr->setJobStatus(jcr->SDJobStatus);
   db_write_batch_file_records(jcr);    /* used by bulk batch file insert */
   if (!jcr->is_JobStatus(JS_Terminated)) {
      return false;
   }

   native_vbackup_cleanup(jcr, jcr->JobStatus);
   return true;
}
/*
 * Decompress a FastLZ/LZ4-family compressed data block into
 * jcr->compress.inflate_buffer, growing that buffer as needed.
 *
 *  data/length - in: compressed block (prefixed by a comp_stream_header);
 *                out: pointer/length of the decompressed data.
 *  comp_magic  - selects the compressor variant (FZ4L/FZ4H => LZ4,
 *                anything else => FastLZ).
 *  sparse + want_data_stream - when both set, the first OFFSET_FADDR_SIZE
 *                bytes are a file-offset prefix that must be preserved in
 *                front of the decompressed payload.
 *
 * Returns: true on success (data/length updated),
 *          false on decompression error (Qmsg already emitted).
 */
static bool decompress_with_fastlz(JCR *jcr, const char *last_fname, char **data, uint32_t *length, uint32_t comp_magic, bool sparse, bool want_data_stream)
{
   int zstat;
   zfast_stream stream;
   zfast_stream_compressor compressor = COMPRESSOR_FASTLZ;
   char ec1[50]; /* Buffer printing huge values */

   /*
    * Map the stream magic onto the concrete compressor; default stays FastLZ.
    */
   switch (comp_magic) {
   case COMPRESS_FZ4L:
   case COMPRESS_FZ4H:
      compressor = COMPRESSOR_LZ4;
      break;
   }

   /*
    * NOTE! We only use uInt and Bytef because they are
    * needed by the fastlz routines, they should not otherwise
    * be used in Bareos.
    */
   memset(&stream, 0, sizeof(stream));
   /* Skip the comp_stream_header that precedes the compressed payload. */
   stream.next_in = (Bytef *)*data + sizeof(comp_stream_header);
   stream.avail_in = (uInt)*length - sizeof(comp_stream_header);
   if (sparse && want_data_stream) {
      /* Leave room at the front of the output for the sparse file-offset prefix. */
      stream.next_out = (Bytef *)jcr->compress.inflate_buffer + OFFSET_FADDR_SIZE;
      stream.avail_out = (uInt)jcr->compress.inflate_buffer_size - OFFSET_FADDR_SIZE;
   } else {
      stream.next_out = (Bytef *)jcr->compress.inflate_buffer;
      stream.avail_out = (uInt)jcr->compress.inflate_buffer_size;
   }
   Dmsg2(400, "Comp_len=%d msglen=%d\n", stream.avail_in, *length);

   if ((zstat = fastlzlibDecompressInit(&stream)) != Z_OK) {
      goto cleanup;
   }

   if ((zstat = fastlzlibSetCompressor(&stream, compressor)) != Z_OK) {
      goto cleanup;
   }

   while (1) {
      zstat = fastlzlibDecompress(&stream);
      switch (zstat) {
      case Z_BUF_ERROR:
         /*
          * The buffer size is too small, try with a bigger one
          */
         /* Grow by 50% and re-point the output window at the enlarged buffer.
          * NOTE(review): next_out is reset to the buffer start on retry, which
          * assumes fastlzlib restarts the current block's output rather than
          * resuming mid-block — confirm against the fastlzlib API docs. */
         jcr->compress.inflate_buffer_size = jcr->compress.inflate_buffer_size + (jcr->compress.inflate_buffer_size >> 1);
         jcr->compress.inflate_buffer = check_pool_memory_size(jcr->compress.inflate_buffer, jcr->compress.inflate_buffer_size);
         if (sparse && want_data_stream) {
            stream.next_out = (Bytef *)jcr->compress.inflate_buffer + OFFSET_FADDR_SIZE;
            stream.avail_out = (uInt)jcr->compress.inflate_buffer_size - OFFSET_FADDR_SIZE;
         } else {
            stream.next_out = (Bytef *)jcr->compress.inflate_buffer;
            stream.avail_out = (uInt)jcr->compress.inflate_buffer_size;
         }
         continue;
      case Z_OK:
      case Z_STREAM_END:
         break;
      default:
         /* Any other status is a hard decompression failure. */
         goto cleanup;
      }
      break;
   }

   /*
    * We return a decompressed data stream with the fileoffset encoded when this was a sparse stream.
    */
   if (sparse && want_data_stream) {
      memcpy(jcr->compress.inflate_buffer, *data, OFFSET_FADDR_SIZE);
   }

   /* Hand the caller the (possibly reallocated) inflate buffer. */
   *data = jcr->compress.inflate_buffer;
   *length = stream.total_out;
   Dmsg2(400, "Write uncompressed %d bytes, total before write=%s\n", *length, edit_uint64(jcr->JobBytes, ec1));
   fastlzlibDecompressEnd(&stream);

   return true;

cleanup:
   Qmsg(jcr, M_ERROR, 0, _("Uncompression error on file %s. ERR=%s\n"), last_fname, zlib_strerror(zstat));
   fastlzlibDecompressEnd(&stream);

   return false;
}
static bool decompress_with_lzo(JCR *jcr, const char *last_fname, char **data, uint32_t *length, bool sparse, bool want_data_stream) { char ec1[50]; /* Buffer printing huge values */ lzo_uint compress_len; const unsigned char *cbuf; unsigned char *wbuf; int status, real_compress_len; if (sparse && want_data_stream) { compress_len = jcr->compress.inflate_buffer_size - OFFSET_FADDR_SIZE; cbuf = (const unsigned char *)*data + OFFSET_FADDR_SIZE + sizeof(comp_stream_header); wbuf = (unsigned char *)jcr->compress.inflate_buffer + OFFSET_FADDR_SIZE; } else { compress_len = jcr->compress.inflate_buffer_size; cbuf = (const unsigned char *)*data + sizeof(comp_stream_header); wbuf = (unsigned char *)jcr->compress.inflate_buffer; } real_compress_len = *length - sizeof(comp_stream_header); Dmsg2(400, "Comp_len=%d msglen=%d\n", compress_len, *length); while ((status = lzo1x_decompress_safe(cbuf, real_compress_len, wbuf, &compress_len, NULL)) == LZO_E_OUTPUT_OVERRUN) { /* * The buffer size is too small, try with a bigger one */ jcr->compress.inflate_buffer_size = jcr->compress.inflate_buffer_size + (jcr->compress.inflate_buffer_size >> 1); jcr->compress.inflate_buffer = check_pool_memory_size(jcr->compress.inflate_buffer, jcr->compress.inflate_buffer_size); if (sparse && want_data_stream) { compress_len = jcr->compress.inflate_buffer_size - OFFSET_FADDR_SIZE; wbuf = (unsigned char *)jcr->compress.inflate_buffer + OFFSET_FADDR_SIZE; } else { compress_len = jcr->compress.inflate_buffer_size; wbuf = (unsigned char *)jcr->compress.inflate_buffer; } Dmsg2(400, "Comp_len=%d msglen=%d\n", compress_len, *length); } if (status != LZO_E_OK) { Qmsg(jcr, M_ERROR, 0, _("LZO uncompression error on file %s. ERR=%d\n"), last_fname, status); return false; } /* * We return a decompressed data stream with the fileoffset encoded when this was a sparse stream. 
*/ if (sparse && want_data_stream) { memcpy(jcr->compress.inflate_buffer, *data, OFFSET_FADDR_SIZE); } *data = jcr->compress.inflate_buffer; *length = compress_len; Dmsg2(400, "Write uncompressed %d bytes, total before write=%s\n", compress_len, edit_uint64(jcr->JobBytes, ec1)); return true; }