/*
 * Release resources allocated during backup.
 *
 * Called at the end of a native virtual-full backup. Copies the SD-side
 * counters into the JCR, writes the final Job record (borrowing the previous
 * job's timestamps, since a virtual full synthesizes old data), fetches the
 * updated Job/Client records for the report, and emits the job summary.
 *
 * jcr      - job control record of the finishing vbackup
 * TermCode - job termination status (JS_* code); may be downgraded to
 *            JS_Warnings here if the job terminated OK but had errors
 */
void native_vbackup_cleanup(JCR *jcr, int TermCode)
{
   char ec1[30], ec2[30];
   char term_code[100];
   const char *term_msg;
   int msg_type = M_INFO;
   CLIENT_DBR cr;
   POOL_MEM query(PM_MESSAGE);

   Dmsg2(100, "Enter backup_cleanup %d %c\n", TermCode, TermCode);
   memset(&cr, 0, sizeof(cr));

   switch (jcr->JobStatus) {
   case JS_Terminated:
   case JS_Warnings:
      jcr->jr.JobLevel = L_FULL;      /* we want this to appear as a Full backup */
      break;
   default:
      break;
   }

   /*
    * The SD did all the real work; adopt its file/byte counters as ours.
    */
   jcr->JobFiles = jcr->SDJobFiles;
   jcr->JobBytes = jcr->SDJobBytes;

   /* A "clean" termination with errors on either side becomes a warning. */
   if (jcr->getJobStatus() == JS_Terminated &&
       (jcr->JobErrors || jcr->SDErrors)) {
      TermCode = JS_Warnings;
   }

   update_job_end(jcr, TermCode);

   /*
    * Update final items to set them to the previous job's values,
    * so the synthesized job carries the timestamps of the data it contains.
    */
   Mmsg(query, "UPDATE Job SET StartTime='%s',EndTime='%s',"
        "JobTDate=%s WHERE JobId=%s",
        jcr->previous_jr.cStartTime, jcr->previous_jr.cEndTime,
        edit_uint64(jcr->previous_jr.JobTDate, ec1),
        edit_uint64(jcr->JobId, ec2));
   db_sql_query(jcr->db, query.c_str());

   /*
    * Get the fully updated job record for the final report.
    */
   if (!db_get_job_record(jcr, jcr->db, &jcr->jr)) {
      Jmsg(jcr, M_WARNING, 0, _("Error getting Job record for Job report: ERR=%s"),
           db_strerror(jcr->db));
      jcr->setJobStatus(JS_ErrorTerminated);
   }

   bstrncpy(cr.Name, jcr->res.client->name(), sizeof(cr.Name));
   if (!db_get_client_record(jcr, jcr->db, &cr)) {
      Jmsg(jcr, M_WARNING, 0, _("Error getting Client record for Job report: ERR=%s"),
           db_strerror(jcr->db));
   }

   update_bootstrap_file(jcr);

   /* Map final status to the human-readable summary line. */
   switch (jcr->JobStatus) {
   case JS_Terminated:
      term_msg = _("Backup OK");
      break;
   case JS_Warnings:
      term_msg = _("Backup OK -- with warnings");
      break;
   case JS_FatalError:
   case JS_ErrorTerminated:
      term_msg = _("*** Backup Error ***");
      msg_type = M_ERROR;             /* Generate error message */
      /* Tell the SD to stop and tear down the message channel thread. */
      if (jcr->store_bsock) {
         jcr->store_bsock->signal(BNET_TERMINATE);
         if (jcr->SD_msg_chan_started) {
            pthread_cancel(jcr->SD_msg_chan);
         }
      }
      break;
   case JS_Canceled:
      term_msg = _("Backup Canceled");
      if (jcr->store_bsock) {
         jcr->store_bsock->signal(BNET_TERMINATE);
         if (jcr->SD_msg_chan_started) {
            pthread_cancel(jcr->SD_msg_chan);
         }
      }
      break;
   default:
      /* term_msg points at term_code, which is filled just below. */
      term_msg = term_code;
      sprintf(term_code, _("Inappropriate term code: %c\n"), jcr->JobStatus);
      break;
   }

   generate_backup_summary(jcr, &cr, msg_type, term_msg);

   Dmsg0(100, "Leave vbackup_cleanup()\n");
}
/*
 * Pruning Jobs is a bit more complicated than purging Files
 * because we delete Job records only if there is a more current
 * backup of the FileSet. Otherwise, we keep the Job record.
 * In other words, we never delete the only Job record that
 * contains a current backup of a FileSet. This prevents the
 * Volume from being recycled and destroying a current backup.
 *
 * For Verify Jobs, we do not delete the last InitCatalog.
 *
 * For Restore Jobs there are no restrictions.
 *
 * NOTE(review): this chunk ends mid-function — the bail_out label, the
 * actual pruning of DelCandidates and the freeing of del/jobids_check are
 * outside the visible portion of the file.
 */
static bool prune_backup_jobs(UAContext *ua, CLIENTRES *client, POOLRES *pool)
{
   POOL_MEM query(PM_MESSAGE);
   POOL_MEM sql_where(PM_MESSAGE);
   POOL_MEM sql_from(PM_MESSAGE);
   utime_t period;
   char ed1[50];
   alist *jobids_check=NULL;
   struct accurate_check_ctx *elt;
   db_list_ctx jobids, tempids;
   JOB_DBR jr;
   struct del_ctx del;

   memset(&del, 0, sizeof(del));

   /* Pool retention takes precedence over client retention. */
   if (pool && pool->JobRetention > 0) {
      period = pool->JobRetention;
   } else if (client) {
      period = client->JobRetention;
   } else {
      /* should specify at least pool or client */
      return false;
   }

   db_lock(ua->db);
   if (!prune_set_filter(ua, client, pool, period, &sql_from, &sql_where)) {
      goto bail_out;
   }

   /* Drop any previous temporary tables still there */
   drop_temp_tables(ua);

   /* Create temp tables and indicies */
   if (!create_temp_tables(ua)) {
      goto bail_out;
   }

   edit_utime(period, ed1, sizeof(ed1));
   Jmsg(ua->jcr, M_INFO, 0, _("Begin pruning Jobs older than %s.\n"), ed1);

   del.max_ids = 100;
   del.JobId = (JobId_t *)malloc(sizeof(JobId_t) * del.max_ids);
   del.PurgedFiles = (char *)malloc(del.max_ids);

   /*
    * Select all files that are older than the JobRetention period
    * and add them into the "DeletionCandidates" table.
    */
   Mmsg(query,
        "INSERT INTO DelCandidates "
        "SELECT JobId,PurgedFiles,FileSetId,JobFiles,JobStatus "
        "FROM Job %s "      /* JOIN Pool/Client */
        "WHERE Type IN ('B', 'C', 'M', 'V', 'D', 'R', 'c', 'm', 'g') "
        " %s ",             /* Pool/Client + JobTDate */
        sql_from.c_str(), sql_where.c_str());

   Dmsg1(050, "select sql=%s\n", query.c_str());
   if (!db_sql_query(ua->db, query.c_str())) {
      if (ua->verbose) {
         ua->error_msg("%s", db_strerror(ua->db));
      }
      goto bail_out;
   }

   /* Now, for the selection, we discard some of them in order to be always
    * able to restore files. (ie, last full, last diff, last incrs)
    * Note: The DISTINCT could be more useful if we don't get FileSetId
    */
   jobids_check = New(alist(10, owned_by_alist));
   Mmsg(query,
        "SELECT DISTINCT Job.Name, FileSet, Client.Name, Job.FileSetId, "
        "Job.ClientId, Job.Type "
        "FROM DelCandidates "
        "JOIN Job USING (JobId) "
        "JOIN Client USING (ClientId) "
        "JOIN FileSet ON (Job.FileSetId = FileSet.FileSetId) "
        "WHERE Job.Type IN ('B') "            /* Look only Backup jobs */
        "AND Job.JobStatus IN ('T', 'W') "    /* Look only useful jobs */
      );

   /* The job_select_handler will skip jobs or filesets that are no longer
    * in the configuration file. Interesting ClientId/FileSetId will be
    * added to jobids_check (currently disabled in 6.0.7b)
    */
   if (!db_sql_query(ua->db, query.c_str(), job_select_handler, jobids_check)) {
      ua->error_msg("%s", db_strerror(ua->db));
   }

   /* For this selection, we exclude current jobs used for restore or
    * accurate. This will prevent to prune the last full backup used for
    * current backup & restore
    */
   memset(&jr, 0, sizeof(jr));
   /* To find useful jobs, we do like an incremental */
   jr.JobLevel = L_INCREMENTAL;
   foreach_alist(elt, jobids_check) {
      jr.ClientId = elt->ClientId;   /* should be always the same */
      jr.FileSetId = elt->FileSetId;
      db_accurate_get_jobids(ua->jcr, ua->db, &jr, &tempids);
      jobids.add(tempids);
   }
/* * This is called during restore to create the file (if necessary) We must return in rp->create_status: * * CF_ERROR -- error * CF_SKIP -- skip processing this file * CF_EXTRACT -- extract the file (i.e.call i/o routines) * CF_CREATED -- created, but no content to extract (typically directories) */ static bRC createFile(bpContext *ctx, struct restore_pkt *rp) { int status; bool exists = false; struct stat st; plugin_ctx *p_ctx = (plugin_ctx *)ctx->pContext; if (!p_ctx) { return bRC_Error; } /* * See if the file already exists. */ Dmsg(ctx, 400, "Replace=%c %d\n", (char)rp->replace, rp->replace); status = ceph_lstat(p_ctx->cmount, rp->ofname, &st); if (status == 0) { exists = true; switch (rp->replace) { case REPLACE_IFNEWER: if (rp->statp.st_mtime <= st.st_mtime) { Jmsg(ctx, M_INFO, 0, _("File skipped. Not newer: %s\n"), rp->ofname); rp->create_status = CF_SKIP; goto bail_out; } break; case REPLACE_IFOLDER: if (rp->statp.st_mtime >= st.st_mtime) { Jmsg(ctx, M_INFO, 0, _("File skipped. Not older: %s\n"), rp->ofname); rp->create_status = CF_SKIP; goto bail_out; } break; case REPLACE_NEVER: /* * Set attributes if we created this directory */ if (rp->type == FT_DIREND && path_list_lookup(p_ctx->path_list, rp->ofname)) { break; } Jmsg(ctx, M_INFO, 0, _("File skipped. Already exists: %s\n"), rp->ofname); rp->create_status = CF_SKIP; goto bail_out; case REPLACE_ALWAYS: break; } } switch (rp->type) { case FT_LNKSAVED: /* Hard linked, file already saved */ case FT_LNK: case FT_SPEC: /* Fifo, ... to be backed up */ case FT_REGE: /* Empty file */ case FT_REG: /* Regular file */ /* * See if file already exists then we need to unlink it. */ if (exists) { Dmsg(ctx, 400, "unlink %s\n", rp->ofname); status = ceph_unlink(p_ctx->cmount, rp->ofname); if (status != 0) { berrno be; Jmsg(ctx, M_ERROR, 0, _("File %s already exists and could not be replaced. 
ERR=%s.\n"), rp->ofname, be.bstrerror(-status)); /* * Continue despite error */ } } else { /* * File doesn't exist see if we need to create the parent directory. */ POOL_MEM parent_dir(PM_FNAME); char *bp; pm_strcpy(parent_dir, rp->ofname); bp = strrchr(parent_dir.c_str(), '/'); if (bp) { *bp = '\0'; if (strlen(parent_dir.c_str())) { if (!cephfs_makedirs(p_ctx, parent_dir.c_str())) { rp->create_status = CF_ERROR; goto bail_out; } } } } /* * See if we need to perform anything special for the restore file type. */ switch (rp->type) { case FT_LNKSAVED: status = ceph_link(p_ctx->cmount, rp->olname, rp->ofname); if (status < 0) { berrno be; Jmsg(ctx, M_ERROR, "ceph_link(%s) failed: %s\n", rp->ofname, be.bstrerror(-status)); rp->create_status = CF_ERROR; } else { rp->create_status = CF_CREATED; } break; case FT_LNK: status = ceph_symlink(p_ctx->cmount, rp->olname, rp->ofname); if (status < 0) { berrno be; Jmsg(ctx, M_ERROR, "ceph_symlink(%s) failed: %s\n", rp->ofname, be.bstrerror(-status)); rp->create_status = CF_ERROR; } else { rp->create_status = CF_CREATED; } break; case FT_SPEC: status = ceph_mknod(p_ctx->cmount, rp->olname, rp->statp.st_mode, rp->statp.st_rdev); if (status < 0) { berrno be; Jmsg(ctx, M_ERROR, "ceph_mknod(%s) failed: %s\n", rp->ofname, be.bstrerror(-status)); rp->create_status = CF_ERROR; } else { rp->create_status = CF_CREATED; } break; default: rp->create_status = CF_EXTRACT; break; } break; case FT_DIRBEGIN: case FT_DIREND: if (!cephfs_makedirs(p_ctx, rp->ofname)) { rp->create_status = CF_ERROR; } else { rp->create_status = CF_CREATED; } break; case FT_DELETED: Jmsg(ctx, M_INFO, 0, _("Original file %s have been deleted: type=%d\n"), rp->ofname, rp->type); break; default: Jmsg(ctx, M_ERROR, 0, _("Unknown file type %d; not restored: %s\n"), rp->type, rp->ofname); break; } bail_out: return bRC_OK; }
/*
 * See if there is any local shadowing within an include block.
 *
 * Compares every pattern in incexe->name_list against every later pattern
 * and warns (or, when remove is true, removes) whichever of a shadowing
 * pair is longer. Removal while iterating is handled in two ways:
 *  - removing str2 (the later, longer entry): keep iterating from its
 *    successor, which was saved beforehand;
 *  - removing str1 (the current, longer entry): set str1 to NULL so the
 *    inner loop ends and the outer call to next(NULL) restarts the scan
 *    from the head of the list.
 *
 * jcr    - job context, used for warning messages only
 * incexe - the include block whose name_list is examined (may be mutated)
 * remove - true to delete shadowed-by entries, false to only warn
 */
static void check_local_fileset_shadowing(JCR *jcr, findINCEXE *incexe, bool remove)
{
   dlistString *str1, *str2, *next;
   bool recursive;

   /*
    * See if this is a recursive include block.
    */
   recursive = include_block_is_recursive(incexe);

   /*
    * Loop over all entries in the name_list
    * and compare them against all next entries
    * after the one we are currently examining.
    * This way we only check shadowing only once.
    */
   str1 = (dlistString *)incexe->name_list.first();
   while (str1) {
      str2 = (dlistString *)incexe->name_list.next(str1);
      while (str1 && str2) {
         if (check_include_pattern_shadowing(jcr, str1->c_str(), str2->c_str(), recursive)) {
            /*
             * See what entry shadows the other, the longest entry
             * shadow the shorter one.
             */
            if (strlen(str1->c_str()) < strlen(str2->c_str())) {
               if (remove) {
                  /*
                   * Pattern2 is longer then Pattern1 e.g. the include block patterns
                   * are probably sorted right. This is the easiest case where we just
                   * remove the entry from the list and continue.
                   */
                  Jmsg(jcr, M_WARNING, 0,
                       _("Fileset include block entry %s shadows %s removing it from fileset\n"),
                       str2->c_str(), str1->c_str());
                  /* Save the successor before removing str2 from the list. */
                  next = (dlistString *)incexe->name_list.next(str2);
                  incexe->name_list.remove(str2);
                  str2 = next;
                  continue;
               } else {
                  Jmsg(jcr, M_WARNING, 0,
                       _("Fileset include block entry %s shadows %s\n"),
                       str2->c_str(), str1->c_str());
               }
            } else {
               if (remove) {
                  /*
                   * Pattern1 is longer then Pattern2 e.g. the include block patterns
                   * are not sorted right and probably reverse. This is a bit more difficult.
                   * We remove the first pattern from the list and restart the shadow scan.
                   * By setting str1 to NULL we force a rescan as the next method of the dlist
                   * will start at the first entry of the dlist again.
                   */
                  Jmsg(jcr, M_WARNING, 0,
                       _("Fileset include block entry %s shadows %s removing it from fileset\n"),
                       str1->c_str(), str2->c_str());
                  incexe->name_list.remove(str1);
                  str1 = NULL;
                  continue;
               } else {
                  Jmsg(jcr, M_WARNING, 0,
                       _("Fileset include block entry %s shadows %s\n"),
                       str1->c_str(), str2->c_str());
               }
            }
         }
         str2 = (dlistString *)incexe->name_list.next(str2);
      }
      /* next(NULL) restarts from the list head after a str1 removal. */
      str1 = (dlistString *)incexe->name_list.next(str1);
   }
}
/*
 * Check if a certain fileset include pattern shadows an other pattern.
 *
 * Two directories shadow each other when (recursion enabled) one is a
 * path prefix of the other AND both live on the same filesystem
 * (st_dev equal). Two non-directories shadow each other when they are
 * hardlinks to the same inode.
 *
 * Fix: the prefix test used to compare only the first MIN(len1, len2)
 * bytes, so "/foo" was reported as shadowing "/foobar" even though
 * "/foobar" is not inside "/foo". We now additionally require the match
 * to end on a path-component boundary: either the patterns are equal in
 * length, or the longer one has a '/' right after the common prefix.
 *
 * Returns true when one pattern shadows the other; false otherwise
 * (including when either pattern cannot be lstat()ed, after a warning).
 */
static inline bool check_include_pattern_shadowing(JCR *jcr,
                                                   const char *pattern1,
                                                   const char *pattern2,
                                                   bool recursive)
{
   int len1, len2;
   bool retval = false;
   struct stat st1, st2;

   /*
    * See if one directory shadows the other or if two
    * files are hardlinked.
    */
   if (lstat(pattern1, &st1) != 0) {
      berrno be;
      Jmsg(jcr, M_WARNING, 0, _("Cannot stat file %s: ERR=%s\n"), pattern1, be.bstrerror());
      goto bail_out;
   }

   if (lstat(pattern2, &st2) != 0) {
      berrno be;
      Jmsg(jcr, M_WARNING, 0, _("Cannot stat file %s: ERR=%s\n"), pattern2, be.bstrerror());
      goto bail_out;
   }

   if (S_ISDIR(st1.st_mode) && S_ISDIR(st2.st_mode)) {
      /*
       * Only check shadowing of directories when recursion is turned on.
       */
      if (recursive) {
         len1 = strlen(pattern1);
         len2 = strlen(pattern2);

         /*
          * See if one pattern shadows the other: the shorter must be a
          * whole-component prefix of the longer (or they must be equal).
          */
         if (bstrncmp(pattern1, pattern2, MIN(len1, len2)) &&
             (len1 == len2 ||
              (len1 < len2 ? pattern2[len1] == '/' : pattern1[len2] == '/'))) {
            /*
             * If both directories have the same st_dev they shadow
             * each other e.g. are not on seperate filesystems.
             */
            if (st1.st_dev == st2.st_dev) {
               retval = true;
            }
         }
      }
   } else {
      /*
       * See if the two files are hardlinked.
       */
      if (st1.st_dev == st2.st_dev &&
          st1.st_ino == st2.st_ino) {
         retval = true;
      }
   }

bail_out:
   return retval;
}
/*
 * Depending on the initiate parameter perform one of the following:
 *
 * - First make him prove his identity and then prove our identity to the Remote.
 * - First prove our identity to the Remote and then make him prove his identity.
 *
 * After the CRAM-MD5 exchange succeeds, both sides' TLS requirements are
 * compared and, when compatible, a TLS session is negotiated on this socket
 * (server side when initiated_by_remote, client side otherwise).
 *
 * jcr                 - job context (may be NULL; only used for cancel checks
 *                       and messages)
 * what / name         - description of the peer, used in error messages only
 * password            - shared secret; must already be MD5-encoded (ASSERTed)
 * tls                 - TLS settings (context, require/enable, peer cert list)
 * initiated_by_remote - true: peer connected to us (challenge first);
 *                       false: we connected to the peer (respond first)
 *
 * Returns true on successful mutual authentication; also records the result
 * in jcr->authenticated when a jcr is supplied.
 */
bool BSOCK::two_way_authenticate(JCR *jcr, const char *what, const char *name,
                                 s_password &password, tls_t &tls,
                                 bool initiated_by_remote)
{
   btimer_t *tid = NULL;
   const int dbglvl = 50;
   bool compatible = true;
   bool auth_success = false;
   int tls_local_need = BNET_TLS_NONE;
   int tls_remote_need = BNET_TLS_NONE;

   ASSERT(password.encoding == p_encoding_md5);

   /*
    * TLS Requirement
    */
   if (get_tls_enable(tls.ctx)) {
      tls_local_need = get_tls_require(tls.ctx) ? BNET_TLS_REQUIRED : BNET_TLS_OK;
   }

   if (jcr && job_canceled(jcr)) {
      Dmsg0(dbglvl, "Failed, because job is canceled.\n");
      auth_success = false;           /* force quick exit */
      goto auth_fatal;
   }

   /*
    * Timeout Hello after 10 min
    */
   tid = start_bsock_timer(this, AUTH_TIMEOUT);

   /*
    * See if we initiate the challenge or respond to a challenge.
    */
   if (initiated_by_remote) {
      /*
       * Challenge Remote.
       */
      auth_success = cram_md5_challenge(this, password.value, tls_local_need, compatible);
      if (auth_success) {
         /*
          * Respond to remote challenge
          */
         auth_success = cram_md5_respond(this, password.value, &tls_remote_need, &compatible);
         if (!auth_success) {
            Dmsg1(dbglvl, "Respond cram-get-auth failed with %s\n", who());
         }
      } else {
         Dmsg1(dbglvl, "Challenge cram-auth failed with %s\n", who());
      }
   } else {
      /*
       * Respond to remote challenge
       */
      auth_success = cram_md5_respond(this, password.value, &tls_remote_need, &compatible);
      if (!auth_success) {
         Dmsg1(dbglvl, "cram_respond failed for %s\n", who());
      } else {
         /*
          * Challenge Remote.
          */
         auth_success = cram_md5_challenge(this, password.value, tls_local_need, compatible);
         if (!auth_success) {
            Dmsg1(dbglvl, "cram_challenge failed for %s\n", who());
         }
      }
   }

   if (!auth_success) {
      Jmsg(jcr, M_FATAL, 0, _("Authorization key rejected by %s %s.\n"
                              "Please see %s for help.\n"),
           what, name, MANUAL_AUTH_URL);
      goto auth_fatal;
   }

   if (jcr && job_canceled(jcr)) {
      Dmsg0(dbglvl, "Failed, because job is canceled.\n");
      auth_success = false;           /* force quick exit */
      goto auth_fatal;
   }

   /*
    * Verify that the remote host is willing to meet our TLS requirements
    */
   if (tls_remote_need < tls_local_need &&
       tls_local_need != BNET_TLS_OK && tls_remote_need != BNET_TLS_OK) {
      Jmsg(jcr, M_FATAL, 0, _("Authorization problem: Remote server did not"
                              " advertize required TLS support.\n"));
      Dmsg2(dbglvl, "remote_need=%d local_need=%d\n", tls_remote_need, tls_local_need);
      auth_success = false;
      goto auth_fatal;
   }

   /*
    * Verify that we are willing to meet the remote host's requirements
    */
   if (tls_remote_need > tls_local_need &&
       tls_local_need != BNET_TLS_OK && tls_remote_need != BNET_TLS_OK) {
      Jmsg(jcr, M_FATAL, 0, _("Authorization problem: Remote server requires TLS.\n"));
      Dmsg2(dbglvl, "remote_need=%d local_need=%d\n", tls_remote_need, tls_local_need);
      auth_success = false;
      goto auth_fatal;
   }

   /* Both sides at least allow TLS: negotiate it now. */
   if (tls_local_need >= BNET_TLS_OK && tls_remote_need >= BNET_TLS_OK) {
      alist *verify_list = NULL;

      if (tls.verify_peer) {
         verify_list = tls.allowed_cns;
      }

      /*
       * See if we are handshaking a passive client connection.
       */
      if (initiated_by_remote) {
         if (!bnet_tls_server(tls.ctx, this, verify_list)) {
            Jmsg(jcr, M_FATAL, 0, _("TLS negotiation failed.\n"));
            Dmsg0(dbglvl, "TLS negotiation failed.\n");
            auth_success = false;
            goto auth_fatal;
         }
      } else {
         if (!bnet_tls_client(tls.ctx, this, tls.verify_peer, verify_list)) {
            Jmsg(jcr, M_FATAL, 0, _("TLS negotiation failed.\n"));
            Dmsg0(dbglvl, "TLS negotiation failed.\n");
            auth_success = false;
            goto auth_fatal;
         }
      }

      if (tls.authenticate) {         /* tls authentication only? */
         free_tls();                  /* yes, shutdown tls */
      }
   }

auth_fatal:
   if (tid) {
      stop_bsock_timer(tid);
      tid = NULL;
   }
   if (jcr) {
      jcr->authenticated = auth_success;
   }

   return auth_success;
}
/*
 * Handle the Director's "run" command in the Storage daemon: establish
 * (or wait for) the File daemon connection, authenticate it, then run
 * the job. Always returns false so the command loop terminates.
 *
 * Fixes:
 *  - The FD authentication condition was inverted: an already-authenticated
 *    connection (jcr->authenticated set) took the *failure* branch. Now an
 *    existing authentication or a successful authenticate_filed() marks the
 *    session authenticated, and only a real failure is reported.
 *  - The Dmsg4 trace passed jcr->JobId for the "Auth=%d" slot and
 *    jcr->authenticated for "jid=%d"; argument order now matches the format.
 */
bool run_cmd(JCR *jcr)
{
   struct timeval tv;
   struct timezone tz;
   struct timespec timeout;
   int errstat = 0;

   Dsm_check(200);
   Dmsg1(200, "Run_cmd: %s\n", jcr->dir_bsock->msg);

   /* If we do not need the FD, we are doing a virtual backup. */
   if (jcr->no_client_used()) {
      do_vbackup(jcr);
      return false;
   }

   jcr->sendJobStatus(JS_WaitFD);     /* wait for FD to connect */

   Dmsg2(050, "sd_calls_client=%d sd_client=%d\n", jcr->sd_calls_client, jcr->sd_client);
   if (jcr->sd_calls_client) {
      /* We connect out to the client. */
      if (!read_client_hello(jcr)) {
         return false;
      }

      /*
       * Authenticate the File daemon (skip if already authenticated).
       */
      Dmsg0(050, "=== Authenticate FD\n");
      if (jcr->authenticated || authenticate_filed(jcr, jcr->file_bsock, jcr->FDVersion)) {
         jcr->authenticated = true;
      } else {
         Dmsg1(050, "Authentication failed Job %s\n", jcr->Job);
         Jmsg(jcr, M_FATAL, 0, _("Unable to authenticate File daemon\n"));
      }
   } else if (!jcr->sd_client) {
      /* We wait to receive connection from Client */
      gettimeofday(&tv, &tz);
      timeout.tv_nsec = tv.tv_usec * 1000;
      timeout.tv_sec = tv.tv_sec + me->client_wait;

      Dmsg3(050, "%s waiting %d sec for FD to contact SD key=%s\n",
            jcr->Job, (int)(timeout.tv_sec-time(NULL)), jcr->sd_auth_key);
      Dmsg3(800, "=== Block Job=%s jid=%d %p\n", jcr->Job, jcr->JobId, jcr);

      /*
       * Wait for the File daemon to contact us to start the Job,
       * when he does, we will be released, unless the 30 minutes
       * expires.
       */
      P(mutex);
      while ( !jcr->authenticated && !job_canceled(jcr) ) {
         errstat = pthread_cond_timedwait(&jcr->job_start_wait, &mutex, &timeout);
         if (errstat == ETIMEDOUT || errstat == EINVAL || errstat == EPERM) {
            break;
         }
         Dmsg1(800, "=== Auth cond errstat=%d\n", errstat);
      }
      Dmsg4(050, "=== Auth=%d jid=%d canceled=%d errstat=%d\n",
            jcr->authenticated, jcr->JobId, job_canceled(jcr), errstat);
      V(mutex);
      Dmsg2(800, "Auth fail or cancel for jid=%d %p\n", jcr->JobId, jcr);
   }

   /* Destroy the session key now that it has served its purpose. */
   memset(jcr->sd_auth_key, 0, strlen(jcr->sd_auth_key));

   if (jcr->authenticated && !job_canceled(jcr)) {
      Dmsg2(800, "Running jid=%d %p\n", jcr->JobId, jcr);
      run_job(jcr);                   /* Run the job */
   }
   Dmsg2(800, "Done jid=%d %p\n", jcr->JobId, jcr);
   return false;
}
/*
 * First prove our identity to the Remote daemon and then make him prove his identity.
 *
 * File-daemon side CRAM-MD5 mutual authentication against the SD using
 * jcr->sd_auth_key, followed by TLS requirement matching and (if agreed)
 * a client-side TLS handshake. The session key is zeroed on exit and all
 * failures are serialized through a mutex with a 6 second sleep to slow
 * down brute-force / DOS attempts.
 *
 * bs       - connected socket to the remote daemon
 * jcr      - job context providing sd_auth_key and cancel state
 * initiate - true: send our challenge first; false: answer theirs first
 * what     - daemon description used in the failure message
 *
 * Returns true when mutual authentication (and TLS, if required) succeeded.
 */
static inline bool two_way_authenticate(BSOCK *bs, JCR *jcr, bool initiate, const char *what)
{
   int tls_local_need = BNET_TLS_NONE;
   int tls_remote_need = BNET_TLS_NONE;
   bool compatible = true;
   bool auth_success = false;
   btimer_t *tid = NULL;

   /*
    * TLS Requirement
    */
   if (have_tls && me->tls_enable) {
      if (me->tls_require) {
         tls_local_need = BNET_TLS_REQUIRED;
      } else {
         tls_local_need = BNET_TLS_OK;
      }
   }

   /* "TLS authenticate only" still forces the TLS handshake. */
   if (me->tls_authenticate) {
      tls_local_need = BNET_TLS_REQUIRED;
   }

   if (job_canceled(jcr)) {
      auth_success = false;           /* force quick exit */
      goto auth_fatal;
   }

   /*
    * Timeout Hello after 10 min
    */
   tid = start_bsock_timer(bs, AUTH_TIMEOUT);

   /*
    * See if we initiate the challenge or respond to a challenge.
    */
   if (initiate) {
      /*
       * Challenge SD
       */
      auth_success = cram_md5_challenge(bs, jcr->sd_auth_key, tls_local_need, compatible);
      if (auth_success) {
         /*
          * Respond to his challenge
          */
         auth_success = cram_md5_respond(bs, jcr->sd_auth_key, &tls_remote_need, &compatible);
         if (!auth_success) {
            Dmsg1(dbglvl, "Respond cram-get-auth failed with %s\n", bs->who());
         }
      } else {
         Dmsg1(dbglvl, "Challenge cram-auth failed with %s\n", bs->who());
      }
   } else {
      /*
       * Respond to challenge
       */
      auth_success = cram_md5_respond(bs, jcr->sd_auth_key, &tls_remote_need, &compatible);
      if (job_canceled(jcr)) {
         auth_success = false;        /* force quick exit */
         goto auth_fatal;
      }
      if (!auth_success) {
         Dmsg1(dbglvl, "cram_respond failed for %s\n", bs->who());
      } else {
         /*
          * Challenge SD.
          */
         auth_success = cram_md5_challenge(bs, jcr->sd_auth_key, tls_local_need, compatible);
         if (!auth_success) {
            Dmsg1(dbglvl, "cram_challenge failed for %s\n", bs->who());
         }
      }
   }

   if (!auth_success) {
      Jmsg(jcr, M_FATAL, 0, _("Authorization key rejected by %s daemon.\n"
                              "Please see " MANUAL_AUTH_URL " for help.\n"), what);
      goto auth_fatal;
   }

   /*
    * Verify that the remote host is willing to meet our TLS requirements
    */
   if (tls_remote_need < tls_local_need &&
       tls_local_need != BNET_TLS_OK && tls_remote_need != BNET_TLS_OK) {
      Jmsg(jcr, M_FATAL, 0, _("Authorization problem: Remote server did not"
                              " advertize required TLS support.\n"));
      Dmsg2(dbglvl, "remote_need=%d local_need=%d\n", tls_remote_need, tls_local_need);
      auth_success = false;
      goto auth_fatal;
   }

   /*
    * Verify that we are willing to meet the remote host's requirements
    */
   if (tls_remote_need > tls_local_need &&
       tls_local_need != BNET_TLS_OK && tls_remote_need != BNET_TLS_OK) {
      Jmsg(jcr, M_FATAL, 0, _("Authorization problem: Remote server requires TLS.\n"));
      Dmsg2(dbglvl, "remote_need=%d local_need=%d\n", tls_remote_need, tls_local_need);
      auth_success = false;
      goto auth_fatal;
   }

   if (tls_local_need >= BNET_TLS_OK && tls_remote_need >= BNET_TLS_OK) {
      /*
       * Engage TLS! Full Speed Ahead!
       */
      if (!bnet_tls_client(me->tls_ctx, bs, NULL)) {
         Jmsg(jcr, M_FATAL, 0, _("TLS negotiation failed.\n"));
         auth_success = false;
         goto auth_fatal;
      }

      if (me->tls_authenticate) {     /* tls authentication only? */
         bs->free_tls();              /* yes, shutdown tls */
      }
   }

auth_fatal:
   /*
    * Destroy session key
    */
   memset(jcr->sd_auth_key, 0, strlen(jcr->sd_auth_key));
   stop_bsock_timer(tid);
   /*
    * Single thread all failures to avoid DOS
    */
   if (!auth_success) {
      P(mutex);
      bmicrosleep(6, 0);
      V(mutex);
   }

   return auth_success;
}
/*
 * Report a fatal job error for a compression algorithm that is not
 * supported on this platform.
 */
static inline void unknown_compression_algorithm(JCR *jcr, uint32_t compression_algorithm)
{
   const char *algorithm_name = cmprs_algo_to_text(compression_algorithm);

   Jmsg(jcr, M_FATAL, 0, _("%s compression not supported on this platform\n"),
        algorithm_name);
}
/*
 * Find all the requested files and send them
 * to the Storage daemon.
 *
 * Note, we normally carry on a one-way
 * conversation from this point on with the SD, simply blasting
 * data to him.  To properly know what is going on, we
 * also run a "heartbeat" monitor which reads the socket and
 * reacts accordingly (at the moment it has nothing to do
 * except echo the heartbeat to the Director).
 *
 * jcr  - the job being backed up (supplies store_bsock, ff, crypto state)
 * addr - unused here; presumably the SD address used by the caller —
 *        TODO confirm
 *
 * Returns true when the file traversal completed without error.
 * The compress/acl/xattr/zlib buffers allocated here are released at
 * the bottom of this function after the traversal finishes.
 */
bool blast_data_to_storage_daemon(JCR *jcr, char *addr)
{
   BSOCK *sd;
   bool ok = true;
   // TODO landonf: Allow user to specify encryption algorithm

   sd = jcr->store_bsock;

   set_jcr_job_status(jcr, JS_Running);

   Dmsg1(300, "bfiled: opened data connection %d to stored\n", sd->m_fd);

   /* Pick up the client's configured network buffer size, if any. */
   LockRes();
   CLIENT *client = (CLIENT *)GetNextRes(R_CLIENT, NULL);
   UnlockRes();
   uint32_t buf_size;
   if (client) {
      buf_size = client->max_network_buffer_size;
   } else {
      buf_size = 0;                   /* use default */
   }
   if (!sd->set_buffer_size(buf_size, BNET_SETBUF_WRITE)) {
      set_jcr_job_status(jcr, JS_ErrorTerminated);
      Jmsg(jcr, M_FATAL, 0, _("Cannot set buffer size FD->SD.\n"));
      return false;
   }

   jcr->buf_size = sd->msglen;
   /* Adjust for compression so that output buffer is
    * 12 bytes + 0.1% larger than input buffer plus 18 bytes.
    * This gives a bit extra plus room for the sparse addr if any.
    * Note, we adjust the read size to be smaller so that the
    * same output buffer can be used without growing it.
    *
    * The zlib compression workset is initialized here to minimize
    * the "per file" load. The jcr member is only set, if the init
    * was successful.
    */
   jcr->compress_buf_size = jcr->buf_size + ((jcr->buf_size+999) / 1000) + 30;
   jcr->compress_buf = get_memory(jcr->compress_buf_size);

#ifdef HAVE_LIBZ
   z_stream *pZlibStream = (z_stream*)malloc(sizeof(z_stream));
   if (pZlibStream) {
      pZlibStream->zalloc = Z_NULL;
      pZlibStream->zfree = Z_NULL;
      pZlibStream->opaque = Z_NULL;
      pZlibStream->state = Z_NULL;

      /* Only publish the stream on successful init; otherwise discard it. */
      if (deflateInit(pZlibStream, Z_DEFAULT_COMPRESSION) == Z_OK) {
         jcr->pZLIB_compress_workset = pZlibStream;
      } else {
         free (pZlibStream);
      }
   }
#endif

   if (!crypto_session_start(jcr)) {
      return false;
   }

   set_find_options((FF_PKT *)jcr->ff, jcr->incremental, jcr->mtime);

   /* in accurate mode, we overwrite the find_one check function */
   if (jcr->accurate) {
      set_find_changed_function((FF_PKT *)jcr->ff, accurate_check_file);
   }

   start_heartbeat_monitor(jcr);

   jcr->acl_data = get_pool_memory(PM_MESSAGE);
   jcr->xattr_data = get_pool_memory(PM_MESSAGE);

   /* Subroutine save_file() is called for each file */
   if (!find_files(jcr, (FF_PKT *)jcr->ff, save_file, plugin_save)) {
      ok = false;                     /* error */
      set_jcr_job_status(jcr, JS_ErrorTerminated);
   }

   accurate_send_deleted_list(jcr);   /* send deleted list to SD */

   stop_heartbeat_monitor(jcr);

   sd->signal(BNET_EOD);              /* end of sending data */

   /* Release everything allocated above; NULL the jcr pointers so a
    * later cleanup pass cannot double-free them. */
   if (jcr->acl_data) {
      free_pool_memory(jcr->acl_data);
      jcr->acl_data = NULL;
   }
   if (jcr->xattr_data) {
      free_pool_memory(jcr->xattr_data);
      jcr->xattr_data = NULL;
   }
   if (jcr->big_buf) {
      free(jcr->big_buf);
      jcr->big_buf = NULL;
   }
   if (jcr->compress_buf) {
      free_pool_memory(jcr->compress_buf);
      jcr->compress_buf = NULL;
   }
   if (jcr->pZLIB_compress_workset) {
      /* Free the zlib stream */
#ifdef HAVE_LIBZ
      deflateEnd((z_stream *)jcr->pZLIB_compress_workset);
#endif
      free (jcr->pZLIB_compress_workset);
      jcr->pZLIB_compress_workset = NULL;
   }
   crypto_session_end(jcr);

   Dmsg1(100, "end blast_data ok=%d\n", ok);
   return ok;
}
/*
 * Send data read from an already open file descriptor.
 *
 * We return 1 on sucess and 0 on errors.
 *
 * ***FIXME***
 * We use ff_pkt->statp.st_size when FO_SPARSE to know when to stop
 * reading.
 * Currently this is not a problem as the only other stream, resource forks,
 * are not handled as sparse files.
 *
 * Pipeline per record: read -> (sparse zero-block elision) -> digest /
 * signing update -> (zlib deflate) -> (encrypt, with the plaintext record
 * length encrypted first) -> send to SD.  The SD's message buffer is
 * temporarily swapped (sd->msg) to point at whichever buffer holds the
 * output, and restored to msgsave after each send.
 */
static int send_data(JCR *jcr, int stream, FF_PKT *ff_pkt, DIGEST *digest,
                     DIGEST *signing_digest)
{
   BSOCK *sd = jcr->store_bsock;
   uint64_t fileAddr = 0;             /* file address */
   char *rbuf, *wbuf;
   int32_t rsize = jcr->buf_size;     /* read buffer size */
   POOLMEM *msgsave;
   CIPHER_CONTEXT *cipher_ctx = NULL; /* Quell bogus uninitialized warnings */
   const uint8_t *cipher_input;
   uint32_t cipher_input_len;
   uint32_t cipher_block_size;
   uint32_t encrypted_len;
#ifdef FD_NO_SEND_TEST
   return 1;
#endif

   msgsave = sd->msg;
   rbuf = sd->msg;                    /* read buffer */
   wbuf = sd->msg;                    /* write buffer */
   cipher_input = (uint8_t *)rbuf;    /* encrypt uncompressed data */

   Dmsg1(300, "Saving data, type=%d\n", ff_pkt->type);

#ifdef HAVE_LIBZ
   uLong compress_len = 0;
   uLong max_compress_len = 0;
   const Bytef *cbuf = NULL;
   int zstat;

   if (ff_pkt->flags & FO_GZIP) {
      /* With sparse handling the first SPARSE_FADDR_SIZE bytes of the
       * compress buffer are reserved for the file address. */
      if (ff_pkt->flags & FO_SPARSE) {
         cbuf = (Bytef *)jcr->compress_buf + SPARSE_FADDR_SIZE;
         max_compress_len = jcr->compress_buf_size - SPARSE_FADDR_SIZE;
      } else {
         cbuf = (Bytef *)jcr->compress_buf;
         max_compress_len = jcr->compress_buf_size; /* set max length */
      }
      wbuf = jcr->compress_buf;                     /* compressed output here */
      cipher_input = (uint8_t *)jcr->compress_buf;  /* encrypt compressed data */

      /*
       * Only change zlib parameters if there is no pending operation.
       * This should never happen as deflatereset is called after each
       * deflate.
       */
      if (((z_stream*)jcr->pZLIB_compress_workset)->total_in == 0) {
         /* set gzip compression level - must be done per file */
         if ((zstat=deflateParams((z_stream*)jcr->pZLIB_compress_workset,
              ff_pkt->GZIP_level, Z_DEFAULT_STRATEGY)) != Z_OK) {
            Jmsg(jcr, M_FATAL, 0, _("Compression deflateParams error: %d\n"), zstat);
            set_jcr_job_status(jcr, JS_ErrorTerminated);
            goto err;
         }
      }
   }
#else
   const uint32_t max_compress_len = 0;
#endif

   if (ff_pkt->flags & FO_ENCRYPT) {
      /* Sparse + encryption is an unsupported combination. */
      if (ff_pkt->flags & FO_SPARSE) {
         Jmsg0(jcr, M_FATAL, 0, _("Encrypting sparse data not supported.\n"));
         goto err;
      }
      /* Allocate the cipher context */
      if ((cipher_ctx = crypto_cipher_new(jcr->crypto.pki_session, true,
           &cipher_block_size)) == NULL) {
         /* Shouldn't happen! */
         Jmsg0(jcr, M_FATAL, 0, _("Failed to initialize encryption context.\n"));
         goto err;
      }

      /*
       * Grow the crypto buffer, if necessary.
       * crypto_cipher_update() will buffer up to (cipher_block_size - 1).
       * We grow crypto_buf to the maximum number of blocks that
       * could be returned for the given read buffer size.
       * (Using the larger of either rsize or max_compress_len)
       */
      jcr->crypto.crypto_buf = check_pool_memory_size(jcr->crypto.crypto_buf,
           (MAX(rsize + (int)sizeof(uint32_t), (int32_t)max_compress_len) +
            cipher_block_size - 1) / cipher_block_size * cipher_block_size);

      wbuf = jcr->crypto.crypto_buf; /* Encrypted, possibly compressed output here. */
   }

   /*
    * Send Data header to Storage daemon
    *    <file-index> <stream> <info>
    */
   if (!sd->fsend("%ld %d 0", jcr->JobFiles, stream)) {
      Jmsg1(jcr, M_FATAL, 0, _("Network send error to SD. ERR=%s\n"),
            sd->bstrerror());
      goto err;
   }
   Dmsg1(300, ">stored: datahdr %s\n", sd->msg);

   /*
    * Make space at beginning of buffer for fileAddr because this
    * same buffer will be used for writing if compression is off.
    */
   if (ff_pkt->flags & FO_SPARSE) {
      rbuf += SPARSE_FADDR_SIZE;
      rsize -= SPARSE_FADDR_SIZE;
#ifdef HAVE_FREEBSD_OS
      /*
       * To read FreeBSD partitions, the read size must be
       * a multiple of 512.
       */
      rsize = (rsize/512) * 512;
#endif
   }

   /* a RAW device read on win32 only works if the buffer is a multiple of 512 */
#ifdef HAVE_WIN32
   if (S_ISBLK(ff_pkt->statp.st_mode))
      rsize = (rsize/512) * 512;
#endif

   /*
    * Read the file data
    */
   while ((sd->msglen=(uint32_t)bread(&ff_pkt->bfd, rbuf, rsize)) > 0) {
      /* Check for sparse blocks */
      if (ff_pkt->flags & FO_SPARSE) {
         ser_declare;
         bool allZeros = false;

         /* Only probe full-size blocks (or RAW/FIFO with unknown size)
          * for the all-zeros elision; short final blocks always go out. */
         if ((sd->msglen == rsize &&
              fileAddr+sd->msglen < (uint64_t)ff_pkt->statp.st_size) ||
             ((ff_pkt->type == FT_RAW || ff_pkt->type == FT_FIFO) &&
               (uint64_t)ff_pkt->statp.st_size == 0)) {
            allZeros = is_buf_zero(rbuf, rsize);
         }
         if (!allZeros) {
            /* Put file address as first data in buffer */
            ser_begin(wbuf, SPARSE_FADDR_SIZE);
            ser_uint64(fileAddr);     /* store fileAddr in begin of buffer */
         }
         fileAddr += sd->msglen;      /* update file address */
         /* Skip block of all zeros */
         if (allZeros) {
            continue;                 /* skip block of zeros */
         }
      }

      jcr->ReadBytes += sd->msglen;   /* count bytes read */

      /* Uncompressed cipher input length */
      cipher_input_len = sd->msglen;

      /* Update checksum if requested */
      if (digest) {
         crypto_digest_update(digest, (uint8_t *)rbuf, sd->msglen);
      }

      /* Update signing digest if requested */
      if (signing_digest) {
         crypto_digest_update(signing_digest, (uint8_t *)rbuf, sd->msglen);
      }

#ifdef HAVE_LIBZ
      /* Do compression if turned on */
      if (ff_pkt->flags & FO_GZIP && jcr->pZLIB_compress_workset) {
         Dmsg3(400, "cbuf=0x%x rbuf=0x%x len=%u\n", cbuf, rbuf, sd->msglen);

         ((z_stream*)jcr->pZLIB_compress_workset)->next_in   = (Bytef *)rbuf;
         ((z_stream*)jcr->pZLIB_compress_workset)->avail_in  = sd->msglen;
         ((z_stream*)jcr->pZLIB_compress_workset)->next_out  = (Bytef *)cbuf;
         ((z_stream*)jcr->pZLIB_compress_workset)->avail_out = max_compress_len;

         /* Z_FINISH: each record is compressed as a complete unit. */
         if ((zstat=deflate((z_stream*)jcr->pZLIB_compress_workset, Z_FINISH)) != Z_STREAM_END) {
            Jmsg(jcr, M_FATAL, 0, _("Compression deflate error: %d\n"), zstat);
            set_jcr_job_status(jcr, JS_ErrorTerminated);
            goto err;
         }
         compress_len = ((z_stream*)jcr->pZLIB_compress_workset)->total_out;
         /* reset zlib stream to be able to begin from scratch again */
         if ((zstat=deflateReset((z_stream*)jcr->pZLIB_compress_workset)) != Z_OK) {
            Jmsg(jcr, M_FATAL, 0, _("Compression deflateReset error: %d\n"), zstat);
            set_jcr_job_status(jcr, JS_ErrorTerminated);
            goto err;
         }

         Dmsg2(400, "compressed len=%d uncompressed len=%d\n", compress_len,
               sd->msglen);

         sd->msglen = compress_len;   /* set compressed length */
         cipher_input_len = compress_len;
      }
#endif
      /*
       * Note, here we prepend the current record length to the beginning
       *  of the encrypted data. This is because both sparse and compression
       *  restore handling want records returned to them with exactly the
       *  same number of bytes that were processed in the backup handling.
       *  That is, both are block filters rather than a stream. When doing
       *  compression, the compression routines may buffer data, so that for
       *  any one record compressed, when it is decompressed the same size
       *  will not be obtained. Of course, the buffered data eventually comes
       *  out in subsequent crypto_cipher_update() calls or at least
       *  when crypto_cipher_finalize() is called.  Unfortunately, this
       *  "feature" of encryption enormously complicates the restore code.
       */
      if (ff_pkt->flags & FO_ENCRYPT) {
         uint32_t initial_len = 0;
         ser_declare;

         if (ff_pkt->flags & FO_SPARSE) {
            cipher_input_len += SPARSE_FADDR_SIZE;
         }

         /* Encrypt the length of the input block */
         uint8_t packet_len[sizeof(uint32_t)];

         ser_begin(packet_len, sizeof(uint32_t));
         ser_uint32(cipher_input_len); /* store data len in begin of buffer */
         Dmsg1(20, "Encrypt len=%d\n", cipher_input_len);

         if (!crypto_cipher_update(cipher_ctx, packet_len, sizeof(packet_len),
             (uint8_t *)jcr->crypto.crypto_buf, &initial_len)) {
            /* Encryption failed. Shouldn't happen. */
            Jmsg(jcr, M_FATAL, 0, _("Encryption error\n"));
            goto err;
         }

         /* Encrypt the input block */
         if (crypto_cipher_update(cipher_ctx, cipher_input, cipher_input_len,
             (uint8_t *)&jcr->crypto.crypto_buf[initial_len], &encrypted_len)) {
            if ((initial_len + encrypted_len) == 0) {
               /* No full block of data available, read more data */
               continue;
            }
            Dmsg2(400, "encrypted len=%d unencrypted len=%d\n", encrypted_len,
                  sd->msglen);
            sd->msglen = initial_len + encrypted_len; /* set encrypted length */
         } else {
            /* Encryption failed. Shouldn't happen. */
            Jmsg(jcr, M_FATAL, 0, _("Encryption error\n"));
            goto err;
         }
      }

      /* Send the buffer to the Storage daemon */
      if (ff_pkt->flags & FO_SPARSE) {
         sd->msglen += SPARSE_FADDR_SIZE; /* include fileAddr in size */
      }
      sd->msg = wbuf;                 /* set correct write buffer */
      if (!sd->send()) {
         Jmsg1(jcr, M_FATAL, 0, _("Network send error to SD. ERR=%s\n"),
               sd->bstrerror());
         goto err;
      }
      Dmsg1(130, "Send data to SD len=%d\n", sd->msglen);
      /*          #endif */
      jcr->JobBytes += sd->msglen;    /* count bytes saved possibly compressed/encrypted */
      sd->msg = msgsave;              /* restore read buffer */
   } /* end while read file data */

   /* NOTE(review): the loop cast bread()'s result to uint32_t; this < 0
    * test relies on sd->msglen itself being a signed field — confirm. */
   if (sd->msglen < 0) {              /* error */
      berrno be;
      Jmsg(jcr, M_ERROR, 0, _("Read error on file %s. ERR=%s\n"),
         ff_pkt->fname, be.bstrerror(ff_pkt->bfd.berrno));
      if (jcr->JobErrors++ > 1000) {  /* insanity check */
         Jmsg(jcr, M_FATAL, 0, _("Too many errors.\n"));
      }
   } else if (ff_pkt->flags & FO_ENCRYPT) {
      /*
       * For encryption, we must call finalize to push out any
       * buffered data.
       */
      if (!crypto_cipher_finalize(cipher_ctx, (uint8_t *)jcr->crypto.crypto_buf,
           &encrypted_len)) {
         /* Padding failed. Shouldn't happen. */
         Jmsg(jcr, M_FATAL, 0, _("Encryption padding error\n"));
         goto err;
      }

      /* Note, on SSL pre-0.9.7, there is always some output */
      if (encrypted_len > 0) {
         sd->msglen = encrypted_len;  /* set encrypted length */
         sd->msg = jcr->crypto.crypto_buf; /* set correct write buffer */
         if (!sd->send()) {
            Jmsg1(jcr, M_FATAL, 0, _("Network send error to SD. ERR=%s\n"),
                  sd->bstrerror());
            goto err;
         }
         Dmsg1(130, "Send data to SD len=%d\n", sd->msglen);
         jcr->JobBytes += sd->msglen; /* count bytes saved possibly compressed/encrypted */
         sd->msg = msgsave;           /* restore bnet buffer */
      }
   }

   if (!sd->signal(BNET_EOD)) {       /* indicate end of file data */
      Jmsg1(jcr, M_FATAL, 0, _("Network send error to SD. ERR=%s\n"),
            sd->bstrerror());
      goto err;
   }

   /* Free the cipher context */
   if (cipher_ctx) {
      crypto_cipher_free(cipher_ctx);
   }
   return 1;

err:
   /* Free the cipher context */
   if (cipher_ctx) {
      crypto_cipher_free(cipher_ctx);
   }
   sd->msg = msgsave; /* restore bnet buffer */
   sd->msglen = 0;
   return 0;
}
/* * Called here by find() for each file included. * This is a callback. The original is find_files() above. * * Send the file and its data to the Storage daemon. * * Returns: 1 if OK * 0 if error * -1 to ignore file/directory (not used here) */ int save_file(JCR *jcr, FF_PKT *ff_pkt, bool top_level) { bool do_read = false; int stat, data_stream; int rtnstat = 0; DIGEST *digest = NULL; DIGEST *signing_digest = NULL; int digest_stream = STREAM_NONE; SIGNATURE *sig = NULL; bool has_file_data = false; // TODO landonf: Allow the user to specify the digest algorithm #ifdef HAVE_SHA2 crypto_digest_t signing_algorithm = CRYPTO_DIGEST_SHA256; #else crypto_digest_t signing_algorithm = CRYPTO_DIGEST_SHA1; #endif BSOCK *sd = jcr->store_bsock; if (job_canceled(jcr)) { return 0; } jcr->num_files_examined++; /* bump total file count */ switch (ff_pkt->type) { case FT_LNKSAVED: /* Hard linked, file already saved */ Dmsg2(130, "FT_LNKSAVED hard link: %s => %s\n", ff_pkt->fname, ff_pkt->link); break; case FT_REGE: Dmsg1(130, "FT_REGE saving: %s\n", ff_pkt->fname); has_file_data = true; break; case FT_REG: Dmsg1(130, "FT_REG saving: %s\n", ff_pkt->fname); has_file_data = true; break; case FT_LNK: Dmsg2(130, "FT_LNK saving: %s -> %s\n", ff_pkt->fname, ff_pkt->link); break; case FT_DIRBEGIN: jcr->num_files_examined--; /* correct file count */ return 1; /* not used */ case FT_NORECURSE: Jmsg(jcr, M_INFO, 1, _(" Recursion turned off. Will not descend from %s into %s\n"), ff_pkt->top_fname, ff_pkt->fname); ff_pkt->type = FT_DIREND; /* Backup only the directory entry */ break; case FT_NOFSCHG: /* Suppress message for /dev filesystems */ if (!is_in_fileset(ff_pkt)) { Jmsg(jcr, M_INFO, 1, _(" %s is a different filesystem. Will not descend from %s into %s\n"), ff_pkt->fname, ff_pkt->top_fname, ff_pkt->fname); } ff_pkt->type = FT_DIREND; /* Backup only the directory entry */ break; case FT_INVALIDFS: Jmsg(jcr, M_INFO, 1, _(" Disallowed filesystem. 
Will not descend from %s into %s\n"), ff_pkt->top_fname, ff_pkt->fname); ff_pkt->type = FT_DIREND; /* Backup only the directory entry */ break; case FT_INVALIDDT: Jmsg(jcr, M_INFO, 1, _(" Disallowed drive type. Will not descend into %s\n"), ff_pkt->fname); break; case FT_REPARSE: case FT_DIREND: Dmsg1(130, "FT_DIREND: %s\n", ff_pkt->link); break; case FT_SPEC: Dmsg1(130, "FT_SPEC saving: %s\n", ff_pkt->fname); if (S_ISSOCK(ff_pkt->statp.st_mode)) { Jmsg(jcr, M_SKIPPED, 1, _(" Socket file skipped: %s\n"), ff_pkt->fname); return 1; } break; case FT_RAW: Dmsg1(130, "FT_RAW saving: %s\n", ff_pkt->fname); has_file_data = true; break; case FT_FIFO: Dmsg1(130, "FT_FIFO saving: %s\n", ff_pkt->fname); break; case FT_NOACCESS: { berrno be; Jmsg(jcr, M_NOTSAVED, 0, _(" Could not access \"%s\": ERR=%s\n"), ff_pkt->fname, be.bstrerror(ff_pkt->ff_errno)); jcr->JobErrors++; return 1; } case FT_NOFOLLOW: { berrno be; Jmsg(jcr, M_NOTSAVED, 0, _(" Could not follow link \"%s\": ERR=%s\n"), ff_pkt->fname, be.bstrerror(ff_pkt->ff_errno)); jcr->JobErrors++; return 1; } case FT_NOSTAT: { berrno be; Jmsg(jcr, M_NOTSAVED, 0, _(" Could not stat \"%s\": ERR=%s\n"), ff_pkt->fname, be.bstrerror(ff_pkt->ff_errno)); jcr->JobErrors++; return 1; } case FT_DIRNOCHG: case FT_NOCHG: Jmsg(jcr, M_SKIPPED, 1, _(" Unchanged file skipped: %s\n"), ff_pkt->fname); return 1; case FT_ISARCH: Jmsg(jcr, M_NOTSAVED, 0, _(" Archive file not saved: %s\n"), ff_pkt->fname); return 1; case FT_NOOPEN: { berrno be; Jmsg(jcr, M_NOTSAVED, 0, _(" Could not open directory \"%s\": ERR=%s\n"), ff_pkt->fname, be.bstrerror(ff_pkt->ff_errno)); jcr->JobErrors++; return 1; } default: Jmsg(jcr, M_NOTSAVED, 0, _(" Unknown file type %d; not saved: %s\n"), ff_pkt->type, ff_pkt->fname); jcr->JobErrors++; return 1; } Dmsg1(130, "bfiled: sending %s to stored\n", ff_pkt->fname); /* Digests and encryption are only useful if there's file data */ if (has_file_data) { /* * Setup for digest handling. 
If this fails, the digest will be set to NULL * and not used. Note, the digest (file hash) can be any one of the four * algorithms below. * * The signing digest is a single algorithm depending on * whether or not we have SHA2. * ****FIXME**** the signing algoritm should really be * determined a different way!!!!!! What happens if * sha2 was available during backup but not restore? */ if (ff_pkt->flags & FO_MD5) { digest = crypto_digest_new(jcr, CRYPTO_DIGEST_MD5); digest_stream = STREAM_MD5_DIGEST; } else if (ff_pkt->flags & FO_SHA1) { digest = crypto_digest_new(jcr, CRYPTO_DIGEST_SHA1); digest_stream = STREAM_SHA1_DIGEST; } else if (ff_pkt->flags & FO_SHA256) { digest = crypto_digest_new(jcr, CRYPTO_DIGEST_SHA256); digest_stream = STREAM_SHA256_DIGEST; } else if (ff_pkt->flags & FO_SHA512) { digest = crypto_digest_new(jcr, CRYPTO_DIGEST_SHA512); digest_stream = STREAM_SHA512_DIGEST; } /* Did digest initialization fail? */ if (digest_stream != STREAM_NONE && digest == NULL) { Jmsg(jcr, M_WARNING, 0, _("%s digest initialization failed\n"), stream_to_ascii(digest_stream)); } /* * Set up signature digest handling. If this fails, the signature digest will be set to * NULL and not used. */ // TODO landonf: We should really only calculate the digest once, for both verification and signing. if (jcr->crypto.pki_sign) { signing_digest = crypto_digest_new(jcr, signing_algorithm); /* Full-stop if a failure occurred initializing the signature digest */ if (signing_digest == NULL) { Jmsg(jcr, M_NOTSAVED, 0, _("%s signature digest initialization failed\n"), stream_to_ascii(signing_algorithm)); jcr->JobErrors++; goto good_rtn; } } /* Enable encryption */ if (jcr->crypto.pki_encrypt) { ff_pkt->flags |= FO_ENCRYPT; } } /* Initialize the file descriptor we use for data and other streams. 
*/ binit(&ff_pkt->bfd); if (ff_pkt->flags & FO_PORTABLE) { set_portable_backup(&ff_pkt->bfd); /* disable Win32 BackupRead() */ } if (ff_pkt->cmd_plugin) { if (!set_cmd_plugin(&ff_pkt->bfd, jcr)) { goto bail_out; } send_plugin_name(jcr, sd, true); /* signal start of plugin data */ } /* Send attributes -- must be done after binit() */ if (!encode_and_send_attributes(jcr, ff_pkt, data_stream)) { goto bail_out; } /* Set up the encryption context and send the session data to the SD */ if (has_file_data && jcr->crypto.pki_encrypt) { if (!crypto_session_send(jcr, sd)) { goto bail_out; } } /* * Open any file with data that we intend to save, then save it. * * Note, if is_win32_backup, we must open the Directory so that * the BackupRead will save its permissions and ownership streams. */ if (ff_pkt->type != FT_LNKSAVED && S_ISREG(ff_pkt->statp.st_mode)) { #ifdef HAVE_WIN32 do_read = !is_portable_backup(&ff_pkt->bfd) || ff_pkt->statp.st_size > 0; #else do_read = ff_pkt->statp.st_size > 0; #endif } else if (ff_pkt->type == FT_RAW || ff_pkt->type == FT_FIFO || ff_pkt->type == FT_REPARSE || (!is_portable_backup(&ff_pkt->bfd) && ff_pkt->type == FT_DIREND)) { do_read = true; } if (ff_pkt->cmd_plugin) { do_read = true; } Dmsg1(400, "do_read=%d\n", do_read); if (do_read) { btimer_t *tid; if (ff_pkt->type == FT_FIFO) { tid = start_thread_timer(jcr, pthread_self(), 60); } else { tid = NULL; } int noatime = ff_pkt->flags & FO_NOATIME ? 
O_NOATIME : 0; ff_pkt->bfd.reparse_point = ff_pkt->type == FT_REPARSE; if (bopen(&ff_pkt->bfd, ff_pkt->fname, O_RDONLY | O_BINARY | noatime, 0) < 0) { ff_pkt->ff_errno = errno; berrno be; Jmsg(jcr, M_NOTSAVED, 0, _(" Cannot open \"%s\": ERR=%s.\n"), ff_pkt->fname, be.bstrerror()); jcr->JobErrors++; if (tid) { stop_thread_timer(tid); tid = NULL; } goto good_rtn; } if (tid) { stop_thread_timer(tid); tid = NULL; } stat = send_data(jcr, data_stream, ff_pkt, digest, signing_digest); if (ff_pkt->flags & FO_CHKCHANGES) { has_file_changed(jcr, ff_pkt); } bclose(&ff_pkt->bfd); if (!stat) { goto bail_out; } } #ifdef HAVE_DARWIN_OS /* Regular files can have resource forks and Finder Info */ if (ff_pkt->type != FT_LNKSAVED && (S_ISREG(ff_pkt->statp.st_mode) && ff_pkt->flags & FO_HFSPLUS)) { if (ff_pkt->hfsinfo.rsrclength > 0) { int flags; int rsrc_stream; if (!bopen_rsrc(&ff_pkt->bfd, ff_pkt->fname, O_RDONLY | O_BINARY, 0) < 0) { ff_pkt->ff_errno = errno; berrno be; Jmsg(jcr, M_NOTSAVED, -1, _(" Cannot open resource fork for \"%s\": ERR=%s.\n"), ff_pkt->fname, be.bstrerror()); jcr->JobErrors++; if (is_bopen(&ff_pkt->bfd)) { bclose(&ff_pkt->bfd); } goto good_rtn; } flags = ff_pkt->flags; ff_pkt->flags &= ~(FO_GZIP|FO_SPARSE); if (flags & FO_ENCRYPT) { rsrc_stream = STREAM_ENCRYPTED_MACOS_FORK_DATA; } else { rsrc_stream = STREAM_MACOS_FORK_DATA; } stat = send_data(jcr, rsrc_stream, ff_pkt, digest, signing_digest); ff_pkt->flags = flags; bclose(&ff_pkt->bfd); if (!stat) { goto bail_out; } } Dmsg1(300, "Saving Finder Info for \"%s\"\n", ff_pkt->fname); sd->fsend("%ld %d 0", jcr->JobFiles, STREAM_HFSPLUS_ATTRIBUTES); Dmsg1(300, "bfiled>stored:header %s\n", sd->msg); pm_memcpy(sd->msg, ff_pkt->hfsinfo.fndrinfo, 32); sd->msglen = 32; if (digest) { crypto_digest_update(digest, (uint8_t *)sd->msg, sd->msglen); } if (signing_digest) { crypto_digest_update(signing_digest, (uint8_t *)sd->msg, sd->msglen); } sd->send(); sd->signal(BNET_EOD); } #endif /* * Save ACLs for anything not being a 
symlink and not being a plugin. */ if (!ff_pkt->cmd_plugin) { if (ff_pkt->flags & FO_ACL && ff_pkt->type != FT_LNK) { if (!build_acl_streams(jcr, ff_pkt)) goto bail_out; } } /* * Save Extended Attributes for all files not being a plugin. */ if (!ff_pkt->cmd_plugin) { if (ff_pkt->flags & FO_XATTR) { if (!build_xattr_streams(jcr, ff_pkt)) goto bail_out; } } /* Terminate the signing digest and send it to the Storage daemon */ if (signing_digest) { uint32_t size = 0; if ((sig = crypto_sign_new(jcr)) == NULL) { Jmsg(jcr, M_FATAL, 0, _("Failed to allocate memory for crypto signature.\n")); goto bail_out; } if (!crypto_sign_add_signer(sig, signing_digest, jcr->crypto.pki_keypair)) { Jmsg(jcr, M_FATAL, 0, _("An error occurred while signing the stream.\n")); goto bail_out; } /* Get signature size */ if (!crypto_sign_encode(sig, NULL, &size)) { Jmsg(jcr, M_FATAL, 0, _("An error occurred while signing the stream.\n")); goto bail_out; } /* Grow the bsock buffer to fit our message if necessary */ if (sizeof_pool_memory(sd->msg) < (int32_t)size) { sd->msg = realloc_pool_memory(sd->msg, size); } /* Send our header */ sd->fsend("%ld %ld 0", jcr->JobFiles, STREAM_SIGNED_DIGEST); Dmsg1(300, "bfiled>stored:header %s\n", sd->msg); /* Encode signature data */ if (!crypto_sign_encode(sig, (uint8_t *)sd->msg, &size)) { Jmsg(jcr, M_FATAL, 0, _("An error occurred while signing the stream.\n")); goto bail_out; } sd->msglen = size; sd->send(); sd->signal(BNET_EOD); /* end of checksum */ } /* Terminate any digest and send it to Storage daemon */ if (digest) { uint32_t size; sd->fsend("%ld %d 0", jcr->JobFiles, digest_stream); Dmsg1(300, "bfiled>stored:header %s\n", sd->msg); size = CRYPTO_DIGEST_MAX_SIZE; /* Grow the bsock buffer to fit our message if necessary */ if (sizeof_pool_memory(sd->msg) < (int32_t)size) { sd->msg = realloc_pool_memory(sd->msg, size); } if (!crypto_digest_finalize(digest, (uint8_t *)sd->msg, &size)) { Jmsg(jcr, M_FATAL, 0, _("An error occurred finalizing signing the 
stream.\n")); goto bail_out; } sd->msglen = size; sd->send(); sd->signal(BNET_EOD); /* end of checksum */ } if (ff_pkt->cmd_plugin) { send_plugin_name(jcr, sd, false); /* signal end of plugin data */ } good_rtn: rtnstat = 1; /* good return */ bail_out: if (digest) { crypto_digest_free(digest); } if (signing_digest) { crypto_digest_free(signing_digest); } if (sig) { crypto_sign_free(sig); } return rtnstat; }
/*
 * Called here by find() for each file.
 *
 * Find the file, compute the MD5 or SHA1 and send it back to the Director.
 *
 * Returns: 1 on success or when the entry was skipped/logged,
 *          0 on job cancel or on a network error talking to the Director.
 */
static int verify_file(JCR *jcr, FF_PKT *ff_pkt, bool top_level)
{
   POOL_MEM attribs(PM_NAME), attribsEx(PM_NAME);
   int status;
   BSOCK *dir;

   if (job_canceled(jcr)) {
      return 0;
   }

   dir = jcr->dir_bsock;
   jcr->num_files_examined++;            /* bump total file count */

   /*
    * Dispatch on the file type found by the traversal: normal entries
    * fall through to the attribute-send below; error/skip types are
    * logged and return 1 immediately.
    */
   switch (ff_pkt->type) {
   case FT_LNKSAVED:                     /* Hard linked, file already saved */
      Dmsg2(30, "FT_LNKSAVED saving: %s => %s\n", ff_pkt->fname, ff_pkt->link);
      break;
   case FT_REGE:
      Dmsg1(30, "FT_REGE saving: %s\n", ff_pkt->fname);
      break;
   case FT_REG:
      Dmsg1(30, "FT_REG saving: %s\n", ff_pkt->fname);
      break;
   case FT_LNK:
      Dmsg2(30, "FT_LNK saving: %s -> %s\n", ff_pkt->fname, ff_pkt->link);
      break;
   case FT_DIRBEGIN:
      jcr->num_files_examined--;         /* correct file count */
      return 1;                          /* ignored */
   case FT_REPARSE:
   case FT_JUNCTION:
   case FT_DIREND:
      Dmsg1(30, "FT_DIR saving: %s\n", ff_pkt->fname);
      break;
   case FT_SPEC:
      Dmsg1(30, "FT_SPEC saving: %s\n", ff_pkt->fname);
      break;
   case FT_RAW:
      Dmsg1(30, "FT_RAW saving: %s\n", ff_pkt->fname);
      break;
   case FT_FIFO:
      Dmsg1(30, "FT_FIFO saving: %s\n", ff_pkt->fname);
      break;
   case FT_NOACCESS: {
      berrno be;
      be.set_errno(ff_pkt->ff_errno);
      Jmsg(jcr, M_NOTSAVED, 1, _(" Could not access %s: ERR=%s\n"), ff_pkt->fname, be.bstrerror());
      jcr->JobErrors++;
      return 1;
   }
   case FT_NOFOLLOW: {
      berrno be;
      be.set_errno(ff_pkt->ff_errno);
      Jmsg(jcr, M_NOTSAVED, 1, _(" Could not follow link %s: ERR=%s\n"), ff_pkt->fname, be.bstrerror());
      jcr->JobErrors++;
      return 1;
   }
   case FT_NOSTAT: {
      berrno be;
      be.set_errno(ff_pkt->ff_errno);
      Jmsg(jcr, M_NOTSAVED, 1, _(" Could not stat %s: ERR=%s\n"), ff_pkt->fname, be.bstrerror());
      jcr->JobErrors++;
      return 1;
   }
   case FT_DIRNOCHG:
   case FT_NOCHG:
      Jmsg(jcr, M_SKIPPED, 1, _(" Unchanged file skipped: %s\n"), ff_pkt->fname);
      return 1;
   case FT_ISARCH:
      Jmsg(jcr, M_SKIPPED, 1, _(" Archive file skipped: %s\n"), ff_pkt->fname);
      return 1;
   case FT_NORECURSE:
      Jmsg(jcr, M_SKIPPED, 1, _(" Recursion turned off. Directory skipped: %s\n"), ff_pkt->fname);
      ff_pkt->type = FT_DIREND;          /* directory entry was backed up */
      break;
   case FT_NOFSCHG:
      Jmsg(jcr, M_SKIPPED, 1, _(" File system change prohibited. Directory skipped: %s\n"), ff_pkt->fname);
      return 1;
   case FT_PLUGIN_CONFIG:
   case FT_RESTORE_FIRST:
      return 1;                          /* silently skip */
   case FT_NOOPEN: {
      berrno be;
      be.set_errno(ff_pkt->ff_errno);
      Jmsg(jcr, M_NOTSAVED, 1, _(" Could not open directory %s: ERR=%s\n"), ff_pkt->fname, be.bstrerror());
      jcr->JobErrors++;
      return 1;
   }
   default:
      Jmsg(jcr, M_NOTSAVED, 0, _(" Unknown file type %d: %s\n"), ff_pkt->type, ff_pkt->fname);
      jcr->JobErrors++;
      return 1;
   }

   /* Encode attributes and possibly extend them */
   encode_stat(attribs.c_str(), &ff_pkt->statp, sizeof(ff_pkt->statp), ff_pkt->LinkFI, 0);
   encode_attribsEx(jcr, attribsEx.c_str(), ff_pkt);

   /* JobFiles and last_fname are shared job state; update under the JCR lock. */
   jcr->lock();
   jcr->JobFiles++;                      /* increment number of files sent */
   pm_strcpy(jcr->last_fname, ff_pkt->fname);
   jcr->unlock();

   /*
    * Send file attributes to Director
    *   File_index
    *   Stream
    *   Verify Options
    *   Filename (full path)
    *   Encoded attributes
    *   Link name (if type==FT_LNK)
    * For a directory, link is the same as fname, but with trailing
    * slash. For a linked file, link is the link.
    *
    * Note the format differs from the one used toward the Storage daemon;
    * fields are separated with embedded NUL bytes (%c with value 0).
    */
   Dmsg2(400, "send ATTR inx=%d fname=%s\n", jcr->JobFiles, ff_pkt->fname);
   if (ff_pkt->type == FT_LNK || ff_pkt->type == FT_LNKSAVED) {
      status = dir->fsend("%d %d %s %s%c%s%c%s%c", jcr->JobFiles,
                          STREAM_UNIX_ATTRIBUTES, ff_pkt->VerifyOpts, ff_pkt->fname,
                          0, attribs.c_str(), 0, ff_pkt->link, 0);
   } else if (ff_pkt->type == FT_DIREND || ff_pkt->type == FT_REPARSE ||
              ff_pkt->type == FT_JUNCTION) {
      /* Here link is the canonical filename (i.e. with trailing slash) */
      status = dir->fsend("%d %d %s %s%c%s%c%c", jcr->JobFiles,
                          STREAM_UNIX_ATTRIBUTES, ff_pkt->VerifyOpts, ff_pkt->link,
                          0, attribs.c_str(), 0, 0);
   } else {
      status = dir->fsend("%d %d %s %s%c%s%c%c", jcr->JobFiles,
                          STREAM_UNIX_ATTRIBUTES, ff_pkt->VerifyOpts, ff_pkt->fname,
                          0, attribs.c_str(), 0, 0);
   }
   Dmsg2(20, "filed>dir: attribs len=%d: msg=%s\n", dir->msglen, dir->msg);
   if (!status) {
      Jmsg(jcr, M_FATAL, 0, _("Network error in send to Director: ERR=%s\n"), bnet_strerror(dir));
      return 0;
   }

   /* Compute and send a checksum only for regular files with a digest flag set. */
   if (ff_pkt->type != FT_LNKSAVED &&
       (S_ISREG(ff_pkt->statp.st_mode) &&
        ff_pkt->flags & (FO_MD5 | FO_SHA1 | FO_SHA256 | FO_SHA512))) {
      int digest_stream = STREAM_NONE;
      DIGEST *digest = NULL;
      char *digest_buf = NULL;
      const char *digest_name = NULL;

      if (calculate_file_chksum(jcr, ff_pkt, &digest, &digest_stream,
                                &digest_buf, &digest_name)) {
         /*
          * Did digest initialization fail?
          */
         if (digest_stream != STREAM_NONE && digest == NULL) {
            Jmsg(jcr, M_WARNING, 0, _("%s digest initialization failed\n"),
                 stream_to_ascii(digest_stream));
         } else if (digest && digest_buf) {
            Dmsg3(400, "send inx=%d %s=%s\n", jcr->JobFiles, digest_name, digest_buf);
            dir->fsend("%d %d %s *%s-%d*", jcr->JobFiles, digest_stream, digest_buf,
                       digest_name, jcr->JobFiles);
            Dmsg3(20, "filed>dir: %s len=%d: msg=%s\n", digest_name, dir->msglen, dir->msg);
         }
      }

      /*
       * Cleanup.
       */
      if (digest_buf) {
         free(digest_buf);
      }
      if (digest) {
         crypto_digest_free(digest);
      }
   }
   return 1;
}
/*
 * Called here before the job is run to do the job
 * specific setup.
 *
 * Prepares a native virtual backup: resolves the FileSet, pool and
 * NextPool, splits the pool into a read pool (rpool) and a write pool,
 * and records the job start in the catalog.
 *
 * Returns true on success, false on any fatal setup failure.
 */
bool do_native_vbackup_init(JCR *jcr)
{
   const char *storage_source;

   if (!get_or_create_fileset_record(jcr)) {
      Dmsg1(dbglevel, "JobId=%d no FileSet\n", (int)jcr->JobId);
      return false;
   }

   apply_pool_overrides(jcr);

   if (!allow_duplicate_job(jcr)) {
      return false;
   }

   jcr->jr.PoolId = get_or_create_pool_record(jcr, jcr->res.pool->name());
   if (jcr->jr.PoolId == 0) {
      Dmsg1(dbglevel, "JobId=%d no PoolId\n", (int)jcr->JobId);
      Jmsg(jcr, M_FATAL, 0, _("Could not get or create a Pool record.\n"));
      return false;
   }

   /*
    * Note, at this point, pool is the pool for this job. We
    * transfer it to rpool (read pool), and a bit later,
    * pool will be changed to point to the write pool,
    * which comes from pool->NextPool.
    */
   jcr->res.rpool = jcr->res.pool;            /* save read pool */
   pm_strcpy(jcr->res.rpool_source, jcr->res.pool_source);

   /* If pool storage specified, use it for restore */
   copy_rstorage(jcr, jcr->res.pool->storage, _("Pool resource"));

   Dmsg2(dbglevel, "Read pool=%s (From %s)\n", jcr->res.rpool->name(), jcr->res.rpool_source);

   /* Record the job start in the catalog (non-fatal here on DB error). */
   jcr->start_time = time(NULL);
   jcr->jr.StartTime = jcr->start_time;
   if (!db_update_job_start_record(jcr, jcr->db, &jcr->jr)) {
      Jmsg(jcr, M_FATAL, 0, "%s", db_strerror(jcr->db));
   }

   /*
    * See if there is a next pool override.
    * Precedence: run override > job's NextPool > pool's NextPool.
    */
   if (jcr->res.run_next_pool_override) {
      pm_strcpy(jcr->res.npool_source, _("Run NextPool override"));
      pm_strcpy(jcr->res.pool_source, _("Run NextPool override"));
      storage_source = _("Storage from Run NextPool override");
   } else {
      /*
       * See if there is a next pool override in the Job definition.
       */
      if (jcr->res.job->next_pool) {
         jcr->res.next_pool = jcr->res.job->next_pool;
         pm_strcpy(jcr->res.npool_source, _("Job's NextPool resource"));
         pm_strcpy(jcr->res.pool_source, _("Job's NextPool resource"));
         storage_source = _("Storage from Job's NextPool resource");
      } else {
         /*
          * Fall back to the pool's NextPool definition.
          */
         jcr->res.next_pool = jcr->res.pool->NextPool;
         pm_strcpy(jcr->res.npool_source, _("Job Pool's NextPool resource"));
         pm_strcpy(jcr->res.pool_source, _("Job Pool's NextPool resource"));
         storage_source = _("Storage from Pool's NextPool resource");
      }
   }

   /*
    * If the original backup pool has a NextPool, make sure a
    * record exists in the database. Note, in this case, we
    * will be migrating from pool to pool->NextPool.
    */
   if (jcr->res.next_pool) {
      jcr->jr.PoolId = get_or_create_pool_record(jcr, jcr->res.next_pool->name());
      if (jcr->jr.PoolId == 0) {
         return false;
      }
   }

   if (!set_migration_wstorage(jcr, jcr->res.pool, jcr->res.next_pool, storage_source)) {
      return false;
   }
   jcr->res.pool = jcr->res.next_pool;        /* switch to the write pool */

   Dmsg2(dbglevel, "Write pool=%s read rpool=%s\n", jcr->res.pool->name(), jcr->res.rpool->name());

   // create_clones(jcr);

   return true;
}
/*
 * Prepare automatic record (de)compression for a device.
 *
 * value must carry a DCR*. Reports the configured autodeflate /
 * autoinflate directions as a job message and initializes the
 * corresponding worksets when the device asks for them.
 *
 * Returns bRC_OK on success, bRC_Error on a NULL DCR or setup failure.
 */
static bRC setup_record_translation(bpContext *ctx, void *value)
{
   bool configured = false;
   const char *inflate_in = SETTING_UNSET;
   const char *inflate_out = SETTING_UNSET;
   const char *deflate_in = SETTING_UNSET;
   const char *deflate_out = SETTING_UNSET;
   DCR *dcr = (DCR *)value;

   if (!dcr) {
      return bRC_Error;
   }

   /*
    * Translate the configured deflate direction into yes/no pairs for
    * the informational message below; unknown values are reported and
    * left unset.
    */
   switch (dcr->autodeflate) {
   case IO_DIRECTION_NONE:
   case IO_DIRECTION_IN:
   case IO_DIRECTION_OUT:
   case IO_DIRECTION_INOUT:
      deflate_in = (dcr->autodeflate == IO_DIRECTION_IN ||
                    dcr->autodeflate == IO_DIRECTION_INOUT) ? SETTING_YES : SETTING_NO;
      deflate_out = (dcr->autodeflate == IO_DIRECTION_OUT ||
                     dcr->autodeflate == IO_DIRECTION_INOUT) ? SETTING_YES : SETTING_NO;
      break;
   default:
      Jmsg(ctx, M_ERROR, _("Unexpected autodeflate setting on %s"), dcr->dev_name);
      break;
   }

   /* Same translation for the inflate direction. */
   switch (dcr->autoinflate) {
   case IO_DIRECTION_NONE:
   case IO_DIRECTION_IN:
   case IO_DIRECTION_OUT:
   case IO_DIRECTION_INOUT:
      inflate_in = (dcr->autoinflate == IO_DIRECTION_IN ||
                    dcr->autoinflate == IO_DIRECTION_INOUT) ? SETTING_YES : SETTING_NO;
      inflate_out = (dcr->autoinflate == IO_DIRECTION_OUT ||
                     dcr->autoinflate == IO_DIRECTION_INOUT) ? SETTING_YES : SETTING_NO;
      break;
   default:
      Jmsg(ctx, M_ERROR, _("Unexpected autoinflate setting on %s"), dcr->dev_name);
      break;
   }

   /*
    * Set up auto deflation/inflation of streams when enabled for this
    * device (only the OUT and INOUT directions require setup here).
    */
   if (dcr->autodeflate == IO_DIRECTION_OUT || dcr->autodeflate == IO_DIRECTION_INOUT) {
      if (!setup_auto_deflation(ctx, dcr)) {
         return bRC_Error;
      }
      configured = true;
   }

   if (dcr->autoinflate == IO_DIRECTION_OUT || dcr->autoinflate == IO_DIRECTION_INOUT) {
      if (!setup_auto_inflation(ctx, dcr)) {
         return bRC_Error;
      }
      configured = true;
   }

   if (configured) {
      Jmsg(ctx, M_INFO,
           _("autoxflate-sd.c: %s OUT:[SD->inflate=%s->deflate=%s->DEV] IN:[DEV->inflate=%s->deflate=%s->SD]\n"),
           dcr->dev_name, inflate_out, deflate_out, inflate_in, deflate_in);
   }

   return bRC_OK;
}
static inline void non_compatible_compression_algorithm(JCR *jcr, uint32_t compression_algorithm) { Jmsg(jcr, M_FATAL, 0, _("Illegal compression algorithm %s for compatible mode\n"), cmprs_algo_to_text(compression_algorithm)); }
/* * Setup deflate for auto deflate of data streams. */ static bool setup_auto_deflation(bpContext *ctx, DCR *dcr) { JCR *jcr = dcr->jcr; bool retval = false; uint32_t compress_buf_size = 0; const char *compressorname = COMPRESSOR_NAME_UNSET; if (jcr->buf_size == 0) { jcr->buf_size = DEFAULT_NETWORK_BUFFER_SIZE; } if (!setup_compression_buffers(jcr, sd_enabled_compatible, dcr->device->autodeflate_algorithm, &compress_buf_size)) { goto bail_out; } /* * See if we need to create a new compression buffer or make sure the existing is big enough. */ if (!jcr->compress.deflate_buffer) { jcr->compress.deflate_buffer = get_memory(compress_buf_size); jcr->compress.deflate_buffer_size = compress_buf_size; } else { if (compress_buf_size > jcr->compress.deflate_buffer_size) { jcr->compress.deflate_buffer = realloc_pool_memory(jcr->compress.deflate_buffer, compress_buf_size); jcr->compress.deflate_buffer_size = compress_buf_size; } } switch (dcr->device->autodeflate_algorithm) { #if defined(HAVE_LIBZ) case COMPRESS_GZIP: { compressorname = COMPRESSOR_NAME_GZIP; int zstat; z_stream *pZlibStream; pZlibStream = (z_stream *)jcr->compress.workset.pZLIB; if ((zstat = deflateParams(pZlibStream, dcr->device->autodeflate_level, Z_DEFAULT_STRATEGY)) != Z_OK) { Jmsg(ctx, M_FATAL, _("Compression deflateParams error: %d\n"), zstat); jcr->setJobStatus(JS_ErrorTerminated); goto bail_out; } break; } #endif #if defined(HAVE_LZO) case COMPRESS_LZO1X: compressorname = COMPRESSOR_NAME_LZO; break; #endif #if defined(HAVE_FASTLZ) case COMPRESS_FZFZ: compressorname = COMPRESSOR_NAME_FZLZ; case COMPRESS_FZ4L: compressorname = COMPRESSOR_NAME_FZ4L; case COMPRESS_FZ4H: { compressorname = COMPRESSOR_NAME_FZ4H; int zstat; zfast_stream *pZfastStream; zfast_stream_compressor compressor = COMPRESSOR_FASTLZ; switch (dcr->device->autodeflate_algorithm) { case COMPRESS_FZ4L: case COMPRESS_FZ4H: compressor = COMPRESSOR_LZ4; break; } pZfastStream = (zfast_stream *)jcr->compress.workset.pZFAST; if ((zstat = 
fastlzlibSetCompressor(pZfastStream, compressor)) != Z_OK) { Jmsg(ctx, M_FATAL, _("Compression fastlzlibSetCompressor error: %d\n"), zstat); jcr->setJobStatus(JS_ErrorTerminated); goto bail_out; } break; } #endif default: break; } Jmsg(ctx, M_INFO, _("autodeflation: Compressor on device %s is %s\n"), dcr->dev_name, compressorname); retval = true; bail_out: return retval; }
/*
 * Ensure the job has an initialized compression workset and compute the
 * output buffer size required for the given compression_algorithm.
 *
 * On success, *compress_buf_size is raised (never lowered) to the size
 * needed for one compressed record. The per-algorithm workset pointer in
 * jcr->compress.workset is only set when its initialization succeeded;
 * if it already exists the function returns early with true.
 *
 * Returns false when the algorithm is unknown, incompatible with
 * "compatible" mode, or its library failed to initialize.
 */
bool setup_compression_buffers(JCR *jcr, bool compatible, uint32_t compression_algorithm, uint32_t *compress_buf_size)
{
   uint32_t wanted_compress_buf_size;

   switch (compression_algorithm) {
   case 0:
      /*
       * No compression requested.
       */
      break;
#ifdef HAVE_LIBZ
   case COMPRESS_GZIP: {
      z_stream *pZlibStream;

      /**
       * Use compressBound() to get an idea what zlib thinks
       * what the upper limit is of what it needs to compress
       * a buffer of x bytes. To that we add 18 bytes and the size
       * of an compression header.
       *
       * This gives a bit extra plus room for the sparse addr if any.
       * Note, we adjust the read size to be smaller so that the
       * same output buffer can be used without growing it.
       *
       * The zlib compression workset is initialized here to minimize
       * the "per file" load. The jcr member is only set, if the init
       * was successful.
       */
      wanted_compress_buf_size = compressBound(jcr->buf_size) + 18 + (int)sizeof(comp_stream_header);
      if (wanted_compress_buf_size > *compress_buf_size) {
         *compress_buf_size = wanted_compress_buf_size;
      }

      /*
       * See if this compression algorithm is already setup.
       */
      if (jcr->compress.workset.pZLIB) {
         return true;
      }

      pZlibStream = (z_stream *)malloc(sizeof(z_stream));
      memset(pZlibStream, 0, sizeof(z_stream));
      pZlibStream->zalloc = Z_NULL;
      pZlibStream->zfree = Z_NULL;
      pZlibStream->opaque = Z_NULL;
      pZlibStream->state = Z_NULL;

      if (deflateInit(pZlibStream, Z_DEFAULT_COMPRESSION) == Z_OK) {
         jcr->compress.workset.pZLIB = pZlibStream;
      } else {
         Jmsg(jcr, M_FATAL, 0, _("Failed to initialize ZLIB compression\n"));
         free(pZlibStream);
         return false;
      }
      break;
   }
#endif
#ifdef HAVE_LZO
   case COMPRESS_LZO1X: {
      lzo_voidp pLzoMem;

      /**
       * For LZO1X compression the recommended value is:
       * output_block_size = input_block_size + (input_block_size / 16) + 64 + 3 + sizeof(comp_stream_header)
       *
       * The LZO compression workset is initialized here to minimize
       * the "per file" load. The jcr member is only set, if the init
       * was successful.
       */
      wanted_compress_buf_size = jcr->buf_size + (jcr->buf_size / 16) + 64 + 3 + (int)sizeof(comp_stream_header);
      if (wanted_compress_buf_size > *compress_buf_size) {
         *compress_buf_size = wanted_compress_buf_size;
      }

      /*
       * See if this compression algorithm is already setup.
       */
      if (jcr->compress.workset.pLZO) {
         return true;
      }

      pLzoMem = (lzo_voidp) malloc(LZO1X_1_MEM_COMPRESS);
      memset(pLzoMem, 0, LZO1X_1_MEM_COMPRESS);

      if (lzo_init() == LZO_E_OK) {
         jcr->compress.workset.pLZO = pLzoMem;
      } else {
         Jmsg(jcr, M_FATAL, 0, _("Failed to initialize LZO compression\n"));
         free(pLzoMem);
         return false;
      }
      break;
   }
#endif
#ifdef HAVE_FASTLZ
   case COMPRESS_FZFZ:
   case COMPRESS_FZ4L:
   case COMPRESS_FZ4H: {
      int level, zstat;
      zfast_stream *pZfastStream;

      /* FASTLZ variants cannot be used in compatible mode. */
      if (compatible) {
         non_compatible_compression_algorithm(jcr, compression_algorithm);
         return false;
      }

      /* FZ4H trades speed for ratio; the other two favor speed. */
      if (compression_algorithm == COMPRESS_FZ4H) {
         level = Z_BEST_COMPRESSION;
      } else {
         level = Z_BEST_SPEED;
      }

      /*
       * For FASTLZ compression the recommended value is:
       * output_block_size = input_block_size + (input_block_size / 10 + 16 * 2) + sizeof(comp_stream_header)
       *
       * The FASTLZ compression workset is initialized here to minimize
       * the "per file" load. The jcr member is only set, if the init
       * was successful.
       */
      wanted_compress_buf_size = jcr->buf_size + (jcr->buf_size / 10 + 16 * 2) + (int)sizeof(comp_stream_header);
      if (wanted_compress_buf_size > *compress_buf_size) {
         *compress_buf_size = wanted_compress_buf_size;
      }

      /*
       * See if this compression algorithm is already setup.
       */
      if (jcr->compress.workset.pZFAST) {
         return true;
      }

      pZfastStream = (zfast_stream *)malloc(sizeof(zfast_stream));
      memset(pZfastStream, 0, sizeof(zfast_stream));
      pZfastStream->zalloc = Z_NULL;
      pZfastStream->zfree = Z_NULL;
      pZfastStream->opaque = Z_NULL;
      pZfastStream->state = Z_NULL;

      if ((zstat = fastlzlibCompressInit(pZfastStream, level)) == Z_OK) {
         jcr->compress.workset.pZFAST = pZfastStream;
      } else {
         Jmsg(jcr, M_FATAL, 0, _("Failed to initialize FASTLZ compression\n"));
         free(pZfastStream);
         return false;
      }
      break;
   }
#endif
   default:
      unknown_compression_algorithm(jcr, compression_algorithm);
      return false;
   }

   return true;
}
bool run_cmd(JCR *jcr) { struct timeval tv; struct timezone tz; struct timespec timeout; int errstat = 0; BSOCK *cl; int fd_version = 0; int sd_version = 0; char job_name[500]; int i; int stat; Dsm_check(200); Dmsg1(200, "Run_cmd: %s\n", jcr->dir_bsock->msg); /* If we do not need the FD, we are doing a virtual backup. */ if (jcr->no_client_used()) { do_vbackup(jcr); return false; } jcr->sendJobStatus(JS_WaitFD); /* wait for FD to connect */ Dmsg2(050, "sd_calls_client=%d sd_client=%d\n", jcr->sd_calls_client, jcr->sd_client); if (jcr->sd_calls_client) { /* We connected to Client, so finish work */ cl = jcr->file_bsock; if (!cl) { Jmsg0(jcr, M_FATAL, 0, _("Client socket not open. Could not connect to Client.\n")); Dmsg0(050, "Client socket not open. Could not connect to Client.\n"); return false; } /* Get response to Hello command sent earlier */ Dmsg0(050, "Read Hello command from Client\n"); for (i=0; i<60; i++) { stat = cl->recv(); if (stat <= 0) { bmicrosleep(1, 0); } else { break; } } if (stat <= 0) { berrno be; Jmsg1(jcr, M_FATAL, 0, _("Recv request to Client failed. ERR=%s\n"), be.bstrerror()); Dmsg1(050, _("Recv request to Client failed. 
ERR=%s\n"), be.bstrerror()); return false; } Dmsg1(050, "Got from FD: %s\n", cl->msg); if (sscanf(cl->msg, "Hello Bacula SD: Start Job %127s %d %d", job_name, &fd_version, &sd_version) != 3) { Jmsg1(jcr, M_FATAL, 0, _("Bad Hello from Client: %s.\n"), cl->msg); Dmsg1(050, _("Bad Hello from Client: %s.\n"), cl->msg); return false; } unbash_spaces(job_name); jcr->FDVersion = fd_version; jcr->SDVersion = sd_version; Dmsg1(050, "FDVersion=%d\n", fd_version); /* * Authenticate the File daemon */ Dmsg0(050, "=== Authenticate FD\n"); if (jcr->authenticated || !authenticate_filed(jcr)) { Dmsg1(050, "Authentication failed Job %s\n", jcr->Job); Jmsg(jcr, M_FATAL, 0, _("Unable to authenticate File daemon\n")); } else { jcr->authenticated = true; } } else if (!jcr->sd_client) { /* We wait to receive connection from Client */ gettimeofday(&tv, &tz); timeout.tv_nsec = tv.tv_usec * 1000; timeout.tv_sec = tv.tv_sec + me->client_wait; Dmsg3(050, "%s waiting %d sec for FD to contact SD key=%s\n", jcr->Job, (int)(timeout.tv_sec-time(NULL)), jcr->sd_auth_key); Dmsg3(800, "=== Block Job=%s jid=%d %p\n", jcr->Job, jcr->JobId, jcr); /* * Wait for the File daemon to contact us to start the Job, * when he does, we will be released, unless the 30 minutes * expires. */ P(mutex); while ( !jcr->authenticated && !job_canceled(jcr) ) { errstat = pthread_cond_timedwait(&jcr->job_start_wait, &mutex, &timeout); if (errstat == ETIMEDOUT || errstat == EINVAL || errstat == EPERM) { break; } Dmsg1(800, "=== Auth cond errstat=%d\n", errstat); } Dmsg4(050, "=== Auth=%d jid=%d canceled=%d errstat=%d\n", jcr->JobId, jcr->authenticated, job_canceled(jcr), errstat); V(mutex); Dmsg2(800, "Auth fail or cancel for jid=%d %p\n", jcr->JobId, jcr); } memset(jcr->sd_auth_key, 0, strlen(jcr->sd_auth_key)); if (jcr->authenticated && !job_canceled(jcr)) { Dmsg2(800, "Running jid=%d %p\n", jcr->JobId, jcr); run_job(jcr); /* Run the job */ } Dmsg2(800, "Done jid=%d %p\n", jcr->JobId, jcr); return false; }
/*
 * Allocate and initialize a DEVICE object from a DEVRES device resource.
 *
 * Guesses the device type from stat() when none is configured, constructs
 * the matching DEVICE subclass, copies the resource parameters into it,
 * sanity-checks the block sizes, and initializes all mutexes/condition
 * variables. Fatal configuration problems are reported with M_ERROR_TERM
 * (which terminates the daemon).
 *
 * Returns: the initialized DEVICE, or NULL on stat failure / unknown type.
 *
 * NOTE(review): the new_init parameter is not used anywhere in this body —
 * TODO confirm it is consumed by a caller-side contract or can be dropped.
 */
static inline DEVICE *m_init_dev(JCR *jcr, DEVRES *device, bool new_init)
{
   struct stat statp;
   int errstat;
   DCR *dcr = NULL;
   DEVICE *dev = NULL;
   uint32_t max_bs;

   Dmsg1(400, "max_block_size in device res is %u\n", device->max_block_size);

   /*
    * If no device type specified, try to guess
    */
   if (!device->dev_type) {
      /*
       * Check that device is available
       */
      if (stat(device->device_name, &statp) < 0) {
         berrno be;
         Jmsg2(jcr, M_ERROR, 0, _("Unable to stat device %s: ERR=%s\n"),
               device->device_name, be.bstrerror());
         return NULL;
      }
      /* Map the st_mode onto a Bacula device type */
      if (S_ISDIR(statp.st_mode)) {
         device->dev_type = B_FILE_DEV;
      } else if (S_ISCHR(statp.st_mode)) {
         device->dev_type = B_TAPE_DEV;
      } else if (S_ISFIFO(statp.st_mode)) {
         device->dev_type = B_FIFO_DEV;
      } else if (!bit_is_set(CAP_REQMOUNT, device->cap_bits)) {
         /* Anything else is only acceptable when the resource requires a mount */
         Jmsg2(jcr, M_ERROR, 0,
               _("%s is an unknown device type. Must be tape or directory, st_mode=%x\n"),
               device->device_name, statp.st_mode);
         return NULL;
      }
   }

   /*
    * See what type of device is wanted.
    */
   switch (device->dev_type) {
   /*
    * When using dynamic loading use the init_backend_dev() function
    * for any type of device not being of the type file.
    */
#ifndef HAVE_DYNAMIC_SD_BACKENDS
#ifdef HAVE_GFAPI
   case B_GFAPI_DEV:
      dev = New(gfapi_device);
      break;
#endif
#ifdef HAVE_OBJECTSTORE
   case B_OBJECT_STORE_DEV:
      dev = New(object_store_device);
      break;
#endif
#ifdef HAVE_RADOS
   case B_RADOS_DEV:
      dev = New(rados_device);
      break;
#endif
#ifdef HAVE_CEPHFS
   case B_CEPHFS_DEV:
      dev = New(cephfs_device);
      break;
#endif
#ifdef HAVE_ELASTO
   case B_ELASTO_DEV:
      dev = New(elasto_device);
      break;
#endif
#ifdef HAVE_WIN32
   case B_TAPE_DEV:
      dev = New(win32_tape_device);
      break;
   case B_FIFO_DEV:
      dev = New(win32_fifo_device);
      break;
#else
   case B_TAPE_DEV:
      dev = New(unix_tape_device);
      break;
   case B_FIFO_DEV:
      dev = New(unix_fifo_device);
      break;
#endif
#endif /* HAVE_DYNAMIC_SD_BACKENDS */
#ifdef HAVE_WIN32
   case B_FILE_DEV:
      dev = New(win32_file_device);
      break;
#else
   case B_FILE_DEV:
      dev = New(unix_file_device);
      break;
#endif
   default:
#ifdef HAVE_DYNAMIC_SD_BACKENDS
      dev = init_backend_dev(jcr, device->dev_type);
#endif
      break;
   }

   if (!dev) {
      Jmsg2(jcr, M_ERROR, 0, _("%s has an unknown device type %d\n"),
            device->device_name, device->dev_type);
      return NULL;
   }
   dev->clear_slot();         /* unknown */

   /*
    * Copy user supplied device parameters from Resource
    */
   dev->dev_name = get_memory(strlen(device->device_name) + 1);
   pm_strcpy(dev->dev_name, device->device_name);
   if (device->device_options) {
      dev->dev_options = get_memory(strlen(device->device_options) + 1);
      pm_strcpy(dev->dev_options, device->device_options);
   }
   dev->prt_name = get_memory(strlen(device->device_name) + strlen(device->name()) + 20);

   /*
    * We edit "Resource-name" (physical-name)
    */
   Mmsg(dev->prt_name, "\"%s\" (%s)", device->name(), device->device_name);
   Dmsg1(400, "Allocate dev=%s\n", dev->print_name());
   copy_bits(CAP_MAX, device->cap_bits, dev->capabilities);

   /*
    * current block sizes
    */
   dev->min_block_size = device->min_block_size;
   dev->max_block_size = device->max_block_size;
   dev->max_volume_size = device->max_volume_size;
   dev->max_file_size = device->max_file_size;
   dev->max_concurrent_jobs = device->max_concurrent_jobs;
   dev->volume_capacity = device->volume_capacity;
   dev->max_rewind_wait = device->max_rewind_wait;
   dev->max_open_wait = device->max_open_wait;
   dev->max_open_vols = device->max_open_vols;
   dev->vol_poll_interval = device->vol_poll_interval;
   dev->max_spool_size = device->max_spool_size;
   dev->drive_index = device->drive_index;
   dev->autoselect = device->autoselect;
   dev->norewindonclose = device->norewindonclose;
   dev->dev_type = device->dev_type;
   dev->device = device;

   /*
    * Sanity check: enforce a minimum volume-poll interval of 60s
    */
   if (dev->vol_poll_interval && dev->vol_poll_interval < 60) {
      dev->vol_poll_interval = 60;
   }
   device->dev = dev;

   if (dev->is_fifo()) {
      dev->set_cap(CAP_STREAM);       /* set stream device */
   }

   /*
    * If the device requires mount :
    * - Check that the mount point is available
    * - Check that (un)mount commands are defined
    */
   if (dev->is_file() && dev->requires_mount()) {
      if (!device->mount_point || stat(device->mount_point, &statp) < 0) {
         berrno be;
         dev->dev_errno = errno;
         Jmsg2(jcr, M_ERROR_TERM, 0, _("Unable to stat mount point %s: ERR=%s\n"),
               device->mount_point, be.bstrerror());
      }
      if (!device->mount_command || !device->unmount_command) {
         Jmsg0(jcr, M_ERROR_TERM, 0, _("Mount and unmount commands must defined for a device which requires mount.\n"));
      }
   }

   /*
    * Sanity check the block sizes (0 means "use default")
    */
   if (dev->max_block_size == 0) {
      max_bs = DEFAULT_BLOCK_SIZE;
   } else {
      max_bs = dev->max_block_size;
   }
   if (dev->min_block_size > max_bs) {
      Jmsg(jcr, M_ERROR_TERM, 0, _("Min block size > max on device %s\n"), dev->print_name());
   }
   if (dev->max_block_size > MAX_BLOCK_LENGTH) {
      Jmsg3(jcr, M_ERROR, 0, _("Block size %u on device %s is too large, using default %u\n"),
            dev->max_block_size, dev->print_name(), DEFAULT_BLOCK_SIZE);
      dev->max_block_size = 0;       /* revert to default */
   }
   if (dev->max_block_size % TAPE_BSIZE != 0) {
      Jmsg3(jcr, M_WARNING, 0, _("Max block size %u not multiple of device %s block size=%d.\n"),
            dev->max_block_size, dev->print_name(), TAPE_BSIZE);
   }
   if (dev->max_volume_size != 0 && dev->max_volume_size < (dev->max_block_size << 4)) {
      Jmsg(jcr, M_ERROR_TERM, 0, _("Max Vol Size < 8 * Max Block Size for device %s\n"), dev->print_name());
   }

   dev->errmsg = get_pool_memory(PM_EMSG);
   *dev->errmsg = 0;

   /* Initialize all synchronization primitives; failure is fatal */
   if ((errstat = dev->init_mutex()) != 0) {
      berrno be;
      dev->dev_errno = errstat;
      Mmsg1(dev->errmsg, _("Unable to init mutex: ERR=%s\n"), be.bstrerror(errstat));
      Jmsg0(jcr, M_ERROR_TERM, 0, dev->errmsg);
   }
   if ((errstat = pthread_cond_init(&dev->wait, NULL)) != 0) {
      berrno be;
      dev->dev_errno = errstat;
      Mmsg1(dev->errmsg, _("Unable to init cond variable: ERR=%s\n"), be.bstrerror(errstat));
      Jmsg0(jcr, M_ERROR_TERM, 0, dev->errmsg);
   }
   if ((errstat = pthread_cond_init(&dev->wait_next_vol, NULL)) != 0) {
      berrno be;
      dev->dev_errno = errstat;
      Mmsg1(dev->errmsg, _("Unable to init cond variable: ERR=%s\n"), be.bstrerror(errstat));
      Jmsg0(jcr, M_ERROR_TERM, 0, dev->errmsg);
   }
   if ((errstat = pthread_mutex_init(&dev->spool_mutex, NULL)) != 0) {
      berrno be;
      dev->dev_errno = errstat;
      Mmsg1(dev->errmsg, _("Unable to init spool mutex: ERR=%s\n"), be.bstrerror(errstat));
      Jmsg0(jcr, M_ERROR_TERM, 0, dev->errmsg);
   }
   if ((errstat = dev->init_acquire_mutex()) != 0) {
      berrno be;
      dev->dev_errno = errstat;
      Mmsg1(dev->errmsg, _("Unable to init acquire mutex: ERR=%s\n"), be.bstrerror(errstat));
      Jmsg0(jcr, M_ERROR_TERM, 0, dev->errmsg);
   }
   if ((errstat = dev->init_read_acquire_mutex()) != 0) {
      berrno be;
      dev->dev_errno = errstat;
      Mmsg1(dev->errmsg, _("Unable to init read acquire mutex: ERR=%s\n"), be.bstrerror(errstat));
      Jmsg0(jcr, M_ERROR_TERM, 0, dev->errmsg);
   }
   dev->set_mutex_priorities();

#ifdef xxx
   if ((errstat = rwl_init(&dev->lock)) != 0) {
      berrno be;
      dev->dev_errno = errstat;
      Mmsg1(dev->errmsg, _("Unable to init mutex: ERR=%s\n"), be.bstrerror(errstat));
      Jmsg0(jcr, M_ERROR_TERM, 0, dev->errmsg);
   }
#endif

   dev->clear_opened();
   /* dcr is NULL here; New(dlist(...)) only takes the member offset for linking */
   dev->attached_dcrs = New(dlist(dcr, &dcr->dev_link));
   Dmsg2(100, "init_dev: tape=%d dev_name=%s\n", dev->is_tape(), dev->dev_name);
   dev->initiated = true;
   Dmsg3(100, "dev=%s dev_max_bs=%u max_bs=%u\n",
         dev->dev_name, dev->device->max_block_size, dev->max_block_size);

   return dev;
}
/*
 * Apply OS-specific tape-drive settings via MTIOCTOP ioctls.
 *
 * Per platform (selected at compile time) this:
 *  - enables variable block mode (block size 0) when min == max == 0,
 *  - on Linux/Win32 as root, clears driver booleans and sets the
 *    two-filemark / fast-MTEOM options according to device capabilities,
 *  - on NetBSD additionally requests early warning at logical EOT,
 *  - on FreeBSD/OpenBSD sets the EOT model (1 or 2 filemarks).
 *
 * ioctl failures are routed through dev->clrerror() except the FreeBSD
 * eotmodel case, which is fatal (M_FATAL). /dev/null is skipped entirely.
 */
void generic_tape_device::set_os_device_parameters(DCR *dcr)
{
   DEVICE *dev = dcr->dev;

   if (bstrcmp(dev->dev_name, "/dev/null")) {
      return;                        /* no use trying to set /dev/null */
   }

#if defined(HAVE_LINUX_OS) || defined(HAVE_WIN32)
   struct mtop mt_com;

   Dmsg0(100, "In set_os_device_parameters\n");
#if defined(MTSETBLK)
   if (dev->min_block_size == dev->max_block_size &&
       dev->min_block_size == 0) {   /* variable block mode */
      mt_com.mt_op = MTSETBLK;
      mt_com.mt_count = 0;
      Dmsg0(100, "Set block size to zero\n");
      if (dev->d_ioctl(dev->fd(), MTIOCTOP, (char *)&mt_com) < 0) {
         dev->clrerror(mt_com.mt_op);
      }
   }
#endif
#if defined(MTSETDRVBUFFER)
   if (getuid() == 0) {              /* Only root can do this */
      mt_com.mt_op = MTSETDRVBUFFER;
      mt_com.mt_count = MT_ST_CLEARBOOLEANS;
      if (!dev->has_cap(CAP_TWOEOF)) {
         mt_com.mt_count |= MT_ST_TWO_FM;
      }
      if (dev->has_cap(CAP_EOM)) {
         mt_com.mt_count |= MT_ST_FAST_MTEOM;
      }
      Dmsg0(100, "MTSETDRVBUFFER\n");
      if (dev->d_ioctl(dev->fd(), MTIOCTOP, (char *)&mt_com) < 0) {
         dev->clrerror(mt_com.mt_op);
      }
   }
#endif
   return;
#endif

#ifdef HAVE_NETBSD_OS
   struct mtop mt_com;
   if (dev->min_block_size == dev->max_block_size &&
       dev->min_block_size == 0) {   /* variable block mode */
      mt_com.mt_op = MTSETBSIZ;
      mt_com.mt_count = 0;
      if (dev->d_ioctl(dev->fd(), MTIOCTOP, (char *)&mt_com) < 0) {
         dev->clrerror(mt_com.mt_op);
      }
      /* Get notified at logical end of tape */
      mt_com.mt_op = MTEWARN;
      mt_com.mt_count = 1;
      if (dev->d_ioctl(dev->fd(), MTIOCTOP, (char *)&mt_com) < 0) {
         dev->clrerror(mt_com.mt_op);
      }
   }
   return;
#endif

#if HAVE_FREEBSD_OS || HAVE_OPENBSD_OS
   struct mtop mt_com;
   if (dev->min_block_size == dev->max_block_size &&
       dev->min_block_size == 0) {   /* variable block mode */
      mt_com.mt_op = MTSETBSIZ;
      mt_com.mt_count = 0;
      if (dev->d_ioctl(dev->fd(), MTIOCTOP, (char *)&mt_com) < 0) {
         dev->clrerror(mt_com.mt_op);
      }
   }
#if defined(MTIOCSETEOTMODEL)
   uint32_t neof;
   if (dev->has_cap(CAP_TWOEOF)) {
      neof = 2;
   } else {
      neof = 1;
   }
   if (dev->d_ioctl(dev->fd(), MTIOCSETEOTMODEL, (caddr_t)&neof) < 0) {
      berrno be;
      dev->dev_errno = errno;        /* save errno */
      Mmsg2(dev->errmsg, _("Unable to set eotmodel on device %s: ERR=%s\n"),
            dev->print_name(), be.bstrerror(dev->dev_errno));
      Jmsg(dcr->jcr, M_FATAL, 0, dev->errmsg);
   }
#endif
   return;
#endif

#ifdef HAVE_SUN_OS
   struct mtop mt_com;
   if (dev->min_block_size == dev->max_block_size &&
       dev->min_block_size == 0) {   /* variable block mode */
      mt_com.mt_op = MTSRSZ;
      mt_com.mt_count = 0;
      if (dev->d_ioctl(dev->fd(), MTIOCTOP, (char *)&mt_com) < 0) {
         dev->clrerror(mt_com.mt_op);
      }
   }
   return;
#endif
}
/*
 * Set the effective block sizes on the device for this DCR.
 *
 * Precedence: a non-zero volume block size (dcr->VolMaxBlocksize) wins;
 * otherwise a non-zero device-resource max_block_size is used; if both
 * are zero the device's current sizes are left untouched.
 *
 * After selecting the sizes, the same sanity checks as device init are
 * applied (min <= max, max <= MAX_BLOCK_LENGTH, multiple of TAPE_BSIZE,
 * max volume size >= 8 * max block size), and the DCR's I/O block is
 * reallocated if its buffer length no longer matches.
 */
void DEVICE::set_blocksizes(DCR *dcr)
{
   DEVICE *dev = this;
   JCR *jcr = dcr->jcr;
   uint32_t effective_max_bs;

   Dmsg4(100, "Device %s has dev->device->max_block_size of %u and dev->max_block_size of %u, dcr->VolMaxBlocksize is %u\n",
         dev->print_name(), dev->device->max_block_size, dev->max_block_size, dcr->VolMaxBlocksize);

   if (dcr->VolMaxBlocksize != 0) {
      /* The volume record dictates the block sizes. */
      dev->min_block_size = dcr->VolMinBlocksize;
      dev->max_block_size = dcr->VolMaxBlocksize;
   } else if (dev->device->max_block_size != 0) {
      /* No volume override: fall back to the device resource. */
      Dmsg2(100, "setting dev->max_block_size to dev->device->max_block_size=%u "
            "on device %s because dcr->VolMaxBlocksize is 0\n",
            dev->device->max_block_size, dev->print_name());
      dev->min_block_size = dev->device->min_block_size;
      dev->max_block_size = dev->device->max_block_size;
   }

   /*
    * Sanity check (a stored size of 0 means "use the default size").
    */
   effective_max_bs = (dev->max_block_size == 0) ? DEFAULT_BLOCK_SIZE : dev->max_block_size;
   if (dev->min_block_size > effective_max_bs) {
      Jmsg(jcr, M_ERROR_TERM, 0, _("Min block size > max on device %s\n"), dev->print_name());
   }
   if (dev->max_block_size > MAX_BLOCK_LENGTH) {
      Jmsg3(jcr, M_ERROR, 0, _("Block size %u on device %s is too large, using default %u\n"),
            dev->max_block_size, dev->print_name(), DEFAULT_BLOCK_SIZE);
      dev->max_block_size = 0;       /* revert to default */
   }
   if (dev->max_block_size % TAPE_BSIZE != 0) {
      Jmsg3(jcr, M_WARNING, 0, _("Max block size %u not multiple of device %s block size=%d.\n"),
            dev->max_block_size, dev->print_name(), TAPE_BSIZE);
   }
   if (dev->max_volume_size != 0 && dev->max_volume_size < (dev->max_block_size << 4)) {
      Jmsg(jcr, M_ERROR_TERM, 0, _("Max Vol Size < 8 * Max Block Size for device %s\n"), dev->print_name());
   }
   Dmsg3(100, "set minblocksize to %d, maxblocksize to %d on device %s\n",
         dev->min_block_size, dev->max_block_size, dev->print_name());

   /*
    * Rebuild the DCR's block when its buffer length no longer matches
    * dev->max_block_size (the label block keeps its preset size).
    */
   if (dcr->block && dcr->block->buf_len != dev->max_block_size) {
      Dmsg2(100, "created new block of buf_len: %u on device %s\n",
            dev->max_block_size, dev->print_name());
      free_block(dcr->block);
      dcr->block = new_block(dev);
      Dmsg2(100, "created new block of buf_len: %u on device %s, freeing block\n",
            dcr->block->buf_len, dev->print_name());
   }
}
/*
 * See if there is any local shadowing within an include block or
 * any global shadowing between include blocks.
 *
 * When remove is true, shadowed entries are deleted from the fileset;
 * otherwise only a warning is emitted per shadowed entry.
 *
 * Global comparison is skipped for any include block that carries
 * wildcard/regex patterns, and recursion semantics are only considered
 * "global" when both blocks are recursive.
 */
static inline void check_global_fileset_shadowing(JCR *jcr,
                                                  findFILESET *fileset,
                                                  bool remove)
{
   int i, j;
   bool local_recursive, global_recursive;
   findINCEXE *current, *compare_against;
   dlistString *str1, *str2, *next;

   /*
    * Walk all the include blocks and see if there
    * is any shadowing between the different sets.
    */
   for (i = 0; i < fileset->include_list.size(); i++) {
      current = (findINCEXE *)fileset->include_list.get(i);

      /*
       * See if there is any local shadowing.
       */
      check_local_fileset_shadowing(jcr, current, remove);

      /*
       * Only check global shadowing against this include block
       * when it doesn't have any patterns. Testing if a fileset
       * shadows the other with patterns is next to impossible
       * without comparing the matching criteria which can be
       * in so many forms we forget it all together. When you
       * are smart enough to create include/exclude patterns
       * we also don't provide you with basic stop gap measures.
       */
      if (include_block_has_patterns(current)) {
         continue;
      }

      /*
       * Now compare this block against any include block after this one.
       * We can shortcut as we don't have to start at the beginning of
       * the list again because we compare all sets against each other
       * this way anyhow. e.g. we start with set 1 against 2 .. x and
       * then 2 against 3 .. x (No need to compare 2 against 1 again
       * as we did that in the first run already.
       *
       * See if this is a recursive include block.
       */
      local_recursive = include_block_is_recursive(current);
      for (j = i + 1; j < fileset->include_list.size(); j++) {
         compare_against = (findINCEXE *)fileset->include_list.get(j);

         /*
          * Only check global shadowing against this include block
          * when it doesn't have any patterns.
          */
         if (include_block_has_patterns(compare_against)) {
            continue;
         }

         /*
          * See if both include blocks are recursive.
          */
         global_recursive = (local_recursive &&
                             include_block_is_recursive(compare_against));

         /*
          * Walk over the filename list and compare it
          * against the other entry from the other list.
          */
         str1 = (dlistString *)current->name_list.first();
         while (str1) {
            str2 = (dlistString *)compare_against->name_list.first();
            while (str1 && str2) {
               if (check_include_pattern_shadowing(jcr,
                                                   str1->c_str(),
                                                   str2->c_str(),
                                                   global_recursive)) {
                  /*
                   * See what entry shadows the other, the longest entry
                   * shadow the shorter one.
                   */
                  if (strlen(str1->c_str()) < strlen(str2->c_str())) {
                     if (remove) {
                        /*
                         * Pattern2 is longer then Pattern1 e.g. the include block patterns
                         * are probably sorted right. This is the easiest case where we just
                         * remove the entry from the list and continue.
                         */
                        Jmsg(jcr, M_WARNING, 0,
                             _("Fileset include block entry %s shadows %s removing it from fileset\n"),
                             str2->c_str(), str1->c_str());
                        /* Capture the successor BEFORE removing str2 */
                        next = (dlistString *)compare_against->name_list.next(str2);
                        compare_against->name_list.remove(str2);
                        str2 = next;
                        continue;
                     } else {
                        Jmsg(jcr, M_WARNING, 0,
                             _("Fileset include block entry %s shadows %s\n"),
                             str2->c_str(), str1->c_str());
                     }
                  } else {
                     if (remove) {
                        /*
                         * Pattern1 is longer then Pattern2 e.g. the include block patterns
                         * are not sorted right and probably reverse. This is a bit more
                         * difficult. We remove the first pattern from the list and restart
                         * the shadow scan. By setting str1 to NULL we force a rescan as the
                         * next method of the dlist will start at the first entry of the
                         * dlist again.
                         */
                        Jmsg(jcr, M_WARNING, 0,
                             _("Fileset include block entry %s shadows %s removing it from fileset\n"),
                             str1->c_str(), str2->c_str());
                        current->name_list.remove(str1);
                        str1 = NULL;   /* force restart of the outer scan */
                        continue;
                     } else {
                        Jmsg(jcr, M_WARNING, 0,
                             _("Fileset include block entry %s shadows %s\n"),
                             str1->c_str(), str2->c_str());
                     }
                  }
               }
               str2 = (dlistString *)compare_against->name_list.next(str2);
            }
            /* NOTE: next(NULL) restarts at the head after a str1 removal */
            str1 = (dlistString *)current->name_list.next(str1);
         }
      }
   }
}
/*
 * Unpack an Attributes record (space/NUL delimited fields in rec) into
 * the ATTR structure. The fname/attr/lname/attrEx members end up pointing
 * INTO rec (or attr->attrEx pool memory), so rec must outlive attr use.
 *
 * Returns: 1 on success, 0 on scan error.
 */
int unpack_attributes_record(JCR *jcr, int32_t stream, char *rec, int32_t reclen, ATTR *attr)
{
   char *p;
   int object_len;

   /*
    * An Attributes record consists of:
    *    File_index
    *    Type   (FT_types)
    *    Filename
    *    Attributes
    *    Link name (if file linked i.e. FT_LNK)
    *    Extended attributes (Win32)
    *  plus optional values determined by AR_ flags in upper bits of Type
    *    Data_stream
    */
   attr->stream = stream;
   Dmsg1(dbglvl, "Attr: %s\n", rec);
   if (sscanf(rec, "%d %d", &attr->file_index, &attr->type) != 2) {
      Jmsg(jcr, M_FATAL, 0, _("Error scanning attributes: %s\n"), rec);
      Dmsg1(dbglvl, "\nError scanning attributes. %s\n", rec);
      return 0;
   }
   Dmsg2(dbglvl, "Got Attr: FilInx=%d type=%d\n", attr->file_index, attr->type);

   /*
    * Note AR_DATA_STREAM should never be set since it is encoded
    * at the end of the attributes.
    */
   if (attr->type & AR_DATA_STREAM) {
      attr->data_stream = 1;
   } else {
      attr->data_stream = 0;
   }
   attr->type &= FT_MASK;             /* keep only type bits */

   /* Walk the record: two space-terminated fields, then NUL-terminated ones */
   p = rec;
   while (*p++ != ' ')                /* skip record file index */
      { }
   while (*p++ != ' ')                /* skip type */
      { }
   attr->fname = p;                   /* set filname position */
   while (*p++ != 0)                  /* skip filename */
      { }
   attr->attr = p;                    /* set attributes position */
   while (*p++ != 0)                  /* skip attributes */
      { }
   attr->lname = p;                   /* set link position */
   while (*p++ != 0)                  /* skip link */
      { }
   attr->delta_seq = 0;

   if (attr->type == FT_RESTORE_FIRST) {
      /* We have an object, so do a binary copy */
      object_len = reclen + rec - p;  /* remaining bytes of the record */
      attr->attrEx = check_pool_memory_size(attr->attrEx, object_len + 1);
      memcpy(attr->attrEx, p, object_len);
      /* Add a EOS for those who attempt to print the object */
      p = attr->attrEx + object_len;
      *p = 0;
   } else {
      pm_strcpy(attr->attrEx, p);     /* copy extended attributes, if any */
      if (attr->data_stream) {
         int64_t val;
         while (*p++ != 0)            /* skip extended attributes */
            { }
         /* data stream index is base64-encoded after the extended attrs */
         from_base64(&val, p);
         attr->data_stream = (int32_t)val;
      } else {
         while (*p++ != 0)            /* skip extended attributes */
            { }
         /* optional trailing delta sequence number */
         if (p - rec < reclen) {
            attr->delta_seq = str_to_int32(p);   /* delta_seq */
         }
      }
   }
   Dmsg8(dbglvl, "unpack_attr FI=%d Type=%d fname=%s attr=%s lname=%s attrEx=%s datastr=%d delta_seq=%d\n",
         attr->file_index, attr->type, attr->fname, attr->attr, attr->lname,
         attr->attrEx, attr->data_stream, attr->delta_seq);
   *attr->ofname = 0;
   *attr->olname = 0;
   return 1;
}
/* * Prune File records from the database. For any Job which * is older than the retention period, we unconditionally delete * all File records for that Job. This is simple enough that no * temporary tables are needed. We simply make an in memory list of * the JobIds meeting the prune conditions, then delete all File records * pointing to each of those JobIds. * * This routine assumes you want the pruning to be done. All checking * must be done before calling this routine. * * Note: client or pool can possibly be NULL (not both). */ bool prune_files(UAContext *ua, CLIENTRES *client, POOLRES *pool) { struct del_ctx del; struct s_count_ctx cnt; POOL_MEM query(PM_MESSAGE); POOL_MEM sql_where(PM_MESSAGE); POOL_MEM sql_from(PM_MESSAGE); utime_t period; char ed1[50]; memset(&del, 0, sizeof(del)); if (pool && pool->FileRetention > 0) { period = pool->FileRetention; } else if (client) { period = client->FileRetention; } else { /* should specify at least pool or client */ return false; } db_lock(ua->db); /* Specify JobTDate and Pool.Name= and/or Client.Name= in the query */ if (!prune_set_filter(ua, client, pool, period, &sql_from, &sql_where)) { goto bail_out; } // edit_utime(now-period, ed1, sizeof(ed1)); // Jmsg(ua->jcr, M_INFO, 0, _("Begin pruning Jobs older than %s secs.\n"), ed1); Jmsg(ua->jcr, M_INFO, 0, _("Begin pruning Files.\n")); /* Select Jobs -- for counting */ Mmsg(query, "SELECT COUNT(1) FROM Job %s WHERE PurgedFiles=0 %s", sql_from.c_str(), sql_where.c_str()); Dmsg1(050, "select sql=%s\n", query.c_str()); cnt.count = 0; if (!db_sql_query(ua->db, query.c_str(), del_count_handler, (void *)&cnt)) { ua->error_msg("%s", db_strerror(ua->db)); Dmsg0(050, "Count failed\n"); goto bail_out; } if (cnt.count == 0) { if (ua->verbose) { ua->warning_msg(_("No Files found to prune.\n")); } goto bail_out; } if (cnt.count < MAX_DEL_LIST_LEN) { del.max_ids = cnt.count + 1; } else { del.max_ids = MAX_DEL_LIST_LEN; } del.tot_ids = 0; del.JobId = (JobId_t *)malloc(sizeof(JobId_t) 
* del.max_ids); /* Now process same set but making a delete list */ Mmsg(query, "SELECT JobId FROM Job %s WHERE PurgedFiles=0 %s", sql_from.c_str(), sql_where.c_str()); Dmsg1(050, "select sql=%s\n", query.c_str()); db_sql_query(ua->db, query.c_str(), file_delete_handler, (void *)&del); purge_files_from_job_list(ua, del); edit_uint64_with_commas(del.num_del, ed1); ua->info_msg(_("Pruned Files from %s Jobs for client %s from catalog.\n"), ed1, client->name()); bail_out: db_unlock(ua->db); if (del.JobId) { free(del.JobId); } return 1; }
/*
 * Authenticate File daemon connection.
 *
 * Protocol sequence (all under a single AUTH_TIMEOUT bsock timer):
 *  1. send Hello with the (space-bashed) Director name,
 *  2. mutual CRAM-MD5 (respond to FD challenge, then challenge FD),
 *  3. reconcile local vs. remote TLS requirements and start TLS if both
 *     sides allow it (TLS is torn down again if used for auth only),
 *  4. read and validate the FD's Hello response, extracting FDVersion.
 *
 * Returns: 1 on success, 0 on any failure (a fatal Jmsg is queued).
 */
int authenticate_file_daemon(JCR *jcr)
{
   BSOCK *fd = jcr->file_bsock;
   CLIENT *client = jcr->client;
   char dirname[MAX_NAME_LENGTH];
   int tls_local_need = BNET_TLS_NONE;
   int tls_remote_need = BNET_TLS_NONE;
   int compatible = true;
   bool auth_success = false;

   /*
    * Send my name to the File daemon then do authentication
    */
   bstrncpy(dirname, director->name(), sizeof(dirname));
   bash_spaces(dirname);

   /* Timeout Hello after 1 min */
   btimer_t *tid = start_bsock_timer(fd, AUTH_TIMEOUT);
   if (!fd->fsend(hello, dirname)) {
      stop_bsock_timer(tid);
      Jmsg(jcr, M_FATAL, 0, _("Error sending Hello to File daemon at \"%s:%d\". ERR=%s\n"),
           fd->host(), fd->port(), fd->bstrerror());
      return 0;
   }
   Dmsg1(dbglvl, "Sent: %s", fd->msg);

   /* TLS Requirement: derive our local need from the client resource */
   if (client->tls_enable) {
      if (client->tls_require) {
         tls_local_need = BNET_TLS_REQUIRED;
      } else {
         tls_local_need = BNET_TLS_OK;
      }
   }
   /* TLS-based authentication forces TLS to be required */
   if (client->tls_authenticate) {
      tls_local_need = BNET_TLS_REQUIRED;
   }

   /* Mutual CRAM-MD5: answer their challenge, then issue ours */
   auth_success = cram_md5_respond(fd, client->password, &tls_remote_need, &compatible);
   if (auth_success) {
      auth_success = cram_md5_challenge(fd, client->password, tls_local_need, compatible);
      if (!auth_success) {
         Dmsg1(dbglvl, "cram_auth failed for %s\n", fd->who());
      }
   } else {
      Dmsg1(dbglvl, "cram_get_auth failed for %s\n", fd->who());
   }

   if (!auth_success) {
      stop_bsock_timer(tid);
      Dmsg0(dbglvl, _("Director and File daemon passwords or names not the same.\n"));
      Jmsg(jcr, M_FATAL, 0,
           _("Unable to authenticate with File daemon at \"%s:%d\". Possible causes:\n"
             "Passwords or names not the same or\n"
             "Maximum Concurrent Jobs exceeded on the FD or\n"
             "FD networking messed up (restart daemon).\n"
             "Please see " MANUAL_AUTH_URL " for help.\n"),
           fd->host(), fd->port());
      return 0;
   }

   /* Verify that the remote host is willing to meet our TLS requirements */
   if (tls_remote_need < tls_local_need &&
       tls_local_need != BNET_TLS_OK && tls_remote_need != BNET_TLS_OK) {
      stop_bsock_timer(tid);
      Jmsg(jcr, M_FATAL, 0,
           _("Authorization problem: FD \"%s:%s\" did not advertise required TLS support.\n"),
           fd->who(), fd->host());
      return 0;
   }

   /* Verify that we are willing to meet the remote host's requirements */
   if (tls_remote_need > tls_local_need &&
       tls_local_need != BNET_TLS_OK && tls_remote_need != BNET_TLS_OK) {
      stop_bsock_timer(tid);
      Jmsg(jcr, M_FATAL, 0, _("Authorization problem: FD at \"%s:%d\" requires TLS.\n"),
           fd->host(), fd->port());
      return 0;
   }

   /* Is TLS Enabled? */
   if (tls_local_need >= BNET_TLS_OK && tls_remote_need >= BNET_TLS_OK) {
      /* Engage TLS! Full Speed Ahead! */
      if (!bnet_tls_client(client->tls_ctx, fd, client->tls_allowed_cns)) {
         stop_bsock_timer(tid);
         Jmsg(jcr, M_FATAL, 0, _("TLS negotiation failed with FD at \"%s:%d\".\n"),
              fd->host(), fd->port());
         return 0;
      }
      if (client->tls_authenticate) {      /* tls authentication only? */
         fd->free_tls();                   /* yes, shutdown tls */
      }
   }

   Dmsg1(116, ">filed: %s", fd->msg);
   if (fd->recv() <= 0) {
      stop_bsock_timer(tid);
      Dmsg1(dbglvl, _("Bad response from File daemon to Hello command: ERR=%s\n"),
            bnet_strerror(fd));
      Jmsg(jcr, M_FATAL, 0,
           _("Bad response from File daemon at \"%s:%d\" to Hello command: ERR=%s\n"),
           fd->host(), fd->port(), fd->bstrerror());
      return 0;
   }
   Dmsg1(110, "<filed: %s", fd->msg);
   stop_bsock_timer(tid);

   /* Accept either the legacy OK Hello or the newer versioned form */
   jcr->FDVersion = 0;
   if (strncmp(fd->msg, FDOKhello, sizeof(FDOKhello)) != 0 &&
       sscanf(fd->msg, FDOKnewHello, &jcr->FDVersion) != 1) {
      Dmsg0(dbglvl, _("File daemon rejected Hello command\n"));
      Jmsg(jcr, M_FATAL, 0, _("File daemon at \"%s:%d\" rejected Hello command\n"),
           fd->host(), fd->port());
      return 0;
   }
   return 1;
}
/*
 * Prune at least one Volume in current Pool. This is called from
 * catreq.c => next_vol.c when the Storage daemon is asking for another
 * volume and no appendable volumes are available.
 *
 * Candidate media are gathered from the current pool plus the Scratch
 * pool(s), oldest LastWritten first; each candidate is pruned until one
 * becomes fully purged, recyclable, not expired, and (when InChanger is
 * requested) actually loadable — that volume is copied into *mr.
 *
 * No-op unless Job PruneVolumes or Pool AutoPrune is enabled.
 */
void prune_volumes(JCR *jcr, bool InChanger, MEDIA_DBR *mr, STORE *store)
{
   int count;
   int i;
   dbid_list ids;
   struct del_ctx prune_list;
   POOL_MEM query(PM_MESSAGE), changer(PM_MESSAGE);
   UAContext *ua;
   char ed1[50], ed2[100], ed3[50];
   POOL_DBR spr;

   Dmsg1(100, "Prune volumes PoolId=%d\n", jcr->jr.PoolId);
   if (!jcr->job->PruneVolumes && !jcr->pool->AutoPrune) {
      Dmsg0(100, "AutoPrune not set in Pool.\n");
      return;
   }

   memset(&prune_list, 0, sizeof(prune_list));
   prune_list.max_ids = 10000;
   prune_list.JobId = (JobId_t *)malloc(sizeof(JobId_t) * prune_list.max_ids);

   ua = new_ua_context(jcr);
   db_lock(jcr->db);

   /* Edit PoolId */
   edit_int64(mr->PoolId, ed1);

   /*
    * Get Pool record for Scratch Pool
    */
   memset(&spr, 0, sizeof(spr));
   bstrncpy(spr.Name, "Scratch", sizeof(spr.Name));
   if (db_get_pool_record(jcr, jcr->db, &spr)) {
      edit_int64(spr.PoolId, ed2);
      bstrncat(ed2, ",", sizeof(ed2));
   } else {
      ed2[0] = 0;                     /* no Scratch pool configured */
   }

   /* A per-media ScratchPoolId, if set, is added to the ID list too */
   if (mr->ScratchPoolId) {
      edit_int64(mr->ScratchPoolId, ed3);
      bstrncat(ed2, ed3, sizeof(ed2));
      bstrncat(ed2, ",", sizeof(ed2));
   }
   Dmsg1(100, "Scratch pool(s)=%s\n", ed2);

   /*
    * ed2 ends up with scratch poolid and current poolid or
    * just current poolid if there is no scratch pool
    */
   bstrncat(ed2, ed1, sizeof(ed2));

   /*
    * Get the List of all media ids in the current Pool or whose
    * RecyclePoolId is the current pool or the scratch pool
    */
   const char *select =
      "SELECT DISTINCT MediaId,LastWritten FROM Media WHERE "
      "(PoolId=%s OR RecyclePoolId IN (%s)) AND MediaType='%s' %s"
      "ORDER BY LastWritten ASC,MediaId";

   set_storageid_in_mr(store, mr);
   if (InChanger) {
      /* Trailing space keeps the SQL valid when spliced before ORDER BY */
      Mmsg(changer, "AND InChanger=1 AND StorageId IN (%s) ",
           edit_int64(mr->StorageId, ed3));
   }
   Mmsg(query, select, ed1, ed2, mr->MediaType, changer.c_str());
   Dmsg1(100, "query=%s\n", query.c_str());
   if (!db_get_query_dbids(ua->jcr, ua->db, query, ids)) {
      Jmsg(jcr, M_ERROR, 0, "%s", db_strerror(jcr->db));
      goto bail_out;
   }

   Dmsg1(100, "Volume prune num_ids=%d\n", ids.num_ids);

   /* Visit each Volume and Prune it until we find one that is purged */
   for (i = 0; i < ids.num_ids; i++) {
      MEDIA_DBR lmr;

      lmr.MediaId = ids.DBId[i];
      Dmsg1(100, "Get record MediaId=%d\n", (int)lmr.MediaId);
      if (!db_get_media_record(jcr, jcr->db, &lmr)) {
         Jmsg(jcr, M_ERROR, 0, "%s", db_strerror(jcr->db));
         continue;
      }
      Dmsg1(100, "Examine vol=%s\n", lmr.VolumeName);

      /* Don't prune archived volumes */
      if (lmr.Enabled == 2) {
         Dmsg1(100, "Vol=%s disabled\n", lmr.VolumeName);
         continue;
      }

      /* Prune only Volumes with status "Full", or "Used" */
      if (strcmp(lmr.VolStatus, "Full") == 0 ||
          strcmp(lmr.VolStatus, "Used") == 0) {
         Dmsg2(100, "Add prune list MediaId=%d Volume %s\n",
               (int)lmr.MediaId, lmr.VolumeName);
         count = get_prune_list_for_volume(ua, &lmr, &prune_list);
         Dmsg1(100, "Num pruned = %d\n", count);
         if (count != 0) {
            purge_job_list_from_catalog(ua, prune_list);
            prune_list.num_ids = 0;   /* reset count */
         }
         if (!is_volume_purged(ua, &lmr)) {
            Dmsg1(100, "Vol=%s not pruned\n", lmr.VolumeName);
            continue;
         }
         Dmsg1(100, "Vol=%s is purged\n", lmr.VolumeName);

         /*
          * Since we are also pruning the Scratch pool, continue
          * until and check if this volume is available (InChanger + StorageId)
          * If not, just skip this volume and try the next one
          */
         if (InChanger) {
            if (!lmr.InChanger || (lmr.StorageId != mr->StorageId)) {
               Dmsg1(100, "Vol=%s not inchanger\n", lmr.VolumeName);
               continue;              /* skip this volume, ie not loadable */
            }
         }
         if (!lmr.Recycle) {
            Dmsg1(100, "Vol=%s not recyclable\n", lmr.VolumeName);
            continue;
         }
         if (has_volume_expired(jcr, &lmr)) {
            Dmsg1(100, "Vol=%s has expired\n", lmr.VolumeName);
            continue;                 /* Volume not usable */
         }

         /*
          * If purged and not moved to another Pool,
          * then we stop pruning and take this volume.
          */
         if (lmr.PoolId == mr->PoolId) {
            Dmsg2(100, "Got Vol=%s MediaId=%d purged.\n",
                  lmr.VolumeName, (int)lmr.MediaId);
            mr->copy(&lmr);
            set_storageid_in_mr(store, mr);
            break;                    /* got a volume */
         }
      }
   }

bail_out:
   Dmsg0(100, "Leave prune volumes\n");
   db_unlock(jcr->db);
   free_ua_context(ua);
   if (prune_list.JobId) {
      free(prune_list.JobId);
   }
   return;
}
/*
 * Authenticate Storage daemon connection.
 *
 * Same flow as the File daemon authentication: Hello with the Director
 * name, mutual CRAM-MD5 using the store's password, TLS requirement
 * reconciliation (optionally engaging TLS, torn down again when used
 * for authentication only), then validation of the SD's OK response.
 *
 * Returns: true (1) on success, false (0) on any failure.
 */
bool authenticate_storage_daemon(JCR *jcr, STORE *store)
{
   BSOCK *sd = jcr->store_bsock;
   char dirname[MAX_NAME_LENGTH];
   int tls_local_need = BNET_TLS_NONE;
   int tls_remote_need = BNET_TLS_NONE;
   int compatible = true;
   bool auth_success = false;

   /*
    * Send my name to the Storage daemon then do authentication
    */
   bstrncpy(dirname, director->hdr.name, sizeof(dirname));
   bash_spaces(dirname);

   /* Timeout Hello after 1 min */
   btimer_t *tid = start_bsock_timer(sd, AUTH_TIMEOUT);
   if (!sd->fsend(hello, dirname)) {
      stop_bsock_timer(tid);
      Dmsg1(dbglvl, _("Error sending Hello to Storage daemon. ERR=%s\n"), bnet_strerror(sd));
      Jmsg(jcr, M_FATAL, 0, _("Error sending Hello to Storage daemon. ERR=%s\n"), bnet_strerror(sd));
      return 0;
   }

   /* TLS Requirement: derive our local need from the store resource */
   if (store->tls_enable) {
      if (store->tls_require) {
         tls_local_need = BNET_TLS_REQUIRED;
      } else {
         tls_local_need = BNET_TLS_OK;
      }
   }
   /* TLS-based authentication forces TLS to be required */
   if (store->tls_authenticate) {
      tls_local_need = BNET_TLS_REQUIRED;
   }

   /* Mutual CRAM-MD5: answer their challenge, then issue ours */
   auth_success = cram_md5_respond(sd, store->password, &tls_remote_need, &compatible);
   if (auth_success) {
      auth_success = cram_md5_challenge(sd, store->password, tls_local_need, compatible);
      if (!auth_success) {
         Dmsg1(dbglvl, "cram_challenge failed for %s\n", sd->who());
      }
   } else {
      Dmsg1(dbglvl, "cram_respond failed for %s\n", sd->who());
   }

   if (!auth_success) {
      stop_bsock_timer(tid);
      Dmsg0(dbglvl, _("Director and Storage daemon passwords or names not the same.\n"));
      Jmsg2(jcr, M_FATAL, 0,
            _("Director unable to authenticate with Storage daemon at \"%s:%d\". Possible causes:\n"
              "Passwords or names not the same or\n"
              "Maximum Concurrent Jobs exceeded on the SD or\n"
              "SD networking messed up (restart daemon).\n"
              "Please see " MANUAL_AUTH_URL " for help.\n"),
            sd->host(), sd->port());
      return 0;
   }

   /* Verify that the remote host is willing to meet our TLS requirements */
   if (tls_remote_need < tls_local_need &&
       tls_local_need != BNET_TLS_OK && tls_remote_need != BNET_TLS_OK) {
      stop_bsock_timer(tid);
      Jmsg(jcr, M_FATAL, 0,
           _("Authorization problem: Remote server did not advertise required TLS support.\n"));
      return 0;
   }

   /* Verify that we are willing to meet the remote host's requirements */
   if (tls_remote_need > tls_local_need &&
       tls_local_need != BNET_TLS_OK && tls_remote_need != BNET_TLS_OK) {
      stop_bsock_timer(tid);
      Jmsg(jcr, M_FATAL, 0, _("Authorization problem: Remote server requires TLS.\n"));
      return 0;
   }

   /* Is TLS Enabled? */
   if (tls_local_need >= BNET_TLS_OK && tls_remote_need >= BNET_TLS_OK) {
      /* Engage TLS! Full Speed Ahead! */
      if (!bnet_tls_client(store->tls_ctx, sd, NULL)) {
         stop_bsock_timer(tid);
         Jmsg(jcr, M_FATAL, 0, _("TLS negotiation failed with SD at \"%s:%d\"\n"),
              sd->host(), sd->port());
         return 0;
      }
      if (store->tls_authenticate) {       /* authentication only? */
         sd->free_tls();                   /* yes, stop tls */
      }
   }

   Dmsg1(116, ">stored: %s", sd->msg);
   if (sd->recv() <= 0) {
      stop_bsock_timer(tid);
      Jmsg3(jcr, M_FATAL, 0,
            _("bdird<stored: \"%s:%s\" bad response to Hello command: ERR=%s\n"),
            sd->who(), sd->host(), sd->bstrerror());
      return 0;
   }
   Dmsg1(110, "<stored: %s", sd->msg);
   stop_bsock_timer(tid);

   if (strncmp(sd->msg, OKhello, sizeof(OKhello)) != 0) {
      Dmsg0(dbglvl, _("Storage daemon rejected Hello command\n"));
      Jmsg2(jcr, M_FATAL, 0, _("Storage daemon at \"%s:%d\" rejected Hello command\n"),
            sd->host(), sd->port());
      return 0;
   }
   return 1;
}
static bRC getAcl(bpContext *ctx, acl_pkt *ap) { bool skip_xattr, abort_retrieval; int current_size; int32_t xattr_value_length; uint32_t content_length = 0; uint32_t expected_serialize_len; POOL_MEM xattr_value(PM_MESSAGE), serialized_acls(PM_MESSAGE); plugin_ctx *p_ctx = (plugin_ctx *)ctx->pContext; if (!p_ctx) { return bRC_Error; } abort_retrieval = false; for (int cnt = 0; xattr_acl_skiplist[cnt] != NULL; cnt++) { skip_xattr = false; while (1) { current_size = xattr_value.max_size(); xattr_value_length = ceph_lgetxattr(p_ctx->cmount, ap->fname, xattr_acl_skiplist[cnt], xattr_value.c_str(), current_size); if (xattr_value_length < 0) { berrno be; switch (errno) { #if defined(ENOATTR) || defined(ENODATA) #if defined(ENOATTR) case ENOATTR: #endif #if defined(ENODATA) && ENOATTR != ENODATA case ENODATA: #endif skip_xattr = true; break; #endif #if defined(ENOTSUP) || defined(EOPNOTSUPP) #if defined(ENOTSUP) case ENOTSUP: #endif #if defined(EOPNOTSUPP) && EOPNOTSUPP != ENOTSUP case EOPNOTSUPP: #endif abort_retrieval = true; break; #endif case ERANGE: /* * Not enough room in buffer double its size and retry. */ xattr_value.check_size(current_size * 2); continue; default: Jmsg(ctx, M_ERROR, "ceph_lgetxattr(%s) failed: %s\n", ap->fname, be.bstrerror(-xattr_value_length)); return bRC_Error; } } /* * Retrieved the xattr so break the loop. */ break; } if (abort_retrieval) { break; } if (skip_xattr) { continue; } /* * Serialize the data. */ expected_serialize_len = strlen(xattr_acl_skiplist[cnt]) + xattr_value_length + 4; content_length = serialize_acl_stream(&serialized_acls, expected_serialize_len, content_length, xattr_acl_skiplist[cnt], strlen(xattr_acl_skiplist[cnt]), xattr_value.c_str(), xattr_value_length); } if (content_length > 0) { ap->content = (char *)malloc(content_length); memcpy(ap->content, serialized_acls.c_str(), content_length); ap->content_length = content_length; } return bRC_OK; }
/*
 * Do a virtual backup, which consolidates all previous backups into
 * a sort of synthetic Full.
 *
 * Requires both read and write storage to be configured on the job.
 * The set of JobIds to consolidate comes from the accurate-backup
 * JobId list; the newest of those jobs supplies the previous job
 * record whose times the final job record inherits (see the cleanup
 * routine in this file).
 *
 * Returns: false on failure
 *          true  on success
 */
bool do_native_vbackup(JCR *jcr)
{
   char ed1[100];
   BSOCK *sd;
   char *p;
   db_list_ctx jobids;

   if (!jcr->rstorage) {
      Jmsg(jcr, M_FATAL, 0, _("No storage for reading given.\n"));
      return false;
   }

   if (!jcr->wstorage) {
      Jmsg(jcr, M_FATAL, 0, _("No storage for writing given.\n"));
      return false;
   }

   Dmsg2(100, "rstorage=%p wstorage=%p\n", jcr->rstorage, jcr->wstorage);
   Dmsg2(100, "Read store=%s, write store=%s\n",
         ((STORERES *)jcr->rstorage->first())->name(),
         ((STORERES *)jcr->wstorage->first())->name());

   /*
    * Print Job Start message
    */
   Jmsg(jcr, M_INFO, 0, _("Start Virtual Backup JobId %s, Job=%s\n"),
        edit_uint64(jcr->JobId, ed1), jcr->Job);

   if (!jcr->accurate) {
      Jmsg(jcr, M_WARNING, 0,
           _("This Job is not an Accurate backup so is not equivalent to a Full backup.\n"));
   }

   db_accurate_get_jobids(jcr, jcr->db, &jcr->jr, &jobids);
   Dmsg1(10, "Accurate jobids=%s\n", jobids.list);
   if (jobids.count == 0) {
      Jmsg(jcr, M_FATAL, 0, _("No previous Jobs found.\n"));
      return false;
   }

   /*
    * Now we find the last job that ran and store its info in
    * the previous_jr record. We will set our times to the
    * values from that job so that anything changed after that
    * time will be picked up on the next backup.
    */
   p = strrchr(jobids.list, ',');   /* find last jobid */
   if (p != NULL) {
      p++;
   } else {
      p = jobids.list;
   }

   memset(&jcr->previous_jr, 0, sizeof(jcr->previous_jr));
   jcr->previous_jr.JobId = str_to_int64(p);
   Dmsg1(10, "Previous JobId=%s\n", p);

   if (!db_get_job_record(jcr, jcr->db, &jcr->previous_jr)) {
      Jmsg(jcr, M_FATAL, 0, _("Error getting Job record for previous Job: ERR=%s"),
           db_strerror(jcr->db));
      return false;
   }

   if (!create_bootstrap_file(jcr, jobids.list)) {
      /* Report the actual failure (was a misleading FileSet-record message). */
      Jmsg(jcr, M_FATAL, 0, _("Could not create bootstrap file.\n"));
      return false;
   }

   /*
    * Open a message channel connection with the Storage
    * daemon. This is to let him know that our client
    * will be contacting him for a backup session.
    */
   Dmsg0(110, "Open connection with storage daemon\n");
   jcr->setJobStatus(JS_WaitSD);

   /*
    * Start conversation with Storage daemon
    */
   if (!connect_to_storage_daemon(jcr, 10, me->SDConnectTimeout, true)) {
      return false;
   }
   sd = jcr->store_bsock;

   /*
    * Now start a job with the Storage daemon
    */
   if (!start_storage_daemon_job(jcr, jcr->rstorage, jcr->wstorage, /* send_bsr */ true)) {
      return false;
   }
   Dmsg0(100, "Storage daemon connection OK\n");

   /*
    * We re-update the job start record so that the start
    * time is set after the run before job. This avoids
    * that any files created by the run before job will
    * be saved twice. They will be backed up in the current
    * job, but not in the next one unless they are changed.
    * Without this, they will be backed up in this job and
    * in the next job run because in that case, their date
    * is after the start of this run.
    */
   jcr->start_time = time(NULL);
   jcr->jr.StartTime = jcr->start_time;
   jcr->jr.JobTDate = jcr->start_time;
   jcr->setJobStatus(JS_Running);

   /*
    * Update job start record
    */
   if (!db_update_job_start_record(jcr, jcr->db, &jcr->jr)) {
      Jmsg(jcr, M_FATAL, 0, "%s", db_strerror(jcr->db));
      return false;
   }

   /*
    * Declare the job started to start the MaxRunTime check
    */
   jcr->setJobStarted();

   /*
    * Start the job prior to starting the message thread below
    * to avoid two threads from using the BSOCK structure at
    * the same time.
    */
   if (!sd->fsend("run")) {
      return false;
   }

   /*
    * Now start a Storage daemon message thread
    */
   if (!start_storage_daemon_message_thread(jcr)) {
      return false;
   }

   jcr->setJobStatus(JS_Running);

   /*
    * Pickup Job termination data
    * Note, the SD stores in jcr->JobFiles/ReadBytes/JobBytes/JobErrors
    */
   wait_for_storage_daemon_termination(jcr);
   jcr->setJobStatus(jcr->SDJobStatus);
   db_write_batch_file_records(jcr);    /* used by bulk batch file insert */
   if (!jcr->is_JobStatus(JS_Terminated)) {
      return false;
   }

   native_vbackup_cleanup(jcr, jcr->JobStatus);
   return true;
}