/*
 * Auto Prune Jobs and Files. This is called at the end of every
 * Job. We do not prune volumes here.
 */
void do_autoprune(JCR *jcr)
{
   UAContext *ua;
   JOBRES *job;
   CLIENTRES *client;
   POOLRES *pool;
   bool pruned;

   if (!jcr->res.client) {              /* temp -- remove me */
      return;
   }

   ua = new_ua_context(jcr);
   job = jcr->res.job;
   client = jcr->res.client;
   pool = jcr->res.pool;

   if (job->PruneJobs || client->AutoPrune) {
      prune_jobs(ua, client, pool, jcr->getJobType());
      pruned = true;
   } else {
      pruned = false;
   }

   if (job->PruneFiles || client->AutoPrune) {
      prune_files(ua, client, pool);
      pruned = true;
   }

   if (pruned) {
      Jmsg(jcr, M_INFO, 0, _("End auto prune.\n\n"));
   }

   free_ua_context(ua);
   return;
}
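/*
 * Illustrative call-site sketch; the surrounding function name is an
 * assumption. do_autoprune() is meant to run once per job, after the
 * job's catalog record has been finalized at job end.
 */
#if 0
static void example_update_job_end(JCR *jcr)
{
   /* ... write the final job record to the catalog here ... */
   do_autoprune(jcr);                   /* prune Jobs/Files per Job/Client AutoPrune settings */
}
#endif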
static bool create_bootstrap_file(JCR *jcr, char *jobids)
{
   RESTORE_CTX rx;
   UAContext *ua;

   memset(&rx, 0, sizeof(rx));
   rx.bsr = new_bsr();
   ua = new_ua_context(jcr);
   rx.JobIds = jobids;

   if (!db_open_batch_connection(jcr, jcr->db)) {
      Jmsg0(jcr, M_FATAL, 0, "Can't get batch sql connection");
      free_ua_context(ua);
      free_bsr(rx.bsr);
      return false;
   }

   if (!db_get_file_list(jcr, jcr->db_batch, jobids,
                         false /* don't use md5 */,
                         true /* use delta */,
                         insert_bootstrap_handler, (void *)rx.bsr)) {
      Jmsg(jcr, M_ERROR, 0, "%s", db_strerror(jcr->db_batch));
   }

   complete_bsr(ua, rx.bsr);
   jcr->ExpectedFiles = write_bsr_file(ua, rx);
   if (debug_level >= 10) {
      Dmsg1(000, "Found %d files to consolidate.\n", jcr->ExpectedFiles);
   }

   if (jcr->ExpectedFiles == 0) {
      free_ua_context(ua);
      free_bsr(rx.bsr);
      return false;
   }

   free_ua_context(ua);
   free_bsr(rx.bsr);

   return true;
}
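/*
 * Illustrative usage sketch; the JobId values are assumptions. The jobids
 * argument is a comma-separated list of JobIds whose file records are
 * written into a bootstrap (bsr) file for the read part of the job.
 */
#if 0
   char jobids[] = "101,102,105";       /* example JobIds to consolidate */

   if (!create_bootstrap_file(jcr, jobids)) {
      Jmsg(jcr, M_FATAL, 0, _("Could not create bootstrap file\n"));
      return false;
   }
#endif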
void cancel_storage_daemon_job(JCR *jcr)
{
   if (jcr->sd_canceled) {
      return;                           /* cancel only once */
   }

   UAContext *ua = new_ua_context(jcr);
   JCR *control_jcr = new_control_jcr("*JobCancel*", JT_SYSTEM);
   BSOCK *sd;

   ua->jcr = control_jcr;
   if (jcr->store_bsock) {
      if (!ua->jcr->wstorage) {
         if (jcr->rstorage) {
            copy_wstorage(ua->jcr, jcr->rstorage, _("Job resource"));
         } else {
            copy_wstorage(ua->jcr, jcr->wstorage, _("Job resource"));
         }
      } else {
         USTORE store;
         if (jcr->rstorage) {
            store.store = jcr->rstore;
         } else {
            store.store = jcr->wstore;
         }
         set_wstorage(ua->jcr, &store);
      }

      if (!connect_to_storage_daemon(ua->jcr, 10, SDConnectTimeout, 1)) {
         goto bail_out;
      }
      Dmsg0(200, "Connected to storage daemon\n");
      sd = ua->jcr->store_bsock;
      sd->fsend("cancel Job=%s\n", jcr->Job);
      while (sd->recv() >= 0) {
      }
      sd->signal(BNET_TERMINATE);
      sd->close();
      ua->jcr->store_bsock = NULL;
      jcr->sd_canceled = true;
      jcr->store_bsock->set_timed_out();
      jcr->store_bsock->set_terminated();
      sd_msg_thread_send_signal(jcr, TIMEOUT_SIGNAL);
      jcr->my_thread_send_signal(TIMEOUT_SIGNAL);
   }

bail_out:
   free_jcr(control_jcr);
   free_ua_context(ua);
}
static void job_monitor_watchdog(watchdog_t *self)
{
   JCR *control_jcr, *jcr;

   control_jcr = (JCR *)self->data;

   Dsm_check(100);
   Dmsg1(800, "job_monitor_watchdog %p called\n", self);

   foreach_jcr(jcr) {
      bool cancel = false;

      if (jcr->JobId == 0 || job_canceled(jcr) || jcr->no_maxtime) {
         Dmsg2(800, "Skipping JCR=%p Job=%s\n", jcr, jcr->Job);
         continue;
      }

      /* check MaxWaitTime */
      if (job_check_maxwaittime(jcr)) {
         jcr->setJobStatus(JS_Canceled);
         Qmsg(jcr, M_FATAL, 0, _("Max wait time exceeded. Job canceled.\n"));
         cancel = true;
      /* check MaxRunTime */
      } else if (job_check_maxruntime(jcr)) {
         jcr->setJobStatus(JS_Canceled);
         Qmsg(jcr, M_FATAL, 0, _("Max run time exceeded. Job canceled.\n"));
         cancel = true;
      /* check MaxRunSchedTime */
      } else if (job_check_maxrunschedtime(jcr)) {
         jcr->setJobStatus(JS_Canceled);
         Qmsg(jcr, M_FATAL, 0, _("Max run sched time exceeded. Job canceled.\n"));
         cancel = true;
      }

      if (cancel) {
         Dmsg3(800, "Cancelling JCR %p jobid %d (%s)\n", jcr, jcr->JobId, jcr->Job);
         UAContext *ua = new_ua_context(jcr);
         ua->jcr = control_jcr;
         cancel_job(ua, jcr);
         free_ua_context(ua);
         Dmsg2(800, "Have cancelled JCR %p Job=%d\n", jcr, jcr->JobId);
      }
   }
   /* Keep reference counts correct */
   endeach_jcr(jcr);
}
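/*
 * Illustrative registration sketch, assuming the watchdog API implied by the
 * watchdog_t callback above (new_watchdog(), register_watchdog() and the
 * callback, one_shot, interval and data members); the interval and the
 * control JCR name are assumptions.
 */
#if 0
   watchdog_t *wd = new_watchdog();

   wd->callback = job_monitor_watchdog;
   wd->one_shot = false;
   wd->interval = 60;                   /* check the running jobs once a minute */
   wd->data = new_control_jcr("*JobMonitor*", JT_SYSTEM);
   register_watchdog(wd);
#endif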
/* Run a Bacula job */
static PyObject *job_run(PyObject *self, PyObject *arg)
{
   JCR *jcr;
   char *item;
   int stat;

   if (!PyArg_ParseTuple(arg, "s:run", &item)) {
      Dmsg0(000, "Error in ParseTuple\n");
      return NULL;
   }
   /* Release lock due to recursion */
// PyEval_ReleaseLock();
   jcr = get_jcr_from_PyObject(self);
   UAContext *ua = new_ua_context(jcr);
   ua->batch = true;
   pm_strcpy(ua->cmd, item);            /* copy command */
   parse_ua_args(ua);                   /* parse command */
   stat = run_cmd(ua, ua->cmd);
   free_ua_context(ua);
// PyEval_AcquireLock();
   return PyInt_FromLong((long)stat);
}
static PyObject *job_cancel(PyObject *self, PyObject *args)
{
   JobId_t JobId = 0;
   JCR *jcr;
   bool found = false;

   if (!PyArg_ParseTuple(args, "i:cancel", &JobId)) {
      Dmsg0(000, "Parse tuple error in job_cancel\n");
      return NULL;
   }
   foreach_jcr(jcr) {
      if (jcr->JobId == 0) {
         continue;
      }
      if (jcr->JobId == JobId) {
         found = true;
         break;
      }
   }
   /* endeach_jcr(jcr) not needed because freed below */
   if (!found) {
      /* ***FIXME*** raise exception */
      return NULL;
   }
// PyEval_ReleaseLock();
   UAContext *ua = new_ua_context(jcr);
   ua->batch = true;
   if (!cancel_job(ua, jcr)) {
      /* ***FIXME*** raise exception */
      free_ua_context(ua);
      free_jcr(jcr);
      return NULL;
   }
   free_ua_context(ua);
   free_jcr(jcr);
// PyEval_AcquireLock();
   Py_INCREF(Py_None);
   return Py_None;
}
/*
 * Open the bootstrap file and find the first Storage=
 * Returns true if the file could be opened.
 *
 * On success it fills in the storage name (normally found on the first
 * line) and the open FILE pointer for the bootstrap file in the
 * bootstrap_info structure. The caller should use that structure for the
 * subsequent operations and must close it when finished.
 */
bool open_bootstrap_file(JCR *jcr, bootstrap_info &info)
{
   FILE *bs;
   UAContext *ua;

   info.bs = NULL;
   info.ua = NULL;

   if (!jcr->RestoreBootstrap) {
      return false;
   }
   bstrncpy(info.storage, jcr->res.rstore->name(), MAX_NAME_LENGTH);

   bs = fopen(jcr->RestoreBootstrap, "rb");
   if (!bs) {
      berrno be;
      Jmsg(jcr, M_FATAL, 0, _("Could not open bootstrap file %s: ERR=%s\n"),
           jcr->RestoreBootstrap, be.bstrerror());
      jcr->setJobStatus(JS_ErrorTerminated);
      return false;
   }

   ua = new_ua_context(jcr);
   ua->cmd = check_pool_memory_size(ua->cmd, UA_CMD_SIZE + 1);
   while (fgets(ua->cmd, UA_CMD_SIZE, bs)) {
      parse_ua_args(ua);
      if (ua->argc != 1) {
         continue;
      }
      if (bstrcasecmp(ua->argk[0], "Storage")) {
         bstrncpy(info.storage, ua->argv[0], MAX_NAME_LENGTH);
         break;
      }
   }

   info.bs = bs;
   info.ua = ua;
   fseek(bs, 0, SEEK_SET);              /* return to the top of the file */

   return true;
}
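/*
 * Illustrative caller sketch; the processing step is elided. The returned
 * bootstrap_info holds both the UA context and the open FILE, so the caller
 * is responsible for freeing and closing both when finished.
 */
#if 0
   bootstrap_info info;

   if (open_bootstrap_file(jcr, info)) {
      /* ... process the bootstrap records, using info.storage as the first Storage ... */
      free_ua_context(info.ua);
      fclose(info.bs);
   }
#endif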
/*
 * Cancel a running job on a storage daemon. This is the system invoked,
 * non-interactive version: it builds a UA context and calls the
 * interactive version with the silent flag set.
 */
void cancel_storage_daemon_job(JCR *jcr)
{
   UAContext *ua;
   JCR *control_jcr;

   if (jcr->sd_canceled) {
      return;                           /* cancel only once */
   }

   ua = new_ua_context(jcr);
   control_jcr = new_control_jcr("*JobCancel*", JT_SYSTEM);

   ua->jcr = control_jcr;
   if (jcr->store_bsock) {
      if (!cancel_storage_daemon_job(ua, jcr, true)) {
         goto bail_out;
      }
   }

bail_out:
   free_jcr(control_jcr);
   free_ua_context(ua);
}
/*
 * Handle Director User Agent commands
 */
static void *handle_UA_client_request(void *arg)
{
   int status;
   UAContext *ua;
   JCR *jcr;
   BSOCK *user = (BSOCK *)arg;

   pthread_detach(pthread_self());

   jcr = new_control_jcr("-Console-", JT_CONSOLE);
   ua = new_ua_context(jcr);
   ua->UA_sock = user;
   set_jcr_in_tsd(INVALID_JCR);

   user->recv();                        /* Get first message */
   if (!authenticate_user_agent(ua)) {
      goto getout;
   }

   while (!ua->quit) {
      if (ua->api) {
         user->signal(BNET_MAIN_PROMPT);
      }

      status = user->recv();
      if (status >= 0) {
         pm_strcpy(ua->cmd, ua->UA_sock->msg);
         parse_ua_args(ua);
         if (ua->argc > 0 && ua->argk[0][0] == '.') {
            do_a_dot_command(ua);
         } else {
            do_a_command(ua);
         }

         dequeue_messages(ua->jcr);

         if (!ua->quit) {
            if (console_msg_pending && acl_access_ok(ua, Command_ACL, "messages")) {
               if (ua->auto_display_messages) {
                  pm_strcpy(ua->cmd, "messages");
                  qmessages_cmd(ua, ua->cmd);
                  ua->user_notified_msg_pending = false;
               } else if (!ua->gui && !ua->user_notified_msg_pending && console_msg_pending) {
                  if (ua->api) {
                     user->signal(BNET_MSGS_PENDING);
                  } else {
                     bsendmsg(ua, _("You have messages.\n"));
                  }
                  ua->user_notified_msg_pending = true;
               }
            }

            if (!ua->api) {
               user->signal(BNET_EOD);   /* send end of command */
            }
         }
      } else if (is_bnet_stop(user)) {
         ua->quit = true;
      } else {                           /* signal */
         user->signal(BNET_POLL);
      }
   }

getout:
   close_db(ua);
   free_ua_context(ua);
   free_jcr(jcr);
   user->close();
   delete user;

   return NULL;
}
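/*
 * Illustrative dispatch sketch; accept_console_connection() is a
 * hypothetical helper standing in for the real accept loop. Each accepted
 * console socket is handed to handle_UA_client_request() on its own thread;
 * the handler detaches itself and deletes the BSOCK when the console quits.
 */
#if 0
   pthread_t tid;
   BSOCK *console_sock = accept_console_connection();   /* hypothetical accept helper */

   pthread_create(&tid, NULL, handle_UA_client_request, (void *)console_sock);
#endif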
/*
 * Check for duplicate jobs.
 *  Returns: true  if current job should continue
 *           false if current job should terminate
 */
bool allow_duplicate_job(JCR *jcr)
{
   JCR *djcr;                           /* possible duplicate job */
   JOBRES *job = jcr->res.job;
   bool cancel_dup = false;
   bool cancel_me = false;

   /*
    * See if AllowDuplicateJobs is set or
    * if duplicate checking is disabled for this job.
    */
   if (job->AllowDuplicateJobs || jcr->IgnoreDuplicateJobChecking) {
      return true;
   }

   Dmsg0(800, "Enter allow_duplicate_job\n");

   /*
    * After this point, we do not want to allow any duplicate
    * job to run.
    */
   foreach_jcr(djcr) {
      if (jcr == djcr || djcr->JobId == 0) {
         continue;                      /* do not cancel this job or consoles */
      }

      /*
       * See if this Job has the IgnoreDuplicateJobChecking flag set, ignore it
       * for any checking against other jobs.
       */
      if (djcr->IgnoreDuplicateJobChecking) {
         continue;
      }

      if (bstrcmp(job->name(), djcr->res.job->name())) {
         if (job->DuplicateJobProximity > 0) {
            utime_t now = (utime_t)time(NULL);
            if ((now - djcr->start_time) > job->DuplicateJobProximity) {
               continue;                /* not really a duplicate */
            }
         }
         if (job->CancelLowerLevelDuplicates &&
             djcr->getJobType() == 'B' && jcr->getJobType() == 'B') {
            switch (jcr->getJobLevel()) {
            case L_FULL:
               if (djcr->getJobLevel() == L_DIFFERENTIAL ||
                   djcr->getJobLevel() == L_INCREMENTAL) {
                  cancel_dup = true;
               }
               break;
            case L_DIFFERENTIAL:
               if (djcr->getJobLevel() == L_INCREMENTAL) {
                  cancel_dup = true;
               }
               if (djcr->getJobLevel() == L_FULL) {
                  cancel_me = true;
               }
               break;
            case L_INCREMENTAL:
               if (djcr->getJobLevel() == L_FULL ||
                   djcr->getJobLevel() == L_DIFFERENTIAL) {
                  cancel_me = true;
               }
            }
            /*
             * cancel_dup will be done below
             */
            if (cancel_me) {
               /* Zap current job */
               jcr->setJobStatus(JS_Canceled);
               Jmsg(jcr, M_FATAL, 0, _("JobId %d already running. Duplicate job not allowed.\n"),
                    djcr->JobId);
               break;                   /* get out of foreach_jcr */
            }
         }

         /*
          * Cancel one of the two jobs (me or dup)
          * If CancelQueuedDuplicates is set do so only if job is queued.
          */
         if (job->CancelQueuedDuplicates) {
            switch (djcr->JobStatus) {
            case JS_Created:
            case JS_WaitJobRes:
            case JS_WaitClientRes:
            case JS_WaitStoreRes:
            case JS_WaitPriority:
            case JS_WaitMaxJobs:
            case JS_WaitStartTime:
               cancel_dup = true;       /* cancel queued duplicate */
               break;
            default:
               break;
            }
         }

         if (cancel_dup || job->CancelRunningDuplicates) {
            /*
             * Zap the duplicated job djcr
             */
            UAContext *ua = new_ua_context(jcr);
            Jmsg(jcr, M_INFO, 0, _("Cancelling duplicate JobId=%d.\n"), djcr->JobId);
            cancel_job(ua, djcr);
            bmicrosleep(0, 500000);
            djcr->setJobStatus(JS_Canceled);
            cancel_job(ua, djcr);
            free_ua_context(ua);
            Dmsg2(800, "Cancel dup %p JobId=%d\n", djcr, djcr->JobId);
         } else {
            /*
             * Zap current job
             */
            jcr->setJobStatus(JS_Canceled);
            Jmsg(jcr, M_FATAL, 0, _("JobId %d already running. Duplicate job not allowed.\n"),
                 djcr->JobId);
            Dmsg2(800, "Cancel me %p JobId=%d\n", jcr, jcr->JobId);
         }
         Dmsg4(800, "curJobId=%d use_cnt=%d dupJobId=%d use_cnt=%d\n",
               jcr->JobId, jcr->use_count(), djcr->JobId, djcr->use_count());
         break;                         /* did our work, get out of foreach loop */
      }
   }
   endeach_jcr(djcr);

   return true;
}
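/*
 * Illustrative caller sketch (assumed calling convention): a false return,
 * or a job left in JS_Canceled by the duplicate check, means the current
 * job must not run.
 */
#if 0
   if (!allow_duplicate_job(jcr) || job_canceled(jcr)) {
      /* this job was chosen to be canceled in favor of a duplicate */
      return false;
   }
#endif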
/*
 * Prune at least one Volume in current Pool. This is called from
 * catreq.c => next_vol.c when the Storage daemon is asking for another
 * volume and no appendable volumes are available.
 */
void prune_volumes(JCR *jcr, bool InChanger, MEDIA_DBR *mr, STORERES *store)
{
   int i;
   int count;
   POOL_DBR spr;
   UAContext *ua;
   dbid_list ids;
   struct del_ctx prune_list;
   POOL_MEM query(PM_MESSAGE);
   char ed1[50], ed2[100], ed3[50];

   Dmsg1(100, "Prune volumes PoolId=%d\n", jcr->jr.PoolId);
   if (!jcr->res.job->PruneVolumes && !jcr->res.pool->AutoPrune) {
      Dmsg0(100, "AutoPrune not set in Pool.\n");
      return;
   }

   memset(&prune_list, 0, sizeof(prune_list));
   prune_list.max_ids = 10000;
   prune_list.JobId = (JobId_t *)malloc(sizeof(JobId_t) * prune_list.max_ids);

   ua = new_ua_context(jcr);
   db_lock(jcr->db);

   /* Edit PoolId */
   edit_int64(mr->PoolId, ed1);

   /*
    * Get Pool record for Scratch Pool
    */
   memset(&spr, 0, sizeof(spr));
   bstrncpy(spr.Name, "Scratch", sizeof(spr.Name));
   if (db_get_pool_record(jcr, jcr->db, &spr)) {
      edit_int64(spr.PoolId, ed2);
      bstrncat(ed2, ",", sizeof(ed2));
   } else {
      ed2[0] = 0;
   }

   if (mr->ScratchPoolId) {
      edit_int64(mr->ScratchPoolId, ed3);
      bstrncat(ed2, ed3, sizeof(ed2));
      bstrncat(ed2, ",", sizeof(ed2));
   }
   Dmsg1(100, "Scratch pool(s)=%s\n", ed2);

   /*
    * ed2 ends up with scratch poolid and current poolid or
    * just current poolid if there is no scratch pool
    */
   bstrncat(ed2, ed1, sizeof(ed2));

   /*
    * Get the List of all media ids in the current Pool or whose
    * RecyclePoolId is the current pool or the scratch pool
    */
   const char *select =
      "SELECT DISTINCT MediaId,LastWritten FROM Media WHERE "
      "(PoolId=%s OR RecyclePoolId IN (%s)) AND MediaType='%s' %s"
      "ORDER BY LastWritten ASC,MediaId";

   if (InChanger) {
      char changer[100];
      /* Ensure it is in this autochanger */
      bsnprintf(changer, sizeof(changer), "AND InChanger=1 AND StorageId=%s ",
                edit_int64(mr->StorageId, ed3));
      Mmsg(query, select, ed1, ed2, mr->MediaType, changer);
   } else {
      Mmsg(query, select, ed1, ed2, mr->MediaType, "");
   }
   Dmsg1(100, "query=%s\n", query.c_str());

   if (!db_get_query_dbids(ua->jcr, ua->db, query, ids)) {
      Jmsg(jcr, M_ERROR, 0, "%s", db_strerror(jcr->db));
      goto bail_out;
   }

   Dmsg1(100, "Volume prune num_ids=%d\n", ids.num_ids);

   /* Visit each Volume and Prune it until we find one that is purged */
   for (i = 0; i < ids.num_ids; i++) {
      MEDIA_DBR lmr;

      lmr.MediaId = ids.DBId[i];
      Dmsg1(100, "Get record MediaId=%d\n", (int)lmr.MediaId);
      if (!db_get_media_record(jcr, jcr->db, &lmr)) {
         Jmsg(jcr, M_ERROR, 0, "%s", db_strerror(jcr->db));
         continue;
      }
      Dmsg1(100, "Examine vol=%s\n", lmr.VolumeName);

      /* Don't prune archived volumes */
      if (lmr.Enabled == 2) {
         Dmsg1(100, "Vol=%s disabled\n", lmr.VolumeName);
         continue;
      }

      /* Prune only Volumes with status "Full", or "Used" */
      if (bstrcmp(lmr.VolStatus, "Full") || bstrcmp(lmr.VolStatus, "Used")) {
         Dmsg2(100, "Add prune list MediaId=%d Volume %s\n", (int)lmr.MediaId, lmr.VolumeName);
         count = get_prune_list_for_volume(ua, &lmr, &prune_list);
         Dmsg1(100, "Num pruned = %d\n", count);
         if (count != 0) {
            purge_job_list_from_catalog(ua, prune_list);
            prune_list.num_ids = 0;     /* reset count */
         }

         if (!is_volume_purged(ua, &lmr)) {
            Dmsg1(050, "Vol=%s not pruned\n", lmr.VolumeName);
            continue;
         }
         Dmsg1(050, "Vol=%s is purged\n", lmr.VolumeName);

         /*
          * Since we are also pruning the Scratch pool, check whether this
          * volume is actually available (InChanger + StorageId). If not,
          * skip it and try the next one.
          */
         if (InChanger) {
            if (!lmr.InChanger || (lmr.StorageId != mr->StorageId)) {
               Dmsg1(100, "Vol=%s not InChanger or correct StorageId\n", lmr.VolumeName);
               continue;                /* skip this volume, ie not loadable */
            }
         }

         if (!lmr.Recycle) {
            Dmsg1(100, "Vol=%s not recyclable\n", lmr.VolumeName);
            continue;
         }

         if (has_volume_expired(jcr, &lmr)) {
            Dmsg1(100, "Vol=%s has expired\n", lmr.VolumeName);
            continue;                   /* Volume not usable */
         }

         /*
          * If purged and not moved to another Pool,
          * then we stop pruning and take this volume.
          */
         if (lmr.PoolId == mr->PoolId) {
            Dmsg2(100, "Got Vol=%s MediaId=%d purged.\n", lmr.VolumeName, (int)lmr.MediaId);
            mr->copy(&lmr);
            set_storageid_in_mr(store, mr);
            break;                      /* got a volume */
         }
      }
   }

bail_out:
   Dmsg0(100, "Leave prune volumes\n");
   db_unlock(jcr->db);
   free_ua_context(ua);

   if (prune_list.JobId) {
      free(prune_list.JobId);
   }

   return;
}
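/*
 * Illustrative example of the generated query (all values are assumptions):
 * with the current PoolId=2, a "Scratch" pool with PoolId=7, MediaType
 * "LTO-6" and the InChanger restriction on StorageId=3, Mmsg() produces:
 *
 *   SELECT DISTINCT MediaId,LastWritten FROM Media WHERE
 *   (PoolId=2 OR RecyclePoolId IN (7,2)) AND MediaType='LTO-6'
 *   AND InChanger=1 AND StorageId=3 ORDER BY LastWritten ASC,MediaId
 */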
/*
 * Items needed:
 *   mr.PoolId must be set
 *   mr.StorageId should also be set
 *   mr.ScratchPoolId could be set (used if create==true)
 *   jcr->wstore
 *   jcr->db
 *   jcr->pool
 *   MEDIA_DBR mr with PoolId set
 *   create -- whether or not to create a new volume
 */
int find_next_volume_for_append(JCR *jcr, MEDIA_DBR *mr, int index,
                                bool create, bool prune)
{
   int retry = 0;
   bool ok;
   bool InChanger;
   STORE *store = jcr->wstore;

   bstrncpy(mr->MediaType, store->media_type, sizeof(mr->MediaType));
   Dmsg3(100, "find_next_vol_for_append: JobId=%u PoolId=%d, MediaType=%s\n",
         (uint32_t)jcr->JobId, (int)mr->PoolId, mr->MediaType);

   /*
    * If we are using an Autochanger, restrict Volume
    * search to the Autochanger on the first pass
    */
   InChanger = store->autochanger;

   /*
    * Find the Next Volume for Append
    */
   db_lock(jcr->db);
   for ( ;; ) {
      bstrncpy(mr->VolStatus, "Append", sizeof(mr->VolStatus));  /* want only appendable volumes */
      /*
       * 1. Look for volume with "Append" status.
       */
      ok = db_find_next_volume(jcr, jcr->db, index, InChanger, mr);

      if (!ok) {
         Dmsg4(150, "after find_next_vol ok=%d index=%d InChanger=%d Vstat=%s\n",
               ok, index, InChanger, mr->VolStatus);
         /*
          * 2. Try finding a recycled volume
          */
         ok = find_recycled_volume(jcr, InChanger, mr);
         Dmsg2(150, "find_recycled_volume ok=%d FW=%d\n", ok, mr->FirstWritten);
         if (!ok) {
            /*
             * 3. Try recycling any purged volume
             */
            ok = recycle_oldest_purged_volume(jcr, InChanger, mr);
            if (!ok) {
               /*
                * 4. Try pruning Volumes
                */
               if (prune) {
                  Dmsg0(150, "Call prune_volumes\n");
                  prune_volumes(jcr, InChanger, mr);
               }
               ok = recycle_oldest_purged_volume(jcr, InChanger, mr);
               if (!ok && create) {
                  Dmsg4(150, "after prune volumes_vol ok=%d index=%d InChanger=%d Vstat=%s\n",
                        ok, index, InChanger, mr->VolStatus);
                  /*
                   * 5. Try pulling a volume from the Scratch pool
                   */
                  ok = get_scratch_volume(jcr, InChanger, mr);
                  Dmsg4(150, "after get scratch volume ok=%d index=%d InChanger=%d Vstat=%s\n",
                        ok, index, InChanger, mr->VolStatus);
               }
               /*
                * If we are using an Autochanger and have not found
                * a volume, retry looking for any volume.
                */
               if (!ok && InChanger) {
                  InChanger = false;
                  continue;             /* retry again accepting any volume */
               }
            }
         }

         if (!ok && create) {
            /*
             * 6. Try "creating" a new Volume
             */
            ok = newVolume(jcr, mr);
         }

         /*
          * Look at more drastic ways to find an Appendable Volume
          */
         if (!ok && (jcr->pool->purge_oldest_volume || jcr->pool->recycle_oldest_volume)) {
            Dmsg2(200, "No next volume found. PurgeOldest=%d RecycleOldest=%d\n",
                  jcr->pool->purge_oldest_volume, jcr->pool->recycle_oldest_volume);
            /* Find oldest volume to recycle */
            ok = db_find_next_volume(jcr, jcr->db, -1, InChanger, mr);
            Dmsg1(200, "Find oldest=%d Volume\n", ok);
            if (ok && prune) {
               UAContext *ua;
               Dmsg0(200, "Try purge Volume.\n");
               /*
                * 7. Try purging the oldest volume, but only if the UA is not calling us.
                */
               ua = new_ua_context(jcr);
               if (jcr->pool->purge_oldest_volume && create) {
                  Jmsg(jcr, M_INFO, 0, _("Purging oldest volume \"%s\"\n"), mr->VolumeName);
                  ok = purge_jobs_from_volume(ua, mr);
               /*
                * 8. Or try recycling the oldest volume
                */
               } else if (jcr->pool->recycle_oldest_volume) {
                  Jmsg(jcr, M_INFO, 0, _("Pruning oldest volume \"%s\"\n"), mr->VolumeName);
                  ok = prune_volume(ua, mr);
               }
               free_ua_context(ua);
               if (ok) {
                  ok = recycle_volume(jcr, mr);
                  Dmsg1(400, "Recycle after purge oldest=%d\n", ok);
               }
            }
         }
      }

      Dmsg2(100, "VolJobs=%d FirstWritten=%d\n", mr->VolJobs, mr->FirstWritten);
      if (ok) {
         /* If we can use the volume, check if it is expired */
         if (has_volume_expired(jcr, mr)) {
            if (retry++ < 200) {        /* sanity check */
               continue;                /* try again from the top */
            } else {
               Jmsg(jcr, M_ERROR, 0, _(
                   "We seem to be looping trying to find the next volume. I give up.\n"));
            }
         }
      }
      break;
   } /* end for loop */
   db_unlock(jcr->db);
   Dmsg1(150, "return ok=%d find_next_vol\n", ok);
   return ok;
}
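/*
 * Illustrative caller sketch; the index, create and prune values are
 * assumptions. This is how the catalog request path would ask for an
 * appendable Volume when the Storage daemon needs one.
 */
#if 0
   MEDIA_DBR mr;

   memset(&mr, 0, sizeof(mr));
   mr.PoolId = jcr->jr.PoolId;          /* Pool the job is writing to */
   if (find_next_volume_for_append(jcr, &mr, 1 /* index */,
                                   true /* create */, true /* prune */)) {
      /* mr now describes an appendable (or newly recycled/created) Volume */
   }
#endif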
/*
 * Try hard to recycle the current volume
 *
 *  Returns: *reason == NULL - the volume can be used (possibly after being
 *                             recycled here)
 *           *reason != NULL - pointer to a message explaining why the
 *                             volume cannot be used
 */
void check_if_volume_valid_or_recyclable(JCR *jcr, MEDIA_DBR *mr, const char **reason)
{
   int ok;

   *reason = NULL;

   if (!mr->Recycle) {
      *reason = _("volume has recycling disabled");
      return;
   }

   /* Check if a duration or limit has expired */
   if (has_volume_expired(jcr, mr)) {
      *reason = _("volume has expired");
      /* Keep going because we may be able to recycle volume */
   }

   /*
    * Now see if we can use the volume as is
    */
   if (strcmp(mr->VolStatus, "Append") == 0 ||
       strcmp(mr->VolStatus, "Recycle") == 0) {
      *reason = NULL;
      return;
   }

   /*
    * Check if the Volume is already marked for recycling
    */
   if (strcmp(mr->VolStatus, "Purged") == 0) {
      if (recycle_volume(jcr, mr)) {
         Jmsg(jcr, M_INFO, 0, _("Recycled current volume \"%s\"\n"), mr->VolumeName);
         *reason = NULL;
         return;
      } else {
         /* In principle this shouldn't happen */
         *reason = _("and recycling of current volume failed");
         return;
      }
   }

   /* At this point, the volume is not valid for writing */
   *reason = _("but should be Append, Purged or Recycle");

   /*
    * What we're trying to do here is see if the current volume is
    * "recyclable" - ie. if we prune all expired jobs off it, is
    * it now possible to reuse it for the job that it is currently
    * needed for?
    */
   if ((mr->LastWritten + mr->VolRetention) < (utime_t)time(NULL) &&
       mr->Recycle && jcr->pool->recycle_current_volume &&
       (strcmp(mr->VolStatus, "Full") == 0 || strcmp(mr->VolStatus, "Used") == 0)) {
      /*
       * Attempt prune of current volume to see if we can
       * recycle it for use.
       */
      UAContext *ua;

      ua = new_ua_context(jcr);
      ok = prune_volume(ua, mr);
      free_ua_context(ua);

      if (ok) {
         /* If fully purged, recycle current volume */
         if (recycle_volume(jcr, mr)) {
            Jmsg(jcr, M_INFO, 0, _("Recycled current volume \"%s\"\n"), mr->VolumeName);
            *reason = NULL;
         } else {
            *reason = _("but should be Append, Purged or Recycle (recycling of the "
                        "current volume failed)");
         }
      } else {
         *reason = _("but should be Append, Purged or Recycle (cannot automatically "
                     "recycle current volume, as it still contains unpruned data "
                     "or the Volume Retention time has not expired.)");
      }
   }
}
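/*
 * Illustrative caller sketch; mr is assumed to already hold the current
 * Volume's catalog record. A NULL reason means the Volume can be used,
 * otherwise reason explains why it cannot.
 */
#if 0
   const char *reason = NULL;

   check_if_volume_valid_or_recyclable(jcr, &mr, &reason);
   if (!reason) {
      /* Volume is usable (possibly after having been recycled above) */
   } else {
      Jmsg(jcr, M_INFO, 0, _("Volume \"%s\" is not usable: %s\n"), mr.VolumeName, reason);
   }
#endif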