/**
 * Begin a transaction on a database - used for db recovery.
 *
 * indata carries a struct ctdb_transdb naming the target database (db_id)
 * and the transaction id to start.
 *
 * Returns 0 on success, -1 if no database with that id is attached.
 */
int32_t ctdb_control_db_transaction_start(struct ctdb_context *ctdb,
					  TDB_DATA indata)
{
	struct ctdb_transdb *transdb = (struct ctdb_transdb *)indata.dptr;
	struct ctdb_db_context *ctdb_db = find_ctdb_db(ctdb, transdb->db_id);
	struct db_start_transaction_state ts;

	if (ctdb_db == NULL) {
		DEBUG(DEBUG_ERR, ("Transaction start for unknown dbid 0x%08x\n",
				  transdb->db_id));
		return -1;
	}

	ts.transaction_id = transdb->tid;
	ts.transaction_started = true;

	return db_start_transaction(ctdb_db, &ts);
}
int main(){ MYSQL * conn; struct gs_scope campaign; /* Seed Database with Scope from Config file. */ conn = _getMySQLConnection(); if(!conn){ fprintf(stderr, "%s\n", "Could not connect to mySQL"); close(STDIN_FILENO); close(STDOUT_FILENO); close(STDERR_FILENO); return 1; } gs_scope_setId(CAMPAIGN_ID, &campaign ); gs_scope_setDesc("GREEN_UP" , &campaign); db_start_transaction(conn); db_insertScope(&campaign, conn); if(campaign.id == CAMPAIGN_ID) fprintf(stdout, "Created Campaign Scope with requested campaign ID of %ld\n", CAMPAIGN_ID); else if(campaign.id == GS_SCOPE_INVALID_ID) fprintf(stderr, "There was a problem attempting to create the scope.\n" ); else fprintf(stdout, "Could not created campaign with requested ID %ld, created with ID of: %ld\n", CAMPAIGN_ID, campaign.id); db_end_transaction(conn); mysql_close(conn); mysql_library_end(); close(STDIN_FILENO); close(STDOUT_FILENO); close(STDERR_FILENO); return 0; }
/*
 * Note, we receive the whole attribute record, but we select out only the stat
 * packet, VolSessionId, VolSessionTime, FileIndex, file type, and file name to
 * store in the catalog.
 *
 * msg/msglen hold the raw "UpdCat Job=... FileAttributes ..." message from
 * the SD; its payload is an SD header followed by a stream-specific record.
 * Attribute records are cached in jcr->ar and only flushed to the catalog
 * when the next attribute (or a digest for a different file) arrives.
 */
static void update_attribute(JCR *jcr, char *msg, int32_t msglen)
{
   unser_declare;
   uint32_t VolSessionId, VolSessionTime;
   int32_t Stream;
   uint32_t FileIndex;
   char *p;
   int len;
   char *fname, *attr;
   ATTR_DBR *ar = NULL;
   uint32_t reclen;

   /*
    * Start transaction allocates jcr->attr and jcr->ar if needed
    */
   db_start_transaction(jcr, jcr->db);     /* start transaction if not already open */
   ar = jcr->ar;

   /*
    * Start by scanning directly in the message buffer to get Stream
    * there may be a cached attr so we cannot yet write into
    * jcr->attr or jcr->ar
    */
   p = msg;
   skip_nonspaces(&p);                /* UpdCat */
   skip_spaces(&p);
   skip_nonspaces(&p);                /* Job=nnn */
   skip_spaces(&p);
   skip_nonspaces(&p);                /* "FileAttributes" */
   p += 1;                            /* skip the single space after the keyword */

   /*
    * The following "SD header" fields are serialized
    */
   unser_begin(p, 0);
   unser_uint32(VolSessionId);        /* VolSessionId */
   unser_uint32(VolSessionTime);      /* VolSessionTime */
   unser_int32(FileIndex);            /* FileIndex */
   unser_int32(Stream);               /* Stream */
   unser_uint32(reclen);              /* Record length */
   p += unser_length(p);              /* Raw record follows */

   /**
    * At this point p points to the raw record, which varies according
    * to what kind of a record (Stream) was sent.  Note, the integer
    * fields at the beginning of these "raw" records are in ASCII with
    * spaces between them so one can use scanf or manual scanning to
    * extract the fields.
    *
    * File Attributes
    *    File_index
    *    File type
    *    Filename (full path)
    *    Encoded attributes
    *    Link name (if type==FT_LNK or FT_LNKSAVED)
    *    Encoded extended-attributes (for Win32)
    *    Delta sequence number (32 bit int)
    *
    * Restore Object
    *    File_index
    *    File_type
    *    Object_index
    *    Object_len (possibly compressed)
    *    Object_full_len (not compressed)
    *    Object_compression
    *    Plugin_name
    *    Object_name
    *    Binary Object data
    */
   Dmsg1(400, "UpdCat msg=%s\n", msg);
   Dmsg5(400, "UpdCat VolSessId=%d VolSessT=%d FI=%d Strm=%d reclen=%d\n",
         VolSessionId, VolSessionTime, FileIndex, Stream, reclen);

   jcr->SDJobBytes += reclen;         /* update number of bytes transferred for quotas */

   /*
    * Depending on the stream we are handling dispatch.
    */
   switch (Stream) {
   case STREAM_UNIX_ATTRIBUTES:
   case STREAM_UNIX_ATTRIBUTES_EX:
      /* Flush any previously cached attribute record first. */
      if (jcr->cached_attribute) {
         Dmsg2(400, "Cached attr. Stream=%d fname=%s\n", ar->Stream, ar->fname);
         if (!db_create_attributes_record(jcr, jcr->db, ar)) {
            Jmsg1(jcr, M_FATAL, 0, _("Attribute create error: ERR=%s"), db_strerror(jcr->db));
         }
         jcr->cached_attribute = false;
      }

      /*
       * Any cached attr is flushed so we can reuse jcr->attr and jcr->ar
       */
      jcr->attr = check_pool_memory_size(jcr->attr, msglen);
      memcpy(jcr->attr, msg, msglen);
      p = jcr->attr - msg + p;        /* point p into jcr->attr */
      skip_nonspaces(&p);             /* skip FileIndex */
      skip_spaces(&p);
      ar->FileType = str_to_int32(p);
      skip_nonspaces(&p);             /* skip FileType */
      skip_spaces(&p);
      fname = p;
      len = strlen(fname);            /* length before attributes */
      attr = &fname[len+1];           /* encoded attributes follow the filename NUL */
      ar->DeltaSeq = 0;
      if (ar->FileType == FT_REG) {
         /* Walk the NUL-separated fields that follow the attributes. */
         p = attr + strlen(attr) + 1; /* point to link */
         p = p + strlen(p) + 1;       /* point to extended attributes */
         p = p + strlen(p) + 1;       /* point to delta sequence */
         /*
          * Older FDs don't have a delta sequence, so check if it is there
          */
         if (p - jcr->attr < msglen) {
            ar->DeltaSeq = str_to_int32(p); /* delta_seq */
         }
      }
      Dmsg2(400, "dird<stored: stream=%d %s\n", Stream, fname);
      Dmsg1(400, "dird<stored: attr=%s\n", attr);

      /* Fill the attribute DB record; it is cached, not written yet. */
      ar->attr = attr;
      ar->fname = fname;
      if (ar->FileType == FT_DELETED) {
         ar->FileIndex = 0;           /* special value */
      } else {
         ar->FileIndex = FileIndex;
      }
      ar->Stream = Stream;
      ar->link = NULL;
      if (jcr->mig_jcr) {
         ar->JobId = jcr->mig_jcr->JobId;
      } else {
         ar->JobId = jcr->JobId;
      }
      ar->Digest = NULL;
      ar->DigestType = CRYPTO_DIGEST_NONE;
      jcr->cached_attribute = true;   /* written later, possibly with a digest */

      Dmsg2(400, "dird<filed: stream=%d %s\n", Stream, fname);
      Dmsg1(400, "dird<filed: attr=%s\n", attr);
      break;
   case STREAM_RESTORE_OBJECT: {
      ROBJECT_DBR ro;

      memset(&ro, 0, sizeof(ro));
      ro.Stream = Stream;
      ro.FileIndex = FileIndex;
      if (jcr->mig_jcr) {
         ro.JobId = jcr->mig_jcr->JobId;
      } else {
         ro.JobId = jcr->JobId;
      }

      Dmsg1(100, "Robj=%s\n", p);

      /* Scan the ASCII integer fields of the restore-object record. */
      skip_nonspaces(&p);             /* skip FileIndex */
      skip_spaces(&p);
      ro.FileType = str_to_int32(p);  /* FileType */
      skip_nonspaces(&p);
      skip_spaces(&p);
      ro.object_index = str_to_int32(p); /* Object Index */
      skip_nonspaces(&p);
      skip_spaces(&p);
      ro.object_len = str_to_int32(p); /* object length possibly compressed */
      skip_nonspaces(&p);
      skip_spaces(&p);
      ro.object_full_len = str_to_int32(p); /* uncompressed object length */
      skip_nonspaces(&p);
      skip_spaces(&p);
      ro.object_compression = str_to_int32(p); /* compression */
      skip_nonspaces(&p);
      skip_spaces(&p);

      ro.plugin_name = p;             /* point to plugin name */
      len = strlen(ro.plugin_name);
      ro.object_name = &ro.plugin_name[len+1]; /* point to object name */
      len = strlen(ro.object_name);
      ro.object = &ro.object_name[len+1]; /* point to object */
      ro.object[ro.object_len] = 0;   /* add zero for those who attempt printing */

      Dmsg7(100, "oname=%s stream=%d FT=%d FI=%d JobId=%d, obj_len=%d\nobj=\"%s\"\n",
            ro.object_name, ro.Stream, ro.FileType, ro.FileIndex, ro.JobId,
            ro.object_len, ro.object);
      /*
       * Store it.
       */
      if (!db_create_restore_object_record(jcr, jcr->db, &ro)) {
         Jmsg1(jcr, M_FATAL, 0, _("Restore object create error. %s"), db_strerror(jcr->db));
      }
      break;
   }
   default:
      /* Any digest stream updates the digest of the file it belongs to. */
      if (crypto_digest_stream_type(Stream) != CRYPTO_DIGEST_NONE) {
         fname = p;                   /* raw record is the binary digest */
         if (ar->FileIndex != FileIndex) {
            /* Digest must refer to the file whose attributes we cached. */
            Jmsg3(jcr, M_WARNING, 0, _("%s not same File=%d as attributes=%d\n"),
                  stream_to_ascii(Stream), FileIndex, ar->FileIndex);
         } else {
            /*
             * Update digest in catalog
             */
            char digestbuf[BASE64_SIZE(CRYPTO_DIGEST_MAX_SIZE)];
            int len = 0;
            int type = CRYPTO_DIGEST_NONE;

            switch(Stream) {
            case STREAM_MD5_DIGEST:
               len = CRYPTO_DIGEST_MD5_SIZE;
               type = CRYPTO_DIGEST_MD5;
               break;
            case STREAM_SHA1_DIGEST:
               len = CRYPTO_DIGEST_SHA1_SIZE;
               type = CRYPTO_DIGEST_SHA1;
               break;
            case STREAM_SHA256_DIGEST:
               len = CRYPTO_DIGEST_SHA256_SIZE;
               type = CRYPTO_DIGEST_SHA256;
               break;
            case STREAM_SHA512_DIGEST:
               len = CRYPTO_DIGEST_SHA512_SIZE;
               type = CRYPTO_DIGEST_SHA512;
               break;
            default:
               /*
                * Never reached ...
                */
               Jmsg(jcr, M_ERROR, 0, _("Catalog error updating file digest. Unsupported digest stream type: %d"), Stream);
            }

            bin_to_base64(digestbuf, sizeof(digestbuf), fname, len, true);
            Dmsg3(400, "DigestLen=%d Digest=%s type=%d\n", strlen(digestbuf),
                  digestbuf, Stream);
            if (jcr->cached_attribute) {
               /* Attach the digest to the cached record and write it now. */
               ar->Digest = digestbuf;
               ar->DigestType = type;
               Dmsg2(400, "Cached attr with digest. Stream=%d fname=%s\n", ar->Stream, ar->fname);

               /*
                * Update BaseFile table
                */
               if (!db_create_attributes_record(jcr, jcr->db, ar)) {
                  Jmsg1(jcr, M_FATAL, 0, _("attribute create error. %s"), db_strerror(jcr->db));
               }
               jcr->cached_attribute = false;
            } else {
               /* Record already written; patch the digest in afterwards. */
               if (!db_add_digest_to_file_record(jcr, jcr->db, ar->FileId, digestbuf, type)) {
                  Jmsg(jcr, M_ERROR, 0, _("Catalog error updating file digest. %s"), db_strerror(jcr->db));
               }
            }
         }
      }
      break;
   }
}
/*
 * Main service loop for the VSTP feed handler.
 *
 * Initialises (and if necessary upgrades) the database, then repeatedly:
 * connects to the stompy message relay, and for each received frame applies
 * it to the database inside a transaction, sending the ACK only after a
 * successful commit.  Any receive or database error drops the connection,
 * which is retried after a growing holdoff.  A statistics report is emitted
 * once per day after REPORT_HOUR.  Runs until the global 'run' flag clears.
 */
static void perform(void)
{
   word last_report_day = 9;   /* 9 = no report made yet (tm_wday is 0..6) */

   // Initialise database
   {
      db_init(conf[conf_db_server], conf[conf_db_user], conf[conf_db_password], conf[conf_db_name]);
      word e;
      if((e=database_upgrade(vstpdb)))
      {
         _log(CRITICAL, "Error %d in upgrade_database(). Aborting.", e);
         exit(1);
      }
   }

   // If we start after today's report hour, suppress today's report.
   {
      time_t now = time(NULL);
      struct tm * broken = localtime(&now);
      if(broken->tm_hour >= REPORT_HOUR)
      {
         last_report_day = broken->tm_wday;
      }
   }

   while(run)
   {
      stats[ConnectAttempt]++;
      int run_receive = !open_stompy(STOMPY_PORT);
      while(run_receive && run)
      {
         holdoff = 0;   /* successful receive cycle resets the reconnect backoff */

         // Daily statistics report, once per day after REPORT_HOUR.
         {
            time_t now = time(NULL);
            struct tm * broken = localtime(&now);
            if(broken->tm_hour >= REPORT_HOUR && broken->tm_wday != last_report_day)
            {
               last_report_day = broken->tm_wday;
               report_stats();
            }
         }

         word r = read_stompy(body, FRAME_SIZE, 64);
         _log(DEBUG, "read_stompy() returned %d.", r);
         if(!r && run && run_receive)
         {
            // Frame received: process it inside a transaction, ACK on commit.
            if(db_start_transaction())
            {
               run_receive = false;
            }
            if(run_receive) process_frame(body);

            if(!db_errored)
            {
               if(db_commit_transaction())
               {
                  db_rollback_transaction();
                  run_receive = false;
               }
               else
               {
                  // Send ACK
                  if(ack_stompy())
                  {
                     _log(CRITICAL, "Failed to write message ack. Error %d %s", errno, strerror(errno));
                     run_receive = false;
                  }
               }
            }
            else
            {
               // DB error occurred during processing of frame.
               db_rollback_transaction();
               run_receive = false;
            }
         }
         else if(run && run_receive)
         {
            // Non-zero return: 3 is a timeout, anything else is an error.
            if(r != 3)
            {
               run_receive = false;
               _log(CRITICAL, "Receive error %d on stompy connection.", r);
            }
            else
            {
               // Don't report these because it is normal on VSTP stream
               // _log(MINOR, "Receive timeout on stompy connection.");
            }
         }
      } // while(run_receive && run)

      close_stompy();

      // Back off before reconnecting; backoff grows until capped at 256.
      {
         word i;
         if(holdoff < 256) holdoff += 38;
         else holdoff = 256;
         for(i = 0; i < holdoff + 64 && run; i++) sleep(1);
      }
   } // while(run)

   if(interrupt)
   {
      _log(CRITICAL, "Terminating due to interrupt.");
   }

   db_disconnect();
   report_stats();
}
int main(){ MYSQL * conn; struct gs_comment testComment; struct gs_marker testMarker; struct gs_marker * markerPage; Decimal latitude; Decimal longitude; char json[JSON_LENGTH]; int numMarkers; int i; bzero(json,JSON_LENGTH); conn = _getMySQLConnection(); if(!conn){ fprintf(stderr, "%s\n", "Could not connect to mySQL"); close(STDIN_FILENO); close(STDOUT_FILENO); close(STDERR_FILENO); return 1; } db_start_transaction(conn); /* Setup referenced comment */ gs_comment_ZeroStruct(&testComment); gs_comment_setContent("Test Comment", &testComment); gs_comment_setScopeId(CAMPAIGN_ID, &testComment); db_insertComment(&testComment,conn); latitude = createDecimalFromString( "-44.050"); longitude= createDecimalFromString( "-44.70"); gs_marker_ZeroStruct(&testMarker); gs_marker_setCommentId(1, &testMarker); gs_marker_setScopeId(CAMPAIGN_ID, &testMarker); gs_marker_setLongitude(longitude, &testMarker); gs_marker_setLatitude(latitude, &testMarker); db_insertMarker(&testMarker, conn); gs_markerNToJSON(testMarker, json, JSON_LENGTH); printf("%s\n", json); db_getMarkerById(testMarker.id, &testMarker, conn); gs_markerNToJSON(testMarker, json, JSON_LENGTH); printf("%s\n", json); markerPage = malloc(RESULTS_PER_PAGE * sizeof(struct gs_marker)); if(markerPage != NULL){ numMarkers = db_getMarkers(0, CAMPAIGN_ID, markerPage, conn); for(i=0; i < numMarkers; ++i){ bzero(json,JSON_LENGTH); gs_markerNToJSON(markerPage[i], json, JSON_LENGTH); printf("%s\n", json); } free(markerPage); }else{ fprintf(stderr, "%s\n", "Could not allocate enough memory for marker page"); } db_abort_transaction(conn); db_end_transaction(conn); mysql_close(conn); mysql_library_end(); close(STDIN_FILENO); close(STDOUT_FILENO); close(STDERR_FILENO); }
int main(int argc, char **argv) { char config_file_path[256]; opt_filename = NULL; opt_url = NULL; fetch_all = false; test_mode = false; verbose = false; opt_insecure = false; used_insecure = false; strcpy(config_file_path, "/etc/openrail.conf"); word usage = false; int c; while ((c = getopt (argc, argv, ":c:u:f:tpih")) != -1) switch (c) { case 'c': strcpy(config_file_path, optarg); break; case 'u': if(!opt_filename) opt_url = optarg; break; case 'f': if(!opt_url) opt_filename = optarg; break; case 'a': fetch_all = true; break; case 't': test_mode = true; break; case 'p': verbose = true; break; case 'i': opt_insecure = true; break; case 'h': usage = true; break; case ':': break; case '?': default: usage = true; break; } char * config_fail; if((config_fail = load_config(config_file_path))) { printf("Failed to read config file \"%s\": %s\n", config_file_path, config_fail); usage = true; } if(usage) { printf("%s %s Usage: %s [-c /path/to/config/file.conf] [-u <url> | -f <path> | -a] [-t | -r] [-p][-i]\n", NAME, BUILD, argv[0]); printf( "-c <file> Path to config file.\n" "Data source:\n" "default Fetch latest update.\n" "-u <url> Fetch from specified URL.\n" "-f <file> Use specified file. (Must already be decompressed.)\n" "Actions:\n" "default Apply data to database.\n" "-t Report datestamp on download or file, do not apply to database.\n" "Options:\n" "-i Insecure. 
Circumvent certificate checks if necessary.\n" "-p Print activity as well as logging.\n" ); exit(1); } char zs[1024]; start_time = time(NULL); debug = *conf[conf_debug]; _log_init(debug?"/tmp/tscdb.log":"/var/log/garner/tscdb.log", (debug?1:(verbose?4:0))); _log(GENERAL, ""); _log(GENERAL, "%s %s", NAME, BUILD); // Enable core dumps struct rlimit limit; if(!getrlimit(RLIMIT_CORE, &limit)) { limit.rlim_cur = RLIM_INFINITY; setrlimit(RLIMIT_CORE, &limit); } int i; for(i = 0; i < MATCHES; i++) { if(regcomp(&match[i], match_strings[i], REG_ICASE + REG_EXTENDED)) { sprintf(zs, "Failed to compile regex match %d", i); _log(MAJOR, zs); } } // Initialise database if(db_init(conf[conf_db_server], conf[conf_db_user], conf[conf_db_password], conf[conf_db_name])) exit(1); { word e; if((e=database_upgrade(cifdb))) { _log(CRITICAL, "Error %d in upgrade_database(). Aborting.", e); exit(1); } } run = 1; tiploc_ignored = false; // Zero the stats { word i; for(i = 0; i < MAXStats; i++) { stats[i] = 0; } } if(fetch_file()) { if(opt_url || opt_filename) { _log(GENERAL, "Failed to find data."); exit(1); } { char report[256]; _log(GENERAL, "Failed to fetch file."); sprintf(report, "Failed to collect timetable update after %lld attempts.", stats[Fetches]); email_alert(NAME, BUILD, "Timetable Update Failure Report", report); } exit(1); } char in_q = 0; char b_depth = 0; size_t ibuf = 0; size_t iobj = 0; size_t buf_end; // Determine applicable update { MYSQL_RES * result0; MYSQL_ROW row0; last_update_id[0] = '\0'; if(!db_query("SELECT MAX(id) FROM updates_processed")) { result0 = db_store_result(); if((row0 = mysql_fetch_row(result0))) { strcpy(last_update_id, row0[0]); } mysql_free_result(result0); } } if(last_update_id[0] == '\0') { _log(CRITICAL, "Failed to determine last update id from database."); exit(1); } if(test_mode) { } else { char c, pc; pc = 0; // Run through the file splitting off each JSON object and passing it on for processing. 
// DB may have dropped out due to long delay (void) db_connect(); if(db_start_transaction()) _log(CRITICAL, "Failed to initiate database transaction."); while((buf_end = fread(buffer, 1, MAX_BUF, fp_result)) && run && !db_errored) { for(ibuf = 0; ibuf < buf_end && run && !db_errored; ibuf++) { c = buffer[ibuf]; if(c != '\r' && c != '\n') { obj[iobj++] = c; if(iobj >= MAX_OBJ) { _log(CRITICAL, "Object buffer overflow!"); exit(1); } if(c == '"' && pc != '\\') in_q = ! in_q; if(!in_q && c == '{') b_depth++; if(!in_q && c == '}' && b_depth-- && !b_depth) { obj[iobj] = '\0'; process_object(obj); iobj = 0; } } pc = c; } } fclose(fp_result); if(db_errored) { _log(CRITICAL, "Update rolled back due to database error."); (void) db_rollback_transaction(); } else { _log(GENERAL, "Committing database updates..."); if(db_commit_transaction()) { _log(CRITICAL, "Database commit failed."); } else { _log(GENERAL, "Committed."); } } } #define REPORT_SIZE 16384 char report[REPORT_SIZE]; report[0] = '\0'; _log(GENERAL, ""); _log(GENERAL, "End of run:"); if(used_insecure) { strcat(report, "*** Warning: Insecure download used.\n"); _log(GENERAL, "*** Warning: Insecure download used."); } sprintf(zs, " Elapsed time: %ld minutes", (time(NULL) - start_time + 30) / 60); _log(GENERAL, zs); strcat(report, zs); strcat(report, "\n"); if(test_mode) { sprintf(zs, "Test mode. No database changes made."); _log(GENERAL, zs); strcat(report, zs); strcat(report, "\n"); exit(0); } for(i=0; i<MAXStats; i++) { sprintf(zs, "%25s: %s", stats_category[i], commas_q(stats[i])); if(i == DBError && stats[i]) strcat(zs, " ****************"); _log(GENERAL, zs); strcat(report, zs); strcat(report, "\n"); } db_disconnect(); email_alert(NAME, BUILD, "Timetable Update Report", report); exit(0); }
/*
 * Main service loop for the TD (train describer) feed handler.
 *
 * Connects to the database (retrying until it succeeds or 'run' clears),
 * creates the schema if needed, resets describer status and the signalling
 * state cache, then repeatedly connects to the stompy relay and processes
 * frames: each frame is applied inside a transaction and ACKed only after a
 * successful commit.  Message counts are recorded periodically, a daily
 * statistics report is emitted after REPORT_HOUR, and describer timeouts are
 * checked each cycle.  Reconnects with a growing holdoff after any failure.
 */
static void perform(void)
{
   word last_report_day = 9;      /* 9 = no report made yet (tm_wday is 0..6) */
   word stompy_timeout = true;    /* true while the feed is (or starts) silent */

   // Initialise database connection
   while(db_init(conf[conf_db_server], conf[conf_db_user], conf[conf_db_password], conf[conf_db_name]) && run)
   {
      _log(CRITICAL, "Failed to initialise database connection. Will retry...");
      word i;
      for(i = 0; i < 64 && run; i++) sleep(1);
   }

   create_database();

   handle = 0xfff0;

   // If we start after today's report hour, suppress today's report, and
   // reset the message-rate counters.
   {
      time_t now = time(NULL);
      struct tm * broken = localtime(&now);
      if(broken->tm_hour >= REPORT_HOUR)
      {
         last_report_day = broken->tm_wday;
      }
      last_message_count_report = now;
      message_count = message_count_rel = 0;
   }

   // Status
   status_last_td_processed = 0;
   {
      word describer;
      for(describer = 0; describer < DESCRIBERS; describer++)
      {
         status_last_td_actual[describer] = last_td_processed[describer] = 0;
         timeout_reported[describer] = false;
      }
   }

   // Signalling - mark all cached state as unknown.
   {
      word i,j;
      for(i = 0; i < DESCRIBERS; i++)
      {
         for(j = 0; j < SIG_BYTES; j++)
         {
            signalling[i][j] = 0xffff;
         }
      }
   }

   while(run)
   {
      stats[ConnectAttempt]++;
      int run_receive = !open_stompy(STOMPY_PORT);
      while(run_receive && run)
      {
         holdoff = 0;   /* successful receive cycle resets the reconnect backoff */

         {
            time_t now = time(NULL);

            // Daily statistics report, once per day after REPORT_HOUR.
            struct tm * broken = localtime(&now);
            if(broken->tm_hour >= REPORT_HOUR && broken->tm_wday != last_report_day)
            {
               last_report_day = broken->tm_wday;
               report_stats();
            }

            // Periodic message-rate samples; counters reset only if the
            // INSERT succeeds, so no samples are lost on a DB hiccup.
            if(now - last_message_count_report > MESSAGE_COUNT_REPORT_INTERVAL)
            {
               char query[256];
               sprintf(query, "INSERT INTO message_count VALUES('tddb', %ld, %d)", now, message_count);
               if(!db_query(query))
               {
                  message_count = 0;
                  last_message_count_report = now;
               }
               sprintf(query, "INSERT INTO message_count VALUES('tddbrel', %ld, %d)", now, message_count_rel);
               if(!db_query(query))
               {
                  message_count_rel = 0;
               }
            }
         }

         int r = read_stompy(body, FRAME_SIZE, 64);
         _log(DEBUG, "read_stompy() returned %d.", r);
         if(!r && run && run_receive)
         {
            // Frame received: log recovery from a timeout, then process it
            // inside a transaction and ACK only after a successful commit.
            if(stompy_timeout)
            {
               _log(MINOR, "TD message stream - Receive OK.");
               stompy_timeout = false;
            }
            if(db_start_transaction())
            {
               run_receive = false;
            }
            if(run_receive) process_frame(body);

            if(!db_errored)
            {
               if(db_commit_transaction())
               {
                  db_rollback_transaction();
                  run_receive = false;
               }
               else
               {
                  // Send ACK
                  if(ack_stompy())
                  {
                     _log(CRITICAL, "Failed to write message ack. Error %d %s", errno, strerror(errno));
                     run_receive = false;
                  }
               }
            }
            else
            {
               // DB error.
               db_rollback_transaction();
               run_receive = false;
            }
         }
         else if(run && run_receive)
         {
            // Non-zero return: 3 is a timeout, anything else is an error.
            if(r != 3)
            {
               run_receive = false;
               _log(CRITICAL, "Receive error %d on stompy connection.", r);
            }
            else
            {
               if(!stompy_timeout) _log(MINOR, "TD message stream - Receive timeout.");
               stompy_timeout = true;
            }
         }

         if(run) check_timeout();
      } // while(run_receive && run)

      close_stompy();
      if(run) check_timeout();

      // Back off before reconnecting; backoff grows until capped at 256.
      {
         word i;
         if(holdoff < 256) holdoff += 34;
         else holdoff = 256;
         for(i = 0; i < holdoff + 64 && run; i++) sleep(1);
      }
   }

   if(interrupt)
   {
      _log(CRITICAL, "Terminating due to interrupt.");
   }

   db_disconnect();

   report_stats();
}
/* TODO: See if we want to let the FD do all kind
 * of catalog request/update
 *
 * Handle a catalog request arriving from the Storage daemon on socket bs.
 * The request in bs->msg is matched, in order, against: Find_media (next
 * appendable volume), Get_Vol_Info (specific volume lookup), Update_media
 * (volume statistics update), Create_jobmedia (JobMedia records streamed
 * until EOF), and snapshot requests.  Anything else is answered with a
 * "1990 Invalid Catalog Request" error.
 */
void catalog_request(JCR *jcr, BSOCK *bs)
{
   MEDIA_DBR mr, sdmr;
   JOBMEDIA_DBR jm;
   char Job[MAX_NAME_LENGTH];
   char pool_name[MAX_NAME_LENGTH];
   int index, ok, label, writing;
   POOLMEM *omsg;
   POOL_DBR pr;
   uint64_t MediaId;
   utime_t VolFirstWritten;
   utime_t VolLastWritten;
   int n;

   memset(&sdmr, 0, sizeof(sdmr));
   memset(&jm, 0, sizeof(jm));
   Dsm_check(100);

   /*
    * Request to find next appendable Volume for this Job
    */
   Dmsg1(200, "catreq %s", bs->msg);
   if (!jcr->db) {
      /* No catalog open for this job: reject every request. */
      omsg = get_memory(bs->msglen+1);
      pm_strcpy(omsg, bs->msg);
      bs->fsend(_("1990 Invalid Catalog Request: %s"), omsg);
      Jmsg1(jcr, M_FATAL, 0, _("Invalid Catalog request; DB not open: %s"), omsg);
      free_memory(omsg);
      return;
   }
   /*
    * Find next appendable medium for SD
    */
   n = sscanf(bs->msg, Find_media, &Job, &index, &pool_name, &mr.MediaType, &mr.VolType);
   if (n == 5) {
      memset(&pr, 0, sizeof(pr));
      bstrncpy(pr.Name, pool_name, sizeof(pr.Name));
      unbash_spaces(pr.Name);
      ok = db_get_pool_record(jcr, jcr->db, &pr);
      if (ok) {
         mr.PoolId = pr.PoolId;
         set_storageid_in_mr(jcr->wstore, &mr);
         mr.ScratchPoolId = pr.ScratchPoolId;
         ok = find_next_volume_for_append(jcr, &mr, index, fnv_create_vol, fnv_prune);
         Dmsg3(050, "find_media ok=%d idx=%d vol=%s\n", ok, index, mr.VolumeName);
      } else {
         /* Report problem finding pool */
         Jmsg1(jcr, M_WARNING, 0, _("Pool \"%s\" not found for SD find media request.\n"),
               pr.Name);
      }
      /*
       * Send Find Media response to Storage daemon
       */
      if (ok) {
         send_volume_info_to_storage_daemon(jcr, bs, &mr);
      } else {
         bs->fsend(_("1901 No Media.\n"));
         Dmsg0(500, "1901 No Media.\n");
      }
      goto ok_out;
   }
   Dmsg1(1000, "Tried find_media. fields wanted=4, got=%d\n", n);

   /*
    * Request to find specific Volume information
    */
   n = sscanf(bs->msg, Get_Vol_Info, &Job, &mr.VolumeName, &writing);
   if (n == 3) {
      Dmsg1(100, "CatReq GetVolInfo Vol=%s\n", mr.VolumeName);
      /*
       * Find the Volume
       */
      unbash_spaces(mr.VolumeName);
      if (db_get_media_record(jcr, jcr->db, &mr)) {
         const char *reason = NULL;           /* detailed reason for rejection */
         /*
          * If we are reading, accept any volume (reason == NULL)
          * If we are writing, check if the Volume is valid
          *   for this job, and do a recycle if necessary
          */
         if (writing) {
            /*
             * SD wants to write this Volume, so make
             *   sure it is suitable for this job, i.e.
             *   Pool matches, and it is either Append or Recycle
             *   and Media Type matches and Pool allows any volume.
             */
            if (mr.PoolId != jcr->jr.PoolId) {
               reason = _("not in Pool");
            } else if (strcmp(mr.MediaType, jcr->wstore->media_type) != 0) {
               reason = _("not correct MediaType");
            } else {
               /*
                * Now try recycling if necessary
                *   reason set non-NULL if we cannot use it
                */
               check_if_volume_valid_or_recyclable(jcr, &mr, &reason);
            }
         }
         if (!reason && mr.Enabled != 1) {
            reason = _("is not Enabled");
         }
         if (reason == NULL) {
            /*
             * Send Find Media response to Storage daemon
             */
            send_volume_info_to_storage_daemon(jcr, bs, &mr);
         } else {
            /* Not suitable volume */
            bs->fsend(_("1998 Volume \"%s\" catalog status is %s, %s.\n"), mr.VolumeName,
               mr.VolStatus, reason);
         }
      } else {
         bs->fsend(_("1997 Volume \"%s\" not in catalog.\n"), mr.VolumeName);
         Dmsg1(100, "1997 Volume \"%s\" not in catalog.\n", mr.VolumeName);
      }
      goto ok_out;
   }
   Dmsg1(1000, "Tried get_vol_info. fields wanted=3, got=%d\n", n);

   /*
    * Request to update Media record. Comes typically at the end
    *  of a Storage daemon Job Session, when labeling/relabeling a
    *  Volume, or when an EOF mark is written.
    */
   n = sscanf(bs->msg, Update_media, &Job, &sdmr.VolumeName,
       &sdmr.VolJobs, &sdmr.VolFiles, &sdmr.VolBlocks, &sdmr.VolBytes,
       &sdmr.VolABytes, &sdmr.VolHoleBytes, &sdmr.VolHoles, &sdmr.VolMounts,
       &sdmr.VolErrors, &sdmr.VolWrites, &sdmr.MaxVolBytes, &VolLastWritten,
       &sdmr.VolStatus, &sdmr.Slot, &label, &sdmr.InChanger,
       &sdmr.VolReadTime, &sdmr.VolWriteTime, &VolFirstWritten, &sdmr.VolType);
   if (n == 22) {
      /* Hold the db lock for the whole read-modify-write of the record. */
      db_lock(jcr->db);
      Dmsg3(400, "Update media %s oldStat=%s newStat=%s\n", sdmr.VolumeName,
         mr.VolStatus, sdmr.VolStatus);
      bstrncpy(mr.VolumeName, sdmr.VolumeName, sizeof(mr.VolumeName)); /* copy Volume name */
      unbash_spaces(mr.VolumeName);
      if (!db_get_media_record(jcr, jcr->db, &mr)) {
         Jmsg(jcr, M_ERROR, 0, _("Unable to get Media record for Volume %s: ERR=%s\n"),
              mr.VolumeName, db_strerror(jcr->db));
         bs->fsend(_("1991 Catalog Request for vol=%s failed: %s"),
            mr.VolumeName, db_strerror(jcr->db));
         db_unlock(jcr->db);
         return;
      }
      /* Set first written time if this is first job */
      if (mr.FirstWritten == 0) {
         if (VolFirstWritten == 0) {
            mr.FirstWritten = jcr->start_time;   /* use Job start time as first write */
         } else {
            mr.FirstWritten = VolFirstWritten;
         }
         mr.set_first_written = true;
      }
      /* If we just labeled the tape set time */
      if (label || mr.LabelDate == 0) {
         mr.LabelDate = jcr->start_time;
         mr.set_label_date = true;
         if (mr.InitialWrite == 0) {
            mr.InitialWrite = jcr->start_time;
         }
         Dmsg2(400, "label=%d labeldate=%d\n", label, mr.LabelDate);
      } else {
         /*
          * Insanity check for VolFiles get set to a smaller value
          */
         if (sdmr.VolFiles < mr.VolFiles) {
            Jmsg(jcr, M_INFO, 0, _("Attempt to set Volume Files from %u to %u"
                 " for Volume \"%s\". Ignored.\n"),
               mr.VolFiles, sdmr.VolFiles, mr.VolumeName);
            sdmr.VolFiles = mr.VolFiles;  /* keep orginal value */
         }
      }
      Dmsg2(400, "Update media: BefVolJobs=%u After=%u\n", mr.VolJobs, sdmr.VolJobs);

      /*
       * Check if the volume has been written by the job,
       * and update the LastWritten field if needed.
       */
      if (mr.VolBlocks != sdmr.VolBlocks && VolLastWritten != 0) {
         mr.LastWritten = VolLastWritten;
      }

      /*
       * Update to point to the last device used to write the Volume.
       *   However, do so only if we are writing the tape, i.e.
       *   the number of VolWrites has increased.
       */
      if (jcr->wstore && sdmr.VolWrites > mr.VolWrites) {
         Dmsg2(050, "Update StorageId old=%d new=%d\n",
               mr.StorageId, jcr->wstore->StorageId);
         /* Update StorageId after write */
         set_storageid_in_mr(jcr->wstore, &mr);
      } else {
         /* Nothing written, reset same StorageId */
         set_storageid_in_mr(NULL, &mr);
      }

      /* Copy updated values to original media record */
      mr.VolJobs      = sdmr.VolJobs;
      mr.VolFiles     = sdmr.VolFiles;
      mr.VolBlocks    = sdmr.VolBlocks;
      mr.VolBytes     = sdmr.VolBytes;
      mr.VolABytes    = sdmr.VolABytes;
      mr.VolHoleBytes = sdmr.VolHoleBytes;
      mr.VolHoles     = sdmr.VolHoles;
      mr.VolMounts    = sdmr.VolMounts;
      mr.VolErrors    = sdmr.VolErrors;
      mr.VolWrites    = sdmr.VolWrites;
      mr.Slot         = sdmr.Slot;
      mr.InChanger    = sdmr.InChanger;
      mr.VolType      = sdmr.VolType;
      bstrncpy(mr.VolStatus, sdmr.VolStatus, sizeof(mr.VolStatus));
      /* Negative read/write times are "not reported" - keep old values. */
      if (sdmr.VolReadTime >= 0) {
         mr.VolReadTime  = sdmr.VolReadTime;
      }
      if (sdmr.VolWriteTime >= 0) {
         mr.VolWriteTime = sdmr.VolWriteTime;
      }

      Dmsg2(400, "db_update_media_record. Stat=%s Vol=%s\n", mr.VolStatus, mr.VolumeName);
      /*
       * Update the database, then before sending the response to the
       *  SD, check if the Volume has expired.
       */
      if (!db_update_media_record(jcr, jcr->db, &mr)) {
         Jmsg(jcr, M_FATAL, 0, _("Catalog error updating Media record. %s"),
            db_strerror(jcr->db));
         bs->fsend(_("1993 Update Media error\n"));
         Pmsg0(000, "1993 Update Media error\n");
      } else {
         (void)has_volume_expired(jcr, &mr);
         send_volume_info_to_storage_daemon(jcr, bs, &mr);
      }
      db_unlock(jcr->db);
      goto ok_out;
   }
   Dmsg1(1000, "Tried update_media. fields wanted=20, got=%d\n", n);

   /*
    * Request to create a JobMedia record
    */
   if (sscanf(bs->msg, Create_jobmedia, &Job) == 1) {
      if (jcr->wjcr) {
         jm.JobId = jcr->wjcr->JobId;
      } else {
         jm.JobId = jcr->JobId;
      }
      ok = true;
      db_lock(jcr->db);
      db_start_transaction(jcr, jcr->db);
      /* The SD streams one line per JobMedia record until socket EOF;
       * a single malformed line poisons the batch (ok stays false). */
      while (bs->recv() >= 0) {
         if (ok && sscanf(bs->msg, "%u %u %u %u %u %u %lld\n",
             &jm.FirstIndex, &jm.LastIndex, &jm.StartFile, &jm.EndFile,
             &jm.StartBlock, &jm.EndBlock, &MediaId) != 7) {
            ok = false;
            continue;
         }
         if (ok) {
            jm.MediaId = MediaId;
            Dmsg6(400, "create_jobmedia JobId=%d MediaId=%d SF=%d EF=%d FI=%d LI=%d\n",
                  jm.JobId, jm.MediaId, jm.StartFile, jm.EndFile, jm.FirstIndex, jm.LastIndex);
            ok = db_create_jobmedia_record(jcr, jcr->db, &jm);
         }
      }
      db_end_transaction(jcr, jcr->db);
      if (!ok) {
         Jmsg(jcr, M_FATAL, 0, _("Catalog error creating JobMedia record. %s"),
            db_strerror(jcr->db));
         db_unlock(jcr->db);
         bs->fsend(_("1992 Create JobMedia error\n"));
         goto ok_out;
      }
      db_unlock(jcr->db);
      Dmsg0(400, "JobMedia record created\n");
      bs->fsend(OK_create);
      goto ok_out;
   }

   /* Handle snapshot catalog request */
   if (snapshot_catreq(jcr, bs)) {
      goto ok_out;
   }

   Dmsg1(1000, "Tried create_jobmedia. fields wanted=10, got=%d\n", n);

   /* Everything failed. Send error message. */
   omsg = get_memory(bs->msglen+1);
   pm_strcpy(omsg, bs->msg);
   bs->fsend(_("1990 Invalid Catalog Request: %s"), omsg);
   Jmsg1(jcr, M_FATAL, 0, _("Invalid Catalog request: %s"), omsg);
   free_memory(omsg);

ok_out:
   Dmsg1(400, ">CatReq response: %s", bs->msg);
   Dmsg1(400, "Leave catreq jcr 0x%x\n", jcr);
   return;
}