int rename_all_files_to_final(struct list *updates) { int ret, update_errs = 0, update_good = 0, skip = 0; struct list *list; unsigned int complete = 0; unsigned int list_length = list_len(updates); list = list_head(updates); while (list) { struct file *file; file = list->data; list = list->next; complete++; if (file->do_not_update) { skip += 1; continue; } ret = rename_staged_file_to_final(file); if (ret != 0) { update_errs += 1; } else { update_good += 1; } progress_report(complete, list_length); } progress_report(list_length, list_length); /* Force out 100% */ info("\n"); return update_count - update_good - update_errs - (update_skip - skip); }
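/*
 * The loop above reports progress as (files completed, total files).  A
 * minimal sketch of a count-based reporter with that shape; this is a
 * hypothetical stand-in for illustration, not swupd's actual progress_report().
 */
#include <stdio.h>

static void progress_report_sketch(unsigned int complete, unsigned int total)
{
	if (total == 0) {
		return;
	}
	/* '\r' keeps the report on one terminal line until the final newline */
	fprintf(stderr, "\r[%3u%%] %u/%u files", (complete * 100) / total, complete, total);
	fflush(stderr);
}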
void D2V::demuxVideo(FILE *video_file, int64_t start_gop_position, int64_t end_gop_position) { result = ProcessingFinished; f->deselectAllStreams(); video_stream->discard = AVDISCARD_DEFAULT; f->seek(start_gop_position); AVPacket packet; av_init_packet(&packet); while (av_read_frame(f->fctx, &packet) == 0) { if (stop_processing) { stop_processing = false; result = ProcessingCancelled; fclose(video_file); return; } // Apparently we might receive packets from streams with AVDISCARD_ALL set, // and also from streams discovered late, probably. if (packet.stream_index != video_stream->index || packet.pos < start_gop_position) { av_free_packet(&packet); continue; } else if (packet.pos >= end_gop_position) { av_free_packet(&packet); break; } if (progress_report) progress_report(packet.pos - start_gop_position, end_gop_position - start_gop_position, progress_data); if (fwrite(packet.data, 1, packet.size, video_file) < (size_t)packet.size) { char id[20] = { 0 }; snprintf(id, 19, "%x", video_stream->id); error = "Failed to write video packet from stream id "; error += id; error += ": fwrite() failed."; result = ProcessingError; av_free_packet(&packet); fclose(video_file); return; } av_free_packet(&packet); } fclose(video_file); }
/* This function is called (via check_events()) from the top level sieve loops (prime_sieve() etc.). It can assume that it is safe to tighten any sieving parameters other than p_min and p_max. */ void process_events(uint64_t current_prime) { /* event_happened was set last in notify_event(), so clear it first which ensures that if some signal arrives while we are in process_events() it might have to wait until the next sieve iteration to get processed, but it won't be lost. */ event_happened = 0; if (clear_event(initialise_events)) { init_signals(); init_progress_report(current_prime); } if (clear_event(subsequence_eliminated)) remove_eliminated_subsequences(current_prime); if (clear_event(sieve_parameters_changed)) init_progress_report(current_prime); if (clear_event(received_sigterm)) { finish_srsieve("SIGTERM was received",current_prime); signal(SIGTERM,SIG_DFL); raise(SIGTERM); } if (clear_event(received_sigint)) { finish_srsieve("SIGINT was received",current_prime); signal(SIGINT,SIG_DFL); raise(SIGINT); } if (clear_event(report_due)) progress_report(current_prime); if (clear_event(save_due)) write_save_file(current_prime); }
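/*
 * Sketch of the flag handshake the comment above relies on, assuming the event
 * flags are volatile sig_atomic_t values set from signal handlers.  The names
 * notify_event(), clear_event() and event_happened mirror the code above;
 * NUM_EVENTS and the array layout are illustrative assumptions.
 */
#include <signal.h>

#define NUM_EVENTS 16			/* illustrative bound */

static volatile sig_atomic_t events[NUM_EVENTS];
static volatile sig_atomic_t event_happened;

static void notify_event(int event)
{
	events[event] = 1;
	event_happened = 1;	/* set last, so a pending flag is never orphaned */
}

static int clear_event(int event)
{
	int was_set = events[event];

	events[event] = 0;
	return was_set;
}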
void write_target_range(char *buf, off_t begin, size_t size) { int writeleft; char *p; /* update progress report */ fetch_done += size; progress_report(false); if (dry_run) return; if (lseek(dstfd, begin, SEEK_SET) == -1) pg_fatal("could not seek in target file \"%s\": %s\n", dstpath, strerror(errno)); writeleft = size; p = buf; while (writeleft > 0) { int writelen; errno = 0; writelen = write(dstfd, p, writeleft); if (writelen < 0) { /* if write didn't set errno, assume problem is no disk space */ if (errno == 0) errno = ENOSPC; pg_fatal("could not write file \"%s\": %s\n", dstpath, strerror(errno)); } p += writelen; writeleft -= writelen; } /* keep the file open, in case we need to copy more blocks in it */ }
static void BaseBackup(void) { PGresult *res; char *sysidentifier; uint32 timeline; char current_path[MAXPGPATH]; char escaped_label[MAXPGPATH]; int i; char xlogstart[64]; char xlogend[64]; /* * Connect in replication mode to the server */ conn = GetConnection(); if (!conn) /* Error message already written in GetConnection() */ exit(1); /* * Run IDENTIFY_SYSTEM so we can get the timeline */ res = PQexec(conn, "IDENTIFY_SYSTEM"); if (PQresultStatus(res) != PGRES_TUPLES_OK) { fprintf(stderr, _("%s: could not send replication command \"%s\": %s"), progname, "IDENTIFY_SYSTEM", PQerrorMessage(conn)); disconnect_and_exit(1); } if (PQntuples(res) != 1 || PQnfields(res) != 3) { fprintf(stderr, _("%s: could not identify system: got %d rows and %d fields, expected %d rows and %d fields\n"), progname, PQntuples(res), PQnfields(res), 1, 3); disconnect_and_exit(1); } sysidentifier = pg_strdup(PQgetvalue(res, 0, 0)); timeline = atoi(PQgetvalue(res, 0, 1)); PQclear(res); /* * Start the actual backup */ PQescapeStringConn(conn, escaped_label, label, sizeof(escaped_label), &i); snprintf(current_path, sizeof(current_path), "BASE_BACKUP LABEL '%s' %s %s %s %s", escaped_label, showprogress ? "PROGRESS" : "", includewal && !streamwal ? "WAL" : "", fastcheckpoint ? "FAST" : "", includewal ? "NOWAIT" : ""); if (PQsendQuery(conn, current_path) == 0) { fprintf(stderr, _("%s: could not send replication command \"%s\": %s"), progname, "BASE_BACKUP", PQerrorMessage(conn)); disconnect_and_exit(1); } /* * Get the starting xlog position */ res = PQgetResult(conn); if (PQresultStatus(res) != PGRES_TUPLES_OK) { fprintf(stderr, _("%s: could not initiate base backup: %s"), progname, PQerrorMessage(conn)); disconnect_and_exit(1); } if (PQntuples(res) != 1) { fprintf(stderr, _("%s: no start point returned from server\n"), progname); disconnect_and_exit(1); } strcpy(xlogstart, PQgetvalue(res, 0, 0)); if (verbose && includewal) fprintf(stderr, "transaction log start point: %s\n", xlogstart); PQclear(res); MemSet(xlogend, 0, sizeof(xlogend)); /* * Get the header */ res = PQgetResult(conn); if (PQresultStatus(res) != PGRES_TUPLES_OK) { fprintf(stderr, _("%s: could not get backup header: %s"), progname, PQerrorMessage(conn)); disconnect_and_exit(1); } if (PQntuples(res) < 1) { fprintf(stderr, _("%s: no data returned from server\n"), progname); disconnect_and_exit(1); } /* * Sum up the total size, for progress reporting */ totalsize = totaldone = 0; tablespacecount = PQntuples(res); for (i = 0; i < PQntuples(res); i++) { if (showprogress) totalsize += atol(PQgetvalue(res, i, 2)); /* * Verify tablespace directories are empty. Don't bother with the * first once since it can be relocated, and it will be checked before * we do anything anyway. */ if (format == 'p' && !PQgetisnull(res, i, 1)) verify_dir_is_empty_or_create(PQgetvalue(res, i, 1)); } /* * When writing to stdout, require a single tablespace */ if (format == 't' && strcmp(basedir, "-") == 0 && PQntuples(res) > 1) { fprintf(stderr, _("%s: can only write single tablespace to stdout, database has %d\n"), progname, PQntuples(res)); disconnect_and_exit(1); } /* * If we're streaming WAL, start the streaming session before we start * receiving the actual data chunks. 
*/ if (streamwal) { if (verbose) fprintf(stderr, _("%s: starting background WAL receiver\n"), progname); StartLogStreamer(xlogstart, timeline, sysidentifier); } /* * Start receiving chunks */ for (i = 0; i < PQntuples(res); i++) { if (format == 't') ReceiveTarFile(conn, res, i); else ReceiveAndUnpackTarFile(conn, res, i); } /* Loop over all tablespaces */ if (showprogress) { progress_report(PQntuples(res), NULL); fprintf(stderr, "\n"); /* Need to move to next line */ } PQclear(res); /* * Get the stop position */ res = PQgetResult(conn); if (PQresultStatus(res) != PGRES_TUPLES_OK) { fprintf(stderr, _("%s: could not get transaction log end position from server: %s"), progname, PQerrorMessage(conn)); disconnect_and_exit(1); } if (PQntuples(res) != 1) { fprintf(stderr, _("%s: no transaction log end position returned from server\n"), progname); disconnect_and_exit(1); } strcpy(xlogend, PQgetvalue(res, 0, 0)); if (verbose && includewal) fprintf(stderr, "transaction log end point: %s\n", xlogend); PQclear(res); res = PQgetResult(conn); if (PQresultStatus(res) != PGRES_COMMAND_OK) { fprintf(stderr, _("%s: final receive failed: %s"), progname, PQerrorMessage(conn)); disconnect_and_exit(1); } if (bgchild > 0) { #ifndef WIN32 int status; int r; #else DWORD status; uint32 hi, lo; #endif if (verbose) fprintf(stderr, _("%s: waiting for background process to finish streaming...\n"), progname); #ifndef WIN32 if (write(bgpipe[1], xlogend, strlen(xlogend)) != strlen(xlogend)) { fprintf(stderr, _("%s: could not send command to background pipe: %s\n"), progname, strerror(errno)); disconnect_and_exit(1); } /* Just wait for the background process to exit */ r = waitpid(bgchild, &status, 0); if (r == -1) { fprintf(stderr, _("%s: could not wait for child process: %s\n"), progname, strerror(errno)); disconnect_and_exit(1); } if (r != bgchild) { fprintf(stderr, _("%s: child %d died, expected %d\n"), progname, r, (int) bgchild); disconnect_and_exit(1); } if (!WIFEXITED(status)) { fprintf(stderr, _("%s: child process did not exit normally\n"), progname); disconnect_and_exit(1); } if (WEXITSTATUS(status) != 0) { fprintf(stderr, _("%s: child process exited with error %d\n"), progname, WEXITSTATUS(status)); disconnect_and_exit(1); } /* Exited normally, we're happy! */ #else /* WIN32 */ /* * On Windows, since we are in the same process, we can just store the * value directly in the variable, and then set the flag that says * it's there. */ if (sscanf(xlogend, "%X/%X", &hi, &lo) != 2) { fprintf(stderr, _("%s: could not parse transaction log location \"%s\"\n"), progname, xlogend); disconnect_and_exit(1); } xlogendptr = ((uint64) hi) << 32 | lo; InterlockedIncrement(&has_xlogendptr); /* First wait for the thread to exit */ if (WaitForSingleObjectEx((HANDLE) bgchild, INFINITE, FALSE) != WAIT_OBJECT_0) { _dosmaperr(GetLastError()); fprintf(stderr, _("%s: could not wait for child thread: %s\n"), progname, strerror(errno)); disconnect_and_exit(1); } if (GetExitCodeThread((HANDLE) bgchild, &status) == 0) { _dosmaperr(GetLastError()); fprintf(stderr, _("%s: could not get child thread exit status: %s\n"), progname, strerror(errno)); disconnect_and_exit(1); } if (status != 0) { fprintf(stderr, _("%s: child thread exited with error %u\n"), progname, (unsigned int) status); disconnect_and_exit(1); } /* Exited normally, we're happy */ #endif } /* * End of copy data. Final result is already checked inside the loop. */ PQclear(res); PQfinish(conn); if (verbose) fprintf(stderr, "%s: base backup completed\n", progname); }
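/*
 * The WIN32 branch above converts a transaction log location of the form
 * "hi/lo" (two hexadecimal words) into one 64-bit position.  A self-contained
 * illustration of that conversion; the function name is hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

static int parse_xlog_location(const char *str, uint64_t *result)
{
	unsigned int hi, lo;

	if (sscanf(str, "%X/%X", &hi, &lo) != 2)
		return -1;		/* not a valid "X/X" location string */
	*result = ((uint64_t) hi) << 32 | lo;
	return 0;
}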
/* * Receive a tar format stream from the connection to the server, and unpack * the contents of it into a directory. Only files, directories and * symlinks are supported, no other kinds of special files. * * If the data is for the main data directory, it will be restored in the * specified directory. If it's for another tablespace, it will be restored * in the original directory, since relocation of tablespaces is not * supported. */ static void ReceiveAndUnpackTarFile(PGconn *conn, PGresult *res, int rownum) { char current_path[MAXPGPATH]; char filename[MAXPGPATH]; int current_len_left; int current_padding = 0; char *copybuf = NULL; FILE *file = NULL; if (PQgetisnull(res, rownum, 0)) strcpy(current_path, basedir); else strcpy(current_path, PQgetvalue(res, rownum, 1)); /* * Get the COPY data */ res = PQgetResult(conn); if (PQresultStatus(res) != PGRES_COPY_OUT) { fprintf(stderr, _("%s: could not get COPY data stream: %s"), progname, PQerrorMessage(conn)); disconnect_and_exit(1); } while (1) { int r; if (copybuf != NULL) { PQfreemem(copybuf); copybuf = NULL; } r = PQgetCopyData(conn, &copybuf, 0); if (r == -1) { /* * End of chunk */ if (file) fclose(file); break; } else if (r == -2) { fprintf(stderr, _("%s: could not read COPY data: %s"), progname, PQerrorMessage(conn)); disconnect_and_exit(1); } if (file == NULL) { int filemode; /* * No current file, so this must be the header for a new file */ if (r != 512) { fprintf(stderr, _("%s: invalid tar block header size: %d\n"), progname, r); disconnect_and_exit(1); } totaldone += 512; if (sscanf(copybuf + 124, "%11o", &current_len_left) != 1) { fprintf(stderr, _("%s: could not parse file size\n"), progname); disconnect_and_exit(1); } /* Set permissions on the file */ if (sscanf(&copybuf[100], "%07o ", &filemode) != 1) { fprintf(stderr, _("%s: could not parse file mode\n"), progname); disconnect_and_exit(1); } /* * All files are padded up to 512 bytes */ current_padding = ((current_len_left + 511) & ~511) - current_len_left; /* * First part of header is zero terminated filename */ snprintf(filename, sizeof(filename), "%s/%s", current_path, copybuf); if (filename[strlen(filename) - 1] == '/') { /* * Ends in a slash means directory or symlink to directory */ if (copybuf[156] == '5') { /* * Directory */ filename[strlen(filename) - 1] = '\0'; /* Remove trailing slash */ if (mkdir(filename, S_IRWXU) != 0) { /* * When streaming WAL, pg_xlog will have been created * by the wal receiver process, so just ignore failure * on that. 
*/ if (!streamwal || strcmp(filename + strlen(filename) - 8, "/pg_xlog") != 0) { fprintf(stderr, _("%s: could not create directory \"%s\": %s\n"), progname, filename, strerror(errno)); disconnect_and_exit(1); } } #ifndef WIN32 if (chmod(filename, (mode_t) filemode)) fprintf(stderr, _("%s: could not set permissions on directory \"%s\": %s\n"), progname, filename, strerror(errno)); #endif } else if (copybuf[156] == '2') { /* * Symbolic link */ filename[strlen(filename) - 1] = '\0'; /* Remove trailing slash */ if (symlink(&copybuf[157], filename) != 0) { fprintf(stderr, _("%s: could not create symbolic link from \"%s\" to \"%s\": %s\n"), progname, filename, &copybuf[157], strerror(errno)); disconnect_and_exit(1); } } else { fprintf(stderr, _("%s: unrecognized link indicator \"%c\"\n"), progname, copybuf[156]); disconnect_and_exit(1); } continue; /* directory or link handled */ } /* * regular file */ file = fopen(filename, "wb"); if (!file) { fprintf(stderr, _("%s: could not create file \"%s\": %s\n"), progname, filename, strerror(errno)); disconnect_and_exit(1); } #ifndef WIN32 if (chmod(filename, (mode_t) filemode)) fprintf(stderr, _("%s: could not set permissions on file \"%s\": %s\n"), progname, filename, strerror(errno)); #endif if (current_len_left == 0) { /* * Done with this file, next one will be a new tar header */ fclose(file); file = NULL; continue; } } /* new file */ else { /* * Continuing blocks in existing file */ if (current_len_left == 0 && r == current_padding) { /* * Received the padding block for this file, ignore it and * close the file, then move on to the next tar header. */ fclose(file); file = NULL; totaldone += r; continue; } if (fwrite(copybuf, r, 1, file) != 1) { fprintf(stderr, _("%s: could not write to file \"%s\": %s\n"), progname, filename, strerror(errno)); disconnect_and_exit(1); } totaldone += r; if (showprogress) progress_report(rownum, filename); current_len_left -= r; if (current_len_left == 0 && current_padding == 0) { /* * Received the last block, and there is no padding to be * expected. Close the file and move on to the next tar * header. */ fclose(file); file = NULL; continue; } } /* continuing data in existing file */ } /* loop over all data blocks */ if (file != NULL) { fprintf(stderr, _("%s: COPY stream ended before last file was finished\n"), progname); disconnect_and_exit(1); } if (copybuf != NULL) PQfreemem(copybuf); }
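/*
 * The unpack loop above decodes the classic 512-byte tar header by fixed
 * offsets: NUL-terminated name at 0, octal mode at 100, octal size at 124,
 * type flag at 156 ('5' directory, '2' symlink), link target at 157, and file
 * data rounded up to the next 512-byte block.  A stand-alone sketch of that
 * decoding, assuming a well-formed header; the struct and function names are
 * illustrative only.
 */
#include <stdio.h>

struct tar_entry
{
	char		name[100];
	unsigned int mode;
	unsigned int size;
	char		type;			/* '0' regular file, '5' directory, '2' symlink */
	unsigned int padding;		/* zero bytes following the data */
};

static int parse_tar_header(const char *hdr, struct tar_entry *e)
{
	snprintf(e->name, sizeof(e->name), "%.99s", hdr);
	if (sscanf(hdr + 100, "%7o", &e->mode) != 1)
		return -1;
	if (sscanf(hdr + 124, "%11o", &e->size) != 1)
		return -1;
	e->type = hdr[156];
	e->padding = ((e->size + 511) & ~511) - e->size;
	return 0;
}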
/* * Receive a tar format file from the connection to the server, and write * the data from this file directly into a tar file. If compression is * enabled, the data will be compressed while written to the file. * * The file will be named base.tar[.gz] if it's for the main data directory * or <tablespaceoid>.tar[.gz] if it's for another tablespace. * * No attempt to inspect or validate the contents of the file is done. */ static void ReceiveTarFile(PGconn *conn, PGresult *res, int rownum) { char filename[MAXPGPATH]; char *copybuf = NULL; FILE *tarfile = NULL; #ifdef HAVE_LIBZ gzFile ztarfile = NULL; #endif if (PQgetisnull(res, rownum, 0)) { /* * Base tablespaces */ if (strcmp(basedir, "-") == 0) { #ifdef HAVE_LIBZ if (compresslevel != 0) { ztarfile = gzdopen(dup(fileno(stdout)), "wb"); if (gzsetparams(ztarfile, compresslevel, Z_DEFAULT_STRATEGY) != Z_OK) { fprintf(stderr, _("%s: could not set compression level %d: %s\n"), progname, compresslevel, get_gz_error(ztarfile)); disconnect_and_exit(1); } } else #endif tarfile = stdout; } else { #ifdef HAVE_LIBZ if (compresslevel != 0) { snprintf(filename, sizeof(filename), "%s/base.tar.gz", basedir); ztarfile = gzopen(filename, "wb"); if (gzsetparams(ztarfile, compresslevel, Z_DEFAULT_STRATEGY) != Z_OK) { fprintf(stderr, _("%s: could not set compression level %d: %s\n"), progname, compresslevel, get_gz_error(ztarfile)); disconnect_and_exit(1); } } else #endif { snprintf(filename, sizeof(filename), "%s/base.tar", basedir); tarfile = fopen(filename, "wb"); } } } else { /* * Specific tablespace */ #ifdef HAVE_LIBZ if (compresslevel != 0) { snprintf(filename, sizeof(filename), "%s/%s.tar.gz", basedir, PQgetvalue(res, rownum, 0)); ztarfile = gzopen(filename, "wb"); if (gzsetparams(ztarfile, compresslevel, Z_DEFAULT_STRATEGY) != Z_OK) { fprintf(stderr, _("%s: could not set compression level %d: %s\n"), progname, compresslevel, get_gz_error(ztarfile)); disconnect_and_exit(1); } } else #endif { snprintf(filename, sizeof(filename), "%s/%s.tar", basedir, PQgetvalue(res, rownum, 0)); tarfile = fopen(filename, "wb"); } } #ifdef HAVE_LIBZ if (compresslevel != 0) { if (!ztarfile) { /* Compression is in use */ fprintf(stderr, _("%s: could not create compressed file \"%s\": %s\n"), progname, filename, get_gz_error(ztarfile)); disconnect_and_exit(1); } } else #endif { /* Either no zlib support, or zlib support but compresslevel = 0 */ if (!tarfile) { fprintf(stderr, _("%s: could not create file \"%s\": %s\n"), progname, filename, strerror(errno)); disconnect_and_exit(1); } } /* * Get the COPY data stream */ res = PQgetResult(conn); if (PQresultStatus(res) != PGRES_COPY_OUT) { fprintf(stderr, _("%s: could not get COPY data stream: %s"), progname, PQerrorMessage(conn)); disconnect_and_exit(1); } while (1) { int r; if (copybuf != NULL) { PQfreemem(copybuf); copybuf = NULL; } r = PQgetCopyData(conn, &copybuf, 0); if (r == -1) { /* * End of chunk. Close file (but not stdout). * * Also, write two completely empty blocks at the end of the tar * file, as required by some tar programs. 
*/ char zerobuf[1024]; MemSet(zerobuf, 0, sizeof(zerobuf)); #ifdef HAVE_LIBZ if (ztarfile != NULL) { if (gzwrite(ztarfile, zerobuf, sizeof(zerobuf)) != sizeof(zerobuf)) { fprintf(stderr, _("%s: could not write to compressed file \"%s\": %s\n"), progname, filename, get_gz_error(ztarfile)); disconnect_and_exit(1); } } else #endif { if (fwrite(zerobuf, sizeof(zerobuf), 1, tarfile) != 1) { fprintf(stderr, _("%s: could not write to file \"%s\": %s\n"), progname, filename, strerror(errno)); disconnect_and_exit(1); } } #ifdef HAVE_LIBZ if (ztarfile != NULL) { if (gzclose(ztarfile) != 0) { fprintf(stderr, _("%s: could not close compressed file \"%s\": %s\n"), progname, filename, get_gz_error(ztarfile)); disconnect_and_exit(1); } } else #endif { if (strcmp(basedir, "-") != 0) { if (fclose(tarfile) != 0) { fprintf(stderr, _("%s: could not close file \"%s\": %s\n"), progname, filename, strerror(errno)); disconnect_and_exit(1); } } } break; } else if (r == -2) { fprintf(stderr, _("%s: could not read COPY data: %s"), progname, PQerrorMessage(conn)); disconnect_and_exit(1); } #ifdef HAVE_LIBZ if (ztarfile != NULL) { if (gzwrite(ztarfile, copybuf, r) != r) { fprintf(stderr, _("%s: could not write to compressed file \"%s\": %s\n"), progname, filename, get_gz_error(ztarfile)); disconnect_and_exit(1); } } else #endif { if (fwrite(copybuf, r, 1, tarfile) != 1) { fprintf(stderr, _("%s: could not write to file \"%s\": %s\n"), progname, filename, strerror(errno)); disconnect_and_exit(1); } } totaldone += r; if (showprogress) progress_report(rownum, filename); } /* while (1) */ if (copybuf != NULL) PQfreemem(copybuf); }
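/*
 * The gzwrite()/fwrite() pair above is repeated for every block written.  A
 * sketch of a helper that factors out that choice; write_tar_data() is a
 * hypothetical name (the later variant of this function uses a WRITE_TAR_DATA
 * macro for the same purpose), and it returns 0 on success, -1 on a short
 * write.
 */
#ifdef HAVE_LIBZ
static int write_tar_data(gzFile ztarfile, FILE *tarfile, const void *buf, size_t len)
{
	if (ztarfile != NULL)
		return gzwrite(ztarfile, buf, len) == (int) len ? 0 : -1;
	return fwrite(buf, len, 1, tarfile) == 1 ? 0 : -1;
}
#else
static int write_tar_data(FILE *tarfile, const void *buf, size_t len)
{
	return fwrite(buf, len, 1, tarfile) == 1 ? 0 : -1;
}
#endif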
/* This function is called (via check_events()) from the top level sieve loops (prime_sieve() etc.). It can assume that it is safe to tighten any sieving parameters other than p_min and p_max. */ void process_events(uint64_t current_prime) { /* event_happened was set last in notify_event(), so clear it first which ensures that if some signal arrives while we are in process_events() it might have to wait until the next sieve iteration to get processed, but it won't be lost. */ event_happened = 0; if (clear_event(initialise_events)) { init_signals(); init_progress_report(current_prime); } if (clear_event(sieve_parameters_changed)) init_progress_report(current_prime); if (clear_event(received_sigterm)) { finish_srsieve("SIGTERM was received",current_prime); signal(SIGTERM,SIG_DFL); raise(SIGTERM); } if (clear_event(received_sigint)) { finish_srsieve("SIGINT was received",current_prime); signal(SIGINT,SIG_DFL); raise(SIGINT); } #ifdef SIGHUP if (clear_event(received_sighup)) { finish_srsieve("SIGHUP was received",current_prime); signal(SIGHUP,SIG_DFL); raise(SIGHUP); } #endif #if HAVE_FORK if (clear_event(received_sigpipe)) { finish_srsieve("SIGPIPE was received",current_prime); signal(SIGPIPE,SIG_DFL); raise(SIGPIPE); } if (clear_event(received_sigchld)) { finish_srsieve("SIGCHLD was received",current_prime); signal(SIGCHLD,SIG_DFL); raise(SIGCHLD); exit(EXIT_FAILURE); } #endif if (clear_event(factor_found)) next_report_due = time(NULL); if (clear_event(report_due)) progress_report(current_prime); if (clear_event(save_due)) { #if SOBISTRATOR_OPT if (sobistrator_opt) sob_write_checkpoint(current_prime); #endif write_checkpoint(current_prime); } }
/* * Receive a tar format file from the connection to the server, and write * the data from this file directly into a tar file. If compression is * enabled, the data will be compressed while written to the file. * * The file will be named base.tar[.gz] if it's for the main data directory * or <tablespaceoid>.tar[.gz] if it's for another tablespace. * * No attempt to inspect or validate the contents of the file is done. */ static void ReceiveTarFile(PGconn *conn, PGresult *res, int rownum) { char filename[MAXPGPATH]; char *copybuf = NULL; FILE *tarfile = NULL; char tarhdr[512]; bool basetablespace = PQgetisnull(res, rownum, 0); bool in_tarhdr = true; bool skip_file = false; size_t tarhdrsz = 0; size_t filesz = 0; #ifdef HAVE_LIBZ gzFile ztarfile = NULL; #endif if (basetablespace) { /* * Base tablespaces */ if (strcmp(basedir, "-") == 0) { #ifdef HAVE_LIBZ if (compresslevel != 0) { ztarfile = gzdopen(dup(fileno(stdout)), "wb"); if (gzsetparams(ztarfile, compresslevel, Z_DEFAULT_STRATEGY) != Z_OK) { fprintf(stderr, _("%s: could not set compression level %d: %s\n"), progname, compresslevel, get_gz_error(ztarfile)); disconnect_and_exit(1); } } else #endif tarfile = stdout; } else { #ifdef HAVE_LIBZ if (compresslevel != 0) { snprintf(filename, sizeof(filename), "%s/base.tar.gz", basedir); ztarfile = gzopen(filename, "wb"); if (gzsetparams(ztarfile, compresslevel, Z_DEFAULT_STRATEGY) != Z_OK) { fprintf(stderr, _("%s: could not set compression level %d: %s\n"), progname, compresslevel, get_gz_error(ztarfile)); disconnect_and_exit(1); } } else #endif { snprintf(filename, sizeof(filename), "%s/base.tar", basedir); tarfile = fopen(filename, "wb"); } } } else { /* * Specific tablespace */ #ifdef HAVE_LIBZ if (compresslevel != 0) { snprintf(filename, sizeof(filename), "%s/%s.tar.gz", basedir, PQgetvalue(res, rownum, 0)); ztarfile = gzopen(filename, "wb"); if (gzsetparams(ztarfile, compresslevel, Z_DEFAULT_STRATEGY) != Z_OK) { fprintf(stderr, _("%s: could not set compression level %d: %s\n"), progname, compresslevel, get_gz_error(ztarfile)); disconnect_and_exit(1); } } else #endif { snprintf(filename, sizeof(filename), "%s/%s.tar", basedir, PQgetvalue(res, rownum, 0)); tarfile = fopen(filename, "wb"); } } #ifdef HAVE_LIBZ if (compresslevel != 0) { if (!ztarfile) { /* Compression is in use */ fprintf(stderr, _("%s: could not create compressed file \"%s\": %s\n"), progname, filename, get_gz_error(ztarfile)); disconnect_and_exit(1); } } else #endif { /* Either no zlib support, or zlib support but compresslevel = 0 */ if (!tarfile) { fprintf(stderr, _("%s: could not create file \"%s\": %s\n"), progname, filename, strerror(errno)); disconnect_and_exit(1); } } /* * Get the COPY data stream */ res = PQgetResult(conn); if (PQresultStatus(res) != PGRES_COPY_OUT) { fprintf(stderr, _("%s: could not get COPY data stream: %s"), progname, PQerrorMessage(conn)); disconnect_and_exit(1); } while (1) { int r; if (copybuf != NULL) { PQfreemem(copybuf); copybuf = NULL; } r = PQgetCopyData(conn, &copybuf, 0); if (r == -1) { /* * End of chunk. If requested, and this is the base tablespace, * write recovery.conf into the tarfile. When done, close the file * (but not stdout). * * Also, write two completely empty blocks at the end of the tar * file, as required by some tar programs. 
*/ char zerobuf[1024]; MemSet(zerobuf, 0, sizeof(zerobuf)); if (basetablespace && writerecoveryconf) { char header[512]; int padding; tarCreateHeader(header, "recovery.conf", NULL, recoveryconfcontents->len, 0600, 04000, 02000, time(NULL)); padding = ((recoveryconfcontents->len + 511) & ~511) - recoveryconfcontents->len; WRITE_TAR_DATA(header, sizeof(header)); WRITE_TAR_DATA(recoveryconfcontents->data, recoveryconfcontents->len); if (padding) WRITE_TAR_DATA(zerobuf, padding); } /* 2 * 512 bytes empty data at end of file */ WRITE_TAR_DATA(zerobuf, sizeof(zerobuf)); #ifdef HAVE_LIBZ if (ztarfile != NULL) { if (gzclose(ztarfile) != 0) { fprintf(stderr, _("%s: could not close compressed file \"%s\": %s\n"), progname, filename, get_gz_error(ztarfile)); disconnect_and_exit(1); } } else #endif { if (strcmp(basedir, "-") != 0) { if (fclose(tarfile) != 0) { fprintf(stderr, _("%s: could not close file \"%s\": %s\n"), progname, filename, strerror(errno)); disconnect_and_exit(1); } } } break; } else if (r == -2) { fprintf(stderr, _("%s: could not read COPY data: %s"), progname, PQerrorMessage(conn)); disconnect_and_exit(1); } if (!writerecoveryconf || !basetablespace) { /* * When not writing recovery.conf, or when not working on the base * tablespace, we never have to look for an existing recovery.conf * file in the stream. */ WRITE_TAR_DATA(copybuf, r); } else { /* * Look for a recovery.conf in the existing tar stream. If it's * there, we must skip it so we can later overwrite it with our * own version of the file. * * To do this, we have to process the individual files inside the * TAR stream. The stream consists of a header and zero or more * chunks, all 512 bytes long. The stream from the server is * broken up into smaller pieces, so we have to track the size of * the files to find the next header structure. */ int rr = r; int pos = 0; while (rr > 0) { if (in_tarhdr) { /* * We're currently reading a header structure inside the * TAR stream, i.e. the file metadata. */ if (tarhdrsz < 512) { /* * Copy the header structure into tarhdr in case the * header is not aligned to 512 bytes or it's not * returned in whole by the last PQgetCopyData call. */ int hdrleft; int bytes2copy; hdrleft = 512 - tarhdrsz; bytes2copy = (rr > hdrleft ? hdrleft : rr); memcpy(&tarhdr[tarhdrsz], copybuf + pos, bytes2copy); rr -= bytes2copy; pos += bytes2copy; tarhdrsz += bytes2copy; } else { /* * We have the complete header structure in tarhdr, * look at the file metadata: - the subsequent file * contents have to be skipped if the filename is * recovery.conf - find out the size of the file * padded to the next multiple of 512 */ int padding; skip_file = (strcmp(&tarhdr[0], "recovery.conf") == 0); sscanf(&tarhdr[124], "%11o", (unsigned int *) &filesz); padding = ((filesz + 511) & ~511) - filesz; filesz += padding; /* Next part is the file, not the header */ in_tarhdr = false; /* * If we're not skipping the file, write the tar * header unmodified. */ if (!skip_file) WRITE_TAR_DATA(tarhdr, 512); } } else { /* * We're processing a file's contents. */ if (filesz > 0) { /* * We still have data to read (and possibly write). */ int bytes2write; bytes2write = (filesz > rr ? rr : filesz); if (!skip_file) WRITE_TAR_DATA(copybuf + pos, bytes2write); rr -= bytes2write; pos += bytes2write; filesz -= bytes2write; } else { /* * No more data in the current file, the next piece of * data (if any) will be a new file header structure. 
*/ in_tarhdr = true; skip_file = false; tarhdrsz = 0; filesz = 0; } } } } totaldone += r; if (showprogress) progress_report(rownum, filename); } /* while (1) */ if (copybuf != NULL) PQfreemem(copybuf); }
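/*
 * Both the recovery.conf injection and the skip logic above depend on tar's
 * rule that each file's data is zero-padded to a 512-byte boundary, computed
 * as ((len + 511) & ~511) - len.  A minimal illustration of that rounding;
 * tar_padding() is a hypothetical helper.
 */
#include <stddef.h>

static size_t tar_padding(size_t len)
{
	/* bytes needed to reach the next multiple of 512 (0 if already aligned) */
	return ((len + 511) & ~(size_t) 511) - len;
}

/* Example: a 100-byte recovery.conf needs 412 bytes of padding, so the entry
 * occupies one 512-byte header block plus one 512-byte data block. */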
bool D2V::handleVideoPacket(AVPacket *packet) { parser.parseData(packet->data, packet->size); uint8_t flags = 0; if (parser.width <= 0 || parser.height <= 0) { if (log_message) log_message("Skipping frame with invalid dimensions " + std::to_string(parser.width) + "x" + std::to_string(parser.height) + ".", log_data); return true; } if (parser.picture_coding_type == MPEGParser::I_PICTURE) { if (!isDataLineNull()) { reorderDataLineFlags(); lines.push_back(line); clearDataLine(); } line.info = INFO_BIT11; if (parser.progressive_sequence) line.info |= INFO_PROGRESSIVE_SEQUENCE; if (parser.group_of_pictures_header) { line.info |= INFO_STARTS_NEW_GOP; if (parser.closed_gop) line.info |= INFO_CLOSED_GOP; } line.matrix = parser.matrix_coefficients; line.file = fake_file->getFileIndex(packet->pos); line.position = fake_file->getPositionInRealFile(packet->pos); flags = FLAGS_I_PICTURE | FLAGS_DECODABLE_WITHOUT_PREVIOUS_GOP; if (progress_report) progress_report(packet->pos, fake_file->getTotalSize(), progress_data); } else if (parser.picture_coding_type == MPEGParser::P_PICTURE) { flags = FLAGS_P_PICTURE | FLAGS_DECODABLE_WITHOUT_PREVIOUS_GOP; } else if (parser.picture_coding_type == MPEGParser::B_PICTURE) { flags = FLAGS_B_PICTURE; int reference_pictures = 0; for (auto it = line.flags.cbegin(); it != line.flags.cend(); it++) { uint8_t frame_type = *it & FLAGS_B_PICTURE; if (frame_type == FLAGS_I_PICTURE || frame_type == FLAGS_P_PICTURE) reference_pictures++; } // av_read_frame returns *frames*, so this works fine even when picture_structure is "field", // i.e. when the pictures in the mpeg2 stream are individual fields. if (reference_pictures >= 2) { flags |= FLAGS_DECODABLE_WITHOUT_PREVIOUS_GOP; } else { line.info &= ~INFO_CLOSED_GOP; } } else { if (log_message) log_message("Skipping unknown picture type " + std::to_string(parser.picture_coding_type) + ".", log_data); return true; } if (parser.repeat_first_field) flags |= FLAGS_RFF; if (parser.top_field_first) flags |= FLAGS_TFF; if ((line.info & INFO_PROGRESSIVE_SEQUENCE) || parser.progressive_frame) flags |= FLAGS_PROGRESSIVE; line.flags.push_back(flags); stats.video_frames++; if (flags & FLAGS_PROGRESSIVE) stats.progressive_frames++; if (flags & FLAGS_TFF) stats.tff_frames++; if (flags & FLAGS_RFF) stats.rff_frames++; return true; }
int test_dispatch(int u, test_t *test, int loops, args_t *a) /* * Function: test_dispatch * Purpose: Run a selected test. * Parameters: test - Pointer to test entry to run. * loops - # of iterations, if -1 count taken from test default. * a - arguments to pass to test. * Returns: TEST_XXX */ { jmp_buf jmp; /* ^C out of tests must be trapped */ int i; /* Loop counter */ int error; /* No error */ int arg_saved; volatile int vloops = loops; /* "loops"/setjmp clobber war */ args_t * volatile av = a; args_t * volatile args = 0; void * volatile fp; /* Test function pointer parameter */ volatile int cleanup = FALSE; int rv; /* Check if test supported */ if (!(test->t_flags & _test_chip(u))) { if (test_options & TEST_O_OVERRIDE) { cli_out("Warning: Running test %d (%s) not supported on %s\n", test->t_test, test->t_name, SOC_UNIT_GROUP(u)); } } if (test_options & TEST_O_RUN) { cli_out("Test %d (%s) Started\n", test->t_test, test->t_name); } COMPILER_REFERENCE(cleanup); error = TEST_RUNNING; #ifndef NO_CTRL_C if (TEST_RUNNING != (error = setjmp(jmp))) { /* Control C handler */ if (test_active) { if (cleanup) { cli_out("Warning: Cleanup aborted - " "continue at your own risk\n"); } else { cleanup = TRUE; cli_out("Warning: cleaning up active test: %s\n", test_active->t_name); test->t_flags |= T_F_STOP; /* Say processed stop */ test_done(u, test_active, error); test_test_done(u, test_active, fp); if (args) { sal_free(args); } } sh_pop_ctrl_c(); test_thread = NULL; test_active = NULL; return(TEST_INTR); } } #endif sh_push_ctrl_c(&jmp); test_thread = sal_thread_self(); /* If no arguments passed in, use default args */ error = TEST_ABORT; if (!av) { if ((av = args = sal_alloc(sizeof(args_t), "test_args")) == NULL) { goto return_no_free; } if (diag_parse_args(test->t_override_string ? test->t_override_string : test->t_default_string, NULL, av)) { test->t_fail++; last_test_status[u] = -1; goto return_with_free; } } fp = NULL; if (-1 == vloops) { /* Assign AFTER setjmp */ vloops = test->t_loops; /* Use default */ } test_active = test; test->t_flags |= T_F_ACTIVE; arg_saved = av->a_arg; /* Some may be consumed */ rv = test_test_init(u, test, av, (void **)&fp); /* check if return value is != BCM_E_UNAVAIL because the memory test on the internal * memories will return BCM_E_UNAVAIL if External TCAM is present. Those memories shouldn't * be tested instead should be skipped. */ if ((rv != BCM_E_UNAVAIL) && (rv)){ test->t_runs++; /* Increment RUN/FAIL count */ test->t_fail++; last_test_status[u] = -1; test->t_flags &= ~T_F_ACTIVE; test_active = NULL; test_thread = NULL; goto return_with_free; } #ifndef NO_CTRL_C if (TEST_RUNNING != (error = setjmp(test_active_jmp))) { if (cleanup) { cli_out("Warning: Cleanup aborted - " "continue at your own risk\n"); } else { cleanup = TRUE; test_done(u, test, error); } } else { #endif /* If progress reporting requested, set it up ... */ if (test_options & TEST_O_PROGRESS) { progress_init(vloops, 0, FALSE); progress_status(test->t_name); } for (i = 0; i < vloops; i++) { if (vloops > 1) { LOG_VERBOSE(BSL_LS_APPL_TESTS, (BSL_META_U(u, "Test %d: %s. 
Starting iteration %d.\n"), test->t_test, test->t_name, i+1)); } test->t_runs++; /* Let-em know we ran */ av->a_arg = arg_saved; /* Reset ARG pointer */ test->t_flags &= ~(T_F_STOP|T_F_ERROR); /* Skip if rv is BCM_E_UNAVAIL as we mentioned above */ if (rv != BCM_E_UNAVAIL) { error = test->t_test_f(u, av, fp); /* Run Test */ } if (error == 0 && test->t_flags & T_F_ERROR) { /* Some tests call test_error but fail to return error */ error = -1; } test_done(u, test, error); if (test_options & TEST_O_PROGRESS) { progress_report(1); } } if (test_options & TEST_O_PROGRESS) { progress_done(); } #ifndef NO_CTRL_C } #endif /* Skip if rv is BCM_E_UNAVAIL as we mentioned above */ if (rv != BCM_E_UNAVAIL) { (void)test_test_done(u, test, fp); /* Run done routines/scripts */ } else { /* If the return value is BCM_E_UNAVAIL, it is assumed that the internal * memories are being tested on external TCAM devices and the test does * not support these memories. Even though the test is not executed, the * test success count is incremented so that it shows up in the final test * results as not failed. * */ test_active = NULL; test_thread = NULL; test->t_success++; } return_with_free: if (args != NULL) { sh_block_ctrl_c(TRUE); sal_free(args); args = 0; sh_block_ctrl_c(FALSE); } return_no_free: sh_pop_ctrl_c(); if (test_options & TEST_O_RUN) { cli_out("Completed test (%d) %s\n", test->t_test, test->t_name); } if (a != NULL) ARG_DISCARD(a); return(error); }
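/*
 * test_dispatch() marks several locals volatile because they are assigned
 * after setjmp() and read again after a longjmp() back into it (the Ctrl-C
 * path); without volatile those values would be indeterminate.  A minimal,
 * self-contained illustration of the pattern, independent of the diag shell
 * scaffolding above:
 */
#include <setjmp.h>
#include <stdio.h>

static jmp_buf jb;

static void run_with_abort(void)
{
	volatile int progress = 0;	/* keeps its last value across the longjmp */

	if (setjmp(jb) != 0) {
		printf("interrupted after %d steps\n", progress);
		return;
	}
	for (progress = 0; progress < 100; progress++) {
		if (progress == 42)
			longjmp(jb, 1);		/* stands in for the Ctrl-C handler */
	}
}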
/* * Download fullfiles from the list of files. * * Return 0 on success or a negative number or errors. */ int download_fullfiles(struct list *files, int *num_downloads) { struct swupd_curl_parallel_handle *download_handle; struct list *iter; struct list *need_download = NULL; struct file *file; struct stat stat; struct download_progress download_progress = { 0, 0, 0 }; unsigned int complete = 0; unsigned int list_length; const unsigned int MAX_FILES = 1000; if (!files) { /* nothing needs to be downloaded */ return SWUPD_OK; } /* make a new list with only the files we actually need to download */ for (iter = list_head(files); iter; iter = iter->next) { char *targetfile; file = iter->data; if (file->is_deleted || file->do_not_update) { continue; } string_or_die(&targetfile, "%s/staged/%s", state_dir, file->hash); if (lstat(targetfile, &stat) != 0 || !verify_file(file, targetfile)) { need_download = list_append_data(need_download, file); } free_string(&targetfile); } if (!need_download) { /* no file needs to be downloaded */ info("No extra files need to be downloaded\n"); progress_complete_step(); return 0; } /* we need to download some files, so set up curl */ download_handle = swupd_curl_parallel_download_start(get_max_xfer(MAX_XFER)); swupd_curl_parallel_download_set_callbacks(download_handle, download_successful, download_error, NULL); if (!download_handle) { /* If we hit this point, the network is accessible but we were * unable to download the needed files. This is a terminal error * and we need good logging */ return -SWUPD_COULDNT_DOWNLOAD_FILE; } /* getting the size of many files can be very expensive, so if * the files are not too many, get their size, otherwise just use their count * to report progress */ list_length = list_len(need_download); if (list_length < MAX_FILES) { download_progress.total_download_size = fullfile_query_total_download_size(need_download); if (download_progress.total_download_size > 0) { /* enable the progress callback */ swupd_curl_parallel_download_set_progress_callbacks(download_handle, swupd_progress_callback, &download_progress); } else { debug("Couldn't get the size of the files to download, using number of files instead\n"); download_progress.total_download_size = 0; } } else { debug("Too many files to calculate download size (%d files), maximum is %d. Using number of files instead\n", list_length, MAX_FILES); } /* download loop */ info("Starting download of remaining update content. This may take a while...\n"); for (iter = list_head(need_download); iter; iter = iter->next) { file = iter->data; if (file->is_mix) { download_mix_file(file); } else { download_file(download_handle, file); } /* fall back for progress reporting when the download size * could not be determined */ if (download_progress.total_download_size == 0) { complete++; progress_report(complete, list_length); } } info("\n"); list_free_list(need_download); return swupd_curl_parallel_download_end(download_handle, num_downloads); }