/*
 * Allocate and initialize a CompressorState for the requested compression
 * setting.  writeF is the callback used to emit (possibly compressed) data.
 * Dies if zlib compression is requested in a build without zlib support,
 * or on out-of-memory.
 */
CompressorState *
AllocateCompressor(int compression, write_f *writeF)
{
	CompressionAlgorithm alg;
	int			level;
	CompressorState *state;

	ParseCompressionOption(compression, &alg, &level);

#ifndef HAVE_LIBZ
	if (alg == COMPR_ALG_LIBZ)
		die_horribly(NULL, modulename, "not built with zlib support\n");
#endif

	state = (CompressorState *) calloc(1, sizeof(CompressorState));
	if (!state)
		die_horribly(NULL, modulename, "out of memory\n");

	state->writeF = writeF;
	state->comprAlg = alg;

	/* Algorithm-specific initialization. */
#ifdef HAVE_LIBZ
	if (alg == COMPR_ALG_LIBZ)
		InitCompressorZlib(state, level);
#endif

	return state;
}
/*
 * Print data for a given file: open it (gzip-aware when built with zlib),
 * copy its contents to the archive output via ahwrite(), then close it.
 * Does nothing when filename is NULL.
 */
static void
_PrintFileData(ArchiveHandle *AH, char *filename, RestoreOptions *ropt)
{
	char		line[4096];
	size_t		nread;

	if (filename == NULL)
		return;

#ifdef HAVE_LIBZ
	AH->FH = gzopen(filename, "rb");
#else
	AH->FH = fopen(filename, PG_BINARY_R);
#endif
	if (AH->FH == NULL)
		die_horribly(AH, modulename, "could not open data file for input\n");

	/* Read one byte less than the buffer so a NUL can be appended. */
	while ((nread = GZREAD(line, 1, sizeof(line) - 1, AH->FH)) > 0)
	{
		line[nread] = '\0';
		ahwrite(line, 1, nread, AH);
	}

	if (GZCLOSE(AH->FH) != 0)
		die_horribly(AH, modulename, "could not close data file after reading\n");
}
/*
 * Set up zlib deflation state for a compressor: allocate the z_stream and
 * the output buffer.  Dies on out-of-memory or if deflateInit() fails.
 */
static void
InitCompressorZlib(CompressorState *cs, int level)
{
	z_streamp	stream;

	stream = cs->zp = (z_streamp) malloc(sizeof(z_stream));
	if (!stream)
		die_horribly(NULL, modulename, "out of memory\n");

	stream->zalloc = Z_NULL;
	stream->zfree = Z_NULL;
	stream->opaque = Z_NULL;

	/*
	 * zlibOutSize is the buffer size we tell zlib it can output to.  One
	 * extra byte is allocated because some routines want to append a
	 * trailing zero byte to the zlib output.
	 */
	cs->zlibOutSize = ZLIB_OUT_SIZE;
	cs->zlibOut = (char *) malloc(ZLIB_OUT_SIZE + 1);
	if (!cs->zlibOut)
		die_horribly(NULL, modulename, "out of memory\n");

	if (deflateInit(stream, level) != Z_OK)
		die_horribly(NULL, modulename,
					 "could not initialize compression library: %s\n",
					 stream->msg);

	/* Just be paranoid - maybe End is called after Start, with no Write */
	stream->next_out = (void *) cs->zlibOut;
	stream->avail_out = cs->zlibOutSize;
}
/*
 * Restore all large objects listed in "blobs.toc": for each OID/file pair,
 * replay the blob's data file through the archive output.  A zero OID in
 * the TOC marks the end of the list.
 */
static void
_LoadBlobs(ArchiveHandle *AH, RestoreOptions *ropt)
{
	lclContext *ctx = (lclContext *) AH->formatData;
	Oid			blobOid;
	char		blobFname[K_STD_BUF_SIZE];

	StartRestoreBlobs(AH);

	ctx->blobToc = fopen("blobs.toc", PG_BINARY_R);
	if (ctx->blobToc == NULL)
		die_horribly(AH, modulename,
					 "could not open large object TOC for input: %s\n",
					 strerror(errno));

	for (_getBlobTocEntry(AH, &blobOid, blobFname);
		 blobOid != 0;
		 _getBlobTocEntry(AH, &blobOid, blobFname))
	{
		StartRestoreBlob(AH, blobOid);
		_PrintFileData(AH, blobFname, ropt);
		EndRestoreBlob(AH, blobOid);
	}

	if (fclose(ctx->blobToc) != 0)
		die_horribly(AH, modulename,
					 "could not close large object TOC file: %s\n",
					 strerror(errno));

	EndRestoreBlobs(AH);
}
/* * Opens file 'path' in 'mode'. If 'compression' is non-zero, the file * is opened with libz gzopen(), otherwise with plain fopen() */ cfp *cfopen(const char *path, const char *mode, int compression) { cfp *fp = malloc(sizeof(cfp)); if (fp == NULL) die_horribly(NULL, modulename, "Out of memory\n"); if (compression != 0) { #ifdef HAVE_LIBZ fp->compressedfp = gzopen(path, mode); fp->uncompressedfp = NULL; if (fp->compressedfp == NULL) { free(fp); fp = NULL; } #else die_horribly(NULL, modulename, "not built with zlib support\n"); #endif } else { #ifdef HAVE_LIBZ fp->compressedfp = NULL; #endif fp->uncompressedfp = fopen(path, mode); if (fp->uncompressedfp == NULL) { free(fp); fp = NULL; } } return fp; }
/*
 * Initializer for the "null" archive format.
 *
 * Fills in the ArchiveHandle dispatch table with this format's handlers
 * (read-side hooks are left NULL/unset because the format is write-only),
 * allocates the large-object buffer, and refuses to open in read mode.
 */
void
InitArchiveFmt_Null(ArchiveHandle *AH)
{
	/* Assuming static functions, this can be copied for each format. */
	AH->WriteDataPtr = _WriteData;
	AH->EndDataPtr = _EndData;
	AH->WriteBytePtr = _WriteByte;
	AH->WriteBufPtr = _WriteBuf;
	AH->ClosePtr = _CloseArchive;
	AH->ReopenPtr = NULL;		/* re-open not supported by this format */
	AH->PrintTocDataPtr = _PrintTocData;

	AH->StartBlobsPtr = _StartBlobs;
	AH->StartBlobPtr = _StartBlob;
	AH->EndBlobPtr = _EndBlob;
	AH->EndBlobsPtr = _EndBlobs;
	AH->ClonePtr = NULL;		/* cloning not supported by this format */
	AH->DeClonePtr = NULL;

	/* Initialize LO buffering */
	AH->lo_buf_size = LOBBUFSIZE;
	AH->lo_buf = (void *) malloc(LOBBUFSIZE);
	if (AH->lo_buf == NULL)
		die_horribly(AH, NULL, "out of memory\n");

	/*
	 * Now prevent reading...
	 */
	if (AH->mode == archModeRead)
		die_horribly(AH, NULL, "this format cannot be read\n");
}
/*
 * Called by the archiver when the dumper calls StartBlob.
 *
 * Mandatory.
 *
 * Must save the passed OID for retrieval at restore-time: the OID and the
 * per-blob data file name are recorded in the blob TOC, then the data file
 * is opened for writing (gzip-compressed when requested).
 */
static void
_StartBlob(ArchiveHandle *AH, TocEntry *te, Oid oid)
{
	lclContext *ctx = (lclContext *) AH->formatData;
	lclTocEntry *tctx = (lclTocEntry *) te->formatData;
	char		mode[10];
	char		fname[255];
	const char *suffix;

	if (oid == 0)
		die_horribly(AH, modulename, "invalid OID for large object (%u)\n", oid);

	suffix = (AH->compression != 0) ? ".gz" : "";

	sprintf(mode, "wb%d", AH->compression);
	sprintf(fname, "blob_%u.dat%s", oid, suffix);

	/* Record the OID -> file-name mapping for restore time. */
	fprintf(ctx->blobToc, "%u %s\n", oid, fname);

#ifdef HAVE_LIBZ
	tctx->FH = gzopen(fname, mode);
#else
	tctx->FH = fopen(fname, PG_BINARY_W);
#endif

	if (tctx->FH == NULL)
		die_horribly(AH, modulename, "could not open large object file\n");
}
/*
 * Open a file for reading.  'path' is the file to open, and 'mode' should
 * be either "r" or "rb".
 *
 * If the file at 'path' does not exist, we append the ".gz" suffix (if
 * 'path' doesn't already have it) and try again.  So if you pass "foo" as
 * 'path', this will open either "foo" or "foo.gz".
 */
cfp *
cfopen_read(const char *path, const char *mode)
{
	cfp		   *handle;

#ifdef HAVE_LIBZ
	/* A path that already ends in ".gz" is always opened compressed. */
	if (hasSuffix(path, ".gz"))
		return cfopen(path, mode, 1);
#endif

	handle = cfopen(path, mode, 0);
#ifdef HAVE_LIBZ
	if (handle == NULL)
	{
		/* Plain file not there: retry with ".gz" appended. */
		int			gzlen = strlen(path) + 4;
		char	   *gzpath = malloc(gzlen);

		if (gzpath == NULL)
			die_horribly(NULL, modulename, "Out of memory\n");
		snprintf(gzpath, gzlen, "%s%s", path, ".gz");
		handle = cfopen(gzpath, mode, 1);
		free(gzpath);
	}
#endif
	return handle;
}
/*
 * Called by the archiver when the dumper calls EndBlob.
 *
 * Optional.
 *
 * Closes the per-blob data file that _StartBlob opened.
 */
static void
_EndBlob(ArchiveHandle *AH, TocEntry *te, Oid oid)
{
	lclTocEntry *tctx = (lclTocEntry *) te->formatData;

	if (GZCLOSE(tctx->FH) != 0)
		die_horribly(AH, modulename, "could not close large object file\n");
}
/*
 * DeflateCompressorZlib
 *
 * Push the data currently registered in cs->zp (next_in/avail_in) through
 * zlib's deflate(), handing each filled output buffer to cs->writeF.  With
 * flush=true the stream is finished with Z_FINISH and the loop runs until
 * zlib reports Z_STREAM_END, draining all buffered output.
 */
static void
DeflateCompressorZlib(ArchiveHandle *AH, CompressorState *cs, bool flush)
{
	z_streamp	zp = cs->zp;
	char	   *out = cs->zlibOut;
	int			res = Z_OK;

	while (cs->zp->avail_in != 0 || flush)
	{
		res = deflate(zp, flush ? Z_FINISH : Z_NO_FLUSH);
		if (res == Z_STREAM_ERROR)
			die_horribly(AH, modulename,
						 "could not compress data: %s\n", zp->msg);

		/*
		 * Flush the output buffer when finishing, when it is full, or when
		 * there is still input pending.
		 */
		if ((flush && (zp->avail_out < cs->zlibOutSize))
			|| (zp->avail_out == 0)
			|| (zp->avail_in != 0)
			)
		{
			/*
			 * Extra paranoia: avoid zero-length chunks, since a zero length
			 * chunk is the EOF marker in the custom format. This should never
			 * happen but...
			 */
			if (zp->avail_out < cs->zlibOutSize)
			{
				/*
				 * Any write function should do its own error checking but to
				 * make sure we do a check here as well...
				 */
				size_t		len = cs->zlibOutSize - zp->avail_out;

				if (cs->writeF(AH, out, len) != len)
					die_horribly(AH, modulename,
								 "could not write to output file: %s\n",
								 strerror(errno));
			}
			/* Reset the output buffer for the next deflate round. */
			zp->next_out = (void *) out;
			zp->avail_out = cs->zlibOutSize;
		}

		if (res == Z_STREAM_END)
			break;
	}
}
/*
 * Called by the archiver when the dumper calls StartBlob.
 *
 * Mandatory.
 *
 * Must save the passed OID for retrieval at restore-time.
 */
static void
_StartBlob(ArchiveHandle *AH, TocEntry *te, Oid oid)
{
	if (oid == 0)
		die_horribly(AH, NULL, "invalid OID for large object\n");

	/* Emit the SQL that creates and opens the large object. */
	ahprintf(AH, "SELECT lo_open(lo_create(%u), %d);\n", oid, INV_WRITE);

	/* Route subsequent data writes through the blob-specific writer. */
	AH->WriteDataPtr = _WriteBlobData;
}
/*
 * Finish writing a table's data: close the per-entry data file and clear
 * the stored handle.
 */
static void
_EndData(ArchiveHandle *AH, TocEntry *te)
{
	lclTocEntry *tctx = (lclTocEntry *) te->formatData;

	if (GZCLOSE(tctx->FH) != 0)
		die_horribly(AH, modulename, "could not close data file\n");

	tctx->FH = NULL;
}
/*
 * Write one byte to the TOC file, advancing the tracked file position.
 * Returns the number of bytes written (always 1; dies on failure).
 */
static int
_WriteByte(ArchiveHandle *AH, const int i)
{
	lclContext *ctx = (lclContext *) AH->formatData;

	if (fputc(i, AH->FH) == EOF)
		die_horribly(AH, modulename, "could not write byte\n");
	ctx->filePos++;

	return 1;
}
/*
 * Called by the archiver when finishing saving all BLOB DATA.
 *
 * Optional.
 *
 * Closes the blob TOC file opened by _StartBlobs.
 */
static void
_EndBlobs(ArchiveHandle *AH, TocEntry *te)
{
	lclContext *ctx = (lclContext *) AH->formatData;

	/* Write out a fake zero OID to mark end-of-blobs. */
	/* WriteInt(AH, 0); */

	if (fclose(ctx->blobToc) != 0)
		die_horribly(AH, modulename,
					 "could not close large object TOC file: %s\n",
					 strerror(errno));
}
/*
 * Called by the archiver when starting to save all BLOB DATA (not schema).
 * This routine should save whatever format-specific information is needed
 * to read the BLOBs back into memory; here that is the "blobs.toc" file
 * mapping OIDs to per-blob data file names.
 *
 * It is called just prior to the dumper's DataDumper routine.
 *
 * Optional, but strongly recommended.
 */
static void
_StartBlobs(ArchiveHandle *AH, TocEntry *te)
{
	lclContext *ctx = (lclContext *) AH->formatData;

	ctx->blobToc = fopen("blobs.toc", PG_BINARY_W);
	if (ctx->blobToc == NULL)
		die_horribly(AH, modulename,
					 "could not open large object TOC for output: %s\n",
					 strerror(errno));
}
/*
 * Write 'len' bytes from 'buf' to the TOC file, advancing the tracked
 * file position.  Dies on a short write; returns the byte count written.
 */
static size_t
_WriteBuf(ArchiveHandle *AH, const void *buf, size_t len)
{
	lclContext *ctx = (lclContext *) AH->formatData;
	size_t		written = fwrite(buf, 1, len, AH->FH);

	if (written != len)
		die_horribly(AH, modulename, "write error in _WriteBuf (%lu != %lu)\n",
					 (unsigned long) written, (unsigned long) len);

	ctx->filePos += written;
	return written;
}
/*
 * Pass data straight through to the output callback, uncompressed.
 * Returns the number of bytes written (always dLen; dies on failure).
 */
static size_t
WriteDataToArchiveNone(ArchiveHandle *AH, CompressorState *cs,
					   const char *data, size_t dLen)
{
	/*
	 * Any write function should do its own error checking but to make sure
	 * we do a check here as well...
	 */
	if (cs->writeF(AH, data, dLen) != dLen)
		die_horribly(AH, modulename,
					 "could not write to output file: %s\n",
					 strerror(errno));

	return dLen;
}
/*
 * Close the archive.  In write mode, emit the header and TOC, close the
 * TOC file, and then write the per-entry data chunks (which go to their
 * own files, opened by _StartData, not to AH->FH).
 */
static void
_CloseArchive(ArchiveHandle *AH)
{
	if (AH->mode == archModeWrite)
	{
		WriteHead(AH);
		WriteToc(AH);
		if (fclose(AH->FH) != 0)
			die_horribly(AH, modulename, "could not close TOC file: %s\n",
						 strerror(errno));
		WriteDataChunks(AH);
	}

	AH->FH = NULL;
}
/* * Open a file for writing. 'path' indicates the path name, and 'mode' must * be a filemode as accepted by fopen() and gzopen() that indicates writing * ("w", "wb", "a", or "ab"). * * If 'compression' is non-zero, a gzip compressed stream is opened, and * and 'compression' indicates the compression level used. The ".gz" suffix * is automatically added to 'path' in that case. */ cfp *cfopen_write(const char *path, const char *mode, int compression) { cfp *fp; if (compression == 0) fp = cfopen(path, mode, 0); else { #ifdef HAVE_LIBZ int fnamelen = strlen(path) + 4; char *fname = malloc(fnamelen); if (fname == NULL) die_horribly(NULL, modulename, "Out of memory\n"); snprintf(fname, fnamelen, "%s%s", path, ".gz"); fp = cfopen(fname, mode, 1); free(fname); #else die_horribly(NULL, modulename, "not built with zlib support\n"); fp = NULL; /* keep compiler quiet */ #endif } return fp; }
/*
 * Compress and write data to the output stream (via writeF), dispatching
 * on the compressor's configured algorithm.  Returns the byte count the
 * algorithm-specific writer reports.
 */
size_t
WriteDataToArchive(ArchiveHandle *AH, CompressorState *cs,
				   const void *data, size_t dLen)
{
	if (cs->comprAlg == COMPR_ALG_LIBZ)
	{
#ifdef HAVE_LIBZ
		return WriteDataToArchiveZlib(AH, cs, data, dLen);
#else
		die_horribly(NULL, modulename, "not built with zlib support\n");
#endif
	}

	if (cs->comprAlg == COMPR_ALG_NONE)
		return WriteDataToArchiveNone(AH, cs, data, dLen);

	return 0;					/* keep compiler quiet */
}
/*
 * Read all compressed data from the input stream (via readF) and print it
 * out with ahwrite(), dispatching on the algorithm implied by 'compression'.
 */
void
ReadDataFromArchive(ArchiveHandle *AH, int compression, read_f *readF)
{
	CompressionAlgorithm alg;

	ParseCompressionOption(compression, &alg, NULL);

	switch (alg)
	{
		case COMPR_ALG_NONE:
			ReadDataFromArchiveNone(AH, readF);
			break;
		case COMPR_ALG_LIBZ:
#ifdef HAVE_LIBZ
			ReadDataFromArchiveZlib(AH, readF);
#else
			die_horribly(NULL, modulename, "not built with zlib support\n");
#endif
			break;
	}
}
/*
 * Initializer for the "null" archive format (minimal variant).
 *
 * Fills in the ArchiveHandle dispatch table with this format's write-side
 * handlers and refuses to open in read mode.
 */
void
InitArchiveFmt_Null(ArchiveHandle *AH)
{
	/* Assuming static functions, this can be copied for each format. */
	AH->WriteDataPtr = _WriteData;
	AH->EndDataPtr = _EndData;
	AH->WriteBytePtr = _WriteByte;
	AH->WriteBufPtr = _WriteBuf;
	AH->ClosePtr = _CloseArchive;
	AH->PrintTocDataPtr = _PrintTocData;

	/*
	 * Now prevent reading...
	 */
	if (AH->mode == archModeRead)
		die_horribly(AH, NULL, "this format cannot be read\n");
}
/*
 * Open the per-entry data file (named in the TOC entry's format data) for
 * writing, as a gzip stream at the archive's compression level when built
 * with zlib.
 */
static void
_StartData(ArchiveHandle *AH, TocEntry *te)
{
	lclTocEntry *tctx = (lclTocEntry *) te->formatData;
	char		mode[10];

	sprintf(mode, "wb%d", AH->compression);

#ifdef HAVE_LIBZ
	tctx->FH = gzopen(tctx->filename, mode);
#else
	tctx->FH = fopen(tctx->filename, PG_BINARY_W);
#endif

	if (tctx->FH == NULL)
		die_horribly(AH, modulename, "could not open data file for output\n");
}
/*
 * Copy data from the input stream to the archive output unchanged
 * (no decompression).  readF may reallocate the buffer, hence the
 * pointer-to-buffer calling convention.
 */
static void
ReadDataFromArchiveNone(ArchiveHandle *AH, read_f *readF)
{
	char	   *chunk;
	size_t		chunklen;
	size_t		nread;

	chunk = malloc(ZLIB_OUT_SIZE);
	if (chunk == NULL)
		die_horribly(NULL, modulename, "out of memory\n");
	chunklen = ZLIB_OUT_SIZE;

	while ((nread = readF(AH, &chunk, &chunklen)) != 0)
		ahwrite(chunk, 1, nread, AH);

	free(chunk);
}
/*
 * Interprets a numeric 'compression' value.  The algorithm implied by the
 * value (zlib or none at the moment), is returned in *alg, and the
 * zlib compression level in *level (when 'level' is non-NULL).
 */
static void
ParseCompressionOption(int compression, CompressionAlgorithm *alg, int *level)
{
	if (compression == 0)
		*alg = COMPR_ALG_NONE;
	else if (compression == Z_DEFAULT_COMPRESSION ||
			 (compression > 0 && compression <= 9))
		*alg = COMPR_ALG_LIBZ;
	else
	{
		die_horribly(NULL, modulename, "Invalid compression code: %d\n",
					 compression);
		*alg = COMPR_ALG_NONE;	/* keep compiler quiet */
	}

	/* The level is just the passed-in value. */
	if (level)
		*level = compression;
}
/*
 * Finish a zlib compression stream: flush all remaining buffered output,
 * shut down deflate, and free the resources InitCompressorZlib allocated.
 */
static void
EndCompressorZlib(ArchiveHandle *AH, CompressorState *cs)
{
	z_streamp	stream = cs->zp;

	/* Signal end-of-input, then drain whatever zlib still buffers. */
	stream->next_in = NULL;
	stream->avail_in = 0;
	DeflateCompressorZlib(AH, cs, true);

	if (deflateEnd(stream) != Z_OK)
		die_horribly(AH, modulename,
					 "could not close compression stream: %s\n", stream->msg);

	free(cs->zlibOut);
	free(cs->zp);
}
/*
 * Called by the archiver when the dumper calls StartBlob.
 *
 * Mandatory.
 *
 * Must save the passed OID for retrieval at restore-time.  Pre-1.12
 * archives carry no separate blob-creation entries, so drop/create is
 * handled inline here for those.
 */
static void
_StartBlob(ArchiveHandle *AH, TocEntry *te, Oid oid)
{
	bool		pre_1_12 = (AH->version < K_VERS_1_12);

	if (oid == 0)
		die_horribly(AH, NULL, "invalid OID for large object\n");

	/* With an old archive we must do drop and create logic here */
	if (pre_1_12 && AH->ropt->dropSchema)
		DropBlobIfExists(AH, oid);

	if (pre_1_12)
		ahprintf(AH,
				 "SELECT pg_catalog.lo_open(pg_catalog.lo_create('%u'), %d);\n",
				 oid, INV_WRITE);
	else
		ahprintf(AH,
				 "SELECT pg_catalog.lo_open('%u', %d);\n",
				 oid, INV_WRITE);

	AH->WriteDataPtr = _WriteBlobData;
}
/*
 * main
 *
 * Entry point for the Greenplum restore agent.  Parses pg_restore-style
 * options plus Greenplum-specific long options (--gp-*, DDBoost, NetBackup),
 * connects to the target database, optionally starts a status-monitor
 * thread, and then either forks a psql child to replay a plain-text dump
 * (format 'p') or drives the pg_restore archiver for the other formats.
 * Returns 0 on success, non-zero on failure.
 */
int
main(int argc, char **argv)
{
	PQExpBuffer valueBuf = NULL;
	PQExpBuffer escapeBuf = createPQExpBuffer();
	RestoreOptions *opts;
	int			c;
	int			exit_code = 0;
	Archive    *AH;
	char	   *inputFileSpec = NULL;
	extern int	optind;
	extern char *optarg;
	static int	use_setsessauth = 0;
	static int	disable_triggers = 0;
	SegmentDatabase SegDB;
	StatusOp   *pOp;
	struct sigaction act;
	pid_t		newpid;

	/* int i; */

	PQExpBuffer pszCmdLine;
	int			status;
	int			rc;
	char	   *pszErrorMsg;
	ArchiveHandle *pAH;
	int			postDataSchemaOnly = 0;

#ifdef USE_DDBOOST
	char	   *ddp_file_name = NULL;
	char	   *dd_boost_dir = NULL;
#endif

	struct option cmdopts[] = {
		{"clean", 0, NULL, 'c'},
		{"create", 0, NULL, 'C'},
		{"data-only", 0, NULL, 'a'},
		{"dbname", 1, NULL, 'd'},
		{"exit-on-error", 0, NULL, 'e'},
		{"file", 1, NULL, 'f'},
		{"format", 1, NULL, 'F'},
		{"function", 1, NULL, 'P'},
		{"host", 1, NULL, 'h'},
		{"ignore-version", 0, NULL, 'i'},
		{"index", 1, NULL, 'I'},
		{"list", 0, NULL, 'l'},
		{"no-privileges", 0, NULL, 'x'},
		{"no-acl", 0, NULL, 'x'},
		{"no-owner", 0, NULL, 'O'},
		{"no-reconnect", 0, NULL, 'R'},
		{"port", 1, NULL, 'p'},
		{"password", 0, NULL, 'W'},
		{"schema-only", 0, NULL, 's'},
		{"superuser", 1, NULL, 'S'},
		{"table", 1, NULL, 't'},
		{"trigger", 1, NULL, 'T'},
		{"use-list", 1, NULL, 'L'},
		{"username", 1, NULL, 'U'},
		{"verbose", 0, NULL, 'v'},

		/*
		 * the following options don't have an equivalent short option letter,
		 * but are available as '-X long-name'
		 */
		{"use-set-session-authorization", no_argument, &use_setsessauth, 1},
		{"disable-triggers", no_argument, &disable_triggers, 1},

		/*
		 * the following are cdb specific, and don't have an equivalent short
		 * option
		 */
		{"gp-d", required_argument, NULL, 1},
		{"gp-e", no_argument, NULL, 2},
		{"gp-k", required_argument, NULL, 3},
		{"gp-c", required_argument, NULL, 4},
		{"target-dbid", required_argument, NULL, 5},
		{"target-host", required_argument, NULL, 6},
		{"target-port", required_argument, NULL, 7},
		{"post-data-schema-only", no_argument, &postDataSchemaOnly, 1},

		/*
		 * NOTE(review): "dd_boost_file" maps to code 8, but the switch below
		 * has no case 8 handler, so the option falls through to the default
		 * error path — confirm whether a handler was lost.
		 */
		{"dd_boost_file", required_argument, NULL, 8},
		{"dd_boost_enabled", no_argument, NULL, 9},
		{"dd_boost_dir", required_argument, NULL, 10},
		{"dd_boost_buf_size", required_argument, NULL, 11},
		{"gp-f", required_argument, NULL, 12},
		{"prefix", required_argument, NULL, 13},
		{"status", required_argument, NULL, 14},
		{"netbackup-service-host", required_argument, NULL, 15},
		{"netbackup-block-size", required_argument, NULL, 16},
		{"change-schema-file", required_argument, NULL, 17},
		{"schema-level-file", required_argument, NULL, 18},
		{"ddboost-storage-unit",required_argument, NULL, 19},
		{NULL, 0, NULL, 0}
	};

	set_pglocale_pgservice(argv[0], "pg_dump");

	opts = NewRestoreOptions();

	/* set format default */
	opts->formatName = "p";

	progname = get_progname(argv[0]);

	/* --help/--version are recognized only as the first argument. */
	if (argc > 1)
	{
		if (strcmp(argv[1], "--help") == 0 || strcmp(argv[1], "-?") == 0)
		{
			usage(progname);
			exit(0);
		}
		if (strcmp(argv[1], "--version") == 0 || strcmp(argv[1], "-V") == 0)
		{
			puts("pg_restore (Greenplum Database) " PG_VERSION);
			exit(0);
		}
	}

	/* ---- Option parsing ---- */
	while ((c = getopt_long(argc, argv, "acCd:ef:F:h:iI:lL:Op:P:RsS:t:T:uU:vwWxX:", cmdopts, NULL)) != -1)
	{
		switch (c)
		{
			case 'a':			/* Dump data only */
				opts->dataOnly = 1;
				break;
			case 'c':			/* clean (i.e., drop) schema prior to create */
				opts->dropSchema = 1;
				break;
			case 'C':
				opts->createDB = 1;
				break;
			case 'd':
				opts->dbname = strdup(optarg);
				break;
			case 'e':
				opts->exit_on_error = true;
				break;
			case 'f':			/* output file name */
				opts->filename = strdup(optarg);
				break;
			case 'F':
				if (strlen(optarg) != 0)
					opts->formatName = strdup(optarg);
				break;
			case 'h':
				if (strlen(optarg) != 0)
					opts->pghost = strdup(optarg);
				break;
			case 'i':
				/* obsolete option */
				break;
			case 'l':			/* Dump the TOC summary */
				opts->tocSummary = 1;
				break;
			case 'L':			/* input TOC summary file name */
				opts->tocFile = strdup(optarg);
				break;
			case 'O':
				opts->noOwner = 1;
				break;
			case 'p':
				if (strlen(optarg) != 0)
					opts->pgport = strdup(optarg);
				break;
			case 'R':
				/* no-op, still accepted for backwards compatibility */
				break;
			case 'P':			/* Function */
				opts->selTypes = 1;
				opts->selFunction = 1;
				opts->functionNames = strdup(optarg);
				break;
			case 'I':			/* Index */
				opts->selTypes = 1;
				opts->selIndex = 1;
				opts->indexNames = strdup(optarg);
				break;
			case 'T':			/* Trigger */
				opts->selTypes = 1;
				opts->selTrigger = 1;
				opts->triggerNames = strdup(optarg);
				break;
			case 's':			/* dump schema only */
				opts->schemaOnly = 1;
				break;
			case 'S':			/* Superuser username */
				if (strlen(optarg) != 0)
					opts->superuser = strdup(optarg);
				break;
			case 't':			/* Dump data for this table only */
				opts->selTypes = 1;
				opts->selTable = 1;
				opts->tableNames = strdup(optarg);
				break;
			case 'u':
				opts->promptPassword = TRI_YES;
				opts->username = simple_prompt("User name: ", 100, true);
				break;
			case 'U':
				opts->username = optarg;
				break;
			case 'v':			/* verbose */
				opts->verbose = 1;
				break;
			case 'w':
				opts->promptPassword = TRI_NO;
				break;
			case 'W':
				opts->promptPassword = TRI_YES;
				break;
			case 'x':			/* skip ACL dump */
				opts->aclsSkip = 1;
				break;
			case 'X':
				/* -X is a deprecated alternative to long options */
				if (strcmp(optarg, "use-set-session-authorization") == 0)
					use_setsessauth = 1;
				else if (strcmp(optarg, "disable-triggers") == 0)
					disable_triggers = 1;
				else
				{
					fprintf(stderr, _("%s: invalid -X option -- %s\n"),
							progname, optarg);
					fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
					exit(1);
				}
				break;
			case 0:
				/* This covers the long options equivalent to -X xxx. */
				break;
			case 1:				/* --gp-d MPP Output Directory */
				g_pszMPPOutputDirectory = strdup(optarg);
				break;
			case 2:				/* --gp-e On Error Stop for psql */
				g_bOnErrorStop = opts->exit_on_error = true;
				break;
			case 3:				/* --gp-k MPP Dump Info Format is
								 * Key_s-dbid_s-role_t-dbid */
				g_gpdumpInfo = strdup(optarg);
				if (!ParseCDBDumpInfo(progname, g_gpdumpInfo, &g_gpdumpKey, &g_role, &g_sourceDBID, &g_MPPPassThroughCredentials))
				{
					exit(1);
				}
				break;
			case 4:				/* gp-c */
				g_compPg = strdup(optarg);
				break;
			case 5:				/* target-dbid */
				g_targetDBID = atoi(strdup(optarg));
				break;
			case 6:				/* target-host */
				g_targetHost = strdup(optarg);
				break;
			case 7:				/* target-port */
				g_targetPort = strdup(optarg);
				break;
#ifdef USE_DDBOOST
			case 9:
				dd_boost_enabled = 1;
				break;
			case 10:
				dd_boost_dir = strdup(optarg);
				break;
			case 11:
				dd_boost_buf_size = strdup(optarg);
				break;
#endif
			case 12:
				table_filter_file = strdup(optarg);
				break;
			case 13:
				dump_prefix = strdup(optarg);
				break;
				/* Hack to pass in the status_file name to cdbbackup.c (gp_restore_launch) */
			case 14:
				break;
			case 15:
				netbackup_service_host = strdup(optarg);
				break;
			case 16:
				netbackup_block_size = strdup(optarg);
				break;
			case 17:
				change_schema_file = strdup(optarg);
				break;
			case 18:
				schema_level_file = strdup(optarg);
				break;
#ifdef USE_DDBOOST
			case 19:
				ddboost_storage_unit = strdup(optarg);
				break;
#endif
			default:
				fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
				exit(1);
		}
	}

	/* Should get at most one of -d and -f, else user is confused */
	if (opts->dbname)
	{
		if (opts->filename)
		{
			fprintf(stderr, _("%s: cannot specify both -d and -f output\n"), progname);
			fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
			exit(1);
		}
		opts->useDB = 1;
	}

	opts->disable_triggers = disable_triggers;
	opts->use_setsessauth = use_setsessauth;

	/* ---- Resolve archive format; 'p' means "replay via psql" ---- */
	if (opts->formatName)
	{
		switch (opts->formatName[0])
		{
			case 'c':
			case 'C':
				opts->format = archCustom;
				break;

			case 'f':
			case 'F':
				opts->format = archFiles;
				break;

			case 't':
			case 'T':
				opts->format = archTar;
				break;

			case 'p':
			case 'P':
				bUsePSQL = true;
				break;

			default:
				mpp_err_msg(logInfo, progname, "unrecognized archive format '%s'; please specify 't' or 'c'\n",
							opts->formatName);
				exit(1);
		}
	}

	/* The backup key (--gp-k) is mandatory for the agent. */
	if (g_gpdumpInfo == NULL)
	{
		mpp_err_msg(logInfo, progname, "missing required parameter gp-k (backup key)\n");
		exit(1);
	}

#ifdef USE_DDBOOST
	if (dd_boost_enabled)
	{
		/* remote is always false when restoring from primary DDR */
		int			err = DD_ERR_NONE;

		err = ddp_init("gp_dump");
		if (err != DD_ERR_NONE)
		{
			mpp_err_msg("ERROR", "ddboost", "ddboost init failed. Err = %d\n", err);
			exit(1);
		}

		if (initDDSystem(&ddp_inst, &ddp_conn, &dd_client_info, &ddboost_storage_unit, false, &DEFAULT_BACKUP_DIRECTORY, false))
		{
			mpp_err_msg(logInfo, progname, "Initializing DD system failed\n");
			exit(1);
		}

		mpp_err_msg(logInfo, progname, "ddboost is initialized\n");

		ddp_file_name = formDDBoostFileName(g_gpdumpKey, postDataSchemaOnly, dd_boost_dir);
		if (ddp_file_name == NULL)
		{
			mpp_err_msg(logInfo, progname, "Error in opening ddboost file\n");
			exit(1);
		}
	}
#endif

	/* ---- Build the target connection descriptor ---- */
	SegDB.dbid = g_sourceDBID;
	SegDB.role = g_role;
	SegDB.port = opts->pgport ? atoi(opts->pgport) : 5432;
	SegDB.pszHost = opts->pghost ? strdup(opts->pghost) : NULL;
	SegDB.pszDBName = opts->dbname ? strdup(opts->dbname) : NULL;
	SegDB.pszDBUser = opts->username ? strdup(opts->username) : NULL;
	SegDB.pszDBPswd = NULL;

	/* Decode base64 pass-through credentials into a NUL-terminated copy. */
	if (g_MPPPassThroughCredentials != NULL && *g_MPPPassThroughCredentials != '\0')
	{
		unsigned int nBytes;
		char	   *pszDBPswd = Base64ToData(g_MPPPassThroughCredentials, &nBytes);

		if (pszDBPswd == NULL)
		{
			mpp_err_msg(logError, progname, "Invalid Greenplum DB Credentials: %s\n", g_MPPPassThroughCredentials);
			exit(1);
		}
		if (nBytes > 0)
		{
			SegDB.pszDBPswd = malloc(nBytes + 1);
			if (SegDB.pszDBPswd == NULL)
			{
				mpp_err_msg(logInfo, progname, "Cannot allocate memory for Greenplum Database Credentials\n");
				exit(1);
			}

			memcpy(SegDB.pszDBPswd, pszDBPswd, nBytes);
			SegDB.pszDBPswd[nBytes] = '\0';
		}
	}

	if (g_role == ROLE_MASTER)
		g_conn = MakeDBConnection(&SegDB, true);
	else
		g_conn = MakeDBConnection(&SegDB, false);

	if (PQstatus(g_conn) == CONNECTION_BAD)
	{
		exit_horribly(NULL, NULL, "connection to database \"%s\" failed: %s",
					  PQdb(g_conn), PQerrorMessage(g_conn));
	}

	if (g_gpdumpKey != NULL)
	{
		/*
		 * Open the database again, for writing status info
		 */
		g_conn_status = MakeDBConnection(&SegDB, false);

		if (PQstatus(g_conn_status) == CONNECTION_BAD)
		{
			exit_horribly(NULL, NULL, "Connection on host %s failed: %s",
						  StringNotNull(SegDB.pszHost, "localhost"),
						  PQerrorMessage(g_conn_status));
		}

		g_main_tid = pthread_self();

		g_pStatusOpList = CreateStatusOpList();
		if (g_pStatusOpList == NULL)
		{
			exit_horribly(NULL, NULL, "cannot allocate memory for gp_backup_status operation\n");
		}

		/*
		 * Create thread for monitoring for cancel requests. If we're running
		 * using PSQL, the monitor is not allowed to start until the worker
		 * process is forked. This is done to prevent the forked process from
		 * being blocked by locks held by library routines (__tz_convert, for
		 * example).
		 */
		if (bUsePSQL)
		{
			pthread_mutex_lock(&g_threadSyncPoint);
		}
		pthread_create(&g_monitor_tid, NULL, monitorThreadProc, NULL);

		/* Install Ctrl-C interrupt handler, now that we have a connection */
		if (!bUsePSQL)
		{
			act.sa_handler = myHandler;
			sigemptyset(&act.sa_mask);
			act.sa_flags = 0;
			act.sa_flags |= SA_RESTART;
			if (sigaction(SIGINT, &act, NULL) < 0)
			{
				mpp_err_msg(logInfo, progname, "Error trying to set SIGINT interrupt handler\n");
			}

			act.sa_handler = myHandler;
			sigemptyset(&act.sa_mask);
			act.sa_flags = 0;
			act.sa_flags |= SA_RESTART;
			if (sigaction(SIGTERM, &act, NULL) < 0)
			{
				mpp_err_msg(logInfo, progname, "Error trying to set SIGTERM interrupt handler\n");
			}
		}

		pOp = CreateStatusOp(TASK_START, TASK_RC_SUCCESS, SUFFIX_START, TASK_MSG_SUCCESS);
		if (pOp == NULL)
		{
			exit_horribly(NULL, NULL, "cannot allocate memory for gp_backup_status operation\n");
		}
		AddToStatusOpList(g_pStatusOpList, pOp);
	}							/* end cdb additions */

	/* ---- Plain-text restore: fork a psql child and wait for it ---- */
	if (bUsePSQL)
	{
		/* Install Ctrl-C interrupt handler, now that we have a connection */
		act.sa_handler = psqlHandler;
		sigemptyset(&act.sa_mask);
		act.sa_flags = 0;
		act.sa_flags |= SA_RESTART;
		if (sigaction(SIGINT, &act, NULL) < 0)
		{
			mpp_err_msg(logInfo, progname, "Error trying to set SIGINT interrupt handler\n");
		}

		act.sa_handler = psqlHandler;
		sigemptyset(&act.sa_mask);
		act.sa_flags = 0;
		act.sa_flags |= SA_RESTART;
		if (sigaction(SIGTERM, &act, NULL) < 0)
		{
			mpp_err_msg(logInfo, progname, "Error trying to set SIGTERM interrupt handler\n");
		}

		/* Establish a SIGCHLD handler to catch termination the psql process */
		act.sa_handler = myChildHandler;
		sigemptyset(&act.sa_mask);
		act.sa_flags = 0;
		act.sa_flags |= SA_RESTART;
		if (sigaction(SIGCHLD, &act, NULL) < 0)
		{
			mpp_err_msg(logInfo, progname, "Error trying to set SIGCHLD interrupt handler\n");
			exit(1);
		}

		mpp_err_msg(logInfo, progname, "Before fork of gp_restore_agent\n");

		newpid = fork();
		if (newpid < 0)
		{
			mpp_err_msg(logError, progname, "Failed to fork\n");
		}
		else if (newpid == 0)
		{
			/* ---- Child: build and exec the psql command line ---- */

			/* TODO: use findAcceptableBackupFilePathName(...) to look for the file name
			 * if user invoked gp_restore_agent directly without supplying a file name.
			 * If the agent is invoked from gp_restore_launch, then we are ok.
			 */
			if (optind < argc)
			{
				char	   *rawInputFile = argv[optind];

				valueBuf = createPQExpBuffer();
				inputFileSpec = shellEscape(rawInputFile, valueBuf, false, false);
			}

			if (inputFileSpec == NULL || inputFileSpec[0] == '\0')
			{
				mpp_err_msg(logError, progname, "dump file path is empty");
				exit(1);
			}

			if (postDataSchemaOnly)
			{
				/* Ensure the post-data dump file name carries its suffix. */
				if (strstr(inputFileSpec, "_post_data") == NULL)
				{
					fprintf(stderr, "Adding _post_data to the end of the file name?\n");
					char	   *newFS = malloc(strlen(inputFileSpec) + strlen("_post_data") + 1);

					strcpy(newFS, inputFileSpec);
					strcat(newFS, "_post_data");
					inputFileSpec = newFS;
				}
			}

			pszCmdLine = createPQExpBuffer();
#ifdef USE_DDBOOST
			if (dd_boost_enabled)
			{
				formDDBoostPsqlCommandLine(pszCmdLine, argv[0], (postDataSchemaOnly == 1 ? true : false),
										   g_role, g_compPg, ddp_file_name,
										   dd_boost_buf_size,
										   table_filter_file,
										   change_schema_file,
										   schema_level_file,
										   ddboost_storage_unit);
			}
			else
			{
#endif
				formPsqlCommandLine(pszCmdLine, argv[0], (postDataSchemaOnly == 1), g_role, inputFileSpec, g_compPg,
									table_filter_file,
									netbackup_service_host,
									netbackup_block_size,
									change_schema_file,
									schema_level_file);
#ifdef USE_DDBOOST
			}
#endif

			appendPQExpBuffer(pszCmdLine, " -h %s -p %s -U %s -d ", g_targetHost, g_targetPort, SegDB.pszDBUser);
			shellEscape(SegDB.pszDBName, pszCmdLine, true /* quote */ , false /* reset */ );

			appendPQExpBuffer(pszCmdLine, " -a ");

			if (g_bOnErrorStop)
				appendPQExpBuffer(pszCmdLine, " -v ON_ERROR_STOP=");

			if (g_role == ROLE_SEGDB)
				putenv("PGOPTIONS=-c gp_session_role=UTILITY");
			if (g_role == ROLE_MASTER)
				putenv("PGOPTIONS=-c gp_session_role=DISPATCH");

			mpp_err_msg(logInfo, progname, "Command Line: %s\n", pszCmdLine->data);

			/*
			 * Make this new process the process group leader of the children
			 * being launched.  This allows a signal to be sent to all
			 * processes in the group simultaneously.
			 */
			setpgid(newpid, newpid);

			execl("/bin/sh", "sh", "-c", pszCmdLine->data, NULL);

			/* execl only returns on failure */
			mpp_err_msg(logInfo, progname, "Error in gp_restore_agent - execl of %s with Command Line %s failed",
						"/bin/sh", pszCmdLine->data);
			_exit(127);
		}
		else
		{
			/* ---- Parent: supervise the psql child ---- */

			/*
			 * Make the new child process the process group leader of the
			 * children being launched.  This allows a signal to be sent to
			 * all processes in the group simultaneously.
			 *
			 * This is a redundant call to avoid a race condition suggested by
			 * Stevens.
			 */
			setpgid(newpid, newpid);

			/* Allow the monitor thread to begin execution. */
			pthread_mutex_unlock(&g_threadSyncPoint);

			/* Parent . Lets sleep and wake up until we see it's done */
			while (!bPSQLDone)
			{
				sleep(5);
			}

			/*
			 * If this process has been sent a SIGINT or SIGTERM, we need to
			 * send a SIGINT to the psql process GROUP.
			 */
			if (bKillPsql)
			{
				mpp_err_msg(logInfo, progname, "Terminating psql due to signal.\n");
				kill(-newpid, SIGINT);
			}

			waitpid(newpid, &status, 0);
			if (WIFEXITED(status))
			{
				rc = WEXITSTATUS(status);
				if (rc == 0)
				{
					mpp_err_msg(logInfo, progname, "psql finished with rc %d.\n", rc);
					/* Normal completion falls to end of routine. */
				}
				else
				{
					if (rc >= 128)
					{
						/*
						 * If the exit code has the 128-bit set, the exit code
						 * represents a shell exited by signal where the
						 * signal number is exitCode - 128.
						 */
						rc -= 128;
						pszErrorMsg = MakeString("psql finished abnormally with signal number %d.\n", rc);
					}
					else
					{
						pszErrorMsg = MakeString("psql finished abnormally with return code %d.\n", rc);
					}
					makeSureMonitorThreadEnds(TASK_RC_FAILURE, pszErrorMsg);
					free(pszErrorMsg);
					exit_code = 2;
				}
			}
			else if (WIFSIGNALED(status))
			{
				/*
				 * NOTE(review): pszErrorMsg is passed to mpp_err_msg as the
				 * format argument; a '%' in it would misformat.  Verify
				 * mpp_err_msg's contract or use a "%s" literal.
				 */
				pszErrorMsg = MakeString("psql finished abnormally with signal number %d.\n", WTERMSIG(status));
				mpp_err_msg(logError, progname, pszErrorMsg);
				makeSureMonitorThreadEnds(TASK_RC_FAILURE, pszErrorMsg);
				free(pszErrorMsg);
				exit_code = 2;
			}
			else
			{
				pszErrorMsg = MakeString("psql crashed or finished badly; status=%#x.\n", status);
				mpp_err_msg(logError, progname, pszErrorMsg);
				makeSureMonitorThreadEnds(TASK_RC_FAILURE, pszErrorMsg);
				free(pszErrorMsg);
				exit_code = 2;
			}
		}
	}
	else
	{
		/* ---- Archive formats: drive the pg_restore archiver ---- */
		AH = OpenArchive(inputFileSpec, opts->format);

		/* Let the archiver know how noisy to be */
		AH->verbose = opts->verbose;

		/*
		 * Whether to keep submitting sql commands as "pg_restore ... | psql ... "
		 */
		AH->exit_on_error = opts->exit_on_error;

		if (opts->tocFile)
			SortTocFromFile(AH, opts);

		if (opts->tocSummary)
			PrintTOCSummary(AH, opts);
		else
		{
			pAH = (ArchiveHandle *) AH;
			if (opts->useDB)
			{
				/* check for version mismatch */
				if (pAH->version < K_VERS_1_3)
					die_horribly(NULL, NULL, "direct database connections are not supported in pre-1.3 archives\n");

				pAH->connection = g_conn;

				/* XXX Should get this from the archive */
				AH->minRemoteVersion = 070100;
				AH->maxRemoteVersion = 999999;

				_check_database_version(pAH);
			}

			RestoreArchive(AH, opts);

			/*
			 * The following is necessary when the -C option is used. A new
			 * connection is gotten to the database within RestoreArchive
			 */
			if (pAH->connection != g_conn)
				g_conn = pAH->connection;
		}

		/* done, print a summary of ignored errors */
		if (AH->n_errors)
			fprintf(stderr, _("WARNING: errors ignored on restore: %d\n"), AH->n_errors);

		/* AH may be freed in CloseArchive? */
		exit_code = AH->n_errors ? 1 : 0;
		CloseArchive(AH);
	}

	/* ---- Cleanup and status reporting ---- */
#ifdef USE_DDBOOST
	if (dd_boost_enabled)
		cleanupDDSystem();
	free(ddboost_storage_unit);
#endif

	makeSureMonitorThreadEnds(TASK_RC_SUCCESS, TASK_MSG_SUCCESS);

	DestroyStatusOpList(g_pStatusOpList);

	if (change_schema_file)
		free(change_schema_file);
	if (schema_level_file)
		free(schema_level_file);

	if (SegDB.pszHost)
		free(SegDB.pszHost);
	if (SegDB.pszDBName)
		free(SegDB.pszDBName);
	if (SegDB.pszDBUser)
		free(SegDB.pszDBUser);
	if (SegDB.pszDBPswd)
		free(SegDB.pszDBPswd);

	if (valueBuf)
		destroyPQExpBuffer(valueBuf);
	if (escapeBuf)
		destroyPQExpBuffer(escapeBuf);

	PQfinish(g_conn);
	if (exit_code == 0)
		mpp_err_msg(logInfo, progname, "Finished successfully\n");
	else
		mpp_err_msg(logError, progname, "Finished with errors\n");

	return exit_code;
}
/*
 * ReadDataFromArchiveZlib
 *
 * Pull the whole (zlib-compressed) data stream through readF, inflate it,
 * and hand the decompressed bytes to ahwrite().  All failure paths exit
 * through die_horribly(); on success the temporary stream state and the
 * input/output buffers are freed before returning.
 *
 * NOTE(review): readF is given &buf/&buflen, so it may presumably resize
 * or replace the input buffer — confirm against the read_f implementations;
 * that is why buf is freed here via the (possibly updated) pointer.
 */
static void ReadDataFromArchiveZlib(ArchiveHandle * AH, read_f* readF)
{
	z_streamp	zp;
	char	   *out;
	int			res = Z_OK;
	size_t		cnt;
	char	   *buf;
	size_t		buflen;

	/* Allocate the zlib stream state; use zlib's default allocators */
	zp = (z_streamp) malloc(sizeof(z_stream));
	if (zp == NULL)
		die_horribly(NULL, modulename, "out of memory\n");
	zp->zalloc = Z_NULL;
	zp->zfree = Z_NULL;
	zp->opaque = Z_NULL;

	/* Input buffer handed to readF */
	buf = malloc(ZLIB_IN_SIZE);
	if (buf == NULL)
		die_horribly(NULL, modulename, "out of memory\n");
	buflen = ZLIB_IN_SIZE;

	/*
	 * Output buffer: one extra byte so a terminating '\0' can be appended
	 * after each inflated chunk (see below).
	 */
	out = malloc(ZLIB_OUT_SIZE + 1);
	if (out == NULL)
		die_horribly(NULL, modulename, "out of memory\n");

	if (inflateInit(zp) != Z_OK)
		die_horribly(NULL, modulename,
					 "could not initialize compression library: %s\n", zp->msg);

	/* no minimal chunk size for zlib */
	while ((cnt = readF(AH, &buf, &buflen)))
	{
		/* Feed this chunk of compressed input to zlib */
		zp->next_in = (void *)buf;
		zp->avail_in = cnt;

		/* Inflate until this chunk is fully consumed */
		while (zp->avail_in > 0)
		{
			zp->next_out = (void *)out;
			zp->avail_out = ZLIB_OUT_SIZE;

			res = inflate(zp, 0);
			if (res != Z_OK && res != Z_STREAM_END)
				die_horribly(AH, modulename,
							 "could not uncompress data: %s\n", zp->msg);

			/*
			 * (ZLIB_OUT_SIZE - avail_out) is the number of bytes produced;
			 * terminate the chunk (the +1 byte above makes this safe even
			 * when the buffer is completely full).
			 */
			out[ZLIB_OUT_SIZE - zp->avail_out] = '\0';
			ahwrite(out, 1, ZLIB_OUT_SIZE - zp->avail_out, AH);
		}
	}

	/*
	 * Input is exhausted; drain whatever zlib still holds internally until
	 * it reports end-of-stream.
	 *
	 * NOTE(review): if readF returned no data at all, res is still Z_OK
	 * here and inflate() is called with an empty input; any resulting
	 * non-OK return is reported via die_horribly — confirm this is the
	 * intended handling of an empty/truncated stream.
	 */
	zp->next_in = NULL;
	zp->avail_in = 0;
	while (res != Z_STREAM_END)
	{
		zp->next_out = (void *)out;
		zp->avail_out = ZLIB_OUT_SIZE;
		res = inflate(zp, 0);
		if (res != Z_OK && res != Z_STREAM_END)
			die_horribly(AH, modulename,
						 "could not uncompress data: %s\n", zp->msg);

		out[ZLIB_OUT_SIZE - zp->avail_out] = '\0';
		ahwrite(out, 1, ZLIB_OUT_SIZE - zp->avail_out, AH);
	}

	if (inflateEnd(zp) != Z_OK)
		die_horribly(AH, modulename,
					 "could not close compression library: %s\n", zp->msg);

	free(buf);
	free(out);
	free(zp);
}
/*
 * Initializer
 *
 * Install the "files" format callbacks into AH and open the TOC file
 * (output in write mode, input in read mode).  In read mode the TOC is
 * consumed immediately and the file is closed; table data lives in
 * separate per-table files.
 *
 * Fix: the lclContext allocation was dereferenced without a NULL check
 * (unlike the lo_buf allocation just below) — check it and die with the
 * file's standard out-of-memory message.
 */
void InitArchiveFmt_Files(ArchiveHandle *AH)
{
	lclContext *ctx;

	/* Assuming static functions, this can be copied for each format. */
	AH->ArchiveEntryPtr = _ArchiveEntry;
	AH->StartDataPtr = _StartData;
	AH->WriteDataPtr = _WriteData;
	AH->EndDataPtr = _EndData;
	AH->WriteBytePtr = _WriteByte;
	AH->ReadBytePtr = _ReadByte;
	AH->WriteBufPtr = _WriteBuf;
	AH->ReadBufPtr = _ReadBuf;
	AH->ClosePtr = _CloseArchive;
	AH->PrintTocDataPtr = _PrintTocData;
	AH->ReadExtraTocPtr = _ReadExtraToc;
	AH->WriteExtraTocPtr = _WriteExtraToc;
	AH->PrintExtraTocPtr = _PrintExtraToc;
	AH->StartBlobsPtr = _StartBlobs;
	AH->StartBlobPtr = _StartBlob;
	AH->EndBlobPtr = _EndBlob;
	AH->EndBlobsPtr = _EndBlobs;

	/*
	 * Set up some special context used in compressing data.
	 */
	ctx = (lclContext *) calloc(1, sizeof(lclContext));
	if (ctx == NULL)
		die_horribly(AH, modulename, "out of memory\n");
	AH->formatData = (void *) ctx;
	ctx->filePos = 0;

	/* Initialize LO buffering */
	AH->lo_buf_size = LOBBUFSIZE;
	AH->lo_buf = (void *) malloc(LOBBUFSIZE);
	if (AH->lo_buf == NULL)
		die_horribly(AH, modulename, "out of memory\n");

	/*
	 * Now open the TOC file
	 */
	if (AH->mode == archModeWrite)
	{
		write_msg(modulename, "WARNING:\n"
				  "  This format is for demonstration purposes; it is not intended for\n"
				  "  normal use. Files will be written in the current working directory.\n");

		/* Empty fSpec means "write the TOC to stdout" */
		if (AH->fSpec && strcmp(AH->fSpec, "") != 0)
			AH->FH = fopen(AH->fSpec, PG_BINARY_W);
		else
			AH->FH = stdout;

		if (AH->FH == NULL)
			die_horribly(NULL, modulename, "could not open output file: %s\n",
						 strerror(errno));

		ctx->hasSeek = checkSeek(AH->FH);

		/* Clamp an out-of-range compression request to zlib's default */
		if (AH->compression < 0 || AH->compression > 9)
			AH->compression = Z_DEFAULT_COMPRESSION;
	}
	else
	{							/* Read Mode */
		if (AH->fSpec && strcmp(AH->fSpec, "") != 0)
			AH->FH = fopen(AH->fSpec, PG_BINARY_R);
		else
			AH->FH = stdin;

		if (AH->FH == NULL)
			die_horribly(NULL, modulename, "could not open input file: %s\n",
						 strerror(errno));

		ctx->hasSeek = checkSeek(AH->FH);

		ReadHead(AH);
		ReadToc(AH);

		/* Nothing else in the file... */
		if (fclose(AH->FH) != 0)
			die_horribly(AH, modulename, "could not close TOC file: %s\n",
						 strerror(errno));
	}
}