/*
 * do_lo_export()
 *
 * Export a server-side large object into a local file.
 * Runs inside its own transaction unless one is already open.
 */
bool
do_lo_export(const char *loid_arg, const char *filename_arg)
{
	bool		own_transaction;
	int			result;

	if (!start_lo_xact("\\lo_export", &own_transaction))
		return false;

	/* allow query cancel while the export is in flight */
	SetCancelConn();
	result = lo_export(pset.db, atooid(loid_arg), filename_arg);
	ResetCancelConn();

	/* of course this status is documented nowhere :( */
	if (result != 1)
	{
		psql_error("%s", PQerrorMessage(pset.db));
		return fail_lo_xact("\\lo_export", own_transaction);
	}

	if (!finish_lo_xact("\\lo_export", own_transaction))
		return false;

	print_lo_result("lo_export");
	return true;
}
/*
 * lobject_export -- copy the large object into a server-side call to
 * lo_export(), writing it to `filename` on the client machine.
 *
 * Runs with the GIL released and the connection lock held; on any
 * failure the accumulated error state is turned into a Python
 * exception via pq_complete_error().  Returns lo_export's result
 * (negative on error).
 */
int
lobject_export(lobjectObject *self, const char *filename)
{
    PGresult *pgres = NULL;
    char *error = NULL;
    int rv;

    Py_BEGIN_ALLOW_THREADS;
    pthread_mutex_lock(&(self->conn->lock));

    /* make sure we are inside a transaction before exporting */
    rv = pq_begin_locked(self->conn, &pgres, &error);
    if (rv >= 0) {
        rv = lo_export(self->conn->pgconn, self->oid, filename);
        if (rv < 0)
            collect_error(self->conn, &error);
    }

    pthread_mutex_unlock(&(self->conn->lock));
    Py_END_ALLOW_THREADS;

    if (rv < 0)
        pq_complete_error(self->conn, &pgres, &error);

    return rv;
}
/*
 * do_lo_export()
 *
 * Write a large object out to a local file, wrapped in a transaction
 * of its own when none is active.
 */
bool
do_lo_export(const char *loid_arg, const char *filename_arg)
{
	bool		own_transaction;
	int			rc;

	if (!start_lo_xact("\\lo_export", &own_transaction))
		return false;

	/* permit cancellation during the (possibly long) transfer */
	SetCancelConn();
	rc = lo_export(pset.db, atooid(loid_arg), filename_arg);
	ResetCancelConn();

	/* of course this status is documented nowhere :( */
	if (rc != 1)
	{
		fputs(PQerrorMessage(pset.db), stderr);
		return fail_lo_xact("\\lo_export", own_transaction);
	}

	if (!finish_lo_xact("\\lo_export", own_transaction))
		return false;

	fprintf(pset.queryFout, "lo_export\n");
	return true;
}
bool SPostgres::ReadLobToFile(SString sTable,SString sLobField,SString sWhere,SString sFile) { LOGBASEDEBUG("Into SPostgres::ReadLobToFile(%s,%s,%s,%s)", sTable.data(),sLobField.data(),sWhere.data(),sFile.data()); SString sql; sql.sprintf("select %s from %s where %s",sLobField.data(),sTable.data(),sWhere.data()); Oid oid = SelectIntoI(sql); if(oid == 0) { LOGERROR("Into SPostgres::ReadLobToFile(%s,%s,%s,%s) get oid error, oid=0", sTable.data(),sLobField.data(),sWhere.data(),sFile.data()); return false; } PGresult *pRes = PQexec(m_pConn, "begin"); if(pRes == NULL) { if(TestConnect() == true)//连接可用 { LOGERROR("Error in SPostgres::ReadLobToFile, exec begin error, err=%s", PQerrorMessage(m_pConn)); return false; } //失败自动重连一次数据库 if(!Reconnect()) { LOGERROR("Error in SPostgres::ReadLobToFile, exec begin error, err=%s", PQerrorMessage(m_pConn)); return false; } pRes = PQexec(m_pConn, "begin"); if(pRes == NULL) { LOGERROR("Error in SPostgres::ReadLobToFile, exec begin error, err=%s", PQerrorMessage(m_pConn)); return false; } } PQclear(pRes); pRes = NULL; if(lo_export(m_pConn,oid,sFile.data()) == 0) { LOGERROR("Error in SPostgres::ReadLobToFile(%s,%s,%s,%s), lo_export error, err:%s", sTable.data(),sLobField.data(),sWhere.data(),sFile.data(),PQerrorMessage(m_pConn)); pRes = PQexec(m_pConn, "end"); if(pRes != NULL) PQclear(pRes); return false; } if(pRes != NULL) PQclear(pRes); pRes = PQexec(m_pConn, "end"); if(pRes != NULL) PQclear(pRes); return pRes != NULL; }
/* ************************************************************************* */
/*
 * pg_lo_export -- export large object OID into the local file `filename`.
 *
 * Requires an open transaction (conn->at); raises a Clip error trap and
 * returns 1 on failure, returns 0 on success.
 */
int
pg_lo_export(ClipMachine *mp, SQLCONN *c, unsigned int OID, const char *filename)
{
	PG_CONN *conn = (PG_CONN *) c;

	if (!conn->at)
	{
		_clip_trap_err(mp, 0, 0, 0, subsys, ER_START, er_start);
		return 1;
	}

	/*
	 * BUG FIX: lo_export() returns 1 on success and -1 on failure.  The
	 * previous test "> 0" raised an error trap on every SUCCESSFUL export
	 * and silently accepted failures.  Failure is a negative return.
	 */
	if (lo_export(conn->conn, OID, filename) < 0)
	{
		_clip_trap_err(mp, 0, 0, 0, subsys, ER_START, er_blob_export);
		return 1;
	}
	return 0;
}
/*
 * lo_export_stub -- OCaml binding for libpq's lo_export().
 *
 * Copies the filename out of the OCaml heap, then releases the runtime
 * lock around the (blocking) lo_export call.  Returns lo_export's int
 * result boxed as an OCaml value.
 */
CAMLprim value lo_export_stub(value v_conn, value v_oid, value v_fname)
{
  /* BUG FIX: register all value parameters, not just v_conn */
  CAMLparam3(v_conn, v_oid, v_fname);
  PGconn *conn = get_conn(v_conn);
  value v_res;
  size_t len = caml_string_length(v_fname) + 1;
  char *fname = caml_stat_alloc(len);
  /* read OCaml values before releasing the runtime lock */
  int oid = Int_val(v_oid);
  memcpy(fname, String_val(v_fname), len);
  caml_enter_blocking_section();
    v_res = Val_int(lo_export(conn, oid, fname));
  caml_leave_blocking_section();
  /* BUG FIX: caml_stat_alloc memory must be freed with caml_stat_free,
   * not plain free() -- the allocators may not match. */
  caml_stat_free(fname);
  CAMLreturn(v_res);
}
int main(int argc, char **argv) { char *in_filename, *out_filename, *out_filename2; char *database; Oid lobjOid; PGconn *conn; PGresult *res; if (argc != 5) { fprintf(stderr, "Usage: %s database_name in_filename out_filename out_filename2\n", argv[0]); exit(1); } database = argv[1]; in_filename = argv[2]; out_filename = argv[3]; out_filename2 = argv[4]; /* * set up the connection */ conn = PQsetdb(NULL, NULL, NULL, NULL, database); /* check to see that the backend connection was successfully made */ if (PQstatus(conn) != CONNECTION_OK) { fprintf(stderr, "Connection to database failed: %s", PQerrorMessage(conn)); exit_nicely(conn); } res = PQexec(conn, "begin"); PQclear(res); printf("importing file \"%s\" ...\n", in_filename); /* lobjOid = importFile(conn, in_filename); */ lobjOid = lo_import(conn, in_filename); if (lobjOid == 0) fprintf(stderr, "%s\n", PQerrorMessage(conn)); else { printf("\tas large object %u.\n", lobjOid); printf("picking out bytes 4294967000-4294968000 of the large object\n"); pickout(conn, lobjOid, 4294967000U, 1000); printf("overwriting bytes 4294967000-4294968000 of the large object with X's\n"); overwrite(conn, lobjOid, 4294967000U, 1000); printf("exporting large object to file \"%s\" ...\n", out_filename); /* exportFile(conn, lobjOid, out_filename); */ if (lo_export(conn, lobjOid, out_filename) < 0) fprintf(stderr, "%s\n", PQerrorMessage(conn)); printf("truncating to 3294968000 bytes\n"); my_truncate(conn, lobjOid, 3294968000U); printf("exporting truncated large object to file \"%s\" ...\n", out_filename2); if (lo_export(conn, lobjOid, out_filename2) < 0) fprintf(stderr, "%s\n", PQerrorMessage(conn)); } res = PQexec(conn, "end"); PQclear(res); PQfinish(conn); return 0; }
/*
 * pglo_export
 *
 * Walk the pgLO->lolist of (schema, table, attribute) triples.  For each
 * one, SELECT the distinct large-object OIDs referenced by that column;
 * in ACTION_SHOW mode just print them, otherwise create the
 * space/db/schema/table/attr directory tree, lo_export() each object
 * into it, and append a line per object to the dump index file.
 */
void
pglo_export(LODumpMaster * pgLO)
{
	LOlist	   *ll;
	int			tuples;
	char		path[BUFSIZ],
				Qbuff[QUERY_BUFSIZ];

	/* write the index-file header (skipped when only listing) */
	if (pgLO->action != ACTION_SHOW)
	{
		time_t		t;

		time(&t);
		fprintf(pgLO->index, "#\n# This is the PostgreSQL large object dump index\n#\n");
		fprintf(pgLO->index, "#\tDate: %s", ctime(&t));
		fprintf(pgLO->index, "#\tHost: %s\n", pgLO->host);
		fprintf(pgLO->index, "#\tDatabase: %s\n", pgLO->db);
		fprintf(pgLO->index, "#\tUser: %s\n", pgLO->user);
		fprintf(pgLO->index, "#\n# oid\ttable\tattribut\tinfile\tschema\n#\n");
	}

	pgLO->counter = 0;

	for (ll = pgLO->lolist; ll->lo_table != NULL; ll++)
	{
		/*
		 * Query: find the LOs referenced by this column
		 */
		snprintf(Qbuff, QUERY_BUFSIZ,
				 "SELECT DISTINCT l.loid FROM \"%s\".\"%s\" x, pg_catalog.pg_largeobject l "
				 "WHERE x.\"%s\" = l.loid",
				 ll->lo_schema, ll->lo_table, ll->lo_attr);

		/* puts(Qbuff); */

		pgLO->res = PQexec(pgLO->conn, Qbuff);

		if (PQresultStatus(pgLO->res) != PGRES_TUPLES_OK)
		{
			/* query failed: report and move on to the next column */
			fprintf(stderr, "%s: Failed to get LO OIDs:\n%s",
					progname, PQerrorMessage(pgLO->conn));
		}
		else if ((tuples = PQntuples(pgLO->res)) == 0)
		{
			if (!pgLO->quiet && pgLO->action == ACTION_EXPORT_ATTR)
				printf("%s: no large objects in \"%s\".\"%s\".\"%s\"\n",
					   progname, ll->lo_schema, ll->lo_table, ll->lo_attr);
		}
		else
		{
			int			t;
			char	   *val;

			/*
			 * Create DIR/FILE -- build the db/schema/table/attr directory
			 * chain one level at a time; an already-existing directory
			 * (EEXIST) is fine, any other mkdir error is fatal.
			 */
			if (pgLO->action != ACTION_SHOW)
			{
				snprintf(path, BUFSIZ, "%s/%s/%s", pgLO->space, pgLO->db,
						 ll->lo_schema);

				if (mkdir(path, DIR_UMASK) == -1)
				{
					if (errno != EEXIST)
					{
						perror(path);
						exit(RE_ERROR);
					}
				}

				snprintf(path, BUFSIZ, "%s/%s/%s/%s", pgLO->space, pgLO->db,
						 ll->lo_schema, ll->lo_table);

				if (mkdir(path, DIR_UMASK) == -1)
				{
					if (errno != EEXIST)
					{
						perror(path);
						exit(RE_ERROR);
					}
				}

				snprintf(path, BUFSIZ, "%s/%s/%s/%s/%s", pgLO->space,
						 pgLO->db, ll->lo_schema, ll->lo_table, ll->lo_attr);

				if (mkdir(path, DIR_UMASK) == -1)
				{
					if (errno != EEXIST)
					{
						perror(path);
						exit(RE_ERROR);
					}
				}

				if (!pgLO->quiet)
					printf("dump %s.%s.%s (%d large obj)\n",
						   ll->lo_schema, ll->lo_table, ll->lo_attr, tuples);
			}

			pgLO->counter += tuples;

			/* export (or just list) every OID found for this column */
			for (t = 0; t < tuples; t++)
			{
				Oid			lo;

				val = PQgetvalue(pgLO->res, t, 0);
				lo = atooid(val);

				if (pgLO->action == ACTION_SHOW)
				{
					printf("%s.%s.%s: %u\n", ll->lo_schema, ll->lo_table,
						   ll->lo_attr, lo);
					continue;
				}

				/* file name within the tree is the OID's text form */
				snprintf(path, BUFSIZ, "%s/%s/%s/%s/%s/%s", pgLO->space,
						 pgLO->db, ll->lo_schema, ll->lo_table, ll->lo_attr, val);

				if (lo_export(pgLO->conn, lo, path) < 0)
					fprintf(stderr, "%s: lo_export failed:\n%s",
							progname, PQerrorMessage(pgLO->conn));

				else
					fprintf(pgLO->index, "%s\t%s\t%s\t%s/%s/%s/%s/%s\t%s\n",
							val, ll->lo_table, ll->lo_attr, pgLO->db,
							ll->lo_schema, ll->lo_table, ll->lo_attr, val,
							ll->lo_schema);
			}
		}

		PQclear(pgLO->res);
	}
}
int main(int argc, char **argv) { char *in_filename, *out_filename; char *database; Oid lobjOid; PGconn *conn; PGresult *res; if (argc != 4) { fprintf(stderr, "Usage: %s database_name in_filename out_filename\n", argv[0]); exit(1); } database = argv[1]; in_filename = argv[2]; out_filename = argv[3]; /* * set up the connection */ conn = PQsetdb(NULL, NULL, NULL, NULL, database); /* check to see that the backend connection was successfully made */ if (PQstatus(conn) != CONNECTION_OK) { fprintf(stderr, "Connection to database failed: %s", PQerrorMessage(conn)); exit_nicely(conn); } /* Set always-secure search path, so malicous users can't take control. */ res = PQexec(conn, "SELECT pg_catalog.set_config('search_path', '', false)"); if (PQresultStatus(res) != PGRES_TUPLES_OK) { fprintf(stderr, "SET failed: %s", PQerrorMessage(conn)); PQclear(res); exit_nicely(conn); } PQclear(res); res = PQexec(conn, "begin"); PQclear(res); printf("importing file \"%s\" ...\n", in_filename); /* lobjOid = importFile(conn, in_filename); */ lobjOid = lo_import(conn, in_filename); if (lobjOid == 0) fprintf(stderr, "%s\n", PQerrorMessage(conn)); else { printf("\tas large object %u.\n", lobjOid); printf("picking out bytes 1000-2000 of the large object\n"); pickout(conn, lobjOid, 1000, 1000); printf("overwriting bytes 1000-2000 of the large object with X's\n"); overwrite(conn, lobjOid, 1000, 1000); printf("exporting large object to file \"%s\" ...\n", out_filename); /* exportFile(conn, lobjOid, out_filename); */ if (lo_export(conn, lobjOid, out_filename) < 0) fprintf(stderr, "%s\n", PQerrorMessage(conn)); } res = PQexec(conn, "end"); PQclear(res); PQfinish(conn); return 0; }