Example no. 1
0
/*
 * Insert monitor info: this is basically the time, the xlog received and
 * applied on the standby, and the current xlog location on the primary.
 * Also do the math to see how far behind, in bytes, we are from being up to date.
 */
static void
StandbyMonitor(void)
{
	PGresult *res;
	char monitor_standby_timestamp[MAXLEN];
	char last_wal_primary_location[MAXLEN];
	char last_wal_standby_received[MAXLEN];
	char last_wal_standby_applied[MAXLEN];

	unsigned long long int lsn_primary;
	unsigned long long int lsn_standby_received;
	unsigned long long int lsn_standby_applied;

	int	connection_retries;

	/*
	 * Check if the master is still available; if we cannot reconnect after
	 * 5 minutes of retries, try to get a new master.
	 */
	CheckPrimaryConnection(); // this takes up to NUM_RETRY * SLEEP_RETRY seconds

	if (PQstatus(primaryConn) != CONNECTION_OK)
	{
		if (local_options.failover == MANUAL_FAILOVER)
		{
			log_err(_("We couldn't reconnect to master. Now checking if another node has been promoted.\n"));
			for (connection_retries = 0; connection_retries < 6; connection_retries++)
			{
				primaryConn = getMasterConnection(myLocalConn, repmgr_schema, local_options.node,
				                                  local_options.cluster_name, &primary_options.node, NULL);
				if (PQstatus(primaryConn) == CONNECTION_OK)
				{
					/* Connected, we can continue the process so break the loop */
					log_err(_("Connected to node %d, continue monitoring.\n"), primary_options.node);
					break;
				}
				else
				{
					log_err(_("We haven't found a new master, waiting before retry...\n"));
					/* wait 5 minutes before retrying; after 6 failures (30 minutes) we stop trying */
					sleep(300);
				}
			}

			if (PQstatus(primaryConn) != CONNECTION_OK)
			{
				log_err(_("We couldn't reconnect for long enough, exiting...\n"));
				exit(ERR_DB_CON);
			}
		}
		else if (local_options.failover == AUTOMATIC_FAILOVER)
		{
			/*
			 * When we return from this function we will have a new primary and
			 * a new primaryConn
			 */
			do_failover();
		}
	}

	/* Check if we are still a standby; we could have been promoted */
	if (!is_standby(myLocalConn))
	{
		log_err(_("It seems like we have been promoted, so exit from monitoring...\n"));
		CloseConnections();
		exit(ERR_PROMOTED);
	}

	/*
	 * first check if there is a command being executed,
	 * and if that is the case, cancel the query so we can
	 * insert the current record
	 */
	if (PQisBusy(primaryConn) == 1)
		CancelQuery();

	/* Get local xlog info */
	sqlquery_snprintf(
	    sqlquery,
	    "SELECT CURRENT_TIMESTAMP, pg_last_xlog_receive_location(), "
	    "pg_last_xlog_replay_location()");

	res = PQexec(myLocalConn, sqlquery);
	if (PQresultStatus(res) != PGRES_TUPLES_OK)
	{
		log_err(_("PQexec failed: %s\n"), PQerrorMessage(myLocalConn));
		PQclear(res);
		/* if there is any error, just let it be and retry in the next loop */
		return;
	}

	strncpy(monitor_standby_timestamp, PQgetvalue(res, 0, 0), MAXLEN);
	strncpy(last_wal_standby_received, PQgetvalue(res, 0, 1), MAXLEN);
	strncpy(last_wal_standby_applied, PQgetvalue(res, 0, 2), MAXLEN);
	PQclear(res);

	/* Get primary xlog info */
	sqlquery_snprintf(sqlquery, "SELECT pg_current_xlog_location() ");

	res = PQexec(primaryConn, sqlquery);
	if (PQresultStatus(res) != PGRES_TUPLES_OK)
	{
		log_err(_("PQexec failed: %s\n"), PQerrorMessage(primaryConn));
		PQclear(res);
		return;
	}

	strncpy(last_wal_primary_location, PQgetvalue(res, 0, 0), MAXLEN);
	PQclear(res);

	/* Calculate the lag */
	lsn_primary = walLocationToBytes(last_wal_primary_location);
	lsn_standby_received = walLocationToBytes(last_wal_standby_received);
	lsn_standby_applied = walLocationToBytes(last_wal_standby_applied);

	/*
	 * Build the SQL to execute on primary
	 */
	sqlquery_snprintf(sqlquery,
	                  "INSERT INTO %s.repl_monitor "
	                  "VALUES(%d, %d, '%s'::timestamp with time zone, "
	                  " '%s', '%s', "
	                  " %lld, %lld)", repmgr_schema,
	                  primary_options.node, local_options.node, monitor_standby_timestamp,
	                  last_wal_primary_location,
	                  last_wal_standby_received,
	                  (lsn_primary - lsn_standby_received),
	                  (lsn_standby_received - lsn_standby_applied));

	/*
	 * Execute the query asynchronously, but don't check for a result. We
	 * will check the result next time we pause for a monitor step.
	 */
	if (PQsendQuery(primaryConn, sqlquery) == 0)
		log_warning(_("Query could not be sent to primary. %s\n"),
		            PQerrorMessage(primaryConn));
}
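
The lag arithmetic above depends on walLocationToBytes(), which is defined elsewhere in repmgr. Below is a hedged sketch of what such a conversion could look like; walLocationToBytes_sketch and the 0xFF000000 factor are assumptions (based on the pre-9.3 convention that the last 16 MB segment of every 4 GB logical xlog file is skipped), not the actual repmgr implementation, and <stdio.h> is assumed to be included already for sscanf().

/*
 * Sketch only: convert a pre-9.3 text LSN such as "16/B374D848" into an
 * absolute byte position, so that two positions can be subtracted.
 */
static unsigned long long int
walLocationToBytes_sketch(const char *wal_location)
{
	unsigned int xlogid;
	unsigned int xrecoff;

	if (sscanf(wal_location, "%X/%X", &xlogid, &xrecoff) != 2)
		return 0;				/* caller treats 0 as "unknown" */

	/* assumed: each xlogid unit spans 0xFF000000 bytes (pre-9.3 layout) */
	return (unsigned long long int) xlogid * 0xFF000000ULL + xrecoff;
}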
Example no. 2
0
/*
 * lo_initialize
 *
 * Initialize the large object interface for an existing connection.
 * We ask the backend for the OIDs in pg_proc of all functions that
 * are required for large object operations.
 */
static int
lo_initialize(PGconn *conn)
{
	PGresult   *res;
	PGlobjfuncs *lobjfuncs;
	int			n;
	const char *query;
	const char *fname;
	Oid			foid;

	/*
	 * Allocate the structure to hold the function OIDs
	 */
	lobjfuncs = (PGlobjfuncs *) malloc(sizeof(PGlobjfuncs));
	if (lobjfuncs == NULL)
	{
		printfPQExpBuffer(&conn->errorMessage,
						  libpq_gettext("out of memory\n"));
		return -1;
	}
	MemSet((char *) lobjfuncs, 0, sizeof(PGlobjfuncs));

	/*
	 * Execute the query to get all the functions at once.	In 7.3 and later
	 * we need to be schema-safe.  lo_create only exists in 8.1 and up.
	 * lo_truncate only exists in 8.3 and up.
	 */
	if (conn->sversion >= 70300)
		query = "select proname, oid from pg_catalog.pg_proc "
			"where proname in ("
			"'lo_open', "
			"'lo_close', "
			"'lo_creat', "
			"'lo_create', "
			"'lo_unlink', "
			"'lo_lseek', "
			"'lo_tell', "
			"'lo_truncate', "
			"'loread', "
			"'lowrite') "
			"and pronamespace = (select oid from pg_catalog.pg_namespace "
			"where nspname = 'pg_catalog')";
	else
		query = "select proname, oid from pg_proc "
			"where proname = 'lo_open' "
			"or proname = 'lo_close' "
			"or proname = 'lo_creat' "
			"or proname = 'lo_unlink' "
			"or proname = 'lo_lseek' "
			"or proname = 'lo_tell' "
			"or proname = 'loread' "
			"or proname = 'lowrite'";

	res = PQexec(conn, query);
	if (res == NULL)
	{
		free(lobjfuncs);
		return -1;
	}

	if (res->resultStatus != PGRES_TUPLES_OK)
	{
		free(lobjfuncs);
		PQclear(res);
		printfPQExpBuffer(&conn->errorMessage,
						  libpq_gettext("query to initialize large object functions did not return data\n"));
		return -1;
	}

	/*
	 * Examine the result and put the OIDs into the struct
	 */
	for (n = 0; n < PQntuples(res); n++)
	{
		fname = PQgetvalue(res, n, 0);
		foid = (Oid) atoi(PQgetvalue(res, n, 1));
		if (strcmp(fname, "lo_open") == 0)
			lobjfuncs->fn_lo_open = foid;
		else if (strcmp(fname, "lo_close") == 0)
			lobjfuncs->fn_lo_close = foid;
		else if (strcmp(fname, "lo_creat") == 0)
			lobjfuncs->fn_lo_creat = foid;
		else if (strcmp(fname, "lo_create") == 0)
			lobjfuncs->fn_lo_create = foid;
		else if (strcmp(fname, "lo_unlink") == 0)
			lobjfuncs->fn_lo_unlink = foid;
		else if (strcmp(fname, "lo_lseek") == 0)
			lobjfuncs->fn_lo_lseek = foid;
		else if (strcmp(fname, "lo_tell") == 0)
			lobjfuncs->fn_lo_tell = foid;
		else if (strcmp(fname, "lo_truncate") == 0)
			lobjfuncs->fn_lo_truncate = foid;
		else if (strcmp(fname, "loread") == 0)
			lobjfuncs->fn_lo_read = foid;
		else if (strcmp(fname, "lowrite") == 0)
			lobjfuncs->fn_lo_write = foid;
	}

	PQclear(res);

	/*
	 * Finally check that we really got all large object interface functions
	 */
	if (lobjfuncs->fn_lo_open == 0)
	{
		printfPQExpBuffer(&conn->errorMessage,
				libpq_gettext("cannot determine OID of function lo_open\n"));
		free(lobjfuncs);
		return -1;
	}
	if (lobjfuncs->fn_lo_close == 0)
	{
		printfPQExpBuffer(&conn->errorMessage,
			   libpq_gettext("cannot determine OID of function lo_close\n"));
		free(lobjfuncs);
		return -1;
	}
	if (lobjfuncs->fn_lo_creat == 0)
	{
		printfPQExpBuffer(&conn->errorMessage,
			   libpq_gettext("cannot determine OID of function lo_creat\n"));
		free(lobjfuncs);
		return -1;
	}
	if (lobjfuncs->fn_lo_unlink == 0)
	{
		printfPQExpBuffer(&conn->errorMessage,
			  libpq_gettext("cannot determine OID of function lo_unlink\n"));
		free(lobjfuncs);
		return -1;
	}
	if (lobjfuncs->fn_lo_lseek == 0)
	{
		printfPQExpBuffer(&conn->errorMessage,
			   libpq_gettext("cannot determine OID of function lo_lseek\n"));
		free(lobjfuncs);
		return -1;
	}
	if (lobjfuncs->fn_lo_tell == 0)
	{
		printfPQExpBuffer(&conn->errorMessage,
				libpq_gettext("cannot determine OID of function lo_tell\n"));
		free(lobjfuncs);
		return -1;
	}
	if (lobjfuncs->fn_lo_read == 0)
	{
		printfPQExpBuffer(&conn->errorMessage,
				 libpq_gettext("cannot determine OID of function loread\n"));
		free(lobjfuncs);
		return -1;
	}
	if (lobjfuncs->fn_lo_write == 0)
	{
		printfPQExpBuffer(&conn->errorMessage,
				libpq_gettext("cannot determine OID of function lowrite\n"));
		free(lobjfuncs);
		return -1;
	}

	/*
	 * Put the structure into the connection control
	 */
	conn->lobjfuncs = lobjfuncs;
	return 0;
}
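
lo_initialize() runs implicitly the first time any client-side large object call is made on a connection, which is why the OIDs are cached on the PGconn. A hedged usage sketch of that public API follows; write_blob is a hypothetical helper, while lo_creat, lo_open, lo_write and lo_close are the documented libpq calls, and large object descriptors are only valid inside a transaction.

#include <libpq-fe.h>
#include <libpq/libpq-fs.h>		/* INV_READ / INV_WRITE */

/* Sketch: the first lo_* call below triggers lo_initialize() internally. */
static int
write_blob(PGconn *conn, const char *data, size_t len)
{
	Oid			loid;
	int			fd;

	PQclear(PQexec(conn, "BEGIN"));

	loid = lo_creat(conn, INV_READ | INV_WRITE);		/* uses fn_lo_creat */
	fd = (loid != InvalidOid) ? lo_open(conn, loid, INV_WRITE) : -1;

	if (fd < 0 ||
		lo_write(conn, fd, (char *) data, len) < 0 ||	/* uses fn_lo_write */
		lo_close(conn, fd) < 0)							/* uses fn_lo_close */
	{
		PQclear(PQexec(conn, "ROLLBACK"));
		return -1;
	}

	PQclear(PQexec(conn, "COMMIT"));
	return 0;
}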
Example no. 3
0
/*
 * new_9_0_populate_pg_largeobject_metadata()
 *	new >= 9.0, old <= 8.4
 *	9.0 has a new pg_largeobject permission table
 */
void
new_9_0_populate_pg_largeobject_metadata(ClusterInfo *cluster, bool check_mode)
{
	int			dbnum;
	FILE	   *script = NULL;
	bool		found = false;
	char		output_path[MAXPGPATH];

	prep_status("Checking for large objects");

	snprintf(output_path, sizeof(output_path), "pg_largeobject.sql");

	for (dbnum = 0; dbnum < cluster->dbarr.ndbs; dbnum++)
	{
		PGresult   *res;
		int			i_count;
		DbInfo	   *active_db = &cluster->dbarr.dbs[dbnum];
		PGconn	   *conn = connectToServer(cluster, active_db->db_name);

		/* find if there are any large objects */
		res = executeQueryOrDie(conn,
								"SELECT count(*) "
								"FROM	pg_catalog.pg_largeobject ");

		i_count = PQfnumber(res, "count");
		if (atoi(PQgetvalue(res, 0, i_count)) != 0)
		{
			found = true;
			if (!check_mode)
			{
				PQExpBufferData connectbuf;

				if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL)
					pg_fatal("could not open file \"%s\": %s\n", output_path,
							 strerror(errno));

				initPQExpBuffer(&connectbuf);
				appendPsqlMetaConnect(&connectbuf, active_db->db_name);
				fputs(connectbuf.data, script);
				termPQExpBuffer(&connectbuf);

				fprintf(script,
						"SELECT pg_catalog.lo_create(t.loid)\n"
						"FROM (SELECT DISTINCT loid FROM pg_catalog.pg_largeobject) AS t;\n");
			}
		}

		PQclear(res);
		PQfinish(conn);
	}

	if (script)
		fclose(script);

	if (found)
	{
		report_status(PG_WARNING, "warning");
		if (check_mode)
			pg_log(PG_WARNING, "\n"
				   "Your installation contains large objects.  The new database has an\n"
				   "additional large object permission table.  After upgrading, you will be\n"
				   "given a command to populate the pg_largeobject_metadata table with\n"
				   "default permissions.\n\n");
		else
			pg_log(PG_WARNING, "\n"
				   "Your installation contains large objects.  The new database has an\n"
				   "additional large object permission table, so default permissions must be\n"
				   "defined for all large objects.  The file\n"
				   "    %s\n"
				   "when executed by psql by the database superuser will set the default\n"
				   "permissions.\n\n",
				   output_path);
	}
	else
		check_ok();
}
Example no. 4
0
/*
 * Start the log streaming
 */
static void
StreamLog(void)
{
    PGresult   *res;
    uint32		timeline;
    XLogRecPtr	startpos;
    int			minServerMajor,
                maxServerMajor;
    int			serverMajor;

    /*
     * Connect in replication mode to the server
     */
    conn = GetConnection();
    if (!conn)
        /* Error message already written in GetConnection() */
        return;

    /*
     * Check server version. IDENTIFY_SYSTEM didn't return the current xlog
     * position before 9.1, so we can't work with servers older than 9.1. And
     * we don't support servers newer than the client.
     */
    minServerMajor = 901;
    maxServerMajor = PG_VERSION_NUM / 100;
    serverMajor = PQserverVersion(conn) / 100;
    if (serverMajor < minServerMajor || serverMajor > maxServerMajor)
    {
        fprintf(stderr, _("%s: unsupported server version %s\n"),
                progname, PQparameterStatus(conn, "server_version"));
        disconnect_and_exit(1);
    }

    /*
     * Run IDENTIFY_SYSTEM so we can get the timeline and current xlog
     * position.
     */
    res = PQexec(conn, "IDENTIFY_SYSTEM");
    if (PQresultStatus(res) != PGRES_TUPLES_OK)
    {
        fprintf(stderr, _("%s: could not send replication command \"%s\": %s"),
                progname, "IDENTIFY_SYSTEM", PQerrorMessage(conn));

        disconnect_and_exit(1);
    }
    if (PQntuples(res) != 1 || PQnfields(res) != 3)
    {
        fprintf(stderr,
                _("%s: could not identify system: got %d rows and %d fields, expected %d rows and %d fields\n"),
                progname, PQntuples(res), PQnfields(res), 1, 3);

        disconnect_and_exit(1);
    }
    timeline = atoi(PQgetvalue(res, 0, 1));
    if (sscanf(PQgetvalue(res, 0, 2), "%X/%X", &startpos.xlogid, &startpos.xrecoff) != 2)
    {
        fprintf(stderr,
                _("%s: could not parse transaction log location \"%s\"\n"),
                progname, PQgetvalue(res, 0, 2));
        disconnect_and_exit(1);
    }
    PQclear(res);

    /*
     * Figure out where to start streaming.
     */
    startpos = FindStreamingStart(startpos, timeline);

    /*
     * Always start streaming at the beginning of a segment
     */
    startpos.xrecoff -= startpos.xrecoff % XLOG_SEG_SIZE;

    /*
     * Start the replication
     */
    if (verbose)
        fprintf(stderr,
                _("%s: starting log streaming at %X/%X (timeline %u)\n"),
                progname, startpos.xlogid, startpos.xrecoff, timeline);

    ReceiveXlogStream(conn, startpos, timeline, NULL, basedir,
                      stop_streaming, standby_message_timeout, false);

    PQfinish(conn);
}
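
The "always start streaming at the beginning of a segment" step above is plain modular arithmetic on the low word of the position. A small hedged sketch, assuming the default 16 MB segment size; XLOG_SEG_SIZE_SKETCH and align_to_segment are illustrative names, not part of pg_receivexlog.

/*
 * Sketch: round a WAL position's low word down to the start of its segment.
 * With 16 MB segments, 0xB374D848 becomes 0xB3000000, i.e. 2/B374D848
 * rounds down to 2/B3000000.
 */
#define XLOG_SEG_SIZE_SKETCH	(16 * 1024 * 1024)

static unsigned int
align_to_segment(unsigned int xrecoff)
{
	return xrecoff - xrecoff % XLOG_SEG_SIZE_SKETCH;
}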
Example no. 5
0
/*
 * Vacuum/analyze all connectable databases.
 *
 * In analyze-in-stages mode, we process all databases in one stage before
 * moving on to the next stage.  That ensures minimal stats are available
 * quickly everywhere before generating more detailed ones.
 */
static void
vacuum_all_databases(vacuumingOptions *vacopts,
					 bool analyze_in_stages,
					 const char *maintenance_db, const char *host,
					 const char *port, const char *username,
					 enum trivalue prompt_password,
					 int concurrentCons,
					 const char *progname, bool echo, bool quiet)
{
	PGconn	   *conn;
	PGresult   *result;
	PQExpBufferData connstr;
	int			stage;
	int			i;

	conn = connectMaintenanceDatabase(maintenance_db, host, port, username,
									  prompt_password, progname, echo);
	result = executeQuery(conn,
						  "SELECT datname FROM pg_database WHERE datallowconn ORDER BY 1;",
						  progname, echo);
	PQfinish(conn);

	initPQExpBuffer(&connstr);
	if (analyze_in_stages)
	{
		/*
		 * When analyzing all databases in stages, we analyze them all in the
		 * fastest stage first, so that initial statistics become available
		 * for all of them as soon as possible.
		 *
		 * This means we establish several times as many connections, but
		 * that's a secondary consideration.
		 */
		for (stage = 0; stage < ANALYZE_NUM_STAGES; stage++)
		{
			for (i = 0; i < PQntuples(result); i++)
			{
				resetPQExpBuffer(&connstr);
				appendPQExpBuffer(&connstr, "dbname=");
				appendConnStrVal(&connstr, PQgetvalue(result, i, 0));

				vacuum_one_database(connstr.data, vacopts,
									stage,
									NULL,
									host, port, username, prompt_password,
									concurrentCons,
									progname, echo, quiet);
			}
		}
	}
	else
	{
		for (i = 0; i < PQntuples(result); i++)
		{
			resetPQExpBuffer(&connstr);
			appendPQExpBuffer(&connstr, "dbname=");
			appendConnStrVal(&connstr, PQgetvalue(result, i, 0));

			vacuum_one_database(connstr.data, vacopts,
								ANALYZE_NO_STAGE,
								NULL,
								host, port, username, prompt_password,
								concurrentCons,
								progname, echo, quiet);
		}
	}
	termPQExpBuffer(&connstr);

	PQclear(result);
}
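
Each per-database connection string above is assembled as dbname=<value>, with appendConnStrVal() doing the quoting so that names containing spaces or quotes stay valid keyword/value syntax. The sketch below only approximates that rule and is not the real fe_utils implementation (append_connstr_val_sketch is a hypothetical name): it single-quotes values that are empty or contain spaces, single quotes or backslashes, escaping embedded quotes and backslashes with a backslash.

#include <string.h>
#include "pqexpbuffer.h"

/* Hedged approximation of appendConnStrVal()'s quoting behaviour. */
static void
append_connstr_val_sketch(PQExpBuffer buf, const char *value)
{
	const char *p;
	int			need_quotes = (value[0] == '\0' ||
							   strpbrk(value, " '\\") != NULL);

	if (!need_quotes)
	{
		appendPQExpBufferStr(buf, value);
		return;
	}

	appendPQExpBufferChar(buf, '\'');
	for (p = value; *p; p++)
	{
		if (*p == '\'' || *p == '\\')
			appendPQExpBufferChar(buf, '\\');
		appendPQExpBufferChar(buf, *p);
	}
	appendPQExpBufferChar(buf, '\'');
}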
Example no. 6
0
/*
 * get_loadable_libraries()
 *
 *	Fetch the names of all old libraries containing C-language functions.
 *	We will later check that they all exist in the new installation.
 */
void
get_loadable_libraries(void)
{
	PGresult  **ress;
	int			totaltups;
	int			dbnum;

	ress = (PGresult **) pg_malloc(old_cluster.dbarr.ndbs * sizeof(PGresult *));
	totaltups = 0;

	/* Fetch all library names, removing duplicates within each DB */
	for (dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++)
	{
		DbInfo	   *active_db = &old_cluster.dbarr.dbs[dbnum];
		PGconn	   *conn = connectToServer(&old_cluster, active_db->db_name);

		/*
		 *	Fetch all libraries referenced in this DB.  We can't exclude
		 *	the "pg_catalog" schema because, while such functions are not
		 *	explicitly dumped by pg_dump, they do reference implicit objects
		 *	that pg_dump does dump, e.g. CREATE LANGUAGE plperl.
		 */
		ress[dbnum] = executeQueryOrDie(conn,
										"SELECT DISTINCT probin "
										"FROM	pg_catalog.pg_proc "
										"WHERE	prolang = 13 /* C */ AND "
										"probin IS NOT NULL AND "
										"oid >= %u;",
										FirstNormalObjectId);
		totaltups += PQntuples(ress[dbnum]);

		PQfinish(conn);
	}

	totaltups++;	/* reserve for pg_upgrade_support */

	/* Allocate what's certainly enough space */
	os_info.libraries = (char **) pg_malloc(totaltups * sizeof(char *));

	/*
	 * Now remove duplicates across DBs.  This is pretty inefficient code, but
	 * there probably aren't enough entries to matter.
	 */
	totaltups = 0;
	os_info.libraries[totaltups++] = pg_strdup(PG_UPGRADE_SUPPORT);

	for (dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++)
	{
		PGresult   *res = ress[dbnum];
		int			ntups;
		int			rowno;

		ntups = PQntuples(res);
		for (rowno = 0; rowno < ntups; rowno++)
		{
			char	   *lib = PQgetvalue(res, rowno, 0);
			bool		dup = false;
			int			n;

			for (n = 0; n < totaltups; n++)
			{
				if (strcmp(lib, os_info.libraries[n]) == 0)
				{
					dup = true;
					break;
				}
			}
			if (!dup)
				os_info.libraries[totaltups++] = pg_strdup(lib);
		}

		PQclear(res);
	}

	os_info.num_libraries = totaltups;

	pg_free(ress);
}
Example no. 7
0
/*
 * get_rel_infos()
 *
 * gets the relinfos for all the user tables and indexes of the database
 * referred to by "dbinfo".
 *
 * Note: the resulting RelInfo array is assumed to be sorted by OID.
 * This allows later processing to match up old and new databases efficiently.
 */
static void
get_rel_infos(ClusterInfo *cluster, DbInfo *dbinfo)
{
	PGconn	   *conn = connectToServer(cluster,
									   dbinfo->db_name);
	PGresult   *res;
	RelInfo    *relinfos;
	int			ntups;
	int			relnum;
	int			num_rels = 0;
	char	   *nspname = NULL;
	char	   *relname = NULL;
	char	   *tablespace = NULL;
	int			i_spclocation,
				i_nspname,
				i_relname,
				i_reloid,
				i_indtable,
				i_toastheap,
				i_relfilenode,
				i_reltablespace;
	char		query[QUERY_ALLOC];
	char	   *last_namespace = NULL,
			   *last_tablespace = NULL;

	query[0] = '\0';			/* initialize query string to empty */

	/*
	 * Create a CTE that collects OIDs of regular user tables, including
	 * matviews and sequences, but excluding toast tables and indexes.  We
	 * assume that relations with OIDs >= FirstNormalObjectId belong to the
	 * user.  (That's probably redundant with the namespace-name exclusions,
	 * but let's be safe.)
	 *
	 * pg_largeobject contains user data that does not appear in pg_dump
	 * output, so we have to copy that system table.  It's easiest to do that
	 * by treating it as a user table.
	 */
	snprintf(query + strlen(query), sizeof(query) - strlen(query),
			 "WITH regular_heap (reloid, indtable, toastheap) AS ( "
			 "  SELECT c.oid, 0::oid, 0::oid "
			 "  FROM pg_catalog.pg_class c JOIN pg_catalog.pg_namespace n "
			 "         ON c.relnamespace = n.oid "
			 "  WHERE relkind IN (" CppAsString2(RELKIND_RELATION) ", "
			 CppAsString2(RELKIND_MATVIEW) ") AND "
	/* exclude possible orphaned temp tables */
			 "    ((n.nspname !~ '^pg_temp_' AND "
			 "      n.nspname !~ '^pg_toast_temp_' AND "
			 "      n.nspname NOT IN ('pg_catalog', 'information_schema', "
			 "                        'binary_upgrade', 'pg_toast') AND "
			 "      c.oid >= %u::pg_catalog.oid) OR "
			 "     (n.nspname = 'pg_catalog' AND "
			 "      relname IN ('pg_largeobject') ))), ",
			 FirstNormalObjectId);

	/*
	 * Add a CTE that collects OIDs of toast tables belonging to the tables
	 * selected by the regular_heap CTE.  (We have to do this separately
	 * because the namespace-name rules above don't work for toast tables.)
	 */
	snprintf(query + strlen(query), sizeof(query) - strlen(query),
			 "  toast_heap (reloid, indtable, toastheap) AS ( "
			 "  SELECT c.reltoastrelid, 0::oid, c.oid "
			 "  FROM regular_heap JOIN pg_catalog.pg_class c "
			 "      ON regular_heap.reloid = c.oid "
			 "  WHERE c.reltoastrelid != 0), ");

	/*
	 * Add a CTE that collects OIDs of all valid indexes on the previously
	 * selected tables.  We can ignore invalid indexes since pg_dump does.
	 * Testing indisready is necessary in 9.2, and harmless in earlier/later
	 * versions.
	 */
	snprintf(query + strlen(query), sizeof(query) - strlen(query),
			 "  all_index (reloid, indtable, toastheap) AS ( "
			 "  SELECT indexrelid, indrelid, 0::oid "
			 "  FROM pg_catalog.pg_index "
			 "  WHERE indisvalid AND indisready "
			 "    AND indrelid IN "
			 "        (SELECT reloid FROM regular_heap "
			 "         UNION ALL "
			 "         SELECT reloid FROM toast_heap)) ");

	/*
	 * And now we can write the query that retrieves the data we want for each
	 * heap and index relation.  Make sure result is sorted by OID.
	 */
	snprintf(query + strlen(query), sizeof(query) - strlen(query),
			 "SELECT all_rels.*, n.nspname, c.relname, "
			 "  c.relfilenode, c.reltablespace, %s "
			 "FROM (SELECT * FROM regular_heap "
			 "      UNION ALL "
			 "      SELECT * FROM toast_heap "
			 "      UNION ALL "
			 "      SELECT * FROM all_index) all_rels "
			 "  JOIN pg_catalog.pg_class c "
			 "      ON all_rels.reloid = c.oid "
			 "  JOIN pg_catalog.pg_namespace n "
			 "     ON c.relnamespace = n.oid "
			 "  LEFT OUTER JOIN pg_catalog.pg_tablespace t "
			 "     ON c.reltablespace = t.oid "
			 "ORDER BY 1;",
	/* 9.2 removed the pg_tablespace.spclocation column */
			 (GET_MAJOR_VERSION(cluster->major_version) >= 902) ?
			 "pg_catalog.pg_tablespace_location(t.oid) AS spclocation" :
			 "t.spclocation");

	res = executeQueryOrDie(conn, "%s", query);

	ntups = PQntuples(res);

	relinfos = (RelInfo *) pg_malloc(sizeof(RelInfo) * ntups);

	i_reloid = PQfnumber(res, "reloid");
	i_indtable = PQfnumber(res, "indtable");
	i_toastheap = PQfnumber(res, "toastheap");
	i_nspname = PQfnumber(res, "nspname");
	i_relname = PQfnumber(res, "relname");
	i_relfilenode = PQfnumber(res, "relfilenode");
	i_reltablespace = PQfnumber(res, "reltablespace");
	i_spclocation = PQfnumber(res, "spclocation");

	for (relnum = 0; relnum < ntups; relnum++)
	{
		RelInfo    *curr = &relinfos[num_rels++];

		curr->reloid = atooid(PQgetvalue(res, relnum, i_reloid));
		curr->indtable = atooid(PQgetvalue(res, relnum, i_indtable));
		curr->toastheap = atooid(PQgetvalue(res, relnum, i_toastheap));

		nspname = PQgetvalue(res, relnum, i_nspname);
		curr->nsp_alloc = false;

		/*
		 * Many of the namespace and tablespace strings are identical, so we
		 * try to reuse the allocated string pointers where possible to reduce
		 * memory consumption.
		 */
		/* Can we reuse the previous string allocation? */
		if (last_namespace && strcmp(nspname, last_namespace) == 0)
			curr->nspname = last_namespace;
		else
		{
			last_namespace = curr->nspname = pg_strdup(nspname);
			curr->nsp_alloc = true;
		}

		relname = PQgetvalue(res, relnum, i_relname);
		curr->relname = pg_strdup(relname);

		curr->relfilenode = atooid(PQgetvalue(res, relnum, i_relfilenode));
		curr->tblsp_alloc = false;

		/* Is the tablespace oid non-default? */
		if (atooid(PQgetvalue(res, relnum, i_reltablespace)) != 0)
		{
			/*
			 * The tablespace location might be "", meaning the cluster
			 * default location, i.e. pg_default or pg_global.
			 */
			tablespace = PQgetvalue(res, relnum, i_spclocation);

			/* Can we reuse the previous string allocation? */
			if (last_tablespace && strcmp(tablespace, last_tablespace) == 0)
				curr->tablespace = last_tablespace;
			else
			{
				last_tablespace = curr->tablespace = pg_strdup(tablespace);
				curr->tblsp_alloc = true;
			}
		}
		else
			/* A zero reltablespace oid indicates the database tablespace. */
			curr->tablespace = dbinfo->db_tablespace;
	}
	PQclear(res);

	PQfinish(conn);

	dbinfo->rel_arr.rels = relinfos;
	dbinfo->rel_arr.nrels = num_rels;
}
Example no. 8
0
int create_store_func(PGconn *__con,char *place_num,char *store_name,char *password,char *__sendBuf){
  char sql[BUFSIZE];
  char sql0[BUFSIZE];
  char sql1[BUFSIZE];
  char sql2[BUFSIZE];
  PGresult *res;  // PGresult object
  PGresult *res0;  // PGresult object
  int resultRows;
  int max;

  int max_row;
  int store_num;

  /* Build the SQL to add the new store */
  sprintf(sql, "INSERT INTO store VALUES(%s, %s, '%s', '%s')","nextval ('item_code_seq')", place_num, store_name, password);
  /* Execute the SQL command */
  res = PQexec(__con, sql);
  /* Check the result status of the SQL command */
  if( PQresultStatus(res) != PGRES_COMMAND_OK){
	printf("%s", PQresultErrorMessage(res));
	sprintf(__sendBuf, "%s %s%s.\n", "ER_STAT", "E_CODE", "\n");
	return -1;
  }


  sprintf(sql0, "select *  from store");
  /* Execute the SQL command */
  res0 = PQexec(__con, sql0);
  /* Check the result status of the SQL command */
  if( PQresultStatus(res0) != PGRES_TUPLES_OK){
	printf("%s", PQresultErrorMessage(res0));
	sprintf(__sendBuf, "%s %s%s.\n", "ER_STAT2", "E_CODE", "\n");
	return -1;
  }


  /*CREATE TABLE item_management*/
  max_row = PQntuples(res0);

  printf("t:%d\n", max_row); //test
  store_num   = atoi(PQgetvalue(res0, max_row-1, 0));


  /* Build the CREATE TABLE SQL for this store's item_management table */
  sprintf(sql0, "CREATE TABLE item_management_%d (\
                  purchase_code	  serial,\
                  item_code	      serial,\
                  available_period   bigint,\
                  inventory_num      integer,\
                  sale_unit_price    integer,\
                  profit	      integer,\
                  procurement_period bigint,\
                  order_system         bool,\
                  purchase_day       bigint,\
                  order_confirm      bool,\
                  order_interval 	  integer,\
                  safe_stock		  integer,\
                  PRIMARY KEY(purchase_code))"
		  , store_num);

  /* Execute the SQL command (clear the previous result first so it is not leaked) */
  PQclear(res0);
  res0 = PQexec(__con, sql0);

  sprintf(__sendBuf, "%s%s.\n", "テーブルを登録しました", ENTER);



  /* Build the CREATE TABLE SQL for this store's tax table */
  sprintf(sql1,  "CREATE TABLE tax_%d(\
   tax_rate        real,\
   mutual_tax_rate real)"
		  ,store_num);


  /* Execute the SQL command (clear the previous result first so it is not leaked) */
  PQclear(res0);
  res0 = PQexec(__con, sql1);

  sprintf(__sendBuf, "%s%s.\n", "テーブルを登録しました", ENTER);


  /* Build the CREATE TABLE SQL for this store's mod_info table */
  sprintf(sql2, "CREATE TABLE mod_info_%d (\
                  slip_num	             serial,\
                  before_tax_rate	     real,\
                  before_multiple_tax_rate  real,\
                  before_point_rate         real,\
                  PRIMARY KEY(slip_num))"
		  , store_num);

  /* Execute the SQL command (clear the previous result first so it is not leaked) */
  PQclear(res0);
  res0 = PQexec(__con, sql2);

  sprintf(__sendBuf, "%s%s.\n", "テーブルを登録しました", ENTER);



  /* Free the memory allocated to the PGresult objects */
  PQclear(res);
  PQclear(res0);
  return 0;
}//END
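
create_store_func() builds its INSERT with sprintf(), so place_num, store_name and password are pasted straight into the SQL text. A hedged alternative sketch using PQexecParams() bind parameters follows; insert_store_param is a hypothetical helper and assumes the same store table and item_code_seq sequence used above.

/* Sketch: the same INSERT written with bind parameters instead of sprintf(). */
static int
insert_store_param(PGconn *con, const char *place_num,
                   const char *store_name, const char *password)
{
  const char *params[3] = {place_num, store_name, password};
  PGresult *res;

  res = PQexecParams(con,
                     "INSERT INTO store "
                     "VALUES (nextval('item_code_seq'), $1, $2, $3)",
                     3, NULL, params, NULL, NULL, 0);
  if (PQresultStatus(res) != PGRES_COMMAND_OK)
  {
    printf("%s", PQresultErrorMessage(res));
    PQclear(res);
    return -1;
  }
  PQclear(res);
  return 0;
}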
Example no. 9
0
bool v_find_nodes(const DS::Vault::Node& nodeTemplate, std::vector<uint32_t>& nodes)
{
    if (nodeTemplate.isNull())
        return false;

    /* This should be plenty to store everything we need without a bunch
     * of dynamic reallocations
     */
    PostgresStrings<31> parms;
    char fieldbuf[1024];

    size_t parmcount = 0;
    char* fieldp = fieldbuf;

    #define SET_FIELD(name, value) \
        { \
            parms.set(parmcount++, value); \
            fieldp += sprintf(fieldp, "\"" #name "\"=$%Zu AND ", parmcount); \
        }
    #define SET_FIELD_I(name, value) \
        { \
            parms.set(parmcount++, value); \
            fieldp += sprintf(fieldp, "LOWER(\"" #name "\")=LOWER($%Zu) AND ", parmcount); \
        }
    if (nodeTemplate.has_CreateTime())
        SET_FIELD(CreateTime, nodeTemplate.m_CreateTime);
    if (nodeTemplate.has_ModifyTime())
        SET_FIELD(ModifyTime, nodeTemplate.m_ModifyTime);
    if (nodeTemplate.has_CreateAgeName())
        SET_FIELD(CreateAgeName, nodeTemplate.m_CreateAgeName);
    if (nodeTemplate.has_CreateAgeUuid())
        SET_FIELD(CreateAgeUuid, nodeTemplate.m_CreateAgeUuid.toString());
    if (nodeTemplate.has_CreatorUuid())
        SET_FIELD(CreatorUuid, nodeTemplate.m_CreatorUuid.toString());
    if (nodeTemplate.has_CreatorIdx())
        SET_FIELD(CreatorIdx, nodeTemplate.m_CreatorIdx);
    if (nodeTemplate.has_NodeType())
        SET_FIELD(NodeType, nodeTemplate.m_NodeType);
    if (nodeTemplate.has_Int32_1())
        SET_FIELD(Int32_1, nodeTemplate.m_Int32_1);
    if (nodeTemplate.has_Int32_2())
        SET_FIELD(Int32_2, nodeTemplate.m_Int32_2);
    if (nodeTemplate.has_Int32_3())
        SET_FIELD(Int32_3, nodeTemplate.m_Int32_3);
    if (nodeTemplate.has_Int32_4())
        SET_FIELD(Int32_4, nodeTemplate.m_Int32_4);
    if (nodeTemplate.has_Uint32_1())
        SET_FIELD(Uint32_1, nodeTemplate.m_Uint32_1);
    if (nodeTemplate.has_Uint32_2())
        SET_FIELD(Uint32_2, nodeTemplate.m_Uint32_2);
    if (nodeTemplate.has_Uint32_3())
        SET_FIELD(Uint32_3, nodeTemplate.m_Uint32_3);
    if (nodeTemplate.has_Uint32_4())
        SET_FIELD(Uint32_4, nodeTemplate.m_Uint32_4);
    if (nodeTemplate.has_Uuid_1())
        SET_FIELD(Uuid_1, nodeTemplate.m_Uuid_1.toString());
    if (nodeTemplate.has_Uuid_2())
        SET_FIELD(Uuid_2, nodeTemplate.m_Uuid_2.toString());
    if (nodeTemplate.has_Uuid_3())
        SET_FIELD(Uuid_3, nodeTemplate.m_Uuid_3.toString());
    if (nodeTemplate.has_Uuid_4())
        SET_FIELD(Uuid_4, nodeTemplate.m_Uuid_4.toString());
    if (nodeTemplate.has_String64_1())
        SET_FIELD(String64_1, nodeTemplate.m_String64_1);
    if (nodeTemplate.has_String64_2())
        SET_FIELD(String64_2, nodeTemplate.m_String64_2);
    if (nodeTemplate.has_String64_3())
        SET_FIELD(String64_3, nodeTemplate.m_String64_3);
    if (nodeTemplate.has_String64_4())
        SET_FIELD(String64_4, nodeTemplate.m_String64_4);
    if (nodeTemplate.has_String64_5())
        SET_FIELD(String64_5, nodeTemplate.m_String64_5);
    if (nodeTemplate.has_String64_6())
        SET_FIELD(String64_6, nodeTemplate.m_String64_6);
    if (nodeTemplate.has_IString64_1())
        SET_FIELD_I(IString64_1, nodeTemplate.m_IString64_1);
    if (nodeTemplate.has_IString64_2())
        SET_FIELD_I(IString64_2, nodeTemplate.m_IString64_2);
    if (nodeTemplate.has_Text_1())
        SET_FIELD(Text_1, nodeTemplate.m_Text_1);
    if (nodeTemplate.has_Text_2())
        SET_FIELD(Text_2, nodeTemplate.m_Text_2);
    if (nodeTemplate.has_Blob_1())
        SET_FIELD(Blob_1, DS::Base64Encode(nodeTemplate.m_Blob_1.buffer(), nodeTemplate.m_Blob_1.size()));
    if (nodeTemplate.has_Blob_2())
        SET_FIELD(Blob_2, DS::Base64Encode(nodeTemplate.m_Blob_2.buffer(), nodeTemplate.m_Blob_2.size()));
    #undef SET_FIELD
    #undef SET_FIELD_I

    DS_DASSERT(parmcount > 0);
    DS_DASSERT(fieldp - fieldbuf < 1024);
    *(fieldp - 5) = 0;  // Get rid of the last ' AND '
    DS::String queryStr = "SELECT idx FROM vault.\"Nodes\"\n    WHERE ";
    queryStr += fieldbuf;

    check_postgres();
    PGresult* result = PQexecParams(s_postgres, queryStr.c_str(),
                                    parmcount, 0, parms.m_values, 0, 0, 0);
    if (PQresultStatus(result) != PGRES_TUPLES_OK) {
        fprintf(stderr, "%s:%d:\n    Postgres SELECT error: %s\n",
                __FILE__, __LINE__, PQerrorMessage(s_postgres));
        PQclear(result);
        return false;
    }

    nodes.resize(PQntuples(result));
    for (size_t i=0; i<nodes.size(); ++i)
        nodes[i] = strtoul(PQgetvalue(result, i, 0), 0, 10);
    PQclear(result);
    return true;
}
Example no. 10
0
bool PgTblFmncQPrsList::loadRec(
			PGresult* res
			, FmncQPrsList** rec
		) {
	char* ptr;

	FmncQPrsList* _rec = NULL;
	bool retval = false;

	if (PQntuples(res) == 1) {
		_rec = new FmncQPrsList();

		int fnum[] = {
			PQfnumber(res, "qref"),
			PQfnumber(res, "jref"),
			PQfnumber(res, "jnum"),
			PQfnumber(res, "ref"),
			PQfnumber(res, "title"),
			PQfnumber(res, "firstname"),
			PQfnumber(res, "lastname"),
			PQfnumber(res, "grp"),
			PQfnumber(res, "own"),
			PQfnumber(res, "reffmncmorg"),
			PQfnumber(res, "reffmncmaddress"),
			PQfnumber(res, "ixvsex"),
			PQfnumber(res, "tel"),
			PQfnumber(res, "eml")
		};

		ptr = PQgetvalue(res, 0, fnum[0]); _rec->qref = atoll(ptr);
		ptr = PQgetvalue(res, 0, fnum[1]); _rec->jref = atoll(ptr);
		ptr = PQgetvalue(res, 0, fnum[2]); _rec->jnum = atol(ptr);
		ptr = PQgetvalue(res, 0, fnum[3]); _rec->ref = atoll(ptr);
		ptr = PQgetvalue(res, 0, fnum[4]); _rec->Title.assign(ptr, PQgetlength(res, 0, fnum[4]));
		ptr = PQgetvalue(res, 0, fnum[5]); _rec->Firstname.assign(ptr, PQgetlength(res, 0, fnum[5]));
		ptr = PQgetvalue(res, 0, fnum[6]); _rec->Lastname.assign(ptr, PQgetlength(res, 0, fnum[6]));
		ptr = PQgetvalue(res, 0, fnum[7]); _rec->grp = atoll(ptr);
		ptr = PQgetvalue(res, 0, fnum[8]); _rec->own = atoll(ptr);
		ptr = PQgetvalue(res, 0, fnum[9]); _rec->refFmncMOrg = atoll(ptr);
		ptr = PQgetvalue(res, 0, fnum[10]); _rec->refFmncMAddress = atoll(ptr);
		ptr = PQgetvalue(res, 0, fnum[11]); _rec->ixVSex = atol(ptr);
		ptr = PQgetvalue(res, 0, fnum[12]); _rec->Tel.assign(ptr, PQgetlength(res, 0, fnum[12]));
		ptr = PQgetvalue(res, 0, fnum[13]); _rec->Eml.assign(ptr, PQgetlength(res, 0, fnum[13]));

		retval = true;
	};

	PQclear(res);

	*rec = _rec;
	return retval;
};
Example no. 11
0
ubigint PgTblFmncQPrsList::loadRst(
			PGresult* res
			, const bool append
			, ListFmncQPrsList& rst
		) {
	ubigint numrow; ubigint numread = 0; char* ptr;
	FmncQPrsList* rec;

	if (!append) rst.clear();

	numrow = PQntuples(res);

	if (numrow > 0) {
		rst.nodes.reserve(rst.nodes.size() + numrow);

		int fnum[] = {
			PQfnumber(res, "qref"),
			PQfnumber(res, "jref"),
			PQfnumber(res, "jnum"),
			PQfnumber(res, "ref"),
			PQfnumber(res, "title"),
			PQfnumber(res, "firstname"),
			PQfnumber(res, "lastname"),
			PQfnumber(res, "grp"),
			PQfnumber(res, "own"),
			PQfnumber(res, "reffmncmorg"),
			PQfnumber(res, "reffmncmaddress"),
			PQfnumber(res, "ixvsex"),
			PQfnumber(res, "tel"),
			PQfnumber(res, "eml")
		};

		while (numread < numrow) {
			rec = new FmncQPrsList();

			ptr = PQgetvalue(res, numread, fnum[0]); rec->qref = atoll(ptr);
			ptr = PQgetvalue(res, numread, fnum[1]); rec->jref = atoll(ptr);
			ptr = PQgetvalue(res, numread, fnum[2]); rec->jnum = atol(ptr);
			ptr = PQgetvalue(res, numread, fnum[3]); rec->ref = atoll(ptr);
			ptr = PQgetvalue(res, numread, fnum[4]); rec->Title.assign(ptr, PQgetlength(res, numread, fnum[4]));
			ptr = PQgetvalue(res, numread, fnum[5]); rec->Firstname.assign(ptr, PQgetlength(res, numread, fnum[5]));
			ptr = PQgetvalue(res, numread, fnum[6]); rec->Lastname.assign(ptr, PQgetlength(res, numread, fnum[6]));
			ptr = PQgetvalue(res, numread, fnum[7]); rec->grp = atoll(ptr);
			ptr = PQgetvalue(res, numread, fnum[8]); rec->own = atoll(ptr);
			ptr = PQgetvalue(res, numread, fnum[9]); rec->refFmncMOrg = atoll(ptr);
			ptr = PQgetvalue(res, numread, fnum[10]); rec->refFmncMAddress = atoll(ptr);
			ptr = PQgetvalue(res, numread, fnum[11]); rec->ixVSex = atol(ptr);
			ptr = PQgetvalue(res, numread, fnum[12]); rec->Tel.assign(ptr, PQgetlength(res, numread, fnum[12]));
			ptr = PQgetvalue(res, numread, fnum[13]); rec->Eml.assign(ptr, PQgetlength(res, numread, fnum[13]));

			rst.nodes.push_back(rec);

			numread++;
		};
	};

	PQclear(res);

	return numread;
};
Example no. 12
0
/*
 * Output the pivoted resultset with the printTable* functions.  Return true
 * if successful, false otherwise.
 */
static bool
printCrosstab(const PGresult *results,
			  int num_columns, pivot_field *piv_columns, int field_for_columns,
			  int num_rows, pivot_field *piv_rows, int field_for_rows,
			  int field_for_data)
{
	printQueryOpt popt = pset.popt;
	printTableContent cont;
	int			i,
				rn;
	char		col_align;
	int		   *horiz_map;
	bool		retval = false;

	printTableInit(&cont, &popt.topt, popt.title, num_columns + 1, num_rows);

	/* Step 1: set target column names (horizontal header) */

	/* The name of the first column is kept unchanged by the pivoting */
	printTableAddHeader(&cont,
						PQfname(results, field_for_rows),
						false,
						column_type_alignment(PQftype(results,
													  field_for_rows)));

	/*
	 * To iterate over piv_columns[] by piv_columns[].rank, create a reverse
	 * map associating each piv_columns[].rank to its index in piv_columns.
	 * This avoids an O(N^2) loop later.
	 */
	horiz_map = (int *) pg_malloc(sizeof(int) * num_columns);
	for (i = 0; i < num_columns; i++)
		horiz_map[piv_columns[i].rank] = i;

	/*
	 * The display alignment depends on its PQftype().
	 */
	col_align = column_type_alignment(PQftype(results, field_for_data));

	for (i = 0; i < num_columns; i++)
	{
		char	   *colname;

		colname = piv_columns[horiz_map[i]].name ?
			piv_columns[horiz_map[i]].name :
			(popt.nullPrint ? popt.nullPrint : "");

		printTableAddHeader(&cont, colname, false, col_align);
	}
	pg_free(horiz_map);

	/* Step 2: set row names in the first output column (vertical header) */
	for (i = 0; i < num_rows; i++)
	{
		int			k = piv_rows[i].rank;

		cont.cells[k * (num_columns + 1)] = piv_rows[i].name ?
			piv_rows[i].name :
			(popt.nullPrint ? popt.nullPrint : "");
	}
	cont.cellsadded = num_rows * (num_columns + 1);

	/*
	 * Step 3: fill in the content cells.
	 */
	for (rn = 0; rn < PQntuples(results); rn++)
	{
		int			row_number;
		int			col_number;
		pivot_field *p;
		pivot_field elt;

		/* Find target row */
		if (!PQgetisnull(results, rn, field_for_rows))
			elt.name = PQgetvalue(results, rn, field_for_rows);
		else
			elt.name = NULL;
		p = (pivot_field *) bsearch(&elt,
									piv_rows,
									num_rows,
									sizeof(pivot_field),
									pivotFieldCompare);
		Assert(p != NULL);
		row_number = p->rank;

		/* Find target column */
		if (!PQgetisnull(results, rn, field_for_columns))
			elt.name = PQgetvalue(results, rn, field_for_columns);
		else
			elt.name = NULL;

		p = (pivot_field *) bsearch(&elt,
									piv_columns,
									num_columns,
									sizeof(pivot_field),
									pivotFieldCompare);
		Assert(p != NULL);
		col_number = p->rank;

		/* Place value into cell */
		if (col_number >= 0 && row_number >= 0)
		{
			int			idx;

			/* index into the cont.cells array */
			idx = 1 + col_number + row_number * (num_columns + 1);

			/*
			 * If the cell already contains a value, raise an error.
			 */
			if (cont.cells[idx] != NULL)
			{
				psql_error(_("data cell already contains a value: (row: \"%s\", column: \"%s\")\n"),
							 piv_rows[row_number].name ? piv_rows[row_number].name :
							 popt.nullPrint ? popt.nullPrint : "(null)",
							 piv_columns[col_number].name ? piv_columns[col_number].name :
							 popt.nullPrint ? popt.nullPrint : "(null)");
				goto error;
			}

			cont.cells[idx] = !PQgetisnull(results, rn, field_for_data) ?
				PQgetvalue(results, rn, field_for_data) :
				(popt.nullPrint ? popt.nullPrint : "");
		}
	}

	/*
	 * The non-initialized cells must be set to an empty string for the print
	 * functions
	 */
	for (i = 0; i < cont.cellsadded; i++)
	{
		if (cont.cells[i] == NULL)
			cont.cells[i] = "";
	}

	printTable(&cont, pset.queryFout, false, pset.logfile);
	retval = true;

error:
	printTableCleanup(&cont);

	return retval;
}
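
The horiz_map built in step 1 is just the inverse permutation of piv_columns[].rank. A small hedged illustration (horiz_map_example is not part of psql): if the ranks came out as {2, 0, 1}, the map becomes {1, 2, 0}, so horiz_map[r] is the index in piv_columns[] whose header gets printed at horizontal position r.

/* Sketch: inverting the rank array, as done above with pg_malloc'd storage. */
static void
horiz_map_example(void)
{
	int			rank[3] = {2, 0, 1};	/* stands in for piv_columns[i].rank */
	int			horiz_map[3];
	int			i;

	for (i = 0; i < 3; i++)
		horiz_map[rank[i]] = i;			/* yields {1, 2, 0} */

	(void) horiz_map;					/* silence "unused" warnings */
}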
Example no. 13
0
/*
 * Main entry point to this module.
 *
 * Process the data from *res according the display options in pset (global),
 * to generate the horizontal and vertical headers contents,
 * then call printCrosstab() for the actual output.
 */
bool
PrintResultsInCrosstab(const PGresult *res)
{
	char	   *opt_field_for_rows = pset.ctv_col_V;
	char	   *opt_field_for_columns = pset.ctv_col_H;
	char	   *opt_field_for_data = pset.ctv_col_D;
	int			rn;
	avl_tree	piv_columns;
	avl_tree	piv_rows;
	pivot_field *array_columns = NULL;
	pivot_field *array_rows = NULL;
	int			num_columns = 0;
	int			num_rows = 0;
	int		   *colsV = NULL,
			   *colsH = NULL,
			   *colsD = NULL;
	int			n;
	int			field_for_columns;
	int			sort_field_for_columns = -1;
	int			field_for_rows;
	int			field_for_data = -1;
	bool		retval = false;

	avlInit(&piv_rows);
	avlInit(&piv_columns);

	if (res == NULL)
	{
		psql_error(_("No result\n"));
		goto error_return;
	}

	if (PQresultStatus(res) != PGRES_TUPLES_OK)
	{
		psql_error(_("The query must return results to be shown in crosstab\n"));
		goto error_return;
	}

	if (opt_field_for_rows && !opt_field_for_columns)
	{
		psql_error(_("A second column must be specified for the horizontal header\n"));
		goto error_return;
	}

	if (PQnfields(res) <= 2)
	{
		psql_error(_("The query must return at least two columns to be shown in crosstab\n"));
		goto error_return;
	}

	/*
	 * Arguments processing for the vertical header (1st arg) displayed in the
	 * left-most column. Only a reference to a field is accepted (no sort
	 * column).
	 */

	if (opt_field_for_rows == NULL)
	{
		field_for_rows = 0;
	}
	else
	{
		n = parseColumnRefs(opt_field_for_rows, res, &colsV, 1, ':');
		if (n != 1)
			goto error_return;
		field_for_rows = colsV[0];
	}

	if (field_for_rows < 0)
		goto error_return;

	/*----------
	 * Arguments processing for the horizontal header (2nd arg)
	 * (pivoted column that gets displayed as the first row).
	 * Determine:
	 * - the field number for the horizontal header column
	 * - the field number of the associated sort column, if any
	 */

	if (opt_field_for_columns == NULL)
		field_for_columns = 1;
	else
	{
		n = parseColumnRefs(opt_field_for_columns, res, &colsH, 2, ':');
		if (n <= 0)
			goto error_return;
		if (n == 1)
			field_for_columns = colsH[0];
		else
		{
			field_for_columns = colsH[0];
			sort_field_for_columns = colsH[1];
		}

		if (field_for_columns < 0)
			goto error_return;
	}

	if (field_for_columns == field_for_rows)
	{
		psql_error(_("The same column cannot be used for both vertical and horizontal headers\n"));
		goto error_return;
	}

	/*
	 * Arguments processing for the data columns (3rd arg).  Determine the
	 * column to display in the grid.
	 */
	if (opt_field_for_data == NULL)
	{
		int		i;

		/*
		 * If the data column was not specified, we search for the one not
		 * used as either vertical or horizontal headers.  If the result has
		 * more than three columns, raise an error.
		 */
		if (PQnfields(res) > 3)
		{
			psql_error(_("Data column must be specified when the result set has more than three columns\n"));
			goto error_return;
		}

		for (i = 0; i < PQnfields(res); i++)
		{
			if (i != field_for_rows && i != field_for_columns)
			{
				field_for_data = i;
				break;
			}
		}
		Assert(field_for_data >= 0);
	}
	else
	{
		int		num_fields;

		/* If a field was given, find out what it is.  Only one is allowed. */
		num_fields = parseColumnRefs(opt_field_for_data, res, &colsD, 1, ',');
		if (num_fields < 1)
			goto error_return;
		field_for_data = colsD[0];
	}

	/*
	 * First part: accumulate the names that go into the vertical and
	 * horizontal headers, each into an AVL binary tree to build the set of
	 * DISTINCT values.
	 */

	for (rn = 0; rn < PQntuples(res); rn++)
	{
		char	   *val;
		char	   *val1;

		/* horizontal */
		val = PQgetisnull(res, rn, field_for_columns) ? NULL :
			PQgetvalue(res, rn, field_for_columns);
		val1 = NULL;

		if (sort_field_for_columns >= 0 &&
			!PQgetisnull(res, rn, sort_field_for_columns))
			val1 = PQgetvalue(res, rn, sort_field_for_columns);

		avlMergeValue(&piv_columns, val, val1);

		if (piv_columns.count > CROSSTABVIEW_MAX_COLUMNS)
		{
			psql_error(_("Maximum number of columns (%d) exceeded\n"),
					   CROSSTABVIEW_MAX_COLUMNS);
			goto error_return;
		}

		/* vertical */
		val = PQgetisnull(res, rn, field_for_rows) ? NULL :
			PQgetvalue(res, rn, field_for_rows);

		avlMergeValue(&piv_rows, val, NULL);
	}

	/*
	 * Second part: Generate sorted arrays from the AVL trees.
	 */

	num_columns = piv_columns.count;
	num_rows = piv_rows.count;

	array_columns = (pivot_field *)
		pg_malloc(sizeof(pivot_field) * num_columns);

	array_rows = (pivot_field *)
		pg_malloc(sizeof(pivot_field) * num_rows);

	avlCollectFields(&piv_columns, piv_columns.root, array_columns, 0);
	avlCollectFields(&piv_rows, piv_rows.root, array_rows, 0);

	/*
	 * Third part: optionally, process the ranking data for the horizontal
	 * header
	 */
	if (sort_field_for_columns >= 0)
		rankSort(num_columns, array_columns);

	/*
	 * Fourth part: print the crosstab'ed results.
	 */
	retval = printCrosstab(res,
						   num_columns, array_columns, field_for_columns,
						   num_rows, array_rows, field_for_rows,
						   field_for_data);

error_return:
	avlFree(&piv_columns, piv_columns.root);
	avlFree(&piv_rows, piv_rows.root);
	pg_free(array_columns);
	pg_free(array_rows);
	pg_free(colsV);
	pg_free(colsH);
	pg_free(colsD);

	return retval;
}
Example no. 14
0
static void
do_failover(void)
{
	PGresult *res1;
	PGresult *res2;
	char 	sqlquery[8192];

	int		total_nodes = 0;
	int		visible_nodes = 0;
	bool	find_best = false;

	int		i;
	int		r;

	int 	node;
	char	nodeConninfo[MAXLEN];

	unsigned int uxlogid;
	unsigned int uxrecoff;
	char last_wal_standby_applied[MAXLEN];

	PGconn	*nodeConn = NULL;

	/*
	 * we will get info for up to 50 nodes,
	 * which seems to be large enough for most scenarios
	 */
	nodeInfo nodes[50];
	nodeInfo best_candidate;

	/* first we get info about this node, and update shared memory */
	sprintf(sqlquery, "SELECT pg_last_xlog_replay_location()");
	res1 = PQexec(myLocalConn, sqlquery);
	if (PQresultStatus(res1) != PGRES_TUPLES_OK)
	{
		log_err(_("PQexec failed: %s.\nReport an invalid value to not be considered as new primary and exit.\n"), PQerrorMessage(myLocalConn));
		PQclear(res1);
		sprintf(last_wal_standby_applied, "'%X/%X'", 0, 0);
		update_shared_memory(last_wal_standby_applied);
		exit(ERR_DB_QUERY);
	}

	/* write last location in shared memory */
	update_shared_memory(PQgetvalue(res1, 0, 0));

	/*
	 * sleep for the monitor interval plus one second; that should be enough
	 * for the other repmgrd instances to update their own data
	 */
	sleep(SLEEP_MONITOR + 1);

	/* get a list of standby nodes, including myself */
	sprintf(sqlquery, "SELECT id, conninfo "
	        "  FROM %s.repl_nodes "
	        " WHERE id IN (SELECT standby_node FROM %s.repl_status) "
	        "   AND cluster = '%s' "
	        " ORDER BY priority ",
	        repmgr_schema, repmgr_schema, local_options.cluster_name);

	res1 = PQexec(myLocalConn, sqlquery);
	if (PQresultStatus(res1) != PGRES_TUPLES_OK)
	{
		log_err(_("Can't get nodes info: %s\n"), PQerrorMessage(myLocalConn));
		PQclear(res1);
		PQfinish(myLocalConn);
		exit(ERR_DB_QUERY);
	}

	/* ask for the locations */
	for (i = 0; i < PQntuples(res1); i++)
	{
		node = atoi(PQgetvalue(res1, i, 0));
		/* Initialize to false so that, if we can't reach this node, we know it later */
		nodes[i].is_ready = false;
		strncpy(nodeConninfo, PQgetvalue(res1, i, 1), MAXLEN);
		nodeConn = establishDBConnection(nodeConninfo, false);
		/* if we can't see the node just skip it */
		if (PQstatus(nodeConn) != CONNECTION_OK)
			continue;

		sqlquery_snprintf(sqlquery, "SELECT repmgr_get_last_standby_location()");
		res2 = PQexec(nodeConn, sqlquery);
		if (PQresultStatus(res2) != PGRES_TUPLES_OK)
		{
			log_info(_("Can't get node's last standby location: %s\n"), PQerrorMessage(nodeConn));
			log_info(_("Connection details: %s\n"), nodeConninfo);
			PQclear(res2);
			PQfinish(nodeConn);
			continue;
		}

		visible_nodes++;

		if (sscanf(PQgetvalue(res2, 0, 0), "%X/%X", &uxlogid, &uxrecoff) != 2)
			log_info(_("could not parse transaction log location \"%s\"\n"), PQgetvalue(res2, 0, 0));

		nodes[i].nodeId = node;
		nodes[i].xlog_location.xlogid = uxlogid;
		nodes[i].xlog_location.xrecoff = uxrecoff;
		nodes[i].is_ready = true;

		PQclear(res2);
		PQfinish(nodeConn);
	}
	PQclear(res1);
	/* Close the connection to this server */
	PQfinish(myLocalConn);

	/*
	 * total nodes that are registered, including the master, which is a node
	 * but was not counted above because it's not a standby
	 */
	total_nodes = i + 1;

	/*
	 * Am I in the group that should stay alive?
	 * If I see fewer than half of total_nodes then I should do nothing.
	 */
	if (visible_nodes < (total_nodes / 2.0))
	{
		log_err(_("Can't reach most of the nodes.\n"
		          "Let the other standby servers decide which one will be the primary.\n"
		          "Manual action will be needed to readd this node to the cluster.\n"));
		exit(ERR_FAILOVER_FAIL);
	}

	/*
	 * determine which one is the best candidate to promote to primary
	 */
	for (i = 0; i < total_nodes - 1; i++)
	{
		if (!nodes[i].is_ready)
			continue;
		else if (!find_best)
		{
			/* start with the first ready node, and then move on to the next one */
			best_candidate.nodeId                = nodes[i].nodeId;
			best_candidate.xlog_location.xlogid  = nodes[i].xlog_location.xlogid;
			best_candidate.xlog_location.xrecoff = nodes[i].xlog_location.xrecoff;
			best_candidate.is_ready              = nodes[i].is_ready;
			find_best = true;
		}

		/* we use the macros provided by xlogdefs.h to compare XLogPtr */
		/*
		 * Nodes are retrieved ordered by priority, so if the current best
		 * candidate's WAL location is lower than or equal to the next node's,
		 * assign the next node as the new best candidate.
		 */
		if (XLByteLE(best_candidate.xlog_location, nodes[i].xlog_location))
		{
			best_candidate.nodeId                = nodes[i].nodeId;
			best_candidate.xlog_location.xlogid  = nodes[i].xlog_location.xlogid;
			best_candidate.xlog_location.xrecoff = nodes[i].xlog_location.xrecoff;
			best_candidate.is_ready              = nodes[i].is_ready;
		}
	}

	/* once we know who is the best candidate, promote it */
	if (find_best && (best_candidate.nodeId == local_options.node))
	{
		if (verbose)
			log_info(_("%s: This node is the best candidate to be the new primary, promoting...\n"),
			         progname);
		log_debug(_("promote command is: \"%s\"\n"), local_options.promote_command);
		r = system(local_options.promote_command);
		if (r != 0)
		{
			log_err(_("%s: promote command failed. You could check and try it manually.\n"), progname);
			exit(ERR_BAD_CONFIG);
		}
	}
	else if (find_best)
	{
		if (verbose)
			log_info(_("%s: Node %d is the best candidate to be the new primary, we should follow it...\n"),
			         progname, best_candidate.nodeId);
		log_debug(_("follow command is: \"%s\"\n"), local_options.follow_command);
		/*
		 * The new primary needs some time to be promoted.
		 * The follow command should take care of that.
		 */
		r = system(local_options.follow_command);
		if (r != 0)
		{
			log_err(_("%s: follow command failed. You could check and try it manually.\n"), progname);
			exit(ERR_BAD_CONFIG);
		}
	}
	else
	{
		log_err(_("%s: Did not find candidates. You should check and try manually.\n"), progname);
		exit(ERR_FAILOVER_FAIL);
	}

	/* and reconnect to the local database */
	myLocalConn = establishDBConnection(local_options.conninfo, true);
}
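
Two details above may be easier to see with concrete values. The quorum test exits unless at least half of the registered nodes are visible: with total_nodes = 5, visible_nodes must be 3 or more, since 2 < 2.5. The candidate comparison goes through XLByteLE() from xlogdefs.h; before 9.3 an XLogRecPtr was an {xlogid, xrecoff} pair compared lexicographically, and a hedged sketch of such a macro (the exact upstream definition may differ) is:

/* Sketch: "a <= b" for split 32+32 bit WAL pointers, compared lexicographically. */
#define XLByteLE_sketch(a, b) \
	((a).xlogid < (b).xlogid || \
	 ((a).xlogid == (b).xlogid && (a).xrecoff <= (b).xrecoff))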
Example no. 15
0
/*
 * Receive a tar format stream from the connection to the server, and unpack
 * the contents of it into a directory. Only files, directories and
 * symlinks are supported, no other kinds of special files.
 *
 * If the data is for the main data directory, it will be restored in the
 * specified directory. If it's for another tablespace, it will be restored
 * in the original directory, since relocation of tablespaces is not
 * supported.
 */
static void
ReceiveAndUnpackTarFile(PGconn *conn, PGresult *res, int rownum)
{
	char		current_path[MAXPGPATH];
	char		filename[MAXPGPATH];
	int			current_len_left;
	int			current_padding = 0;
	char	   *copybuf = NULL;
	FILE	   *file = NULL;

	if (PQgetisnull(res, rownum, 0))
		strcpy(current_path, basedir);
	else
		strcpy(current_path, PQgetvalue(res, rownum, 1));

	/*
	 * Get the COPY data
	 */
	res = PQgetResult(conn);
	if (PQresultStatus(res) != PGRES_COPY_OUT)
	{
		fprintf(stderr, _("%s: could not get COPY data stream: %s"),
				progname, PQerrorMessage(conn));
		disconnect_and_exit(1);
	}

	while (1)
	{
		int			r;

		if (copybuf != NULL)
		{
			PQfreemem(copybuf);
			copybuf = NULL;
		}

		r = PQgetCopyData(conn, &copybuf, 0);

		if (r == -1)
		{
			/*
			 * End of chunk
			 */
			if (file)
				fclose(file);

			break;
		}
		else if (r == -2)
		{
			fprintf(stderr, _("%s: could not read COPY data: %s"),
					progname, PQerrorMessage(conn));
			disconnect_and_exit(1);
		}

		if (file == NULL)
		{
			int			filemode;

			/*
			 * No current file, so this must be the header for a new file
			 */
			if (r != 512)
			{
				fprintf(stderr, _("%s: invalid tar block header size: %d\n"),
						progname, r);
				disconnect_and_exit(1);
			}
			totaldone += 512;

			if (sscanf(copybuf + 124, "%11o", &current_len_left) != 1)
			{
				fprintf(stderr, _("%s: could not parse file size\n"),
						progname);
				disconnect_and_exit(1);
			}

			/* Set permissions on the file */
			if (sscanf(&copybuf[100], "%07o ", &filemode) != 1)
			{
				fprintf(stderr, _("%s: could not parse file mode\n"),
						progname);
				disconnect_and_exit(1);
			}

			/*
			 * All files are padded up to a multiple of 512 bytes
			 */
			current_padding =
				((current_len_left + 511) & ~511) - current_len_left;

			/*
			 * The first part of the header is the zero-terminated filename
			 */
			snprintf(filename, sizeof(filename), "%s/%s", current_path,
					 copybuf);
			if (filename[strlen(filename) - 1] == '/')
			{
				/*
				 * A name ending in a slash means a directory or a symlink to a directory
				 */
				if (copybuf[156] == '5')
				{
					/*
					 * Directory
					 */
					filename[strlen(filename) - 1] = '\0';		/* Remove trailing slash */
					if (mkdir(filename, S_IRWXU) != 0)
					{
						/*
						 * When streaming WAL, pg_xlog will have been created
						 * by the wal receiver process, so just ignore failure
						 * on that.
						 */
						if (!streamwal || strcmp(filename + strlen(filename) - 8, "/pg_xlog") != 0)
						{
							fprintf(stderr,
							_("%s: could not create directory \"%s\": %s\n"),
									progname, filename, strerror(errno));
							disconnect_and_exit(1);
						}
					}
#ifndef WIN32
					if (chmod(filename, (mode_t) filemode))
						fprintf(stderr,
								_("%s: could not set permissions on directory \"%s\": %s\n"),
								progname, filename, strerror(errno));
#endif
				}
				else if (copybuf[156] == '2')
				{
					/*
					 * Symbolic link
					 */
					filename[strlen(filename) - 1] = '\0';		/* Remove trailing slash */
					if (symlink(&copybuf[157], filename) != 0)
					{
						fprintf(stderr,
								_("%s: could not create symbolic link from \"%s\" to \"%s\": %s\n"),
						 progname, filename, &copybuf[157], strerror(errno));
						disconnect_and_exit(1);
					}
				}
				else
				{
					fprintf(stderr,
							_("%s: unrecognized link indicator \"%c\"\n"),
							progname, copybuf[156]);
					disconnect_and_exit(1);
				}
				continue;		/* directory or link handled */
			}

			/*
			 * regular file
			 */
			file = fopen(filename, "wb");
			if (!file)
			{
				fprintf(stderr, _("%s: could not create file \"%s\": %s\n"),
						progname, filename, strerror(errno));
				disconnect_and_exit(1);
			}

#ifndef WIN32
			if (chmod(filename, (mode_t) filemode))
				fprintf(stderr, _("%s: could not set permissions on file \"%s\": %s\n"),
						progname, filename, strerror(errno));
#endif

			if (current_len_left == 0)
			{
				/*
				 * Done with this file, next one will be a new tar header
				 */
				fclose(file);
				file = NULL;
				continue;
			}
		}						/* new file */
		else
		{
			/*
			 * Continuing blocks in existing file
			 */
			if (current_len_left == 0 && r == current_padding)
			{
				/*
				 * Received the padding block for this file, ignore it and
				 * close the file, then move on to the next tar header.
				 */
				fclose(file);
				file = NULL;
				totaldone += r;
				continue;
			}

			if (fwrite(copybuf, r, 1, file) != 1)
			{
				fprintf(stderr, _("%s: could not write to file \"%s\": %s\n"),
						progname, filename, strerror(errno));
				disconnect_and_exit(1);
			}
			totaldone += r;
			if (showprogress)
				progress_report(rownum, filename);

			current_len_left -= r;
			if (current_len_left == 0 && current_padding == 0)
			{
				/*
				 * Received the last block, and there is no padding to be
				 * expected. Close the file and move on to the next tar
				 * header.
				 */
				fclose(file);
				file = NULL;
				continue;
			}
		}						/* continuing data in existing file */
	}							/* loop over all data blocks */

	if (file != NULL)
	{
		fprintf(stderr,
				_("%s: COPY stream ended before last file was finished\n"),
				progname);
		disconnect_and_exit(1);
	}

	if (copybuf != NULL)
		PQfreemem(copybuf);
}
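
The loop above depends on fixed offsets inside each 512-byte ustar header: the file name starts at offset 0, the mode field at 100, the octal size at 124, the type flag at 156 and the link target at 157, with every file padded to a 512-byte boundary. A minimal standalone sketch of that header parsing, using synthetic data rather than anything from the example above:

#include <stdio.h>
#include <string.h>

/* Parse the header fields the loop above relies on.  'hdr' is assumed to
 * hold one complete 512-byte ustar header block. */
static int parse_tar_header(const char *hdr, unsigned int *mode,
                            unsigned int *size, char *typeflag)
{
    if (sscanf(hdr + 100, "%07o", mode) != 1)    /* file mode, octal */
        return -1;
    if (sscanf(hdr + 124, "%11o", size) != 1)    /* file size, octal */
        return -1;
    *typeflag = hdr[156];    /* '5' directory, '2' symlink, '0' regular file */
    return 0;
}

int main(void)
{
    char hdr[512];
    unsigned int mode, size;
    char type;

    memset(hdr, 0, sizeof(hdr));
    strcpy(hdr, "base/12345/16384");         /* file name at offset 0 */
    memcpy(hdr + 100, "0000644", 8);         /* mode field */
    memcpy(hdr + 124, "00000001750", 12);    /* size field: octal 1750 = 1000 bytes */
    hdr[156] = '0';                          /* regular file */

    if (parse_tar_header(hdr, &mode, &size, &type) == 0)
        printf("mode=%o size=%u type=%c padding=%u\n",
               mode, size, type, ((size + 511) & ~511u) - size);
    return 0;
}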
Example no. 16
bool dm_vault_init()
{
    PostgresStrings<1> sparm;
    sparm.set(0, DS::Vault::e_NodeSystem);
    PGresult* result = PQexecParams(s_postgres,
            "SELECT \"idx\" FROM vault.\"Nodes\""
            "    WHERE \"NodeType\"=$1",
            1, 0, sparm.m_values, 0, 0, 0);
    if (PQresultStatus(result) != PGRES_TUPLES_OK) {
        fprintf(stderr, "%s:%d:\n    Postgres SELECT error: %s\n",
                __FILE__, __LINE__, PQerrorMessage(s_postgres));
        PQclear(result);
        return false;
    }

    int count = PQntuples(result);
    if (count == 0) {
        PQclear(result);

        fprintf(stderr, "[Vault] Initializing empty DirtSand vault\n");

        // Create system and global inbox nodes
        DS::Vault::Node node;
        node.set_NodeType(DS::Vault::e_NodeSystem);
        s_systemNode = v_create_node(node);
        if (s_systemNode == 0)
            return false;

        node.set_NodeType(DS::Vault::e_NodeFolder);
        node.set_Int32_1(DS::Vault::e_GlobalInboxFolder);
        uint32_t globalInbox = v_create_node(node);
        if (globalInbox == 0)
            return false;

        if (!v_ref_node(s_systemNode, globalInbox, 0))
            return false;

        AuthServer_AgeInfo age;
        age.m_ageId = gen_uuid();
        age.m_filename = "city";
        age.m_instName = "Ae'gura";
        if (v_create_age(age, e_AgePublic).first == 0)
            return false;

        age.m_ageId = gen_uuid();
        age.m_filename = "Neighborhood02";
        age.m_instName = "Kirel";
        if (v_create_age(age, e_AgePublic).first == 0)
            return false;

        age.m_ageId = gen_uuid();
        age.m_filename = "Kveer";
        age.m_instName = "K'veer";
        if (v_create_age(age, e_AgePublic).first == 0)
            return false;

        age.m_ageId = gen_uuid();
        age.m_filename = "GreatTreePub";
        age.m_instName = "The Watcher's Pub";
        if (v_create_age(age, e_AgePublic).first == 0)
            return false;

        age.m_ageId = gen_uuid();
        age.m_filename = "GuildPub-Cartographers";
        age.m_instName = "The Cartographers' Guild Pub";
        if (v_create_age(age, e_AgePublic).first == 0)
            return false;

        age.m_ageId = gen_uuid();
        age.m_filename = "GuildPub-Greeters";
        age.m_instName = "The Greeters' Guild Pub";
        if (v_create_age(age, e_AgePublic).first == 0)
            return false;

        age.m_ageId = gen_uuid();
        age.m_filename = "GuildPub-Maintainers";
        age.m_instName = "The Maintainers' Guild Pub";
        if (v_create_age(age, e_AgePublic).first == 0)
            return false;

        age.m_ageId = gen_uuid();
        age.m_filename = "GuildPub-Messengers";
        age.m_instName = "The Messengers' Guild Pub";
        if (v_create_age(age, e_AgePublic).first == 0)
            return false;

        age.m_ageId = gen_uuid();
        age.m_filename = "GuildPub-Writers";
        age.m_instName = "The Writers' Guild Pub";
        if (v_create_age(age, e_AgePublic).first == 0)
            return false;
    } else {
        DS_DASSERT(count == 1);
        s_systemNode = strtoul(PQgetvalue(result, 0, 0), 0, 10);
        PQclear(result);
    }

    return true;
}
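
dm_vault_init() passes its query parameter through the project's PostgresStrings helper; with plain libpq the same parameterized SELECT takes an ordinary array of C strings. A minimal sketch, in which the connection string and the numeric value of e_NodeSystem (assumed to be 24 here) are placeholders:

#include <stdio.h>
#include <libpq-fe.h>

int main(void)
{
    PGconn *conn = PQconnectdb("dbname=dirtsand");   /* placeholder connection string */
    if (PQstatus(conn) != CONNECTION_OK)
    {
        fprintf(stderr, "connection failed: %s", PQerrorMessage(conn));
        PQfinish(conn);
        return 1;
    }

    const char *params[1] = { "24" };    /* assumed numeric value of e_NodeSystem */
    PGresult *res = PQexecParams(conn,
            "SELECT \"idx\" FROM vault.\"Nodes\" WHERE \"NodeType\"=$1",
            1,                  /* one parameter */
            NULL,               /* let the server infer the parameter type */
            params,
            NULL, NULL,         /* text parameters need no lengths or formats */
            0);                 /* text results */
    if (PQresultStatus(res) != PGRES_TUPLES_OK)
        fprintf(stderr, "SELECT failed: %s", PQerrorMessage(conn));
    else if (PQntuples(res) > 0)
        printf("system node idx = %s\n", PQgetvalue(res, 0, 0));

    PQclear(res);
    PQfinish(conn);
    return 0;
}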
Example no. 17
static void
BaseBackup(void)
{
	PGresult   *res;
	char	   *sysidentifier;
	uint32		timeline;
	char		current_path[MAXPGPATH];
	char		escaped_label[MAXPGPATH];
	int			i;
	char		xlogstart[64];
	char		xlogend[64];

	/*
	 * Connect in replication mode to the server
	 */
	conn = GetConnection();
	if (!conn)
		/* Error message already written in GetConnection() */
		exit(1);

	/*
	 * Run IDENTIFY_SYSTEM so we can get the timeline
	 */
	res = PQexec(conn, "IDENTIFY_SYSTEM");
	if (PQresultStatus(res) != PGRES_TUPLES_OK)
	{
		fprintf(stderr, _("%s: could not send replication command \"%s\": %s"),
				progname, "IDENTIFY_SYSTEM", PQerrorMessage(conn));
		disconnect_and_exit(1);
	}
	if (PQntuples(res) != 1 || PQnfields(res) != 3)
	{
		fprintf(stderr,
				_("%s: could not identify system: got %d rows and %d fields, expected %d rows and %d fields\n"),
				progname, PQntuples(res), PQnfields(res), 1, 3);
		disconnect_and_exit(1);
	}
	sysidentifier = pg_strdup(PQgetvalue(res, 0, 0));
	timeline = atoi(PQgetvalue(res, 0, 1));
	PQclear(res);

	/*
	 * Start the actual backup
	 */
	PQescapeStringConn(conn, escaped_label, label, sizeof(escaped_label), &i);
	snprintf(current_path, sizeof(current_path),
			 "BASE_BACKUP LABEL '%s' %s %s %s %s",
			 escaped_label,
			 showprogress ? "PROGRESS" : "",
			 includewal && !streamwal ? "WAL" : "",
			 fastcheckpoint ? "FAST" : "",
			 includewal ? "NOWAIT" : "");

	if (PQsendQuery(conn, current_path) == 0)
	{
		fprintf(stderr, _("%s: could not send replication command \"%s\": %s"),
				progname, "BASE_BACKUP", PQerrorMessage(conn));
		disconnect_and_exit(1);
	}

	/*
	 * Get the starting xlog position
	 */
	res = PQgetResult(conn);
	if (PQresultStatus(res) != PGRES_TUPLES_OK)
	{
		fprintf(stderr, _("%s: could not initiate base backup: %s"),
				progname, PQerrorMessage(conn));
		disconnect_and_exit(1);
	}
	if (PQntuples(res) != 1)
	{
		fprintf(stderr, _("%s: no start point returned from server\n"),
				progname);
		disconnect_and_exit(1);
	}
	strcpy(xlogstart, PQgetvalue(res, 0, 0));
	if (verbose && includewal)
		fprintf(stderr, "transaction log start point: %s\n", xlogstart);
	PQclear(res);
	MemSet(xlogend, 0, sizeof(xlogend));

	/*
	 * Get the header
	 */
	res = PQgetResult(conn);
	if (PQresultStatus(res) != PGRES_TUPLES_OK)
	{
		fprintf(stderr, _("%s: could not get backup header: %s"),
				progname, PQerrorMessage(conn));
		disconnect_and_exit(1);
	}
	if (PQntuples(res) < 1)
	{
		fprintf(stderr, _("%s: no data returned from server\n"), progname);
		disconnect_and_exit(1);
	}

	/*
	 * Sum up the total size, for progress reporting
	 */
	totalsize = totaldone = 0;
	tablespacecount = PQntuples(res);
	for (i = 0; i < PQntuples(res); i++)
	{
		if (showprogress)
			totalsize += atol(PQgetvalue(res, i, 2));

		/*
		 * Verify tablespace directories are empty. Don't bother with the
		 * first one since it can be relocated, and it will be checked before
		 * we do anything anyway.
		 */
		if (format == 'p' && !PQgetisnull(res, i, 1))
			verify_dir_is_empty_or_create(PQgetvalue(res, i, 1));
	}

	/*
	 * When writing to stdout, require a single tablespace
	 */
	if (format == 't' && strcmp(basedir, "-") == 0 && PQntuples(res) > 1)
	{
		fprintf(stderr,
				_("%s: can only write single tablespace to stdout, database has %d\n"),
				progname, PQntuples(res));
		disconnect_and_exit(1);
	}

	/*
	 * If we're streaming WAL, start the streaming session before we start
	 * receiving the actual data chunks.
	 */
	if (streamwal)
	{
		if (verbose)
			fprintf(stderr, _("%s: starting background WAL receiver\n"),
					progname);
		StartLogStreamer(xlogstart, timeline, sysidentifier);
	}

	/*
	 * Start receiving chunks
	 */
	for (i = 0; i < PQntuples(res); i++)
	{
		if (format == 't')
			ReceiveTarFile(conn, res, i);
		else
			ReceiveAndUnpackTarFile(conn, res, i);
	}							/* Loop over all tablespaces */

	if (showprogress)
	{
		progress_report(PQntuples(res), NULL);
		fprintf(stderr, "\n");	/* Need to move to next line */
	}
	PQclear(res);

	/*
	 * Get the stop position
	 */
	res = PQgetResult(conn);
	if (PQresultStatus(res) != PGRES_TUPLES_OK)
	{
		fprintf(stderr,
		 _("%s: could not get transaction log end position from server: %s"),
				progname, PQerrorMessage(conn));
		disconnect_and_exit(1);
	}
	if (PQntuples(res) != 1)
	{
		fprintf(stderr,
			 _("%s: no transaction log end position returned from server\n"),
				progname);
		disconnect_and_exit(1);
	}
	strcpy(xlogend, PQgetvalue(res, 0, 0));
	if (verbose && includewal)
		fprintf(stderr, "transaction log end point: %s\n", xlogend);
	PQclear(res);

	res = PQgetResult(conn);
	if (PQresultStatus(res) != PGRES_COMMAND_OK)
	{
		fprintf(stderr, _("%s: final receive failed: %s"),
				progname, PQerrorMessage(conn));
		disconnect_and_exit(1);
	}

	if (bgchild > 0)
	{
#ifndef WIN32
		int			status;
		int			r;
#else
		DWORD		status;
		uint32		hi,
					lo;
#endif

		if (verbose)
			fprintf(stderr,
					_("%s: waiting for background process to finish streaming...\n"), progname);

#ifndef WIN32
		if (write(bgpipe[1], xlogend, strlen(xlogend)) != strlen(xlogend))
		{
			fprintf(stderr,
					_("%s: could not send command to background pipe: %s\n"),
					progname, strerror(errno));
			disconnect_and_exit(1);
		}

		/* Just wait for the background process to exit */
		r = waitpid(bgchild, &status, 0);
		if (r == -1)
		{
			fprintf(stderr, _("%s: could not wait for child process: %s\n"),
					progname, strerror(errno));
			disconnect_and_exit(1);
		}
		if (r != bgchild)
		{
			fprintf(stderr, _("%s: child %d died, expected %d\n"),
					progname, r, (int) bgchild);
			disconnect_and_exit(1);
		}
		if (!WIFEXITED(status))
		{
			fprintf(stderr, _("%s: child process did not exit normally\n"),
					progname);
			disconnect_and_exit(1);
		}
		if (WEXITSTATUS(status) != 0)
		{
			fprintf(stderr, _("%s: child process exited with error %d\n"),
					progname, WEXITSTATUS(status));
			disconnect_and_exit(1);
		}
		/* Exited normally, we're happy! */
#else							/* WIN32 */

		/*
		 * On Windows, since we are in the same process, we can just store the
		 * value directly in the variable, and then set the flag that says
		 * it's there.
		 */
		if (sscanf(xlogend, "%X/%X", &hi, &lo) != 2)
		{
			fprintf(stderr,
				  _("%s: could not parse transaction log location \"%s\"\n"),
					progname, xlogend);
			disconnect_and_exit(1);
		}
		xlogendptr = ((uint64) hi) << 32 | lo;
		InterlockedIncrement(&has_xlogendptr);

		/* First wait for the thread to exit */
		if (WaitForSingleObjectEx((HANDLE) bgchild, INFINITE, FALSE) !=
			WAIT_OBJECT_0)
		{
			_dosmaperr(GetLastError());
			fprintf(stderr, _("%s: could not wait for child thread: %s\n"),
					progname, strerror(errno));
			disconnect_and_exit(1);
		}
		if (GetExitCodeThread((HANDLE) bgchild, &status) == 0)
		{
			_dosmaperr(GetLastError());
			fprintf(stderr, _("%s: could not get child thread exit status: %s\n"),
					progname, strerror(errno));
			disconnect_and_exit(1);
		}
		if (status != 0)
		{
			fprintf(stderr, _("%s: child thread exited with error %u\n"),
					progname, (unsigned int) status);
			disconnect_and_exit(1);
		}
		/* Exited normally, we're happy */
#endif
	}

	/*
	 * End of copy data. Final result is already checked inside the loop.
	 */
	PQclear(res);
	PQfinish(conn);

	if (verbose)
		fprintf(stderr, "%s: base backup completed\n", progname);
}
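
BASE_BACKUP is a single replication command that returns several result sets in sequence (start position, tablespace header, one COPY stream per tablespace, end position), which is why BaseBackup() pairs one PQsendQuery with repeated PQgetResult calls instead of a single PQexec. A stripped-down sketch of that pattern, using an ordinary multi-statement query so it can run against any server:

#include <stdio.h>
#include <libpq-fe.h>

int main(void)
{
    PGconn *conn = PQconnectdb("");      /* placeholder: rely on PG* environment vars */
    if (PQstatus(conn) != CONNECTION_OK)
    {
        fprintf(stderr, "connection failed: %s", PQerrorMessage(conn));
        PQfinish(conn);
        return 1;
    }

    /* One send, several results: PQexec would only hand back the last one. */
    if (PQsendQuery(conn, "SELECT 1; SELECT 2, 3;") == 0)
    {
        fprintf(stderr, "send failed: %s", PQerrorMessage(conn));
        PQfinish(conn);
        return 1;
    }

    PGresult *res;
    while ((res = PQgetResult(conn)) != NULL)    /* NULL marks the end of results */
    {
        if (PQresultStatus(res) == PGRES_TUPLES_OK)
            printf("result set: %d row(s), %d column(s)\n",
                   PQntuples(res), PQnfields(res));
        else
            fprintf(stderr, "query failed: %s", PQerrorMessage(conn));
        PQclear(res);
    }

    PQfinish(conn);
    return 0;
}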
Example no. 18
std::pair<uint32_t, uint32_t>
v_create_age(const AuthServer_AgeInfo& age, uint32_t flags)
{
    int seqNumber = age.m_seqNumber;
    if (seqNumber < 0) {
        check_postgres();

        PGresult* result = PQexec(s_postgres, "SELECT nextval('game.\"AgeSeqNumber\"'::regclass)");
        if (PQresultStatus(result) != PGRES_TUPLES_OK) {
            fprintf(stderr, "%s:%d:\n    Postgres SELECT error: %s\n",
                    __FILE__, __LINE__, PQerrorMessage(s_postgres));
            PQclear(result);
            return std::make_pair(0, 0);
        }
        DS_DASSERT(PQntuples(result) == 1);
        seqNumber = strtol(PQgetvalue(result, 0, 0), 0, 10);
        PQclear(result);
    }

    DS::Vault::Node node;
    node.set_NodeType(DS::Vault::e_NodeAge);
    node.set_CreatorUuid(age.m_ageId);
    node.set_Uuid_1(age.m_ageId);
    if (!age.m_parentId.isNull())
        node.set_Uuid_2(age.m_parentId);
    node.set_String64_1(age.m_filename);
    uint32_t ageNode = v_create_node(node);
    if (ageNode == 0)
        return std::make_pair(0, 0);

    // TODO: Global SDL node

    node.clear();
    node.set_NodeType(DS::Vault::e_NodeFolder);
    node.set_CreatorUuid(age.m_ageId);
    node.set_CreatorIdx(ageNode);
    node.set_Int32_1(DS::Vault::e_ChronicleFolder);
    uint32_t chronFolder = v_create_node(node);
    if (chronFolder == 0)
        return std::make_pair(0, 0);

    node.clear();
    node.set_NodeType(DS::Vault::e_NodePlayerInfoList);
    node.set_CreatorUuid(age.m_ageId);
    node.set_CreatorIdx(ageNode);
    node.set_Int32_1(DS::Vault::e_PeopleIKnowAboutFolder);
    uint32_t knownFolder = v_create_node(node);
    if (knownFolder == 0)
        return std::make_pair(0, 0);

    node.clear();
    node.set_NodeType(DS::Vault::e_NodeAgeInfoList);
    node.set_CreatorUuid(age.m_ageId);
    node.set_CreatorIdx(ageNode);
    node.set_Int32_1(DS::Vault::e_SubAgesFolder);
    uint32_t subAgesFolder = v_create_node(node);
    if (subAgesFolder == 0)
        return std::make_pair(0, 0);

    node.clear();
    node.set_NodeType(DS::Vault::e_NodeAgeInfo);
    node.set_CreatorUuid(age.m_ageId);
    node.set_CreatorIdx(ageNode);
    node.set_Int32_1(seqNumber);
    node.set_Int32_2((flags & e_AgePublic) != 0 ? 1 : 0);
    node.set_Int32_3(age.m_language);
    node.set_Uint32_1(ageNode);
    node.set_Uint32_2(0);   // Czar ID
    node.set_Uint32_3(0);   // Flags
    node.set_Uuid_1(age.m_ageId);
    if (!age.m_parentId.isNull())
        node.set_Uuid_2(age.m_parentId);
    node.set_String64_2(age.m_filename);
    if (!age.m_instName.isNull())
        node.set_String64_3(age.m_instName);
    if (!age.m_userName.isEmpty())
        node.set_String64_4(age.m_userName);
    if (!age.m_description.isEmpty())
        node.set_Text_1(age.m_description);
    uint32_t ageInfoNode = v_create_node(node);
    if (ageInfoNode == 0)
        return std::make_pair(0, 0);

    node.clear();
    node.set_NodeType(DS::Vault::e_NodeFolder);
    node.set_CreatorUuid(age.m_ageId);
    node.set_CreatorIdx(ageNode);
    node.set_Int32_1(DS::Vault::e_AgeDevicesFolder);
    uint32_t devsFolder = v_create_node(node);
    if (devsFolder == 0)
        return std::make_pair(0, 0);

    node.clear();
    node.set_NodeType(DS::Vault::e_NodePlayerInfoList);
    node.set_CreatorUuid(age.m_ageId);
    node.set_CreatorIdx(ageNode);
    node.set_Int32_1(DS::Vault::e_CanVisitFolder);
    uint32_t canVisitList = v_create_node(node);
    if (canVisitList == 0)
        return std::make_pair(0, 0);

    node.clear();
    node.set_NodeType(DS::Vault::e_NodeSDL);
    node.set_CreatorUuid(age.m_ageId);
    node.set_CreatorIdx(ageNode);
    node.set_Int32_1(0);
    node.set_String64_1(age.m_filename);
    node.set_Blob_1(gen_default_sdl(age.m_filename));
    uint32_t ageSdlNode = v_create_node(node);
    if (ageSdlNode == 0)
        return std::make_pair(0, 0);

    node.clear();
    node.set_NodeType(DS::Vault::e_NodePlayerInfoList);
    node.set_CreatorUuid(age.m_ageId);
    node.set_CreatorIdx(ageNode);
    node.set_Int32_1(DS::Vault::e_AgeOwnersFolder);
    uint32_t ageOwners = v_create_node(node);
    if (ageOwners == 0)
        return std::make_pair(0, 0);

    node.clear();
    node.set_NodeType(DS::Vault::e_NodeAgeInfoList);
    node.set_CreatorUuid(age.m_ageId);
    node.set_CreatorIdx(ageNode);
    node.set_Int32_1(DS::Vault::e_ChildAgesFolder);
    uint32_t childAges = v_create_node(node);
    if (childAges == 0)
        return std::make_pair(0, 0);

    if (!v_ref_node(ageNode, s_systemNode, 0))
        return std::make_pair(0, 0);
    if (!v_ref_node(ageNode, chronFolder, 0))
        return std::make_pair(0, 0);
    if (!v_ref_node(ageNode, knownFolder, 0))
        return std::make_pair(0, 0);
    if (!v_ref_node(ageNode, subAgesFolder, 0))
        return std::make_pair(0, 0);
    if (!v_ref_node(ageNode, ageInfoNode, 0))
        return std::make_pair(0, 0);
    if (!v_ref_node(ageNode, devsFolder, 0))
        return std::make_pair(0, 0);
    if (!v_ref_node(ageInfoNode, canVisitList, 0))
        return std::make_pair(0, 0);
    if (!v_ref_node(ageInfoNode, ageSdlNode, 0))
        return std::make_pair(0, 0);
    if (!v_ref_node(ageInfoNode, ageOwners, 0))
        return std::make_pair(0, 0);
    if (!v_ref_node(ageInfoNode, childAges, 0))
        return std::make_pair(0, 0);

    // Register with the server database
    {
        DS::String agedesc = !age.m_description.isEmpty() ? age.m_description
                           : !age.m_instName.isEmpty() ? age.m_instName
                           : age.m_filename;

        PostgresStrings<5> parms;
        parms.set(0, age.m_ageId.toString());
        parms.set(1, age.m_filename);
        parms.set(2, agedesc);
        parms.set(3, ageNode);
        parms.set(4, ageSdlNode);
        PGresult* result = PQexecParams(s_postgres,
                "INSERT INTO game.\"Servers\""
                "    (\"AgeUuid\", \"AgeFilename\", \"DisplayName\", \"AgeIdx\", \"SdlIdx\")"
                "    VALUES ($1, $2, $3, $4, $5)",
                5, 0, parms.m_values, 0, 0, 0);
        if (PQresultStatus(result) != PGRES_COMMAND_OK) {
            fprintf(stderr, "%s:%d:\n    Postgres INSERT error: %s\n",
                    __FILE__, __LINE__, PQerrorMessage(s_postgres));
            PQclear(result);
            return std::make_pair(0, 0);
        }
        PQclear(result);
    }

    // Register with the database if it's a public age
    if (flags & e_AgePublic) {
        PostgresStrings<8> parms;
        parms.set(0, age.m_ageId.toString());
        parms.set(1, age.m_filename);
        parms.set(2, age.m_instName.isEmpty() ? "" : age.m_instName);
        parms.set(3, age.m_userName.isEmpty() ? "" : age.m_userName);
        parms.set(4, age.m_description.isEmpty() ? "" : age.m_description);
        parms.set(5, seqNumber);
        parms.set(6, age.m_language);
        parms.set(7, 0);    // Population
        PGresult* result = PQexecParams(s_postgres,
                "INSERT INTO game.\"PublicAges\""
                "    (\"AgeUuid\", \"AgeFilename\", \"AgeInstName\", \"AgeUserName\","
                "     \"AgeDesc\", \"SeqNumber\", \"Language\", \"Population\")"
                "    VALUES ($1, $2, $3, $4, $5, $6, $7, $8)",
                8, 0, parms.m_values, 0, 0, 0);
        if (PQresultStatus(result) != PGRES_COMMAND_OK) {
            fprintf(stderr, "%s:%d:\n    Postgres INSERT error: %s\n",
                    __FILE__, __LINE__, PQerrorMessage(s_postgres));
            PQclear(result);
            return std::make_pair(0, 0);
        }
        PQclear(result);
    }

    return std::make_pair(ageNode, ageInfoNode);
}
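
v_create_age() issues many INSERTs through v_create_node() and v_ref_node(); when a step fails it returns (0, 0) and the rows created so far stay behind. Purely as an illustration, and not something the example itself does, here is a hedged sketch of grouping such steps inside one libpq transaction so a failure leaves nothing partial:

#include <stdio.h>
#include <libpq-fe.h>

/* Run a simple command and report whether it succeeded. */
static int run_cmd(PGconn *conn, const char *sql)
{
    PGresult *res = PQexec(conn, sql);
    int ok = (PQresultStatus(res) == PGRES_COMMAND_OK);
    if (!ok)
        fprintf(stderr, "%s failed: %s", sql, PQerrorMessage(conn));
    PQclear(res);
    return ok;
}

int main(void)
{
    PGconn *conn = PQconnectdb("");      /* placeholder: rely on PG* environment vars */
    if (PQstatus(conn) != CONNECTION_OK)
        return 1;

    if (!run_cmd(conn, "BEGIN"))
    {
        PQfinish(conn);
        return 1;
    }

    int ok = 1;
    /* ... the dependent INSERTs would run here, clearing 'ok' on any failure ... */

    /* Either everything becomes visible at once, or nothing does. */
    run_cmd(conn, ok ? "COMMIT" : "ROLLBACK");

    PQfinish(conn);
    return 0;
}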
Example no. 19
#ifdef DNS_CLIENTINFO_VERSION
static isc_result_t
pgsqldb_lookup(const char *zone, const char *name, void *dbdata,
	       dns_sdblookup_t *lookup, dns_clientinfomethods_t *methods,
	       dns_clientinfo_t *clientinfo)
#else /* DNS_CLIENTINFO_VERSION */
static isc_result_t
pgsqldb_lookup(const char *zone, const char *name, void *dbdata,
	       dns_sdblookup_t *lookup)
#endif /* DNS_CLIENTINFO_VERSION */
{
	isc_result_t result;
	struct dbinfo *dbi = dbdata;
	PGresult *res;
	char str[1500];
	char *canonname;
	int i;

	UNUSED(zone);
#ifdef DNS_CLIENTINFO_VERSION
	UNUSED(methods);
	UNUSED(clientinfo);
#endif /* DNS_CLIENTINFO_VERSION */

	canonname = isc_mem_get(ns_g_mctx, strlen(name) * 2 + 1);
	if (canonname == NULL)
		return (ISC_R_NOMEMORY);
	quotestring(name, canonname);
	snprintf(str, sizeof(str),
		 "SELECT TTL,RDTYPE,RDATA FROM \"%s\" WHERE "
		 "lower(NAME) = lower('%s')", dbi->table, canonname);
	isc_mem_put(ns_g_mctx, canonname, strlen(name) * 2 + 1);

	result = maybe_reconnect(dbi);
	if (result != ISC_R_SUCCESS)
		return (result);

	res = PQexec(dbi->conn, str);
	if (!res || PQresultStatus(res) != PGRES_TUPLES_OK) {
		PQclear(res);
		return (ISC_R_FAILURE);
	}
	if (PQntuples(res) == 0) {
		PQclear(res);
		return (ISC_R_NOTFOUND);
	}

	for (i = 0; i < PQntuples(res); i++) {
		char *ttlstr = PQgetvalue(res, i, 0);
		char *type = PQgetvalue(res, i, 1);
		char *data = PQgetvalue(res, i, 2);
		dns_ttl_t ttl;
		char *endp;
		ttl = strtol(ttlstr, &endp, 10);
		if (*endp != '\0') {
			PQclear(res);
			return (DNS_R_BADTTL);
		}
		result = dns_sdb_putrr(lookup, type, ttl, data);
		if (result != ISC_R_SUCCESS) {
			PQclear(res);
			return (ISC_R_FAILURE);
		}
	}

	PQclear(res);
	return (ISC_R_SUCCESS);
}
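
pgsqldb_lookup() quotes the looked-up name with a local quotestring() helper before interpolating it into the SQL string. When a live PGconn is at hand, libpq's PQescapeStringConn performs the same escaping with the connection's encoding taken into account; a hedged sketch (the table name is hypothetical, and this is not what the BIND driver itself does):

#include <stdio.h>
#include <string.h>
#include <libpq-fe.h>

int main(void)
{
    PGconn *conn = PQconnectdb("");              /* placeholder: rely on PG* environment vars */
    if (PQstatus(conn) != CONNECTION_OK)
        return 1;

    const char *name = "o'reilly.example.com";   /* a value containing a quote */
    char escaped[2 * 256 + 1];                   /* room for inputs up to 256 bytes, doubled */
    int err = 0;

    PQescapeStringConn(conn, escaped, name, strlen(name), &err);
    if (err)
    {
        fprintf(stderr, "escaping failed: %s", PQerrorMessage(conn));
        PQfinish(conn);
        return 1;
    }

    char query[1500];
    snprintf(query, sizeof(query),
             "SELECT TTL, RDTYPE, RDATA FROM zonedata "      /* hypothetical table */
             "WHERE lower(NAME) = lower('%s')", escaped);
    printf("query: %s\n", query);

    PQfinish(conn);
    return 0;
}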
Example no. 20
std::pair<uint32_t, uint32_t>
v_create_player(DS::Uuid acctId, const AuthServer_PlayerInfo& player)
{
    DS::Vault::Node node;
    node.set_NodeType(DS::Vault::e_NodePlayer);
    node.set_CreatorUuid(acctId);
    node.set_Int32_2(player.m_explorer);
    node.set_Uuid_1(acctId);
    node.set_String64_1(player.m_avatarModel);
    node.set_IString64_1(player.m_playerName);
    uint32_t playerIdx = v_create_node(node);
    if (playerIdx == 0)
        return std::make_pair(0, 0);

    node.clear();
    node.set_NodeType(DS::Vault::e_NodePlayerInfo);
    node.set_CreatorUuid(acctId);
    node.set_CreatorIdx(playerIdx);
    node.set_Uint32_1(playerIdx);
    node.set_IString64_1(player.m_playerName);
    uint32_t playerInfoNode = v_create_node(node);
    if (playerInfoNode == 0)
        return std::make_pair(0, 0);

    node.clear();
    node.set_NodeType(DS::Vault::e_NodePlayerInfoList);
    node.set_CreatorUuid(acctId);
    node.set_CreatorIdx(playerIdx);
    node.set_Int32_1(DS::Vault::e_BuddyListFolder);
    uint32_t buddyList = v_create_node(node);
    if (buddyList == 0)
        return std::make_pair(0, 0);

    node.clear();
    node.set_NodeType(DS::Vault::e_NodePlayerInfoList);
    node.set_CreatorUuid(acctId);
    node.set_CreatorIdx(playerIdx);
    node.set_Int32_1(DS::Vault::e_IgnoreListFolder);
    uint32_t ignoreList = v_create_node(node);
    if (ignoreList == 0)
        return std::make_pair(0, 0);

    node.clear();
    node.set_NodeType(DS::Vault::e_NodeFolder);
    node.set_CreatorUuid(acctId);
    node.set_CreatorIdx(playerIdx);
    node.set_Int32_1(DS::Vault::e_PlayerInviteFolder);
    uint32_t invites = v_create_node(node);
    if (invites == 0)
        return std::make_pair(0, 0);

    node.clear();
    node.set_NodeType(DS::Vault::e_NodeAgeInfoList);
    node.set_CreatorUuid(acctId);
    node.set_CreatorIdx(playerIdx);
    node.set_Int32_1(DS::Vault::e_AgesIOwnFolder);
    uint32_t agesNode = v_create_node(node);
    if (agesNode == 0)
        return std::make_pair(0, 0);

    node.clear();
    node.set_NodeType(DS::Vault::e_NodeFolder);
    node.set_CreatorUuid(acctId);
    node.set_CreatorIdx(playerIdx);
    node.set_Int32_1(DS::Vault::e_AgeJournalsFolder);
    uint32_t journals = v_create_node(node);
    if (journals == 0)
        return std::make_pair(0, 0);

    node.clear();
    node.set_NodeType(DS::Vault::e_NodeFolder);
    node.set_CreatorUuid(acctId);
    node.set_CreatorIdx(playerIdx);
    node.set_Int32_1(DS::Vault::e_ChronicleFolder);
    uint32_t chronicles = v_create_node(node);
    if (chronicles == 0)
        return std::make_pair(0, 0);

    node.clear();
    node.set_NodeType(DS::Vault::e_NodeAgeInfoList);
    node.set_CreatorUuid(acctId);
    node.set_CreatorIdx(playerIdx);
    node.set_Int32_1(DS::Vault::e_AgesICanVisitFolder);
    uint32_t visitFolder = v_create_node(node);
    if (visitFolder == 0)
        return std::make_pair(0, 0);

    node.clear();
    node.set_NodeType(DS::Vault::e_NodeFolder);
    node.set_CreatorUuid(acctId);
    node.set_CreatorIdx(playerIdx);
    node.set_Int32_1(DS::Vault::e_AvatarOutfitFolder);
    uint32_t outfit = v_create_node(node);
    if (outfit == 0)
        return std::make_pair(0, 0);

    node.clear();
    node.set_NodeType(DS::Vault::e_NodeFolder);
    node.set_CreatorUuid(acctId);
    node.set_CreatorIdx(playerIdx);
    node.set_Int32_1(DS::Vault::e_AvatarClosetFolder);
    uint32_t closet = v_create_node(node);
    if (closet == 0)
        return std::make_pair(0, 0);

    node.clear();
    node.set_NodeType(DS::Vault::e_NodeFolder);
    node.set_CreatorUuid(acctId);
    node.set_CreatorIdx(playerIdx);
    node.set_Int32_1(DS::Vault::e_InboxFolder);
    uint32_t inbox = v_create_node(node);
    if (inbox == 0)
        return std::make_pair(0, 0);

    node.clear();
    node.set_NodeType(DS::Vault::e_NodePlayerInfoList);
    node.set_CreatorUuid(acctId);
    node.set_CreatorIdx(playerIdx);
    node.set_Int32_1(DS::Vault::e_PeopleIKnowAboutFolder);
    uint32_t peopleNode = v_create_node(node);
    if (peopleNode == 0)
        return std::make_pair(0, 0);

    DS::Blob link(reinterpret_cast<const uint8_t*>("Default:LinkInPointDefault:;"),
                  strlen("Default:LinkInPointDefault:;"));
    node.clear();
    node.set_NodeType(DS::Vault::e_NodeAgeLink);
    node.set_CreatorUuid(acctId);
    node.set_CreatorIdx(playerIdx);
    node.set_Blob_1(link);
    uint32_t reltoLink = v_create_node(node);
    if (reltoLink == 0)
        return std::make_pair(0, 0);

    node.clear();
    node.set_NodeType(DS::Vault::e_NodeAgeLink);
    node.set_CreatorUuid(acctId);
    node.set_CreatorIdx(playerIdx);
    node.set_Blob_1(link);
    uint32_t hoodLink = v_create_node(node);
    if (hoodLink == 0)
        return std::make_pair(0, 0);

    link = DS::Blob(reinterpret_cast<const uint8_t*>("Ferry Terminal:LinkInPointFerry:;"),
                    strlen("Ferry Terminal:LinkInPointFerry:;"));
    node.clear();
    node.set_NodeType(DS::Vault::e_NodeAgeLink);
    node.set_CreatorUuid(acctId);
    node.set_CreatorIdx(playerIdx);
    node.set_Blob_1(link);
    uint32_t cityLink = v_create_node(node);
    if (cityLink == 0)
        return std::make_pair(0, 0);

    AuthServer_AgeInfo relto;
    relto.m_ageId = gen_uuid();
    relto.m_filename = "Personal";
    relto.m_instName = "Relto";
    relto.m_userName = player.m_playerName + "'s";
    relto.m_description = relto.m_userName + " " + relto.m_instName;
    std::pair<uint32_t, uint32_t> reltoAge = v_create_age(relto, 0);
    if (reltoAge.first == 0)
        return std::make_pair(0, 0);

    {
        PostgresStrings<2> parms;
        parms.set(0, reltoAge.second);
        parms.set(1, DS::Vault::e_AgeOwnersFolder);
        PGresult* result = PQexecParams(s_postgres,
                "SELECT idx FROM vault.find_folder($1, $2);",
                2, 0, parms.m_values, 0, 0, 0);
        if (PQresultStatus(result) != PGRES_TUPLES_OK) {
            fprintf(stderr, "%s:%d:\n    Postgres SELECT error: %s\n",
                    __FILE__, __LINE__, PQerrorMessage(s_postgres));
            PQclear(result);
            return std::make_pair(0, 0);
        }
        DS_DASSERT(PQntuples(result) == 1);
        uint32_t ownerFolder = strtoul(PQgetvalue(result, 0, 0), 0, 10);
        PQclear(result);

        if (!v_ref_node(ownerFolder, playerInfoNode, 0))
            return std::make_pair(0, 0);
    }

    uint32_t hoodAge = find_a_friendly_neighborhood_for_our_new_visitor(playerInfoNode);
    if (hoodAge == 0)
        return std::make_pair(0, 0);
    uint32_t cityAge = find_public_age_1("city");
    if (cityAge == 0)
        return std::make_pair(0, 0);

    if (!v_ref_node(playerIdx, s_systemNode, 0))
        return std::make_pair(0, 0);
    if (!v_ref_node(playerIdx, playerInfoNode, 0))
        return std::make_pair(0, 0);
    if (!v_ref_node(playerIdx, buddyList, 0))
        return std::make_pair(0, 0);
    if (!v_ref_node(playerIdx, ignoreList, 0))
        return std::make_pair(0, 0);
    if (!v_ref_node(playerIdx, invites, 0))
        return std::make_pair(0, 0);
    if (!v_ref_node(playerIdx, agesNode, 0))
        return std::make_pair(0, 0);
    if (!v_ref_node(playerIdx, journals, 0))
        return std::make_pair(0, 0);
    if (!v_ref_node(playerIdx, chronicles, 0))
        return std::make_pair(0, 0);
    if (!v_ref_node(playerIdx, visitFolder, 0))
        return std::make_pair(0, 0);
    if (!v_ref_node(playerIdx, outfit, 0))
        return std::make_pair(0, 0);
    if (!v_ref_node(playerIdx, closet, 0))
        return std::make_pair(0, 0);
    if (!v_ref_node(playerIdx, inbox, 0))
        return std::make_pair(0, 0);
    if (!v_ref_node(playerIdx, peopleNode, 0))
        return std::make_pair(0, 0);
    if (!v_ref_node(agesNode, reltoLink, 0))
        return std::make_pair(0, 0);
    if (!v_ref_node(agesNode, hoodLink, 0))
        return std::make_pair(0, 0);
    if (!v_ref_node(agesNode, cityLink, 0))
        return std::make_pair(0, 0);
    if (!v_ref_node(reltoLink, reltoAge.second, 0))
        return std::make_pair(0, 0);
    if (!v_ref_node(hoodLink, hoodAge, 0))
        return std::make_pair(0, 0);
    if (!v_ref_node(cityLink, cityAge, 0))
        return std::make_pair(0, 0);
    if (!v_ref_node(reltoAge.first, agesNode, 0))
        return std::make_pair(0, 0);

    return std::make_pair(playerIdx, playerInfoNode);
}
Example no. 21
int main(int ac, char* av[]) {
    po::options_description od_desc("Allowed options");
    get_options_description(od_desc);
    
    po::variables_map vm;
    po::store(po::parse_command_line(ac, av, od_desc), vm);
    if (vm.count("help")) {
        std::cout << od_desc << "\n";
        return 0;
    }

    try {
        po::notify(vm);
    }
    catch (...) {
        std::cout << od_desc << "\n";
        return 0;
    }

    auto ret_val = process_command_line(vm, od_desc);
    if (ret_val != 2) return ret_val;


    auto db_dbase(vm["dbname"].as<std::string>());
    db_dbase = "dbname = " + db_dbase;
    auto db_host(vm["host"].as<std::string>());
    auto db_port(vm["port"].as<std::string>());
    auto db_username(vm["username"].as<std::string>());
    auto db_pwd(vm["password"].as<std::string>());
    auto test(vm["test"].as<bool>());
    //auto db_no_pwd(vm["no-password"].as<std::string>());

    const char *conninfo = db_dbase.c_str();
    PGconn     *conn;
    PGresult   *res;
    int rec_count, col_count;


    conn = PQconnectdb(conninfo);
    /* Check to see that the backend connection was successfully made */
    if (PQstatus(conn) != CONNECTION_OK)
    {
        fprintf(stderr, "Connection to database failed: %s",
                PQerrorMessage(conn));
        exit_nicely(conn);
        exit(1);
    }

   std::string data_sql;
   if (test) {
     data_sql = "select id, source, target, cost, -1 as reverse_cost  from table1 order by id";
     // data_sql = "select id, source, target, cost, reverse_cost from edge_table order by id";
   } else {
     std::cout << "Input data query: ";
     std::getline (std::cin,data_sql);
   }
   std::cout << "\nThe data is from:" << data_sql <<"\n";

   res = PQexec(conn, data_sql.c_str());

      if (PQresultStatus(res) != PGRES_TUPLES_OK) {
        std::cout << "We did not get any data!\n";
        exit_nicely(conn);
        exit(1);
      }

      rec_count = PQntuples(res);
      col_count = PQnfields(res);
      if (col_count > 5 || col_count < 4) {
        std::cout << "Max number of columns 5\n";
        std::cout << "Min number of columns 4\n";
        exit_nicely(conn);
        exit(1);
      }

      auto id_fnum = PQfnumber(res, "id");
      auto source_fnum = PQfnumber(res, "source");
      auto target_fnum = PQfnumber(res, "target");
      auto cost_fnum = PQfnumber(res, "cost");
      auto reverse_fnum = PQfnumber(res, "reverse_cost");



      pgr_edge_t *data_edges;
      data_edges = (pgr_edge_t *) malloc(rec_count * sizeof(pgr_edge_t));
	

      printf("We received %d records.\n", rec_count);
      puts("==========================");

      

      std::string::size_type sz;
      std::string str;

      for (int row = 0; row < rec_count; ++row) {
        str = std::string(PQgetvalue(res, row, id_fnum));
        data_edges[row].id = stol(str, &sz);

        str = std::string(PQgetvalue(res, row, source_fnum));
        data_edges[row].source = stol(str, &sz);

        str = std::string(PQgetvalue(res, row, target_fnum));
        data_edges[row].target = stol(str, &sz);

        str = std::string(PQgetvalue(res, row, cost_fnum));
        data_edges[row].cost = stod(str, &sz);

        if (reverse_fnum != -1) {
          str = std::string(PQgetvalue(res, row, reverse_fnum));
          data_edges[row].reverse_cost = stod(str, &sz);
        } 
#if 0
        std::cout << "\tid: " << data_edges[row].id << "\t";
        std::cout << "\tsource: " << data_edges[row].source << "\t";
        std::cout << "\ttarget: " << data_edges[row].target << "\t";
        std::cout << "\tcost: " << data_edges[row].cost << "\t";
        if (reverse_fnum != -1) {
          std::cout << "\treverse: " << data_edges[row].reverse_cost << "\t";
        }
        std::cout << "\n";
#endif
      }


      puts("==========================");

      PQclear(res);

      PQfinish(conn);


//////////////////////  END READING DATA FROM DATABASE ///////////////////

    std::string directed;
    std::cout << "Is the graph directed [N,n]? default[Y]";
    std::getline(std::cin,directed);
    graphType gType =  (directed.compare("N")==0 || directed.compare("n")==0)? UNDIRECTED: DIRECTED;
    bool directedFlag =  (directed.compare("N")==0 || directed.compare("n")==0)? false: true;


    const int initial_size = rec_count;

    
    Pgr_base_graph< DirectedGraph > digraph(gType, initial_size);
    Pgr_base_graph< UndirectedGraph > undigraph(gType, initial_size);
    
    if (directedFlag) {
      process(digraph, data_edges, rec_count);
    } else {
      process(undigraph, data_edges, rec_count);
    }

}
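
The loader above resolves columns by name with PQfnumber but only guards reverse_cost against a missing column; id, source, target and cost are used unchecked. A small sketch of the same check applied to every required column (connection string and table name are placeholders):

#include <stdio.h>
#include <libpq-fe.h>

int main(void)
{
    PGconn *conn = PQconnectdb("");              /* placeholder: rely on PG* environment vars */
    if (PQstatus(conn) != CONNECTION_OK)
        return 1;

    PGresult *res = PQexec(conn,
        "SELECT id, source, target, cost FROM table1");      /* hypothetical table */
    if (PQresultStatus(res) != PGRES_TUPLES_OK)
    {
        fprintf(stderr, "query failed: %s", PQerrorMessage(conn));
        PQclear(res);
        PQfinish(conn);
        return 1;
    }

    const char *required[] = { "id", "source", "target", "cost" };
    for (int i = 0; i < 4; ++i)
    {
        /* PQfnumber returns -1 when the named column is absent from the result. */
        if (PQfnumber(res, required[i]) == -1)
        {
            fprintf(stderr, "missing required column: %s\n", required[i]);
            PQclear(res);
            PQfinish(conn);
            return 1;
        }
    }
    printf("all required columns present (%d rows)\n", PQntuples(res));

    PQclear(res);
    PQfinish(conn);
    return 0;
}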
Example no. 22
static uint32_t find_a_friendly_neighborhood_for_our_new_visitor(uint32_t playerInfoId)
{
    PGresult* result = PQexec(s_postgres,
            "SELECT \"AgeUuid\" FROM game.\"PublicAges\""
            "    WHERE \"AgeFilename\" = 'Neighborhood'"
            "    AND \"Population\" < 20 AND \"SeqNumber\" <> 0");
    if (PQresultStatus(result) != PGRES_TUPLES_OK) {
        fprintf(stderr, "%s:%d:\n    Postgres SELECT error: %s\n",
                __FILE__, __LINE__, PQerrorMessage(s_postgres));
        PQclear(result);
        return 0;
    }

    std::pair<uint32_t, uint32_t> ageNode;
    DS::Uuid ageId;
    if (PQntuples(result) != 0) {
        ageId = DS::Uuid(PQgetvalue(result, 0, 0));
        PQclear(result);

        PostgresStrings<2> parms;
        parms.set(0, DS::Vault::e_NodeAgeInfo);
        parms.set(1, ageId.toString());
        result = PQexecParams(s_postgres,
                "SELECT idx FROM vault.\"Nodes\""
                "    WHERE \"NodeType\"=$1 AND \"Uuid_1\"=$2",
                2, 0, parms.m_values, 0, 0, 0);
        if (PQresultStatus(result) != PGRES_TUPLES_OK) {
            fprintf(stderr, "%s:%d:\n    Postgres SELECT error: %s\n",
                    __FILE__, __LINE__, PQerrorMessage(s_postgres));
            PQclear(result);
            return 0;
        }
        DS_DASSERT(PQntuples(result) == 1);
        ageNode.second = strtoul(PQgetvalue(result, 0, 0), 0, 10);
        PQclear(result);
    } else {
        PQclear(result);
        AuthServer_AgeInfo age;
        age.m_ageId = gen_uuid();
        age.m_filename = "Neighborhood";
        age.m_instName = "Neighborhood";
        age.m_userName = "******";
        age.m_description = "DS Neighborhood";
        age.m_seqNumber = -1;   // Auto-generate
        ageNode = v_create_age(age, e_AgePublic);
        if (ageNode.second == 0)
            return 0;
    }

    {
        PostgresStrings<2> parms;
        parms.set(0, ageNode.second);
        parms.set(1, DS::Vault::e_AgeOwnersFolder);
        result = PQexecParams(s_postgres,
                "SELECT idx FROM vault.find_folder($1, $2);",
                2, 0, parms.m_values, 0, 0, 0);
    }
    if (PQresultStatus(result) != PGRES_TUPLES_OK) {
        fprintf(stderr, "%s:%d:\n    Postgres SELECT error: %s\n",
                __FILE__, __LINE__, PQerrorMessage(s_postgres));
        PQclear(result);
        return 0;
    }
    DS_DASSERT(PQntuples(result) == 1);
    uint32_t ownerFolder = strtoul(PQgetvalue(result, 0, 0), 0, 10);
    PQclear(result);

    if (!v_ref_node(ownerFolder, playerInfoId, 0))
        return 0;

    {
        PostgresStrings<1> parms;
        parms.set(0, ageId.toString());
        result = PQexecParams(s_postgres,
                "UPDATE game.\"PublicAges\""
                "    SET \"Population\" = \"Population\"+1"
                "    WHERE \"AgeUuid\" = $1",
                1, 0, parms.m_values, 0, 0, 0);
    }
    if (PQresultStatus(result) != PGRES_COMMAND_OK) {
        fprintf(stderr, "%s:%d:\n    Postgres SELECT error: %s\n",
                __FILE__, __LINE__, PQerrorMessage(s_postgres));
        PQclear(result);
        return 0;
    }
    PQclear(result);

    return ageNode.second;
}
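
The final UPDATE only checks for PGRES_COMMAND_OK and never verifies that a row was actually changed. PQcmdTuples exposes the affected-row count for exactly that case; a hedged sketch with a hypothetical table:

#include <stdio.h>
#include <stdlib.h>
#include <libpq-fe.h>

int main(void)
{
    PGconn *conn = PQconnectdb("");      /* placeholder: rely on PG* environment vars */
    if (PQstatus(conn) != CONNECTION_OK)
        return 1;

    /* 'ages' and its columns are hypothetical stand-ins for the real schema. */
    PGresult *res = PQexec(conn,
        "UPDATE ages SET population = population + 1 WHERE uuid = 'none'");
    if (PQresultStatus(res) != PGRES_COMMAND_OK)
        fprintf(stderr, "UPDATE failed: %s", PQerrorMessage(conn));
    /* PQcmdTuples returns the affected-row count as a string ("0", "1", ...). */
    else if (atoi(PQcmdTuples(res)) == 0)
        fprintf(stderr, "warning: no row matched the UPDATE\n");

    PQclear(res);
    PQfinish(conn);
    return 0;
}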
Example no. 23
/*
 * vacuum_one_database
 *
 * Process tables in the given database.  If the 'tables' list is empty,
 * process all tables in the database.
 *
 * Note that this function is only concerned with running exactly one stage
 * when in analyze-in-stages mode; caller must iterate on us if necessary.
 *
 * If concurrentCons is > 1, multiple connections are used to vacuum tables
 * in parallel.  In this case and if the table list is empty, we first obtain
 * a list of tables from the database.
 */
static void
vacuum_one_database(const char *dbname, vacuumingOptions *vacopts,
					int stage,
					SimpleStringList *tables,
					const char *host, const char *port,
					const char *username, enum trivalue prompt_password,
					int concurrentCons,
					const char *progname, bool echo, bool quiet)
{
	PQExpBufferData sql;
	PGconn	   *conn;
	SimpleStringListCell *cell;
	ParallelSlot *slots = NULL;
	SimpleStringList dbtables = {NULL, NULL};
	int			i;
	bool		failed = false;
	bool		parallel = concurrentCons > 1;
	const char *stage_commands[] = {
		"SET default_statistics_target=1; SET vacuum_cost_delay=0;",
		"SET default_statistics_target=10; RESET vacuum_cost_delay;",
		"RESET default_statistics_target;"
	};
	const char *stage_messages[] = {
		gettext_noop("Generating minimal optimizer statistics (1 target)"),
		gettext_noop("Generating medium optimizer statistics (10 targets)"),
		gettext_noop("Generating default (full) optimizer statistics")
	};

	Assert(stage == ANALYZE_NO_STAGE ||
		   (stage >= 0 && stage < ANALYZE_NUM_STAGES));

	conn = connectDatabase(dbname, host, port, username, prompt_password,
						   progname, echo, false, true);

	if (!quiet)
	{
		if (stage != ANALYZE_NO_STAGE)
			printf(_("%s: processing database \"%s\": %s\n"),
				   progname, PQdb(conn), stage_messages[stage]);
		else
			printf(_("%s: vacuuming database \"%s\"\n"),
				   progname, PQdb(conn));
		fflush(stdout);
	}

	initPQExpBuffer(&sql);

	/*
	 * If a table list is not provided and we're using multiple connections,
	 * prepare the list of tables by querying the catalogs.
	 */
	if (parallel && (!tables || !tables->head))
	{
		PQExpBufferData buf;
		PGresult   *res;
		int			ntups;
		int			i;

		initPQExpBuffer(&buf);

		res = executeQuery(conn,
						   "SELECT c.relname, ns.nspname"
						   " FROM pg_class c, pg_namespace ns\n"
						   " WHERE relkind IN ("
						   CppAsString2(RELKIND_RELATION) ", "
						   CppAsString2(RELKIND_MATVIEW) ")"
						   " AND c.relnamespace = ns.oid\n"
						   " ORDER BY c.relpages DESC;",
						   progname, echo);

		ntups = PQntuples(res);
		for (i = 0; i < ntups; i++)
		{
			appendPQExpBufferStr(&buf,
								 fmtQualifiedId(PQserverVersion(conn),
												PQgetvalue(res, i, 1),
												PQgetvalue(res, i, 0)));

			simple_string_list_append(&dbtables, buf.data);
			resetPQExpBuffer(&buf);
		}

		termPQExpBuffer(&buf);
		tables = &dbtables;

		/*
		 * If there are more connections than vacuumable relations, we don't
		 * need to use them all.
		 */
		if (concurrentCons > ntups)
			concurrentCons = ntups;
		if (concurrentCons <= 1)
			parallel = false;
		PQclear(res);
	}

	/*
	 * Set up the database connections. We reuse the connection we already have
	 * for the first slot.  If not in parallel mode, the first slot in the
	 * array contains the connection.
	 */
	slots = (ParallelSlot *) pg_malloc(sizeof(ParallelSlot) * concurrentCons);
	init_slot(slots, conn, progname);
	if (parallel)
	{
		for (i = 1; i < concurrentCons; i++)
		{
			conn = connectDatabase(dbname, host, port, username, prompt_password,
								   progname, echo, false, true);
			init_slot(slots + i, conn, progname);
		}
	}

	/*
	 * Prepare all the connections to run the appropriate analyze stage, if
	 * caller requested that mode.
	 */
	if (stage != ANALYZE_NO_STAGE)
	{
		int			j;

		/* We already emitted the message above */

		for (j = 0; j < concurrentCons; j++)
			executeCommand((slots + j)->connection,
						   stage_commands[stage], progname, echo);
	}

	cell = tables ? tables->head : NULL;
	do
	{
		ParallelSlot *free_slot;
		const char *tabname = cell ? cell->val : NULL;

		prepare_vacuum_command(&sql, conn, vacopts, tabname,
							   tables == &dbtables, progname, echo);

		if (CancelRequested)
		{
			failed = true;
			goto finish;
		}

		/*
		 * Get the connection slot to use.  If in parallel mode, here we wait
		 * for one connection to become available if none already is.  In
		 * non-parallel mode we simply use the only slot we have, which we
		 * know to be free.
		 */
		if (parallel)
		{
			/*
			 * Get a free slot, waiting until one becomes free if none
			 * currently is.
			 */
			free_slot = GetIdleSlot(slots, concurrentCons, progname);
			if (!free_slot)
			{
				failed = true;
				goto finish;
			}

			free_slot->isFree = false;
		}
		else
			free_slot = slots;

		/*
		 * Execute the vacuum.  If not in parallel mode, this terminates the
		 * program in case of an error.  (The parallel case handles query
		 * errors in GetQueryResult through GetIdleSlot.)
		 */
		run_vacuum_command(free_slot->connection, sql.data,
						   echo, tabname, progname, parallel);

		if (cell)
			cell = cell->next;
	} while (cell != NULL);

	if (parallel)
	{
		int			j;

		for (j = 0; j < concurrentCons; j++)
		{
			/* wait for all connections to return their results */
			if (!GetQueryResult((slots + j)->connection, progname))
				goto finish;

			(slots + j)->isFree = true;
		}
	}

finish:
	for (i = 0; i < concurrentCons; i++)
		DisconnectDatabase(slots + i);
	pfree(slots);

	termPQExpBuffer(&sql);

	if (failed)
		exit(1);
}
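
GetIdleSlot() (not shown in this excerpt) has to wait until one of the parallel connections finishes its command. The usual libpq approach is to select() on PQsocket and drain input with PQconsumeInput until PQisBusy reports the connection free again. A minimal single-connection sketch of that wait, assuming only standard libpq calls:

#include <stdio.h>
#include <sys/select.h>
#include <libpq-fe.h>

/* Block until 'conn' has finished its current command; returns 0 on error. */
static int wait_until_idle(PGconn *conn)
{
    while (PQisBusy(conn))
    {
        int sock = PQsocket(conn);
        fd_set input_mask;

        if (sock < 0)
            return 0;
        FD_ZERO(&input_mask);
        FD_SET(sock, &input_mask);
        if (select(sock + 1, &input_mask, NULL, NULL, NULL) < 0)
            return 0;                   /* interrupted or failed */
        if (!PQconsumeInput(conn))      /* read whatever has arrived */
            return 0;
    }
    return 1;
}

int main(void)
{
    PGconn *conn = PQconnectdb("");     /* placeholder: rely on PG* environment vars */
    if (PQstatus(conn) != CONNECTION_OK)
        return 1;

    PQsendQuery(conn, "SELECT pg_sleep(1)");
    if (wait_until_idle(conn))
    {
        PGresult *res;

        while ((res = PQgetResult(conn)) != NULL)
            PQclear(res);
        printf("connection is idle again\n");
    }
    PQfinish(conn);
    return 0;
}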
Example no. 24
uint32_t v_create_node(const DS::Vault::Node& node)
{
    /* This should be plenty to store everything we need without a bunch
     * of dynamic reallocations
     */
    PostgresStrings<31> parms;
    char fieldbuf[1024];

    size_t parmcount = 0;
    char* fieldp = fieldbuf;

    #define SET_FIELD(name, value) \
        { \
            parms.set(parmcount++, value); \
            fieldp += sprintf(fieldp, "\"" #name "\","); \
        }
    int now = time(0);
    SET_FIELD(CreateTime, now);
    SET_FIELD(ModifyTime, now);
    if (node.has_CreateAgeName())
        SET_FIELD(CreateAgeName, node.m_CreateAgeName);
    if (node.has_CreateAgeUuid())
        SET_FIELD(CreateAgeUuid, node.m_CreateAgeUuid.toString());
    if (node.has_CreatorUuid())
        SET_FIELD(CreatorUuid, node.m_CreatorUuid.toString());
    if (node.has_CreatorIdx())
        SET_FIELD(CreatorIdx, node.m_CreatorIdx);
    if (node.has_NodeType())
        SET_FIELD(NodeType, node.m_NodeType);
    if (node.has_Int32_1())
        SET_FIELD(Int32_1, node.m_Int32_1);
    if (node.has_Int32_2())
        SET_FIELD(Int32_2, node.m_Int32_2);
    if (node.has_Int32_3())
        SET_FIELD(Int32_3, node.m_Int32_3);
    if (node.has_Int32_4())
        SET_FIELD(Int32_4, node.m_Int32_4);
    if (node.has_Uint32_1())
        SET_FIELD(Uint32_1, node.m_Uint32_1);
    if (node.has_Uint32_2())
        SET_FIELD(Uint32_2, node.m_Uint32_2);
    if (node.has_Uint32_3())
        SET_FIELD(Uint32_3, node.m_Uint32_3);
    if (node.has_Uint32_4())
        SET_FIELD(Uint32_4, node.m_Uint32_4);
    if (node.has_Uuid_1())
        SET_FIELD(Uuid_1, node.m_Uuid_1.toString());
    if (node.has_Uuid_2())
        SET_FIELD(Uuid_2, node.m_Uuid_2.toString());
    if (node.has_Uuid_3())
        SET_FIELD(Uuid_3, node.m_Uuid_3.toString());
    if (node.has_Uuid_4())
        SET_FIELD(Uuid_4, node.m_Uuid_4.toString());
    if (node.has_String64_1())
        SET_FIELD(String64_1, node.m_String64_1);
    if (node.has_String64_2())
        SET_FIELD(String64_2, node.m_String64_2);
    if (node.has_String64_3())
        SET_FIELD(String64_3, node.m_String64_3);
    if (node.has_String64_4())
        SET_FIELD(String64_4, node.m_String64_4);
    if (node.has_String64_5())
        SET_FIELD(String64_5, node.m_String64_5);
    if (node.has_String64_6())
        SET_FIELD(String64_6, node.m_String64_6);
    if (node.has_IString64_1())
        SET_FIELD(IString64_1, node.m_IString64_1);
    if (node.has_IString64_2())
        SET_FIELD(IString64_2, node.m_IString64_2);
    if (node.has_Text_1())
        SET_FIELD(Text_1, node.m_Text_1);
    if (node.has_Text_2())
        SET_FIELD(Text_2, node.m_Text_2);
    if (node.has_Blob_1())
        SET_FIELD(Blob_1, DS::Base64Encode(node.m_Blob_1.buffer(), node.m_Blob_1.size()));
    if (node.has_Blob_2())
        SET_FIELD(Blob_2, DS::Base64Encode(node.m_Blob_2.buffer(), node.m_Blob_2.size()));
    #undef SET_FIELD

    DS_DASSERT(fieldp - fieldbuf < 1024);
    *(fieldp - 1) = ')';    // Get rid of the last comma
    DS::String queryStr = "INSERT INTO vault.\"Nodes\" (";
    queryStr += fieldbuf;

    fieldp = fieldbuf;
    for (size_t i=0; i<parmcount; ++i) {
        sprintf(fieldp, "$%zu,", i+1);
        fieldp += strlen(fieldp);
    }
    DS_DASSERT(fieldp - fieldbuf < 1024);
    *(fieldp - 1) = ')';    // Get rid of the last comma
    queryStr += "\n    VALUES (";
    queryStr += fieldbuf;
    queryStr += "\n    RETURNING idx";

    check_postgres();
    PGresult* result = PQexecParams(s_postgres, queryStr.c_str(),
                                    parmcount, 0, parms.m_values, 0, 0, 0);
    if (PQresultStatus(result) != PGRES_TUPLES_OK) {
        fprintf(stderr, "%s:%d:\n    Postgres INSERT error: %s\n",
                __FILE__, __LINE__, PQerrorMessage(s_postgres));
        PQclear(result);
        return 0;
    }
    DS_DASSERT(PQntuples(result) == 1);
    uint32_t idx = strtoul(PQgetvalue(result, 0, 0), 0, 10);
    PQclear(result);
    return idx;
}
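
v_create_node() assembles its column and placeholder lists with sprintf into a fixed 1024-byte buffer and only asserts the bound after the fact. A small sketch of the same list-building done with snprintf, so truncation is detected rather than asserted (illustration only, not the project's code):

#include <stdio.h>
#include <string.h>

int main(void)
{
    const char *columns[] = { "CreateTime", "ModifyTime", "NodeType" };
    const size_t ncols = sizeof(columns) / sizeof(columns[0]);

    char fieldbuf[1024];
    size_t used = 0;

    for (size_t i = 0; i < ncols; ++i)
    {
        /* snprintf reports how much it wanted to write, so overflow is detectable. */
        int n = snprintf(fieldbuf + used, sizeof(fieldbuf) - used,
                         "%s\"%s\"", i ? "," : "", columns[i]);
        if (n < 0 || (size_t) n >= sizeof(fieldbuf) - used)
        {
            fprintf(stderr, "field list too long\n");
            return 1;
        }
        used += (size_t) n;
    }

    printf("INSERT INTO vault.\"Nodes\" (%s) ...\n", fieldbuf);
    return 0;
}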
Example no. 25
/**********************************
 * pg_result
 get information about the results of a query

 syntax:

	pg_result result ?option?

 the options are:

	-status		the status of the result

	-error		the error message, if the status indicates error; otherwise
				an empty string

	-conn		the connection that produced the result

	-oid		if command was an INSERT, the OID of the inserted tuple

	-numTuples	the number of tuples in the query

	-cmdTuples	the number of tuples affected by the query

	-numAttrs	returns the number of attributes returned by the query

	-assign arrayName
				assign the results to an array, using subscripts of the form
				(tupno,attributeName)

	-assignbyidx arrayName ?appendstr?
				assign the results to an array using the first field's value
				as a key.
				All but the first field of each tuple are stored, using
				subscripts of the form (field0value,attributeNameappendstr)

	-getTuple tupleNumber
				returns the values of the tuple in a list

	-tupleArray tupleNumber arrayName
				stores the values of the tuple in array arrayName, indexed
				by the attributes returned

	-attributes
				returns a list of the name/type pairs of the tuple attributes

	-lAttributes
				returns a list of the {name type len} entries of the tuple
				attributes

	-clear		clear the result buffer. Do not reuse after this

 **********************************/
int
Pg_result(ClientData cData, Tcl_Interp *interp, int argc, CONST84 char *argv[])
{
	PGresult   *result;
	const char *opt;
	int			i;
	int			tupno;
	CONST84 char *arrVar;
	char		nameBuffer[256];
	const char *appendstr;

	if (argc < 3 || argc > 5)
	{
		Tcl_AppendResult(interp, "Wrong # of arguments\n", 0);
		goto Pg_result_errReturn;		/* append help info */
	}

	result = PgGetResultId(interp, argv[1]);
	if (result == (PGresult *) NULL)
	{
		Tcl_AppendResult(interp, "\n",
						 argv[1], " is not a valid query result", 0);
		return TCL_ERROR;
	}

	opt = argv[2];

	if (strcmp(opt, "-status") == 0)
	{
		Tcl_AppendResult(interp, PQresStatus(PQresultStatus(result)), 0);
		return TCL_OK;
	}
	else if (strcmp(opt, "-error") == 0)
	{
		Tcl_SetResult(interp, (char *) PQresultErrorMessage(result),
					  TCL_STATIC);
		return TCL_OK;
	}
	else if (strcmp(opt, "-conn") == 0)
		return PgGetConnByResultId(interp, argv[1]);
	else if (strcmp(opt, "-oid") == 0)
	{
		sprintf(interp->result, "%u", PQoidValue(result));
		return TCL_OK;
	}
	else if (strcmp(opt, "-clear") == 0)
	{
		PgDelResultId(interp, argv[1]);
		PQclear(result);
		return TCL_OK;
	}
	else if (strcmp(opt, "-numTuples") == 0)
	{
		sprintf(interp->result, "%d", PQntuples(result));
		return TCL_OK;
	}
	else if (strcmp(opt, "-cmdTuples") == 0)
	{
		sprintf(interp->result, "%s", PQcmdTuples(result));
		return TCL_OK;
	}
	else if (strcmp(opt, "-numAttrs") == 0)
	{
		sprintf(interp->result, "%d", PQnfields(result));
		return TCL_OK;
	}
	else if (strcmp(opt, "-assign") == 0)
	{
		if (argc != 4)
		{
			Tcl_AppendResult(interp, "-assign option must be followed by a variable name", 0);
			return TCL_ERROR;
		}
		arrVar = argv[3];

		/*
		 * this assignment assigns the table of result tuples into a giant
		 * array with the name given in the argument. The indices of the
		 * array are of the form (tupno,attrName). Note we expect field
		 * names not to exceed a few dozen characters, so truncating to
		 * prevent buffer overflow shouldn't be a problem.
		 */
		for (tupno = 0; tupno < PQntuples(result); tupno++)
		{
			for (i = 0; i < PQnfields(result); i++)
			{
				sprintf(nameBuffer, "%d,%.200s", tupno, PQfname(result, i));
				if (Tcl_SetVar2(interp, arrVar, nameBuffer,
#ifdef TCL_ARRAYS
								tcl_value(PQgetvalue(result, tupno, i)),
#else
								PQgetvalue(result, tupno, i),
#endif
								TCL_LEAVE_ERR_MSG) == NULL)
					return TCL_ERROR;
			}
		}
		Tcl_AppendResult(interp, arrVar, 0);
		return TCL_OK;
	}
	else if (strcmp(opt, "-assignbyidx") == 0)
	{
		if (argc != 4 && argc != 5)
		{
			Tcl_AppendResult(interp, "-assignbyidx option requires an array name and optionally an append string", 0);
			return TCL_ERROR;
		}
		arrVar = argv[3];
		appendstr = (argc == 5) ? (const char *) argv[4] : "";

		/*
		 * this assignment assigns the table of result tuples into a giant
		 * array with the name given in the argument.  The indices of the
		 * array are of the form (field0Value,attrNameappendstr). Here, we
		 * still assume PQfname won't exceed 200 characters, but we dare
		 * not make the same assumption about the data in field 0 nor the
		 * append string.
		 */
		for (tupno = 0; tupno < PQntuples(result); tupno++)
		{
			const char *field0 =
#ifdef TCL_ARRAYS
			tcl_value(PQgetvalue(result, tupno, 0));

#else
			PQgetvalue(result, tupno, 0);
#endif
			char	   *workspace = malloc(strlen(field0) + strlen(appendstr) + 210);

			for (i = 1; i < PQnfields(result); i++)
			{
				sprintf(workspace, "%s,%.200s%s", field0, PQfname(result, i),
						appendstr);
				if (Tcl_SetVar2(interp, arrVar, workspace,
#ifdef TCL_ARRAYS
								tcl_value(PQgetvalue(result, tupno, i)),
#else
								PQgetvalue(result, tupno, i),
#endif
								TCL_LEAVE_ERR_MSG) == NULL)
				{
					free(workspace);
					return TCL_ERROR;
				}
			}
			free(workspace);
		}
		Tcl_AppendResult(interp, arrVar, 0);
		return TCL_OK;
	}
	else if (strcmp(opt, "-getTuple") == 0)
	{
		if (argc != 4)
		{
			Tcl_AppendResult(interp, "-getTuple option must be followed by a tuple number", 0);
			return TCL_ERROR;
		}
		tupno = atoi(argv[3]);
		if (tupno < 0 || tupno >= PQntuples(result))
		{
			Tcl_AppendResult(interp, "argument to getTuple cannot exceed number of tuples - 1", 0);
			return TCL_ERROR;
		}
#ifdef TCL_ARRAYS
		for (i = 0; i < PQnfields(result); i++)
			Tcl_AppendElement(interp, tcl_value(PQgetvalue(result, tupno, i)));
#else
		for (i = 0; i < PQnfields(result); i++)
			Tcl_AppendElement(interp, PQgetvalue(result, tupno, i));
#endif
		return TCL_OK;
	}
	else if (strcmp(opt, "-tupleArray") == 0)
	{
		if (argc != 5)
		{
			Tcl_AppendResult(interp, "-tupleArray option must be followed by a tuple number and array name", 0);
			return TCL_ERROR;
		}
		tupno = atoi(argv[3]);
		if (tupno < 0 || tupno >= PQntuples(result))
		{
			Tcl_AppendResult(interp, "argument to tupleArray cannot exceed number of tuples - 1", 0);
			return TCL_ERROR;
		}
		for (i = 0; i < PQnfields(result); i++)
		{
			if (Tcl_SetVar2(interp, argv[4], PQfname(result, i),
#ifdef TCL_ARRAYS
							tcl_value(PQgetvalue(result, tupno, i)),
#else
							PQgetvalue(result, tupno, i),
#endif
							TCL_LEAVE_ERR_MSG) == NULL)
				return TCL_ERROR;
		}
		return TCL_OK;
	}
	else if (strcmp(opt, "-attributes") == 0)
	{
		for (i = 0; i < PQnfields(result); i++)
			Tcl_AppendElement(interp, PQfname(result, i));
		return TCL_OK;
	}
	else if (strcmp(opt, "-lAttributes") == 0)
	{
		for (i = 0; i < PQnfields(result); i++)
		{
			/* start a sublist */
			if (i > 0)
				Tcl_AppendResult(interp, " {", 0);
			else
				Tcl_AppendResult(interp, "{", 0);
			Tcl_AppendElement(interp, PQfname(result, i));
			sprintf(nameBuffer, "%ld", (long) PQftype(result, i));
			Tcl_AppendElement(interp, nameBuffer);
			sprintf(nameBuffer, "%ld", (long) PQfsize(result, i));
			Tcl_AppendElement(interp, nameBuffer);
			/* end the sublist */
			Tcl_AppendResult(interp, "}", 0);
		}
		return TCL_OK;
	}
	else
	{
		Tcl_AppendResult(interp, "Invalid option\n", 0);
		goto Pg_result_errReturn;		/* append help info */
	}


Pg_result_errReturn:
	Tcl_AppendResult(interp,
					 "pg_result result ?option? where option is\n",
					 "\t-status\n",
					 "\t-error\n",
					 "\t-conn\n",
					 "\t-oid\n",
					 "\t-numTuples\n",
					 "\t-cmdTuples\n",
					 "\t-numAttrs\n"
					 "\t-assign arrayVarName\n",
					 "\t-assignbyidx arrayVarName ?appendstr?\n",
					 "\t-getTuple tupleNumber\n",
					 "\t-tupleArray tupleNumber arrayVarName\n",
					 "\t-attributes\n"
					 "\t-lAttributes\n"
					 "\t-clear\n",
					 (char *) 0);
	return TCL_ERROR;


}
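
The -attributes and -lAttributes options walk the result's column metadata through PQfname, PQftype and PQfsize. The same loop in plain C, without the Tcl wrapper (connection settings are placeholders):

#include <stdio.h>
#include <libpq-fe.h>

int main(void)
{
    PGconn *conn = PQconnectdb("");      /* placeholder: rely on PG* environment vars */
    if (PQstatus(conn) != CONNECTION_OK)
        return 1;

    PGresult *res = PQexec(conn, "SELECT 1 AS n, 'x' AS label");
    if (PQresultStatus(res) == PGRES_TUPLES_OK)
    {
        for (int i = 0; i < PQnfields(res); i++)
            printf("column %d: name=%s type-oid=%u size=%d\n",
                   i, PQfname(res, i),
                   (unsigned int) PQftype(res, i),   /* OID in pg_type */
                   PQfsize(res, i));                 /* -1 for variable-length types */
    }
    PQclear(res);
    PQfinish(conn);
    return 0;
}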
Example no. 26
DS::Vault::Node v_fetch_node(uint32_t nodeIdx)
{
    PostgresStrings<1> parm;
    parm.set(0, nodeIdx);
    PGresult* result = PQexecParams(s_postgres,
        "SELECT idx, \"CreateTime\", \"ModifyTime\", \"CreateAgeName\","
        "    \"CreateAgeUuid\", \"CreatorUuid\", \"CreatorIdx\", \"NodeType\","
        "    \"Int32_1\", \"Int32_2\", \"Int32_3\", \"Int32_4\","
        "    \"Uint32_1\", \"Uint32_2\", \"Uint32_3\", \"Uint32_4\","
        "    \"Uuid_1\", \"Uuid_2\", \"Uuid_3\", \"Uuid_4\","
        "    \"String64_1\", \"String64_2\", \"String64_3\", \"String64_4\","
        "    \"String64_5\", \"String64_6\", \"IString64_1\", \"IString64_2\","
        "    \"Text_1\", \"Text_2\", \"Blob_1\", \"Blob_2\""
        "    FROM vault.\"Nodes\" WHERE idx=$1",
        1, 0, parm.m_values, 0, 0, 0);
    if (PQresultStatus(result) != PGRES_TUPLES_OK) {
        fprintf(stderr, "%s:%d:\n    Postgres SELECT error: %s\n",
                __FILE__, __LINE__, PQerrorMessage(s_postgres));
        PQclear(result);
        return DS::Vault::Node();
    }
    if (PQntuples(result) == 0) {
        PQclear(result);
        return DS::Vault::Node();
    }
    DS_DASSERT(PQntuples(result) == 1);

    DS::Vault::Node node;
    node.set_NodeIdx(strtoul(PQgetvalue(result, 0, 0), 0, 10));
    node.set_CreateTime(strtoul(PQgetvalue(result, 0, 1), 0, 10));
    node.set_ModifyTime(strtoul(PQgetvalue(result, 0, 2), 0, 10));
    if (!PQgetisnull(result, 0, 3))
        node.set_CreateAgeName(PQgetvalue(result, 0, 3));
    if (!PQgetisnull(result, 0, 4))
        node.set_CreateAgeUuid(PQgetvalue(result, 0, 4));
    if (!PQgetisnull(result, 0, 5))
        node.set_CreatorUuid(PQgetvalue(result, 0, 5));
    if (!PQgetisnull(result, 0, 6))
        node.set_CreatorIdx(strtoul(PQgetvalue(result, 0, 6), 0, 10));
    node.set_NodeType(strtoul(PQgetvalue(result, 0, 7), 0, 10));
    if (!PQgetisnull(result, 0, 8))
        node.set_Int32_1(strtol(PQgetvalue(result, 0, 8), 0, 10));
    if (!PQgetisnull(result, 0, 9))
        node.set_Int32_2(strtol(PQgetvalue(result, 0, 9), 0, 10));
    if (!PQgetisnull(result, 0, 10))
        node.set_Int32_3(strtol(PQgetvalue(result, 0, 10), 0, 10));
    if (!PQgetisnull(result, 0, 11))
        node.set_Int32_4(strtol(PQgetvalue(result, 0, 11), 0, 10));
    if (!PQgetisnull(result, 0, 12))
        node.set_Uint32_1(strtoul(PQgetvalue(result, 0, 12), 0, 10));
    if (!PQgetisnull(result, 0, 13))
        node.set_Uint32_2(strtoul(PQgetvalue(result, 0, 13), 0, 10));
    if (!PQgetisnull(result, 0, 14))
        node.set_Uint32_3(strtoul(PQgetvalue(result, 0, 14), 0, 10));
    if (!PQgetisnull(result, 0, 15))
        node.set_Uint32_4(strtoul(PQgetvalue(result, 0, 15), 0, 10));
    if (!PQgetisnull(result, 0, 16))
        node.set_Uuid_1(PQgetvalue(result, 0, 16));
    if (!PQgetisnull(result, 0, 17))
        node.set_Uuid_2(PQgetvalue(result, 0, 17));
    if (!PQgetisnull(result, 0, 18))
        node.set_Uuid_3(PQgetvalue(result, 0, 18));
    if (!PQgetisnull(result, 0, 19))
        node.set_Uuid_4(PQgetvalue(result, 0, 19));
    if (!PQgetisnull(result, 0, 20))
        node.set_String64_1(PQgetvalue(result, 0, 20));
    if (!PQgetisnull(result, 0, 21))
        node.set_String64_2(PQgetvalue(result, 0, 21));
    if (!PQgetisnull(result, 0, 22))
        node.set_String64_3(PQgetvalue(result, 0, 22));
    if (!PQgetisnull(result, 0, 23))
        node.set_String64_4(PQgetvalue(result, 0, 23));
    if (!PQgetisnull(result, 0, 24))
        node.set_String64_5(PQgetvalue(result, 0, 24));
    if (!PQgetisnull(result, 0, 25))
        node.set_String64_6(PQgetvalue(result, 0, 25));
    if (!PQgetisnull(result, 0, 26))
        node.set_IString64_1(PQgetvalue(result, 0, 26));
    if (!PQgetisnull(result, 0, 27))
        node.set_IString64_2(PQgetvalue(result, 0, 27));
    if (!PQgetisnull(result, 0, 28))
        node.set_Text_1(PQgetvalue(result, 0, 28));
    if (!PQgetisnull(result, 0, 29))
        node.set_Text_2(PQgetvalue(result, 0, 29));
    if (!PQgetisnull(result, 0, 30))
        node.set_Blob_1(DS::Base64Decode(PQgetvalue(result, 0, 30)));
    if (!PQgetisnull(result, 0, 31))
        node.set_Blob_2(DS::Base64Decode(PQgetvalue(result, 0, 31)));

    PQclear(result);
    return node;
}
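
As a point of reference, the PQexecParams() call above passes the node index as a single text-format parameter; the PostgresStrings helper is project-specific, but the underlying libpq pattern is roughly the following minimal sketch (fetch_node_idx and the reduced column list are illustrative assumptions, not part of the original code):

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <libpq-fe.h>

/* Sketch: fetch a single row keyed by an integer passed as a text parameter. */
static unsigned long fetch_node_idx(PGconn *conn, uint32_t nodeIdx)
{
    char idxbuf[16];
    const char *values[1];

    snprintf(idxbuf, sizeof(idxbuf), "%u", (unsigned) nodeIdx); /* integer rendered as text */
    values[0] = idxbuf;

    PGresult *result = PQexecParams(conn,
        "SELECT idx FROM vault.\"Nodes\" WHERE idx=$1",
        1,          /* one parameter */
        NULL,       /* let the server infer the parameter type */
        values,
        NULL, NULL, /* lengths/formats only matter for binary parameters */
        0);         /* request text-format results */
    if (PQresultStatus(result) != PGRES_TUPLES_OK || PQntuples(result) == 0)
    {
        PQclear(result);
        return 0;
    }
    unsigned long idx = strtoul(PQgetvalue(result, 0, 0), NULL, 10);
    PQclear(result);
    return idx;
}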
Example no. 27
0
/*
 * old_9_6_check_for_unknown_data_type_usage()
 *	9.6 -> 10
 *	It's no longer allowed to create tables or views with "unknown"-type
 *	columns.  We do not complain about views with such columns, because
 *	they should get silently converted to "text" columns during the DDL
 *	dump and reload; it seems unlikely to be worth making users do that
 *	by hand.  However, if there's a table with such a column, the DDL
 *	reload will fail, so we should pre-detect that rather than failing
 *	mid-upgrade.  Worse, if there's a matview with such a column, the
 *	DDL reload will silently change it to "text" which won't match the
 *	on-disk storage (which is like "cstring").  So we *must* reject that.
 *	Also check composite types, in case they are used for table columns.
 *	We needn't check indexes, because "unknown" has no opclasses.
 */
void
old_9_6_check_for_unknown_data_type_usage(ClusterInfo *cluster)
{
	int			dbnum;
	FILE	   *script = NULL;
	bool		found = false;
	char		output_path[MAXPGPATH];

	prep_status("Checking for invalid \"unknown\" user columns");

	snprintf(output_path, sizeof(output_path), "tables_using_unknown.txt");

	for (dbnum = 0; dbnum < cluster->dbarr.ndbs; dbnum++)
	{
		PGresult   *res;
		bool		db_used = false;
		int			ntups;
		int			rowno;
		int			i_nspname,
					i_relname,
					i_attname;
		DbInfo	   *active_db = &cluster->dbarr.dbs[dbnum];
		PGconn	   *conn = connectToServer(cluster, active_db->db_name);

		res = executeQueryOrDie(conn,
								"SELECT n.nspname, c.relname, a.attname "
								"FROM	pg_catalog.pg_class c, "
								"		pg_catalog.pg_namespace n, "
								"		pg_catalog.pg_attribute a "
								"WHERE	c.oid = a.attrelid AND "
								"		NOT a.attisdropped AND "
								"		a.atttypid = 'pg_catalog.unknown'::pg_catalog.regtype AND "
								"		c.relkind IN ("
								CppAsString2(RELKIND_RELATION) ", "
								CppAsString2(RELKIND_COMPOSITE_TYPE) ", "
								CppAsString2(RELKIND_MATVIEW) ") AND "
								"		c.relnamespace = n.oid AND "
		/* exclude possible orphaned temp tables */
								"		n.nspname !~ '^pg_temp_' AND "
								"		n.nspname !~ '^pg_toast_temp_' AND "
								"		n.nspname NOT IN ('pg_catalog', 'information_schema')");

		ntups = PQntuples(res);
		i_nspname = PQfnumber(res, "nspname");
		i_relname = PQfnumber(res, "relname");
		i_attname = PQfnumber(res, "attname");
		for (rowno = 0; rowno < ntups; rowno++)
		{
			found = true;
			if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL)
				pg_fatal("could not open file \"%s\": %s\n", output_path,
						 strerror(errno));
			if (!db_used)
			{
				fprintf(script, "Database: %s\n", active_db->db_name);
				db_used = true;
			}
			fprintf(script, "  %s.%s.%s\n",
					PQgetvalue(res, rowno, i_nspname),
					PQgetvalue(res, rowno, i_relname),
					PQgetvalue(res, rowno, i_attname));
		}

		PQclear(res);

		PQfinish(conn);
	}

	if (script)
		fclose(script);

	if (found)
	{
		pg_log(PG_REPORT, "fatal\n");
		pg_fatal("Your installation contains the \"unknown\" data type in user tables.  This\n"
				 "data type is no longer allowed in tables, so this cluster cannot currently\n"
				 "be upgraded.  You can remove the problem tables and restart the upgrade.\n"
				 "A list of the problem columns is in the file:\n"
				 "    %s\n\n", output_path);
	}
	else
		check_ok();
}
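
The catalog query above is self-contained, so the same pre-check can be run against a single database with plain libpq before attempting the upgrade. A minimal sketch, assuming a connection string passed on the command line (the relkind letters 'r', 'c' and 'm' are the expanded values of the CppAsString2 macros used above):

#include <stdio.h>
#include <libpq-fe.h>

int main(int argc, char **argv)
{
    PGconn *conn = PQconnectdb(argc > 1 ? argv[1] : "");
    if (PQstatus(conn) != CONNECTION_OK)
    {
        fprintf(stderr, "connection failed: %s", PQerrorMessage(conn));
        return 1;
    }

    PGresult *res = PQexec(conn,
        "SELECT n.nspname, c.relname, a.attname "
        "FROM pg_catalog.pg_class c, pg_catalog.pg_namespace n, "
        "     pg_catalog.pg_attribute a "
        "WHERE c.oid = a.attrelid AND NOT a.attisdropped AND "
        "      a.atttypid = 'pg_catalog.unknown'::pg_catalog.regtype AND "
        "      c.relkind IN ('r', 'c', 'm') AND "
        "      c.relnamespace = n.oid AND "
        "      n.nspname !~ '^pg_temp_' AND "
        "      n.nspname !~ '^pg_toast_temp_' AND "
        "      n.nspname NOT IN ('pg_catalog', 'information_schema')");
    if (PQresultStatus(res) != PGRES_TUPLES_OK)
    {
        fprintf(stderr, "query failed: %s", PQerrorMessage(conn));
        PQclear(res);
        PQfinish(conn);
        return 1;
    }

    /* each row is a column that would block the 9.6 -> 10 upgrade */
    for (int i = 0; i < PQntuples(res); i++)
        printf("%s.%s.%s\n", PQgetvalue(res, i, 0),
               PQgetvalue(res, i, 1), PQgetvalue(res, i, 2));

    PQclear(res);
    PQfinish(conn);
    return 0;
}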
Example no. 28
0
/*
 * Receive a tar format file from the connection to the server, and write
 * the data from this file directly into a tar file. If compression is
 * enabled, the data will be compressed while written to the file.
 *
 * The file will be named base.tar[.gz] if it's for the main data directory
 * or <tablespaceoid>.tar[.gz] if it's for another tablespace.
 *
 * No attempt to inspect or validate the contents of the file is done.
 */
static void
ReceiveTarFile(PGconn *conn, PGresult *res, int rownum)
{
	char		filename[MAXPGPATH];
	char	   *copybuf = NULL;
	FILE	   *tarfile = NULL;

#ifdef HAVE_LIBZ
	gzFile		ztarfile = NULL;
#endif

	if (PQgetisnull(res, rownum, 0))
	{
		/*
		 * Base tablespaces
		 */
		if (strcmp(basedir, "-") == 0)
		{
#ifdef HAVE_LIBZ
			if (compresslevel != 0)
			{
				ztarfile = gzdopen(dup(fileno(stdout)), "wb");
				if (gzsetparams(ztarfile, compresslevel,
								Z_DEFAULT_STRATEGY) != Z_OK)
				{
					fprintf(stderr,
							_("%s: could not set compression level %d: %s\n"),
							progname, compresslevel, get_gz_error(ztarfile));
					disconnect_and_exit(1);
				}
			}
			else
#endif
				tarfile = stdout;
		}
		else
		{
#ifdef HAVE_LIBZ
			if (compresslevel != 0)
			{
				snprintf(filename, sizeof(filename), "%s/base.tar.gz", basedir);
				ztarfile = gzopen(filename, "wb");
				if (gzsetparams(ztarfile, compresslevel,
								Z_DEFAULT_STRATEGY) != Z_OK)
				{
					fprintf(stderr,
							_("%s: could not set compression level %d: %s\n"),
							progname, compresslevel, get_gz_error(ztarfile));
					disconnect_and_exit(1);
				}
			}
			else
#endif
			{
				snprintf(filename, sizeof(filename), "%s/base.tar", basedir);
				tarfile = fopen(filename, "wb");
			}
		}
	}
	else
	{
		/*
		 * Specific tablespace
		 */
#ifdef HAVE_LIBZ
		if (compresslevel != 0)
		{
			snprintf(filename, sizeof(filename), "%s/%s.tar.gz", basedir,
					 PQgetvalue(res, rownum, 0));
			ztarfile = gzopen(filename, "wb");
			if (gzsetparams(ztarfile, compresslevel,
							Z_DEFAULT_STRATEGY) != Z_OK)
			{
				fprintf(stderr,
						_("%s: could not set compression level %d: %s\n"),
						progname, compresslevel, get_gz_error(ztarfile));
				disconnect_and_exit(1);
			}
		}
		else
#endif
		{
			snprintf(filename, sizeof(filename), "%s/%s.tar", basedir,
					 PQgetvalue(res, rownum, 0));
			tarfile = fopen(filename, "wb");
		}
	}

#ifdef HAVE_LIBZ
	if (compresslevel != 0)
	{
		if (!ztarfile)
		{
			/* Compression is in use */
			fprintf(stderr,
					_("%s: could not create compressed file \"%s\": %s\n"),
					progname, filename, get_gz_error(ztarfile));
			disconnect_and_exit(1);
		}
	}
	else
#endif
	{
		/* Either no zlib support, or zlib support but compresslevel = 0 */
		if (!tarfile)
		{
			fprintf(stderr, _("%s: could not create file \"%s\": %s\n"),
					progname, filename, strerror(errno));
			disconnect_and_exit(1);
		}
	}

	/*
	 * Get the COPY data stream
	 */
	res = PQgetResult(conn);
	if (PQresultStatus(res) != PGRES_COPY_OUT)
	{
		fprintf(stderr, _("%s: could not get COPY data stream: %s"),
				progname, PQerrorMessage(conn));
		disconnect_and_exit(1);
	}

	while (1)
	{
		int			r;

		if (copybuf != NULL)
		{
			PQfreemem(copybuf);
			copybuf = NULL;
		}

		r = PQgetCopyData(conn, &copybuf, 0);
		if (r == -1)
		{
			/*
			 * End of the COPY stream. Close the file (but not stdout).
			 *
			 * Also, write two completely empty blocks at the end of the tar
			 * file, as required by some tar programs.
			 */
			char		zerobuf[1024];

			MemSet(zerobuf, 0, sizeof(zerobuf));
#ifdef HAVE_LIBZ
			if (ztarfile != NULL)
			{
				if (gzwrite(ztarfile, zerobuf, sizeof(zerobuf)) !=
					sizeof(zerobuf))
				{
					fprintf(stderr,
					_("%s: could not write to compressed file \"%s\": %s\n"),
							progname, filename, get_gz_error(ztarfile));
					disconnect_and_exit(1);
				}
			}
			else
#endif
			{
				if (fwrite(zerobuf, sizeof(zerobuf), 1, tarfile) != 1)
				{
					fprintf(stderr,
							_("%s: could not write to file \"%s\": %s\n"),
							progname, filename, strerror(errno));
					disconnect_and_exit(1);
				}
			}

#ifdef HAVE_LIBZ
			if (ztarfile != NULL)
			{
				if (gzclose(ztarfile) != 0)
				{
					fprintf(stderr,
					   _("%s: could not close compressed file \"%s\": %s\n"),
							progname, filename, get_gz_error(ztarfile));
					disconnect_and_exit(1);
				}
			}
			else
#endif
			{
				if (strcmp(basedir, "-") != 0)
				{
					if (fclose(tarfile) != 0)
					{
						fprintf(stderr,
								_("%s: could not close file \"%s\": %s\n"),
								progname, filename, strerror(errno));
						disconnect_and_exit(1);
					}
				}
			}

			break;
		}
		else if (r == -2)
		{
			fprintf(stderr, _("%s: could not read COPY data: %s"),
					progname, PQerrorMessage(conn));
			disconnect_and_exit(1);
		}

#ifdef HAVE_LIBZ
		if (ztarfile != NULL)
		{
			if (gzwrite(ztarfile, copybuf, r) != r)
			{
				fprintf(stderr,
					_("%s: could not write to compressed file \"%s\": %s\n"),
						progname, filename, get_gz_error(ztarfile));
				disconnect_and_exit(1);
			}
		}
		else
#endif
		{
			if (fwrite(copybuf, r, 1, tarfile) != 1)
			{
				fprintf(stderr, _("%s: could not write to file \"%s\": %s\n"),
						progname, filename, strerror(errno));
				disconnect_and_exit(1);
			}
		}
		totaldone += r;
		if (showprogress)
			progress_report(rownum, filename);
	}							/* while (1) */

	if (copybuf != NULL)
		PQfreemem(copybuf);
}
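
Stripped of the tar/zlib handling, the core of the loop above is the standard libpq COPY OUT pattern: PQgetCopyData() hands back one chunk at a time, -1 signals the end of the stream, and -2 signals an error. A minimal sketch of just that pattern (drain_copy_out is an illustrative name, not part of pg_basebackup):

#include <stdio.h>
#include <libpq-fe.h>

/* Sketch: write every COPY OUT chunk from conn to an already-open file. */
static int drain_copy_out(PGconn *conn, FILE *out)
{
    char *copybuf = NULL;
    int   r;

    for (;;)
    {
        r = PQgetCopyData(conn, &copybuf, 0);   /* 0 = block until data arrives */
        if (r == -1)
            break;                              /* end of the COPY stream */
        if (r == -2)
        {
            fprintf(stderr, "could not read COPY data: %s", PQerrorMessage(conn));
            return -1;
        }
        if (fwrite(copybuf, r, 1, out) != 1)
        {
            fprintf(stderr, "could not write %d bytes\n", r);
            PQfreemem(copybuf);
            return -1;
        }
        PQfreemem(copybuf);
    }

    /* a final PQgetResult() call is still needed to collect the command status */
    return 0;
}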
Example no. 29
0
/*
 * old_9_6_invalidate_hash_indexes()
 *	9.6 -> 10
 *	Hash index binary format has changed from 9.6->10.0
 */
void
old_9_6_invalidate_hash_indexes(ClusterInfo *cluster, bool check_mode)
{
	int			dbnum;
	FILE	   *script = NULL;
	bool		found = false;
	char	   *output_path = "reindex_hash.sql";

	prep_status("Checking for hash indexes");

	for (dbnum = 0; dbnum < cluster->dbarr.ndbs; dbnum++)
	{
		PGresult   *res;
		bool		db_used = false;
		int			ntups;
		int			rowno;
		int			i_nspname,
					i_relname;
		DbInfo	   *active_db = &cluster->dbarr.dbs[dbnum];
		PGconn	   *conn = connectToServer(cluster, active_db->db_name);

		/* find hash indexes */
		res = executeQueryOrDie(conn,
								"SELECT n.nspname, c.relname "
								"FROM	pg_catalog.pg_class c, "
								"		pg_catalog.pg_index i, "
								"		pg_catalog.pg_am a, "
								"		pg_catalog.pg_namespace n "
								"WHERE	i.indexrelid = c.oid AND "
								"		c.relam = a.oid AND "
								"		c.relnamespace = n.oid AND "
								"		a.amname = 'hash'"
			);

		ntups = PQntuples(res);
		i_nspname = PQfnumber(res, "nspname");
		i_relname = PQfnumber(res, "relname");
		for (rowno = 0; rowno < ntups; rowno++)
		{
			found = true;
			if (!check_mode)
			{
				if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL)
					pg_fatal("could not open file \"%s\": %s\n", output_path,
							 strerror(errno));
				if (!db_used)
				{
					PQExpBufferData connectbuf;

					initPQExpBuffer(&connectbuf);
					appendPsqlMetaConnect(&connectbuf, active_db->db_name);
					fputs(connectbuf.data, script);
					termPQExpBuffer(&connectbuf);
					db_used = true;
				}
				fprintf(script, "REINDEX INDEX %s.%s;\n",
						quote_identifier(PQgetvalue(res, rowno, i_nspname)),
						quote_identifier(PQgetvalue(res, rowno, i_relname)));
			}
		}

		PQclear(res);

		if (!check_mode && db_used)
		{
			/* mark hash indexes as invalid */
			PQclear(executeQueryOrDie(conn,
									  "UPDATE pg_catalog.pg_index i "
									  "SET	indisvalid = false "
									  "FROM	pg_catalog.pg_class c, "
									  "		pg_catalog.pg_am a, "
									  "		pg_catalog.pg_namespace n "
									  "WHERE	i.indexrelid = c.oid AND "
									  "		c.relam = a.oid AND "
									  "		c.relnamespace = n.oid AND "
									  "		a.amname = 'hash'"));
		}

		PQfinish(conn);
	}

	if (script)
		fclose(script);

	if (found)
	{
		report_status(PG_WARNING, "warning");
		if (check_mode)
			pg_log(PG_WARNING, "\n"
				   "Your installation contains hash indexes.  These indexes have different\n"
				   "internal formats between your old and new clusters, so they must be\n"
				   "reindexed with the REINDEX command.  After upgrading, you will be given\n"
				   "REINDEX instructions.\n\n");
		else
			pg_log(PG_WARNING, "\n"
				   "Your installation contains hash indexes.  These indexes have different\n"
				   "internal formats between your old and new clusters, so they must be\n"
				   "reindexed with the REINDEX command.  The file\n"
				   "    %s\n"
				   "when executed by psql by the database superuser will recreate all invalid\n"
				   "indexes; until then, none of these indexes will be used.\n\n",
				   output_path);
	}
	else
		check_ok();
}
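
The script-generation step above leans on the PQExpBuffer string API: appendPsqlMetaConnect() builds a properly quoted \connect line for each database before the REINDEX commands are appended. A minimal sketch of the same buffer pattern, with the quoting details simplified away (emit_connect_line is an illustrative helper, and the unquoted %s is only an approximation of what appendPsqlMetaConnect actually produces):

#include <stdio.h>
#include "pqexpbuffer.h"

/* Sketch: write a psql \connect line for dbname into an open script file. */
static void emit_connect_line(FILE *script, const char *dbname)
{
    PQExpBufferData connectbuf;

    initPQExpBuffer(&connectbuf);
    appendPQExpBuffer(&connectbuf, "\\connect %s\n", dbname);
    fputs(connectbuf.data, script);
    termPQExpBuffer(&connectbuf);
}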
Example no. 30
0
static void
WitnessMonitor(void)
{
	char monitor_witness_timestamp[MAXLEN];
	PGresult	*res;

	/*
	 * Check if the master is still available; if we cannot reconnect after
	 * 5 minutes of retries, give up and exit.
	 */
	CheckPrimaryConnection(); // this takes up to NUM_RETRY * SLEEP_RETRY seconds

	if (PQstatus(primaryConn) != CONNECTION_OK)
	{
		/*
		 * If we can't reconnect, just exit...
		 * XXX we need to make the witness connect to the new master
		 */
		PQfinish(myLocalConn);
		exit(0);
	}

	/*
	 * First check if a command is already being executed and, if so,
	 * cancel the query so we can insert the current record.
	 */
	if (PQisBusy(primaryConn) == 1)
		CancelQuery();

	/* Get the current timestamp from the local (witness) node */
	sqlquery_snprintf(sqlquery, "SELECT CURRENT_TIMESTAMP ");

	res = PQexec(myLocalConn, sqlquery);
	if (PQresultStatus(res) != PGRES_TUPLES_OK)
	{
		log_err(_("PQexec failed: %s\n"), PQerrorMessage(myLocalConn));
		PQclear(res);
		/* if there is any error just let it be and retry in next loop */
		return;
	}

	strncpy(monitor_witness_timestamp, PQgetvalue(res, 0, 0), MAXLEN);
	PQclear(res);

	/*
	 * Build the SQL to execute on primary
	 */
	sqlquery_snprintf(sqlquery,
	                  "INSERT INTO %s.repl_monitor "
	                  "VALUES(%d, %d, '%s'::timestamp with time zone, "
	                  " pg_current_xlog_location(), null,  "
	                  " 0, 0)",
	                  repmgr_schema, primary_options.node, local_options.node, monitor_witness_timestamp);

	/*
	 * Execute the query asynchronously, but don't check for a result. We
	 * will check the result next time we pause for a monitor step.
	 */
	if (PQsendQuery(primaryConn, sqlquery) == 0)
		log_warning(_("Query could not be sent to primary. %s\n"),
		            PQerrorMessage(primaryConn));
}
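
Because the INSERT is sent with PQsendQuery() and never waited on here, its outcome has to be collected on a later pass (which is also why the function starts by cancelling any query that is still busy). A minimal sketch of how that later check could look with standard libpq calls (collect_async_result is an illustrative name; repmgr's actual handling may differ):

#include <stdio.h>
#include <libpq-fe.h>

/* Sketch: non-blocking check for the result of a previously sent query. */
static void collect_async_result(PGconn *conn)
{
    PGresult *res;

    if (PQconsumeInput(conn) == 0)
    {
        fprintf(stderr, "could not read from primary: %s", PQerrorMessage(conn));
        return;
    }
    if (PQisBusy(conn))
        return;                 /* still running; check again on the next loop */

    /* drain every pending result of the INSERT sent last time */
    while ((res = PQgetResult(conn)) != NULL)
    {
        if (PQresultStatus(res) != PGRES_COMMAND_OK)
            fprintf(stderr, "monitor INSERT failed: %s", PQerrorMessage(conn));
        PQclear(res);
    }
}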