PGresult *
do_postgres_cCommand_execute_async(VALUE self, VALUE connection, PGconn *db, VALUE query)
{
  PGresult *response;
  char *str = StringValuePtr(query);

  while ((response = PQgetResult(db))) {
    PQclear(response);
  }

  struct timeval start;
  int retval;

  gettimeofday(&start, NULL);
  retval = PQsendQuery(db, str);

  if (!retval) {
    if (PQstatus(db) != CONNECTION_OK) {
      PQreset(db);

      if (PQstatus(db) == CONNECTION_OK) {
        retval = PQsendQuery(db, str);
      }
      else {
        do_postgres_full_connect(connection, db);
        retval = PQsendQuery(db, str);
      }
    }

    if (!retval) {
      rb_raise(eDO_ConnectionError, "%s", PQerrorMessage(db));
    }
  }

  int socket_fd = PQsocket(db);
  fd_set rset;

  while (1) {
    FD_ZERO(&rset);
    FD_SET(socket_fd, &rset);

    retval = rb_thread_select(socket_fd + 1, &rset, NULL, NULL, NULL);

    if (retval < 0) {
      rb_sys_fail(0);
    }

    if (retval == 0) {
      continue;
    }

    if (PQconsumeInput(db) == 0) {
      rb_raise(eDO_ConnectionError, "%s", PQerrorMessage(db));
    }

    if (PQisBusy(db) == 0) {
      break;
    }
  }

  data_objects_debug(connection, query, &start);

  return PQgetResult(db);
}
void finishHim(PGconn *conn)
{
    PGresult *res;

    if (PQputCopyEnd(conn, NULL)) {
        //std::cerr << std::endl << "Copy End..." << std::endl;
    }

    while ((res = PQgetResult(conn)) != NULL) {
        //std::cerr << "\r" << "Waiting for Copy to finish";
        PQclear(res);   /* free each result while draining the queue */
    }
    //std::cerr << "\r" << "Copy finished " << std::endl;

    PQendcopy(conn);

    while ((res = PQgetResult(conn)) != NULL) {
        //std::cerr << "\r" << "Waiting for Server to Sync";
        PQclear(res);   /* free each result while draining the queue */
    }
    //std::cerr << "Sync Done" << std::endl;

    res = PQexec(conn, "END;");
    if (PQresultStatus(res) != PGRES_COMMAND_OK) {
        std::cerr << "COMMAND END failed: ";
        std::cerr << PQerrorMessage(conn) << std::endl;
        PQclear(res);
        PQfinish(conn);
        return;
    }

    PQclear(res);
    PQfinish(conn);
}
static void pq_event(evutil_socket_t fd, short event, void *arg)
{
    struct connection_struct *database = (struct connection_struct *) arg;

    if (database->queries) {
        if (database->queries->sent == 0) {
            PQsendQuery(database->conn, database->queries->query);
            database->queries->sent = 1;
        }

        if (PQconsumeInput(database->conn) && !PQisBusy(database->conn)) {
            PGresult *res = PQgetResult(database->conn);

            while (res) {
                if (database->queries->callback)
                    database->queries->callback(res, database->queries->context,
                                                database->queries->query);

                if (database->report_errors && PQresultStatus(res) != PGRES_COMMAND_OK)
                    fprintf(stderr, "Query: '%s' returned error\n\t%s\n",
                            database->queries->query, PQresultErrorMessage(res));

                PQclear(res);
                res = PQgetResult(database->conn);
            }

            database->query_count--;

            struct query_struct *old = database->queries;
            database->queries = database->queries->next;
            free(old->query);
            free(old);

            pq_event(fd, event, arg);
        }
    }
}
/*
 * ClearRemainingResults reads result objects from the connection until we get
 * null, and clears these results. This is the last step in completing an async
 * query.
 */
static void
ClearRemainingResults(PGconn *connection)
{
    PGresult *result = PQgetResult(connection);

    while (result != NULL)
    {
        PQclear(result);
        result = PQgetResult(connection);
    }
}
void output_gazetteer_t::stop_copy(void)
{
    PGresult *res;

    /* Do we have a copy active? */
    if (!CopyActive)
        return;

    /* Terminate the copy */
    if (PQputCopyEnd(Connection, NULL) != 1) {
        fprintf(stderr, "COPY_END for place failed: %s\n", PQerrorMessage(Connection));
        util::exit_nicely();
    }

    /* Check the result */
    res = PQgetResult(Connection);
    if (PQresultStatus(res) != PGRES_COMMAND_OK) {
        fprintf(stderr, "COPY_END for place failed: %s\n", PQerrorMessage(Connection));
        PQclear(res);
        util::exit_nicely();
    }

    /* Discard the result */
    PQclear(res);

    /* We no longer have an active copy */
    CopyActive = 0;

    return;
}
/* load rows directly from network buffer */
static void exec_query_zero_copy(struct Context *ctx, const char *q)
{
    PGconn *db = ctx->db;
    PGresult *r;
    ExecStatusType s;
    PGdataValue *cols;

    ctx->count = 0;

    if (!PQsendQuery(db, q))
        die(db, "PQsendQuery");
    if (!PQsetSingleRowMode(db))
        die(NULL, "PQsetSingleRowMode");

    /* loop until the entire result set has been consumed */
    while (PQgetRowData(db, &r, &cols)) {
        proc_row_zcopy(ctx, r, cols);
    }

    /* get final result */
    r = PQgetResult(db);
    s = PQresultStatus(r);
    switch (s) {
    case PGRES_TUPLES_OK:
        //printf("query successful, got %d rows\n", ctx->count);
        ctx->count = 0;
        break;
    default:
        printf("result: %s\n", PQresStatus(s));
        break;
    }
    PQclear(r);
}
/* Reads the next result row from the snapshot query, parses and processes it.
 * Blocks until a new row is available, if necessary. */
int snapshot_poll(client_context_t context) {
    int err = 0;
    PGresult *res = PQgetResult(context->sql_conn);

    /* null result indicates that there are no more rows */
    if (!res) {
        check(err, exec_sql(context, "COMMIT"));
        PQfinish(context->sql_conn);
        context->sql_conn = NULL;

        // Invoke the commit callback with xid==0 to indicate end of snapshot
        commit_txn_cb on_commit = context->repl.frame_reader->on_commit_txn;
        void *cb_context = context->repl.frame_reader->cb_context;
        if (on_commit) {
            check(err, on_commit(cb_context, context->repl.start_lsn, 0));
        }
        return 0;
    }

    ExecStatusType status = PQresultStatus(res);
    if (status != PGRES_SINGLE_TUPLE && status != PGRES_TUPLES_OK) {
        client_error(context, "While reading snapshot: %s: %s",
                     PQresStatus(PQresultStatus(res)),
                     PQresultErrorMessage(res));
        PQclear(res);
        return EIO;
    }

    int tuples = PQntuples(res);
    for (int tuple = 0; tuple < tuples; tuple++) {
        check(err, snapshot_tuple(context, res, tuple));
    }
    PQclear(res);
    return err;
}
PGconn *
pgpool_getfreeconn(void)
{
    assert(initialized);

    int id = last_used;

    while (1) {
        id = (id + 1) % poolsize;

        PGconn *c = pool[id];
        assert(c);

        PGresult *res;
        int rc;

        // can we get state AND is the server
        // not blocking and any results?
        while ((rc = PQconsumeInput(c)) && !PQisBusy(c) && (res = PQgetResult(c))) {
            // we have results. we need to clear those resultsets
            if (reshandler(c, res)) {
                return NULL;
            }
        }

        if (!rc) {
            log_fatal("pgpool", "Error in PQconsumeInput. %s", PQerrorMessage(c));
            return NULL;
        } else if (!PQisBusy(c)) {
            last_used = id;
            return c;
        }

        if (id == last_used) {
            usleep(150000);
        }
    }
}
PGresult *
do_postgres_cCommand_execute_sync(VALUE self, VALUE connection, PGconn *db, VALUE query)
{
  char *str = StringValuePtr(query);
  PGresult *response;

  while ((response = PQgetResult(db))) {
    PQclear(response);
  }

  struct timeval start;

  gettimeofday(&start, NULL);
  response = PQexec(db, str);

  if (!response) {
    if (PQstatus(db) != CONNECTION_OK) {
      PQreset(db);

      if (PQstatus(db) == CONNECTION_OK) {
        response = PQexec(db, str);
      }
      else {
        do_postgres_full_connect(connection, db);
        response = PQexec(db, str);
      }
    }

    if (!response) {
      /* pass the error text through a format string, never as the format itself */
      rb_raise(eDO_ConnectionError, "%s", PQerrorMessage(db));
    }
  }

  data_objects_debug(connection, query, &start);

  return response;
}
/*
 * GetQueryResult
 *
 * Process the query result. Returns true if there's no error, false
 * otherwise -- but errors about trying to vacuum a missing relation are
 * reported and subsequently ignored.
 */
static bool
GetQueryResult(PGconn *conn, const char *dbname, const char *progname)
{
    PGresult   *result;

    SetCancelConn(conn);
    while ((result = PQgetResult(conn)) != NULL)
    {
        /*
         * If errors are found, report them. Errors about a missing table are
         * harmless so we continue processing; but die for other errors.
         */
        if (PQresultStatus(result) != PGRES_COMMAND_OK)
        {
            char       *sqlState = PQresultErrorField(result, PG_DIAG_SQLSTATE);

            fprintf(stderr, _("%s: vacuuming of database \"%s\" failed: %s"),
                    progname, dbname, PQerrorMessage(conn));

            if (sqlState && strcmp(sqlState, ERRCODE_UNDEFINED_TABLE) != 0)
            {
                PQclear(result);
                return false;
            }
        }

        PQclear(result);
    }
    ResetCancelConn();

    return true;
}
static void stop_error_copy(void)
{
    PGresult *res;

    /* Do we have a copy active? */
    if (!CopyErrorActive)
        return;

    /* Terminate the copy */
    if (PQputCopyEnd(ConnectionError, NULL) != 1) {
        fprintf(stderr, "COPY_END for import_polygon_error failed: %s\n",
                PQerrorMessage(ConnectionError));
        exit_nicely();
    }

    /* Check the result */
    res = PQgetResult(ConnectionError);
    if (PQresultStatus(res) != PGRES_COMMAND_OK) {
        fprintf(stderr, "COPY_END for import_polygon_error failed: %s\n",
                PQerrorMessage(ConnectionError));
        PQclear(res);
        exit_nicely();
    }

    /* Discard the result */
    PQclear(res);

    /* We no longer have an active copy */
    CopyErrorActive = 0;

    return;
}
/*
 * Send a query and wait for the results by using the asynchronous libpq
 * functions and the backend version of select().
 *
 * We must not use the regular blocking libpq functions like PQexec()
 * since they are uninterruptible by signals on some platforms, such as
 * Windows.
 *
 * We must also not use vanilla select() here since it cannot handle the
 * signal emulation layer on Windows.
 *
 * The function is modeled on PQexec() in libpq, but only implements
 * those parts that are in use in the walreceiver.
 *
 * Queries are always executed on the connection in streamConn.
 */
static PGresult *
libpqrcv_PQexec(const char *query)
{
    PGresult   *result = NULL;
    PGresult   *lastResult = NULL;

    /*
     * PQexec() silently discards any prior query results on the connection.
     * This is not required for walreceiver since it's expected that walsender
     * won't generate any such junk results.
     */

    /*
     * Submit a query. Since we don't use non-blocking mode, this also can
     * block. But its risk is relatively small, so we ignore that for now.
     */
    if (!PQsendQuery(streamConn, query))
        return NULL;

    for (;;)
    {
        /*
         * Receive data until PQgetResult is ready to get the result without
         * blocking.
         */
        while (PQisBusy(streamConn))
        {
            /*
             * We don't need to break down the sleep into smaller increments,
             * and check for interrupts after each nap, since we can just
             * elog(FATAL) within SIGTERM signal handler if the signal arrives
             * in the middle of establishment of replication connection.
             */
            if (!libpq_select(-1))
                continue;       /* interrupted */
            if (PQconsumeInput(streamConn) == 0)
                return NULL;    /* trouble */
        }

        /*
         * Emulate the PQexec()'s behavior of returning the last result when
         * there are many. Since walsender will never generate multiple
         * results, we skip the concatenation of error messages.
         */
        result = PQgetResult(streamConn);
        if (result == NULL)
            break;              /* query is complete */

        PQclear(lastResult);
        lastResult = result;

        if (PQresultStatus(lastResult) == PGRES_COPY_IN ||
            PQresultStatus(lastResult) == PGRES_COPY_OUT ||
            PQresultStatus(lastResult) == PGRES_COPY_BOTH ||
            PQstatus(streamConn) == CONNECTION_BAD)
            break;
    }

    return lastResult;
}
void output_gazetteer_t::stop_copy(void)
{
    /* Do we have a copy active? */
    if (!copy_active)
        return;

    if (buffer.length() > 0) {
        pgsql_CopyData("place", Connection, buffer);
        buffer.clear();
    }

    /* Terminate the copy */
    if (PQputCopyEnd(Connection, nullptr) != 1) {
        std::cerr << "COPY_END for place failed: " << PQerrorMessage(Connection) << "\n";
        util::exit_nicely();
    }

    /* Check the result */
    PGresult *res = PQgetResult(Connection);
    if (PQresultStatus(res) != PGRES_COMMAND_OK) {
        std::cerr << "COPY_END for place failed: " << PQerrorMessage(Connection) << "\n";
        PQclear(res);
        util::exit_nicely();
    }

    /* Discard the result */
    PQclear(res);

    /* We no longer have an active copy */
    copy_active = false;
}
static PGresult *
cCommand_execute_sync(VALUE self, PGconn *db, VALUE query)
{
  PGresult *response;
  struct timeval start;
  char *str = StringValuePtr(query);

  while ((response = PQgetResult(db)) != NULL) {
    PQclear(response);
  }

  gettimeofday(&start, NULL);
  response = PQexec(db, str);

  if (response == NULL) {
    if (PQstatus(db) != CONNECTION_OK) {
      PQreset(db);

      if (PQstatus(db) == CONNECTION_OK) {
        response = PQexec(db, str);
      }
      else {
        VALUE connection = rb_iv_get(self, "@connection");
        full_connect(connection, db);
        response = PQexec(db, str);
      }
    }

    if (response == NULL) {
      /* pass the error text through a format string, never as the format itself */
      rb_raise(eConnectionError, "%s", PQerrorMessage(db));
    }
  }

  data_objects_debug(query, &start);

  return response;
}
static void evpg_query_finished(int sock, short which, void **data)
{
    struct evpg_db_node *dbnode;
    struct evpg_cfg *config;
    const char *querystr;
    void (*cb)(PGresult *, void *);
    void *usrdata;
    struct event *event;

    config = data[0];
    querystr = data[1];
    cb = data[2];
    usrdata = data[3];
    dbnode = data[4];
    event = data[5];

    PQconsumeInput(dbnode->dbconn);

    if (PQisBusy(dbnode->dbconn) == 0) {
        PGresult *result;

        result = PQgetResult(dbnode->dbconn);
        cb(result, usrdata);
        PQclear(result);

        free(event);
        free(data);

        evpg_set_ready(config, dbnode);
        return;
    }

    /* this query has not finished */
    event_set(event, sock, EV_READ, (void *) evpg_query_finished, data);
    event_add(event, 0);
}
bool executeAsyncQuery(std::string const& sql, int type = 0)
{
    int result = 0;
    if (type == 1)
    {
        result = PQsendQueryParams(conn_, sql.c_str(), 0, 0, 0, 0, 0, 1);
    }
    else
    {
        result = PQsendQuery(conn_, sql.c_str());
    }

    if (result != 1)
    {
        std::string err_msg = "Postgis Plugin: ";
        err_msg += status();
        err_msg += "in executeAsyncQuery Full sql was: '";
        err_msg += sql;
        err_msg += "'\n";
        clearAsyncResult(PQgetResult(conn_));
        close();
        throw mapnik::datasource_exception(err_msg);
    }

    pending_ = true;
    return result;
}
/*
 * Wait until the current query finishes, ignoring any results; this could be
 * an async command or a cancellation of a query.
 * Returns 1 if OK; 0 if any error occurred; -1 if the timeout was reached.
 */
int
wait_connection_availability(PGconn *conn, int timeout)
{
    PGresult *res;

    while (timeout-- >= 0)
    {
        if (PQconsumeInput(conn) == 0)
        {
            log_warning(_("PQconsumeInput: Query could not be sent to primary. %s\n"),
                        PQerrorMessage(conn));
            return 0;
        }

        if (PQisBusy(conn) == 0)
        {
            res = PQgetResult(conn);
            if (res == NULL)
                break;
            PQclear(res);
        }

        sleep(1);
    }

    if (timeout >= 0)
        return 1;
    else
        return -1;
}
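/*
 * Hedged usage sketch (not from the original source): after asking the server
 * to cancel a running query, give it up to 60 seconds to drain and become
 * idle again. The timeout value and the wrapper function name are
 * illustrative assumptions; only wait_connection_availability() comes from
 * the code above.
 */
static int
cancel_and_wait(PGconn *conn)
{
    char errbuf[256];
    PGcancel *cancel = PQgetCancel(conn);

    if (cancel != NULL)
    {
        PQcancel(cancel, errbuf, sizeof(errbuf));
        PQfreeCancel(cancel);
    }

    switch (wait_connection_availability(conn, 60))
    {
        case 1:
            return 0;   /* connection is idle again */
        case 0:
            return -1;  /* libpq error, already logged by the helper */
        default:
            return -2;  /* timeout reached */
    }
}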
/*
 * Receive a message available from XLOG stream, blocking for
 * maximum of 'timeout' ms.
 *
 * Returns:
 *
 *   True if data was received. *type, *buffer and *len are set to
 *   the type of the received data, buffer holding it, and length,
 *   respectively.
 *
 *   False if no data was available within timeout, or wait was interrupted
 *   by signal.
 *
 * The buffer returned is only valid until the next call of this function or
 * libpq_connect/disconnect.
 *
 * ereports on error.
 */
static bool
libpqrcv_receive(int timeout, unsigned char *type, char **buffer, int *len)
{
    int         rawlen;

    if (recvBuf != NULL)
        PQfreemem(recvBuf);
    recvBuf = NULL;

    /*
     * If the caller requested to block, wait for data to arrive. But if this
     * is the first call after connecting, don't wait, because there might
     * already be some data in libpq buffer that we haven't returned to
     * caller.
     */
    if (timeout > 0 && !justconnected)
    {
        if (!libpq_select(timeout))
            return false;

        if (PQconsumeInput(streamConn) == 0)
            ereport(ERROR,
                    (errmsg("could not receive data from WAL stream: %s",
                            PQerrorMessage(streamConn))));
    }
    justconnected = false;

    /* Receive CopyData message */
    rawlen = PQgetCopyData(streamConn, &recvBuf, 1);
    if (rawlen == 0)            /* no data available yet, then return */
        return false;
    if (rawlen == -1)           /* end-of-streaming or error */
    {
        PGresult   *res;

        res = PQgetResult(streamConn);
        if (PQresultStatus(res) == PGRES_COMMAND_OK)
        {
            PQclear(res);
            ereport(ERROR,
                    (errmsg("replication terminated by primary server")));
        }
        PQclear(res);
        ereport(ERROR,
                (errmsg("could not receive data from WAL stream: %s",
                        PQerrorMessage(streamConn))));
    }
    if (rawlen < -1)
        ereport(ERROR,
                (errmsg("could not receive data from WAL stream: %s",
                        PQerrorMessage(streamConn))));

    /* Return received messages to caller */
    *type = *((unsigned char *) recvBuf);
    *buffer = recvBuf + sizeof(*type);
    *len = rawlen - sizeof(*type);

    return true;
}
s_object *
RS_PostgreSQL_getResult(Con_Handle * conHandle)
{
    S_EVALUATOR RS_DBI_connection *con;
    S_EVALUATOR RS_DBI_resultSet *result;
    PGconn *my_connection;
    Res_Handle *rsHandle;
    Sint res_id;
    PGresult *my_result;

    con = RS_DBI_getConnection(conHandle);
    my_connection = (PGconn *) con->drvConnection;

    if (con->num_res > 0) {
        res_id = (Sint) con->resultSetIds[0];
        rsHandle = RS_DBI_asResHandle(MGR_ID(conHandle), CON_ID(conHandle), res_id);
        result = RS_DBI_getResultSet(rsHandle);
        if (result->completed == 0) {
            RS_DBI_errorMessage("connection with pending rows, close resultSet before continuing",
                                RS_DBI_ERROR);
        }
        else {
            RS_PostgreSQL_closeResultSet(rsHandle);
        }
    }

    my_result = PQgetResult(my_connection);
    if (my_result == NULL)
        return S_NULL_ENTRY;

    if (strcmp(PQresultErrorMessage(my_result), "") != 0) {
        char *errResultMsg;
        const char *omsg;
        size_t len;

        omsg = PQerrorMessage(my_connection);
        len = strlen(omsg);
        errResultMsg = malloc(len + 80);    /* 80 should be larger than the length of "could not ..." */
        snprintf(errResultMsg, len + 80, "could not Retrieve the result : %s", omsg);
        RS_DBI_errorMessage(errResultMsg, RS_DBI_ERROR);
        free(errResultMsg);

        /* Frees the storage associated with a PGresult.
         * void PQclear(PGresult *res);
         */
        PQclear(my_result);
    }

    /* we now create the wrapper and copy values */
    PROTECT(rsHandle = RS_DBI_allocResultSet(conHandle));
    result = RS_DBI_getResultSet(rsHandle);
    result->drvResultSet = (void *) my_result;
    result->rowCount = (Sint) 0;
    result->isSelect = 0;
    result->rowsAffected = 0;
    result->completed = 1;
    UNPROTECT(1);

    return rsHandle;
}
static PGresult *
wait_for_result(PlxFn *plx_fn, PlxConn *plx_conn)
{
    struct epoll_event listenev;
    struct epoll_event event;
    PGconn *pq_conn = plx_conn->pq_conn;
    PGresult *pg_result = NULL;

    listenev.events = EPOLLIN;
    listenev.data.fd = PQsocket(pq_conn);
    if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, listenev.data.fd, &listenev) < 0)
        plx_error(plx_fn, "epoll: socket adding failed");

    PG_TRY();
    {
        int tmp;

        while ((tmp = is_pq_busy(pq_conn)))
        {
            if (tmp == -1)
            {
                epoll_ctl(epoll_fd, EPOLL_CTL_DEL, listenev.data.fd, &listenev);
                plx_error(plx_fn, "%s", PQerrorMessage(pq_conn));
            }
            CHECK_FOR_INTERRUPTS();
            epoll_wait(epoll_fd, &event, 1, 10000);
        }
    }
    PG_CATCH();
    {
        epoll_ctl(epoll_fd, EPOLL_CTL_DEL, listenev.data.fd, &listenev);
        if (geterrcode() == ERRCODE_QUERY_CANCELED)
            PQrequestCancel(pq_conn);
        pg_result = PQgetResult(pq_conn);
        if (pg_result)
            PQclear(pg_result);
        PG_RE_THROW();
    }
    PG_END_TRY();

    epoll_ctl(epoll_fd, EPOLL_CTL_DEL, listenev.data.fd, &listenev);
    return PQgetResult(pq_conn);
}
/**
 * Executes the given SQL command through the Postgres library (libpq)
 *
 * @author  Martin Turon
 *
 * @return  Error code from Postgres after executing command
 *
 * @version 2004/8/8    mturon    Initial version
 */
int xdb_execute(char *command)
{
    /* "errno" is reserved by <errno.h>, so keep the status in a local */
    int status = 0;
    PGconn *conn = xdb_connect();

    PQsendQuery(conn, command);

    PGresult *res = PQgetResult(conn);
    printf("%s\n", command);

    while (res != NULL) {
        status = PQresultStatus(res);
        if (status > PGRES_COMMAND_OK)
            fprintf(stderr, "error: INSERT command failed: %i\n", status);

        /* clear the current result before fetching the next one */
        PQclear(res);
        res = PQgetResult(conn);
    }

    /* close the connection to the database and cleanup */
    PQfinish(conn);
    return status;
}
void EpollPostgresql::clear()
{
    while (result != NULL)
    {
        PQclear(result);
        result = PQgetResult(conn);
    }
    ntuples = 0;
    tuleIndex = -1;
}
void clearAsyncResult(PGresult *result)
{
    // Clear all pending results
    while (result)
    {
        PQclear(result);
        result = PQgetResult(conn_);
    }
    pending_ = false;
}
/* Discard the result of the currently executed query, blocking.
 *
 * This function doesn't honour the wait callback: it can be used in case of
 * emergency if the callback fails in order to put the connection back into a
 * consistent state.
 *
 * If any command was issued before clearing the result, libpq would fail with
 * the error "another command is already in progress".
 */
static void
psyco_clear_result_blocking(connectionObject *conn)
{
    PGresult *res;

    Dprintf("psyco_clear_result_blocking");

    while (NULL != (res = PQgetResult(conn->pgconn))) {
        PQclear(res);
    }
}
/*
 * MultiClientBatchResult returns results for a "batch" of queries, meaning a
 * string containing multiple select statements separated by semicolons. This
 * function should be called multiple times to retrieve the results for all the
 * queries, until CLIENT_BATCH_QUERY_DONE is returned (even if a failure occurs).
 * If a query in the batch fails, the remaining queries will not be executed. On
 * success, queryResult, rowCount and columnCount will be set to the appropriate
 * values. After use, queryResult should be cleared using ClientClearResult.
 */
BatchQueryStatus
MultiClientBatchResult(int32 connectionId, void **queryResult,
                       int *rowCount, int *columnCount)
{
    PGconn *connection = NULL;
    PGresult *result = NULL;
    ConnStatusType connStatusType = CONNECTION_OK;
    ExecStatusType resultStatus = PGRES_COMMAND_OK;
    BatchQueryStatus queryStatus = CLIENT_INVALID_BATCH_QUERY;

    Assert(connectionId != INVALID_CONNECTION_ID);
    connection = ClientConnectionArray[connectionId];
    Assert(connection != NULL);

    /* set default result */
    (*queryResult) = NULL;
    (*rowCount) = -1;
    (*columnCount) = -1;

    connStatusType = PQstatus(connection);
    if (connStatusType == CONNECTION_BAD)
    {
        ereport(WARNING, (errmsg("could not maintain connection to worker node")));
        return CLIENT_BATCH_QUERY_FAILED;
    }

    result = PQgetResult(connection);
    if (result == NULL)
    {
        return CLIENT_BATCH_QUERY_DONE;
    }

    resultStatus = PQresultStatus(result);
    if (resultStatus == PGRES_TUPLES_OK)
    {
        (*queryResult) = (void *) result;
        (*rowCount) = PQntuples(result);
        (*columnCount) = PQnfields(result);
        queryStatus = CLIENT_BATCH_QUERY_CONTINUE;
    }
    else if (resultStatus == PGRES_COMMAND_OK)
    {
        (*queryResult) = (void *) result;
        queryStatus = CLIENT_BATCH_QUERY_CONTINUE;
    }
    else
    {
        WarnRemoteError(connection, result);
        PQclear(result);
        queryStatus = CLIENT_BATCH_QUERY_FAILED;
    }

    return queryStatus;
}
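/*
 * Hedged caller sketch (illustrative, not from the original source): drain
 * all results of a batch previously submitted on connectionId. Following the
 * contract described above, keep calling MultiClientBatchResult until it
 * returns CLIENT_BATCH_QUERY_DONE, even after a failure, and release each
 * returned result with ClientClearResult. The wrapper name is an assumption.
 */
static void
DrainBatchResults(int32 connectionId)
{
    for (;;)
    {
        void *queryResult = NULL;
        int rowCount = -1;
        int columnCount = -1;

        BatchQueryStatus status = MultiClientBatchResult(connectionId, &queryResult,
                                                         &rowCount, &columnCount);
        if (status == CLIENT_BATCH_QUERY_DONE)
        {
            break;
        }

        if (status == CLIENT_BATCH_QUERY_CONTINUE)
        {
            /* rowCount/columnCount are only meaningful for SELECT results */
            ClientClearResult(queryResult);
        }

        /* on CLIENT_BATCH_QUERY_FAILED the error was already reported; keep draining */
    }
}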
VALUE
get_end(VALUE self)
{
    struct pgconn_data *c;
    PGresult *res;

    Data_Get_Struct(self, struct pgconn_data, c);
    if ((res = PQgetResult(c->conn)) != NULL)
        pgresult_new(res, c, Qnil, Qnil);
    return Qnil;
}
VALUE
clear_resultqueue(VALUE self)
{
    struct pgconn_data *c;
    PGresult *result;

    Data_Get_Struct(self, struct pgconn_data, c);
    while ((result = PQgetResult(c->conn)) != NULL)
        PQclear(result);
    return Qnil;
}
CAMLprim value PQgetResult_stub(value v_conn)
{
    CAMLparam1(v_conn);
    PGconn *conn = get_conn(v_conn);
    np_callback *np_cb = get_conn_cb(v_conn);
    PGresult *res;

    caml_enter_blocking_section();
    res = PQgetResult(conn);
    caml_leave_blocking_section();

    CAMLreturn(alloc_result(res, np_cb));
}
/* throw away response from backend */
static void
discard_response(CState *state)
{
    PGresult *res;

    do
    {
        res = PQgetResult(state->con);
        if (res)
            PQclear(res);
    } while (res);
}
/*
 * handleCopyOut
 * receives data as a result of a COPY ... TO STDOUT command
 *
 * conn should be a database connection that you just issued COPY TO on
 * and got back a PGRES_COPY_OUT result.
 * copystream is the file stream for the data to go to.
 *
 * result is true if successful, false if not.
 */
bool
handleCopyOut(PGconn *conn, FILE *copystream)
{
    bool OK = true;
    char *buf;
    int ret;
    PGresult *res;

    for (;;)
    {
        ret = PQgetCopyData(conn, &buf, 0);

        if (ret < 0)
            break;              /* done or error */

        if (buf)
        {
            if (fwrite(buf, 1, ret, copystream) != ret)
            {
                if (OK)         /* complain only once, keep reading data */
                    psql_error("could not write COPY data: %s\n",
                               strerror(errno));
                OK = false;
            }
            PQfreemem(buf);
        }
    }

    if (OK && fflush(copystream))
    {
        psql_error("could not write COPY data: %s\n",
                   strerror(errno));
        OK = false;
    }

    if (ret == -2)
    {
        psql_error("COPY data transfer failed: %s", PQerrorMessage(conn));
        OK = false;
    }

    /* Check command status and return to normal libpq state */
    res = PQgetResult(conn);
    if (PQresultStatus(res) != PGRES_COMMAND_OK)
    {
        psql_error("%s", PQerrorMessage(conn));
        OK = false;
    }
    PQclear(res);

    return OK;
}
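/*
 * Hedged usage sketch (the wrapper name and "my_table" are illustrative
 * assumptions; only handleCopyOut and psql_error come from the code above):
 * issue a COPY ... TO STDOUT, confirm the server switched into copy-out mode,
 * then let handleCopyOut stream the data and consume the final command result.
 */
static bool
copy_table_to_stream(PGconn *conn, FILE *copystream)
{
    PGresult *res = PQexec(conn, "COPY my_table TO STDOUT");

    if (PQresultStatus(res) != PGRES_COPY_OUT)
    {
        psql_error("%s", PQerrorMessage(conn));
        PQclear(res);
        return false;
    }
    PQclear(res);

    /* streams all CopyData messages and checks the trailing command status */
    return handleCopyOut(conn, copystream);
}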