static void BaseBackup(void) { PGresult *res; char *sysidentifier; uint32 timeline; char current_path[MAXPGPATH]; char escaped_label[MAXPGPATH]; int i; char xlogstart[64]; char xlogend[64]; /* * Connect in replication mode to the server */ conn = GetConnection(); /* * Run IDENTIFY_SYSTEM so we can get the timeline */ res = PQexec(conn, "IDENTIFY_SYSTEM"); if (PQresultStatus(res) != PGRES_TUPLES_OK) { fprintf(stderr, _("%s: could not identify system: %s\n"), progname, PQerrorMessage(conn)); disconnect_and_exit(1); } if (PQntuples(res) != 1) { fprintf(stderr, _("%s: could not identify system, got %i rows\n"), progname, PQntuples(res)); disconnect_and_exit(1); } sysidentifier = strdup(PQgetvalue(res, 0, 0)); timeline = atoi(PQgetvalue(res, 0, 1)); PQclear(res); /* * Start the actual backup */ PQescapeStringConn(conn, escaped_label, label, sizeof(escaped_label), &i); snprintf(current_path, sizeof(current_path), "BASE_BACKUP LABEL '%s' %s %s %s %s", escaped_label, showprogress ? "PROGRESS" : "", includewal && !streamwal ? "WAL" : "", fastcheckpoint ? "FAST" : "", includewal ? "NOWAIT" : ""); if (PQsendQuery(conn, current_path) == 0) { fprintf(stderr, _("%s: could not send base backup command: %s"), progname, PQerrorMessage(conn)); disconnect_and_exit(1); } /* * Get the starting xlog position */ res = PQgetResult(conn); if (PQresultStatus(res) != PGRES_TUPLES_OK) { fprintf(stderr, _("%s: could not initiate base backup: %s"), progname, PQerrorMessage(conn)); disconnect_and_exit(1); } if (PQntuples(res) != 1) { fprintf(stderr, _("%s: no start point returned from server\n"), progname); disconnect_and_exit(1); } strcpy(xlogstart, PQgetvalue(res, 0, 0)); if (verbose && includewal) fprintf(stderr, "xlog start point: %s\n", xlogstart); PQclear(res); MemSet(xlogend, 0, sizeof(xlogend)); /* * Get the header */ res = PQgetResult(conn); if (PQresultStatus(res) != PGRES_TUPLES_OK) { fprintf(stderr, _("%s: could not get backup header: %s"), progname, PQerrorMessage(conn)); disconnect_and_exit(1); } if (PQntuples(res) < 1) { fprintf(stderr, _("%s: no data returned from server\n"), progname); disconnect_and_exit(1); } /* * Sum up the total size, for progress reporting */ totalsize = totaldone = 0; tablespacecount = PQntuples(res); for (i = 0; i < PQntuples(res); i++) { if (showprogress) totalsize += atol(PQgetvalue(res, i, 2)); /* * Verify tablespace directories are empty. Don't bother with the * first once since it can be relocated, and it will be checked before * we do anything anyway. */ if (format == 'p' && !PQgetisnull(res, i, 1)) verify_dir_is_empty_or_create(PQgetvalue(res, i, 1)); } /* * When writing to stdout, require a single tablespace */ if (format == 't' && strcmp(basedir, "-") == 0 && PQntuples(res) > 1) { fprintf(stderr, _("%s: can only write single tablespace to stdout, database has %d\n"), progname, PQntuples(res)); disconnect_and_exit(1); } /* * If we're streaming WAL, start the streaming session before we start * receiving the actual data chunks. 
*/ if (streamwal) { if (verbose) fprintf(stderr, _("%s: starting background WAL receiver\n"), progname); StartLogStreamer(xlogstart, timeline, sysidentifier); } /* * Start receiving chunks */ for (i = 0; i < PQntuples(res); i++) { if (format == 't') ReceiveTarFile(conn, res, i); else ReceiveAndUnpackTarFile(conn, res, i); } /* Loop over all tablespaces */ if (showprogress) { progress_report(PQntuples(res), NULL); fprintf(stderr, "\n"); /* Need to move to next line */ } PQclear(res); /* * Get the stop position */ res = PQgetResult(conn); if (PQresultStatus(res) != PGRES_TUPLES_OK) { fprintf(stderr, _("%s: could not get WAL end position from server\n"), progname); disconnect_and_exit(1); } if (PQntuples(res) != 1) { fprintf(stderr, _("%s: no WAL end position returned from server\n"), progname); disconnect_and_exit(1); } strcpy(xlogend, PQgetvalue(res, 0, 0)); if (verbose && includewal) fprintf(stderr, "xlog end point: %s\n", xlogend); PQclear(res); res = PQgetResult(conn); if (PQresultStatus(res) != PGRES_COMMAND_OK) { fprintf(stderr, _("%s: final receive failed: %s"), progname, PQerrorMessage(conn)); disconnect_and_exit(1); } if (bgchild > 0) { #ifndef WIN32 int status; int r; #else DWORD status; #endif if (verbose) fprintf(stderr, _("%s: waiting for background process to finish streaming...\n"), progname); #ifndef WIN32 if (pipewrite(bgpipe[1], xlogend, strlen(xlogend)) != strlen(xlogend)) { fprintf(stderr, _("%s: could not send command to background pipe: %s\n"), progname, strerror(errno)); disconnect_and_exit(1); } /* Just wait for the background process to exit */ r = waitpid(bgchild, &status, 0); if (r == -1) { fprintf(stderr, _("%s: could not wait for child process: %s\n"), progname, strerror(errno)); disconnect_and_exit(1); } if (r != bgchild) { fprintf(stderr, _("%s: child %i died, expected %i\n"), progname, r, bgchild); disconnect_and_exit(1); } if (!WIFEXITED(status)) { fprintf(stderr, _("%s: child process did not exit normally\n"), progname); disconnect_and_exit(1); } if (WEXITSTATUS(status) != 0) { fprintf(stderr, _("%s: child process exited with error %i\n"), progname, WEXITSTATUS(status)); disconnect_and_exit(1); } /* Exited normally, we're happy! */ #else /* WIN32 */ /* * On Windows, since we are in the same process, we can just store the * value directly in the variable, and then set the flag that says * it's there. */ if (sscanf(xlogend, "%X/%X", &xlogendptr.xlogid, &xlogendptr.xrecoff) != 2) { fprintf(stderr, _("%s: could not parse xlog end position \"%s\"\n"), progname, xlogend); exit(1); } InterlockedIncrement(&has_xlogendptr); /* First wait for the thread to exit */ if (WaitForSingleObjectEx((HANDLE) bgchild, INFINITE, FALSE) != WAIT_OBJECT_0) { _dosmaperr(GetLastError()); fprintf(stderr, _("%s: could not wait for child thread: %s\n"), progname, strerror(errno)); disconnect_and_exit(1); } if (GetExitCodeThread((HANDLE) bgchild, &status) == 0) { _dosmaperr(GetLastError()); fprintf(stderr, _("%s: could not get child thread exit status: %s\n"), progname, strerror(errno)); disconnect_and_exit(1); } if (status != 0) { fprintf(stderr, _("%s: child thread exited with error %u\n"), progname, (unsigned int) status); disconnect_and_exit(1); } /* Exited normally, we're happy */ #endif } /* * End of copy data. Final result is already checked inside the loop. */ PQfinish(conn); if (verbose) fprintf(stderr, "%s: base backup completed\n", progname); }
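/*
 * A minimal, self-contained sketch (not part of pg_basebackup) of the
 * IDENTIFY_SYSTEM round trip that BaseBackup() above relies on.  The conninfo
 * string is only a placeholder; a walsender connection needs the
 * "replication" connection option set so that replication commands such as
 * IDENTIFY_SYSTEM and BASE_BACKUP are accepted.
 */
#include <stdio.h>
#include <libpq-fe.h>

static int
probe_system_identity(const char *conninfo)
{
	PGconn	   *conn = PQconnectdb(conninfo);
	PGresult   *res;

	if (PQstatus(conn) != CONNECTION_OK)
	{
		fprintf(stderr, "connection failed: %s", PQerrorMessage(conn));
		PQfinish(conn);
		return -1;
	}

	res = PQexec(conn, "IDENTIFY_SYSTEM");
	if (PQresultStatus(res) != PGRES_TUPLES_OK || PQntuples(res) != 1)
	{
		fprintf(stderr, "IDENTIFY_SYSTEM failed: %s", PQerrorMessage(conn));
		PQclear(res);
		PQfinish(conn);
		return -1;
	}

	/* column 0: system identifier, column 1: current timeline id */
	printf("system id: %s, timeline: %s\n",
		   PQgetvalue(res, 0, 0), PQgetvalue(res, 0, 1));

	PQclear(res);
	PQfinish(conn);
	return 0;
}

/* Example (placeholder conninfo): probe_system_identity("host=localhost user=postgres replication=1"); */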
static int pgsql_stmt_execute(pdo_stmt_t *stmt) { pdo_pgsql_stmt *S = (pdo_pgsql_stmt*)stmt->driver_data; pdo_pgsql_db_handle *H = S->H; ExecStatusType status; /* ensure that we free any previous unfetched results */ if(S->result) { PQclear(S->result); S->result = NULL; } S->current_row = 0; if (S->cursor_name) { char *q = NULL; if (S->is_prepared) { spprintf(&q, 0, "CLOSE %s", S->cursor_name); S->result = PQexec(H->server, q); efree(q); } spprintf(&q, 0, "DECLARE %s SCROLL CURSOR WITH HOLD FOR %s", S->cursor_name, stmt->active_query_string); S->result = PQexec(H->server, q); efree(q); /* check if declare failed */ status = PQresultStatus(S->result); if (status != PGRES_COMMAND_OK && status != PGRES_TUPLES_OK) { pdo_pgsql_error_stmt(stmt, status, pdo_pgsql_sqlstate(S->result)); return 0; } /* the cursor was declared correctly */ S->is_prepared = 1; /* fetch to be able to get the number of tuples later, but don't advance the cursor pointer */ spprintf(&q, 0, "FETCH FORWARD 0 FROM %s", S->cursor_name); S->result = PQexec(H->server, q); efree(q); } else if (S->stmt_name) { /* using a prepared statement */ if (!S->is_prepared) { stmt_retry: /* we deferred the prepare until now, because we didn't * know anything about the parameter types; now we do */ S->result = PQprepare(H->server, S->stmt_name, S->query, stmt->bound_params ? zend_hash_num_elements(stmt->bound_params) : 0, S->param_types); status = PQresultStatus(S->result); switch (status) { case PGRES_COMMAND_OK: case PGRES_TUPLES_OK: /* it worked */ S->is_prepared = 1; PQclear(S->result); break; default: { char *sqlstate = pdo_pgsql_sqlstate(S->result); /* 42P05 means that the prepared statement already existed. this can happen if you use * a connection pooling software line pgpool which doesn't close the db-connection once * php disconnects. if php dies (no chance to run RSHUTDOWN) during execution it has no * chance to DEALLOCATE the prepared statements it has created. so, if we hit a 42P05 we * deallocate it and retry ONCE (thies 2005.12.15) */ if (sqlstate && !strcmp(sqlstate, "42P05")) { char buf[100]; /* stmt_name == "pdo_crsr_%08x" */ PGresult *res; snprintf(buf, sizeof(buf), "DEALLOCATE %s", S->stmt_name); res = PQexec(H->server, buf); if (res) { PQclear(res); } goto stmt_retry; } else { pdo_pgsql_error_stmt(stmt, status, sqlstate); return 0; } } } } S->result = PQexecPrepared(H->server, S->stmt_name, stmt->bound_params ? zend_hash_num_elements(stmt->bound_params) : 0, (const char**)S->param_values, S->param_lengths, S->param_formats, 0); } else if (stmt->supports_placeholders == PDO_PLACEHOLDER_NAMED) { /* execute query with parameters */ S->result = PQexecParams(H->server, S->query, stmt->bound_params ? zend_hash_num_elements(stmt->bound_params) : 0, S->param_types, (const char**)S->param_values, S->param_lengths, S->param_formats, 0); } else { /* execute plain query (with embedded parameters) */ S->result = PQexec(H->server, stmt->active_query_string); } status = PQresultStatus(S->result); if (status != PGRES_COMMAND_OK && status != PGRES_TUPLES_OK) { pdo_pgsql_error_stmt(stmt, status, pdo_pgsql_sqlstate(S->result)); return 0; } if (!stmt->executed && (!stmt->column_count || S->cols == NULL)) { stmt->column_count = (int) PQnfields(S->result); S->cols = ecalloc(stmt->column_count, sizeof(pdo_pgsql_column)); } if (status == PGRES_COMMAND_OK) { ZEND_ATOL(stmt->row_count, PQcmdTuples(S->result)); H->pgoid = PQoidValue(S->result); } else { stmt->row_count = (zend_long)PQntuples(S->result); } return 1; }
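/*
 * A reduced sketch of the deferred prepare-then-execute cycle that
 * pgsql_stmt_execute() above performs, expressed in plain libpq.  The
 * statement name, SQL text and parameter value are illustrative only, not
 * anything the PDO driver actually uses.
 */
#include <stdio.h>
#include <libpq-fe.h>

static int
run_prepared(PGconn *conn)
{
	const char *param_values[1] = { "42" };
	PGresult   *res;

	/* Deferred prepare: pass NULL for the types and let the server infer them. */
	res = PQprepare(conn, "my_stmt",
					"SELECT id, name FROM items WHERE id = $1",
					1, NULL);
	if (PQresultStatus(res) != PGRES_COMMAND_OK)
	{
		fprintf(stderr, "prepare failed: %s", PQerrorMessage(conn));
		PQclear(res);
		return 0;
	}
	PQclear(res);

	/* Execute with text-format parameters and a text-format result. */
	res = PQexecPrepared(conn, "my_stmt", 1, param_values, NULL, NULL, 0);
	if (PQresultStatus(res) != PGRES_TUPLES_OK)
	{
		fprintf(stderr, "execute failed: %s", PQerrorMessage(conn));
		PQclear(res);
		return 0;
	}

	printf("%d row(s)\n", PQntuples(res));
	PQclear(res);
	return 1;
}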
static result_return_t on_fortune_result(db_query_param_t *param, PGresult *result) { fortune_ctx_t * const fortune_ctx = H2O_STRUCT_FROM_MEMBER(fortune_ctx_t, param, param); int ret = DONE; const ExecStatusType status = PQresultStatus(result); if (status == PGRES_TUPLES_OK) { const size_t num_rows = PQntuples(result); ret = SUCCESS; for (size_t i = 0; i < num_rows; i++) { fortune_t * const fortune = h2o_mem_alloc_pool(&fortune_ctx->req->pool, sizeof(*fortune)); if (fortune) { memset(fortune, 0, sizeof(*fortune)); fortune->id.base = PQgetvalue(result, i, 0); fortune->id.len = PQgetlength(result, i, 0); fortune->message = h2o_htmlescape(&fortune_ctx->req->pool, PQgetvalue(result, i, 1), PQgetlength(result, i, 1)); fortune->l.next = fortune_ctx->result; fortune_ctx->result = &fortune->l; fortune_ctx->num_result++; if (!i) fortune->data = result; } else { send_error(INTERNAL_SERVER_ERROR, REQ_ERROR, fortune_ctx->req); ret = DONE; if (!i) PQclear(result); break; } } } else if (result) { LIBRARY_ERROR("PQresultStatus", PQresultErrorMessage(result)); send_error(BAD_GATEWAY, DB_ERROR, fortune_ctx->req); PQclear(result); } else { mustache_api_t api = {.sectget = on_fortune_section, .varget = on_fortune_variable, .write = add_iovec}; thread_context_t * const ctx = H2O_STRUCT_FROM_MEMBER(thread_context_t, event_loop.h2o_ctx, fortune_ctx->req->conn->ctx); const size_t iovcnt = MIN(MAX_IOVEC, fortune_ctx->num_result * 5 + 2); const size_t sz = offsetof(iovec_list_t, iov) + iovcnt * sizeof(h2o_iovec_t); char _Alignas(iovec_list_t) mem[sz]; iovec_list_t * const restrict iovec_list = (iovec_list_t *) mem; memset(iovec_list, 0, offsetof(iovec_list_t, iov)); iovec_list->max_iovcnt = iovcnt; fortune_ctx->iovec_list_iter = iovec_list; fortune_ctx->result = sort_fortunes(fortune_ctx->result); if (mustache_render(&api, fortune_ctx, ctx->global_data->fortunes_template)) { fortune_ctx->iovec_list = iovec_list->l.next; set_default_response_param(HTML, fortune_ctx->content_length, fortune_ctx->req); h2o_start_response(fortune_ctx->req, &fortune_ctx->generator); const h2o_send_state_t state = fortune_ctx->iovec_list ? H2O_SEND_STATE_IN_PROGRESS : H2O_SEND_STATE_FINAL; h2o_send(fortune_ctx->req, iovec_list->iov, iovec_list->iovcnt, state); } else send_error(INTERNAL_SERVER_ERROR, REQ_ERROR, fortune_ctx->req); } return ret; }
#include <stdio.h>
#include <stdlib.h>
#include <libpq-fe.h>

/* do_exit() is assumed to be defined elsewhere in this program
   (typically PQfinish() followed by exit()). */

int main ()
{
    int i;
    int nparamdb = 2;
    char *paramValues[2];                 /* text values for the two query parameters */
    /* FILE *arq = fopen ("param.dat", "w"); */

    for (i = 0; i < nparamdb; ++i)
        paramValues[i] = (char*) malloc (8*sizeof (char));   /* room for "x.y" plus NUL */

    PGconn *conn = PQconnectdb("user=allisson dbname=acodb");
    if (PQstatus(conn) == CONNECTION_BAD) {
        printf ("connection to database failed\n");
        do_exit (conn);
    }

    double alfa, beta;
    int somalen = 0;
    float medialen = 0;
    float desviolen = 0;
    int maiorlen = 0;
    int desv1 = 0;
    int exec, execucoes = 30;
    PGresult *res;

    //BEGIN SELECT MAX LENGTH
    /* char *s = "SELECT MAX(len) FROM antsolutions"; */
    /* res = PQexec (conn, s); */
    /* if (PQresultStatus (res) != PGRES_TUPLES_OK) { */
    /*     printf ("No data retrieved\n"); */
    /*     PQclear(res); */
    /*     do_exit(conn); */
    /* } */
    /* /\* maiorlen = 172; *\/ */
    /* maiorlen = atoi (PQgetvalue (res, 0, 0)); */
    /* printf ("maiorlen = %d\n",maiorlen); */
    /* PQclear (res); */
    //END SELECT MAX LENGTH

    for (alfa = 0.5; alfa <= 3.5; alfa += 0.5) {
        for (beta = 0.5; beta <= 4.; beta += 0.5) {
            /* for (exec = 0; exec < execucoes; ++exec) { */
            snprintf (paramValues[0], 8, "%.1f", alfa);
            snprintf (paramValues[1], 8, "%.1f", beta);
            /* sprintf (paramValues[2], "%d", exec); */

            const char *stm = "SELECT AVG(timesol) FROM antsolutions WHERE alfa=$1 AND beta=$2";
            res = PQexecParams(conn, stm, 2, NULL,
                               (const char * const *) paramValues,
                               NULL, NULL, 0);
            if (PQresultStatus (res) != PGRES_TUPLES_OK) {
                printf ("No data retrieved\n");
                PQclear(res);
                do_exit(conn);
            }

            int rows = PQntuples (res);
            int leng = 0;
            double tempo = 0;             /* stays 0 if the query returns no rows */
            for (i = 0; i < rows; ++i) {
                tempo = atof (PQgetvalue (res, i, 0));
                /* somalen += atoi (PQgetvalue (res, i ,10)); */
                /* printf ("alfa = %s\tbeta = %s\n", */
                /*         PQgetvalue (res, i, 4), PQgetvalue (res, i, 5)); */
            }
            /* printf ("exec %d -> len = %d\n", exec, leng); */
            /* printf ("rows = %d\t somalen = %d\n", rows, somalen); */
            /* printf ("For alfa = %s and beta = %s\nthe average len is %d\n\n",
                       PQgetvalue (res, 0, 4), PQgetvalue (res, 0, 5), somalen/rows); */
            PQclear (res);
            /* } //markexec */

            printf ("alfa = %.1lf, beta = %.1lf, tempo = %3.2f\n", alfa, beta, tempo);
            /* printf ("alfa = %.1lf, beta = %.1lf, len = %3.2f, desvio = %3.2f qualidade = %3.2lf\n",
                       alfa, beta, medialen, desviolen, maiorlen - medialen); */
            /* fprintf (arq, "%.1lf %.1lf %.1lf\n", alfa, beta, maiorlen - medialen); */
        } //markbeta
        putchar ('\n');
        /* fprintf (arq, "\n"); */
    } //markalfa

    PQfinish (conn);
    /* fclose (arq); */
    return 0;
}
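/*
 * Companion sketch for the AVG() query above: AVG over zero rows returns a
 * single NULL, so PQgetisnull() should be consulted before converting the
 * value.  The table and column names simply mirror the query in the program
 * above; the helper itself is illustrative.
 */
#include <stdio.h>
#include <stdlib.h>
#include <libpq-fe.h>

static double
average_timesol(PGconn *conn, const char *alfa, const char *beta)
{
	const char *params[2] = { alfa, beta };
	PGresult   *res;
	double		avg = 0.0;

	res = PQexecParams(conn,
					   "SELECT AVG(timesol) FROM antsolutions"
					   " WHERE alfa = $1 AND beta = $2",
					   2, NULL, params, NULL, NULL, 0);

	if (PQresultStatus(res) != PGRES_TUPLES_OK)
		fprintf(stderr, "query failed: %s", PQerrorMessage(conn));
	else if (PQntuples(res) == 1 && !PQgetisnull(res, 0, 0))
		avg = atof(PQgetvalue(res, 0, 0));	/* NULL result (no rows matched) keeps avg at 0.0 */

	PQclear(res);
	return avg;
}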
void TrackPipeHandler::Pick(float x, float y)
{
	osgUtil::LineSegmentIntersector::Intersections intersection;

	// x, y are the click coordinates; "intersection" receives the list of nodes
	// hit by the pick ray, together with their node paths and related data.
	if (mViewer->computeIntersections(x, y, intersection))	// find which models in the scene were clicked
	{
		// Walk the hits; each hit carries a NodePath, and traversing that
		// NodePath tells us whether the target node was clicked.
		for (osgUtil::LineSegmentIntersector::Intersections::iterator hiter = intersection.begin();
			 hiter != intersection.end(); ++hiter)
		{
			std::cout << "scan the computeIntersections" << std::endl;
			std::cout << intersection.size();

			if (!hiter->nodePath.empty())
			{
				const osg::NodePath& np = hiter->nodePath;
				for (int i = np.size() - 1; i >= 0; --i)
				{
					osg::Node* nd = dynamic_cast<osg::Node*>(np[i]);
					if (nd)
					{
						if (nd->getName().find("ysgline_new") == 0)
						{
							osg::Geode* tmp = dynamic_cast<osg::Geode*>(nd);
							if (tmp)
							{
								osg::Vec4f color, colorEnd;
								osg::Geometry* tmpGeom = dynamic_cast<osg::Geometry*>(tmp->getDrawable(0));
								if (tmpGeom)
								{
									osg::Vec4Array* tmpColorArray = dynamic_cast<osg::Vec4Array*>(tmpGeom->getColorArray());
									if (tmpColorArray)
									{
										COLORREF cref = (*ppTrackDlg)->mColorPicker.GetColor();
										BYTE r = GetRValue(cref);
										BYTE g = GetGValue(cref);
										BYTE b = GetBValue(cref);
										// divide as floats: integer division would truncate the channels to 0 or 1
										color.set(r / 255.0f, g / 255.0f, b / 255.0f, 1.0f);
										for (osg::Vec4Array::iterator iter = tmpColorArray->begin();
											 iter != tmpColorArray->end(); ++iter)
										{
											iter->set(r / 255.0f, g / 255.0f, b / 255.0f, 0.5f);
										}
									}
								}

								DBConnection reader;
								makeSql ms;
								reader.ConnectToDB("localhost", "5432", "HRBPipe", "postgres", "123456");
								string sql = ms.flowDirectionSql(nd->getName());
								PGresult* res = reader.ExecSQL(const_cast<char*>(sql.c_str()));
								int field_num = PQnfields(res);
								int tuple_num = PQntuples(res);
								float* fbzms = new float[tuple_num];

								// report how many pipe segments were returned
								{
									CString cs;
									cs.Format("共%d条管线!", tuple_num);	// "%d pipe segments in total!"
									(*ppTrackDlg)->mEdit.SetWindowTextA(cs);
								}

								// populate the list control with the result set
								{
									(*ppTrackDlg)->m_List.DeleteAllItems();
									for (int j = 0; j < field_num; ++j)
									{
										(*ppTrackDlg)->m_List.InsertColumn(j, PQfname(res, j), LVCFMT_LEFT, 80);
									}
									for (int j = 0; j < tuple_num; ++j)
									{
										char* s = PQgetvalue(res, j, 0);
										int nRow = (*ppTrackDlg)->m_List.InsertItem(j, s);	// insert the row
										for (int k = 1; k < field_num; ++k)
										{
											char* t = PQgetvalue(res, j, k);
											(*ppTrackDlg)->m_List.SetItemText(j, k, t);	// fill in the cell value
											if (1 == k)
											{
												char* s = PQfname(res, k);
												//ASSERT(PQfname(res,k) == "标识码");	// "标识码" = feature id column
												fbzms[j] = atof(t);	// feature id returned by the query
											}
										}
									}
								}

								// highlight the whole flow-direction pipeline
								{
									for (std::vector<std::string>::iterator it = oldBzms.begin(); it != oldBzms.end(); it++)
									{
										HighLightVisitor hl(*it, false);
										mViewer->getSceneData()->accept(hl);
									}
									oldBzms.erase(oldBzms.begin(), oldBzms.end());
									ASSERT(oldBzms.empty());
									/*ColorGradient g(color,colorEnd,tuple_num);*/
									ColorGradient g(color, tuple_num);
									osg::Vec4f* colors = g.getColorArray();
									for (int i = 0; i < tuple_num; i++)
									{
										string s;
										ostringstream buf;
										buf << "ysgline_new " << fbzms[i];
										s = buf.str();
										HighLightVisitor hl(s, true, colors[i]);
										mViewer->getSceneData()->accept(hl);
										oldBzms.push_back(s);
									}
								}

								PQclear(res);		// release the query result
								delete[] fbzms;		// release the feature-id buffer
								if (tmpGeom)
									tmpGeom->dirtyDisplayList();
							}
							return;
						}
					}
				}
			}
		}
	}
}
void load_TUBii_command(client *c, int argc, sds *argv) { /* Load CAEN hardware settings from the database. */ uint32_t key; PGconn *conn; PGresult *res = NULL; char conninfo[1024]; char command[10000]; char *name, *value_str; uint32_t value; int i; int rows; if (safe_strtoul(argv[1], &key)) { addReplyErrorFormat(c, "'%s' is not a valid uint32_t", argv[1]); return; } sprintf(command, "select * from TUBii where key = %i", key); sprintf(conninfo, "dbname=%s host=%s user=%s password=%s", dbconfig.name, dbconfig.host, dbconfig.user, dbconfig.password); /* Request row from the database. */ conn = PQconnectdb(conninfo); if (PQstatus(conn) != CONNECTION_OK) { addReplyErrorFormat(c, "connection to database failed: %s", PQerrorMessage(conn)); goto pq_error; } res = PQexec(conn, command); if (PQresultStatus(res) != PGRES_TUPLES_OK) { addReplyErrorFormat(c, "select command failed: %s", PQerrorMessage(conn)); goto pq_error; } rows = PQntuples(res); if (rows != 1) { if (rows == 0) { addReplyErrorFormat(c, "no database row with key = %i", key); } else { addReplyError(c, "this should never happen. Call Tony"); } goto pq_error; } for (i = 0; i < PQnfields(res); i++) { name = PQfname(res, i); if (!strcmp(name, "key") || !strcmp(name, "timestamp")) continue; value_str = PQgetvalue(res, 0, i); if (safe_strtoul(value_str, &value)) { addReplyErrorFormat(c, "unable to convert value '%s' for field %s", value_str, name); goto pq_error; } if (!strcmp(name, "control_reg")) { ControlReg(value); } else if (!strcmp(name, "trigger_mask")) { triggerMask(value,0); } else if (!strcmp(name, "speaker_mask")) { speakerMask(value); } else if (!strcmp(name, "counter_mask")) { counterMask(value); } else if (!strcmp(name, "caen_gain_reg")) { CAENWords(value, mReadReg((u32) MappedRegsBaseAddress, RegOffset12)); } else if (!strcmp(name, "caen_channel_reg")) { CAENWords(mReadReg((u32) MappedRegsBaseAddress, RegOffset11), value); } else if (!strcmp(name, "lockout_reg")) { GTDelays(value, mReadReg((u32) MappedRegsBaseAddress, RegOffset15)); } else if (!strcmp(name, "dgt_reg")) { GTDelays(mReadReg((u32) MappedRegsBaseAddress, RegOffset14), value); } else if (!strcmp(name, "dac_reg")) { DACThresholds(value); } else if (!strcmp(name, "counter_mode")) { counterMode(value); } else if (!strcmp(name, "clock_status")) { // Do Nowt } else if (!strcmp(name, "combo_enable_mask")) { mWriteReg((u32) MappedComboBaseAddress, RegOffset2, value); } else if (!strcmp(name, "combo_mask")) { mWriteReg((u32) MappedComboBaseAddress, RegOffset3, value); } else if (!strcmp(name, "prescale_value")) { mWriteReg((u32) MappedPrescaleBaseAddress, RegOffset2, value); } else if (!strcmp(name, "prescale_channel")) { mWriteReg((u32) MappedPrescaleBaseAddress, RegOffset3, value); } else if (!strcmp(name, "burst_rate")) { mWriteReg((u32) MappedBurstBaseAddress, RegOffset2, value); } else if (!strcmp(name, "burst_channel")) { mWriteReg((u32) MappedBurstBaseAddress, RegOffset3, value); } else { addReplyErrorFormat(c, "got unknown field '%s'", name); goto pq_error; } } addReplyStatus(c, "OK"); PQclear(res); PQfinish(conn); return; err: addReplyError(c, tubii_err); return; pq_error: if (res) PQclear(res); PQfinish(conn); }
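/*
 * safe_strtoul() used above is defined elsewhere in this code base; judging
 * from the call sites it returns 0 on success and nonzero on failure.  A
 * plausible equivalent built on strtoul(), under that assumed convention, is
 * sketched below (parse_uint32 is a hypothetical name, not the real helper).
 */
#include <errno.h>
#include <stdint.h>
#include <stdlib.h>

/* Hypothetical strtoul wrapper: returns 0 on success, -1 on any parse error. */
static int
parse_uint32(const char *s, uint32_t *out)
{
	char	   *end;
	unsigned long val;

	errno = 0;
	val = strtoul(s, &end, 0);
	if (errno != 0 || end == s || *end != '\0' || val > UINT32_MAX)
		return -1;
	*out = (uint32_t) val;
	return 0;
}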
int main(int argc, char * argv[]) { PGconn *conn; PGresult *res; char * conninfo = calloc (MAX_LEN, sizeof (char)); char * query = calloc (MAX_LEN, sizeof (char)); char * nama = calloc (MAX_LEN, sizeof (char)); char * alamat = calloc (MAX_LEN, sizeof (char)); char * temp = calloc (MAX_LEN, sizeof (char)); int field_count; int rec_count; int c,menu; int i,j; strcpy (conninfo, "dbname=test user=nop"); conn = PQconnectdb (conninfo); if (PQstatus (conn) != CONNECTION_OK) { fprintf (stderr, "Kesalahan koneksi: %s\n", PQerrorMessage (conn)); exit (1); } while ( 1 ) { fprintf(stdout, "\n\n\nDATA PASIEN\n"); fprintf(stdout, "***********\n\n"); fprintf(stdout, "a Tambah data\n"); fprintf(stdout, "b Tampil data\n"); fprintf(stdout, "x Keluar aplikasi\n"); fprintf(stdout, "Pilihan Anda: "); c = tolower(fgetc (stdin)); menu = c; while (c != '\n' && c != EOF) c = fgetc (stdin); if (menu == 'a') { fprintf(stdout, "Tambah data\n"); fprintf(stdout, "===========\n"); fprintf(stdout, "Nama : "); fgets (nama, MAX_LEN-1, stdin); fprintf(stdout, "Alamat : "); fgets (alamat, MAX_LEN-1, stdin); sprintf (query, "insert into pasien (nama, alamat) values ('%s','%s')", nama, alamat); res = PQexec (conn, query); PQclear (res); } else if (menu == 'b') { fprintf(stdout, "Tampil data\n"); fprintf(stdout, "===========\n"); sprintf (query, "select nama,alamat from pasien"); res = PQexec (conn, query); field_count = PQnfields (res); for (i=0; i< field_count; i++) { fprintf (stdout, "%-40s", PQfname (res, i)); } fprintf (stdout, "\n"); rec_count = PQntuples (res); for (i=0; i< rec_count; i++) { for (j=0; j< field_count; j++) { strcpy (temp, PQgetvalue (res, i, j)); temp[strlen(temp)-1] = 0; fprintf (stdout, "%-40s", temp); } fprintf (stdout, "\n"); } PQclear (res); } else if (menu == 'x') { fprintf(stdout, "Bye\n"); break; } }; PQfinish (conn); free (nama); free (alamat); free (query); free (conninfo); free (temp); return 0; }
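/*
 * The INSERT in the menu program above splices fgets() input straight into
 * the SQL string.  Below is a sketch of the same insert using
 * PQescapeLiteral(), which quotes and escapes the value (the returned string
 * already includes the surrounding single quotes and must be released with
 * PQfreemem()).  The table and column names follow the example above; the
 * helper itself is illustrative.
 */
#include <stdio.h>
#include <string.h>
#include <libpq-fe.h>

static int
insert_pasien(PGconn *conn, const char *nama, const char *alamat)
{
	char	   *q_nama = PQescapeLiteral(conn, nama, strlen(nama));
	char	   *q_alamat = PQescapeLiteral(conn, alamat, strlen(alamat));
	char		query[1024];
	PGresult   *res;
	int			ok = 0;

	if (q_nama == NULL || q_alamat == NULL)
	{
		fprintf(stderr, "escape failed: %s", PQerrorMessage(conn));
	}
	else
	{
		snprintf(query, sizeof(query),
				 "insert into pasien (nama, alamat) values (%s, %s)",
				 q_nama, q_alamat);
		res = PQexec(conn, query);
		ok = (PQresultStatus(res) == PGRES_COMMAND_OK);
		if (!ok)
			fprintf(stderr, "insert failed: %s", PQerrorMessage(conn));
		PQclear(res);
	}

	if (q_nama)
		PQfreemem(q_nama);
	if (q_alamat)
		PQfreemem(q_alamat);
	return ok;
}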
/* * ExecQueryUsingCursor: run a SELECT-like query using a cursor * * This feature allows result sets larger than RAM to be dealt with. * * Returns true if the query executed successfully, false otherwise. * * If pset.timing is on, total query time (exclusive of result-printing) is * stored into *elapsed_msec. */ static bool ExecQueryUsingCursor(const char *query, double *elapsed_msec) { bool OK = true; PGresult *results; PQExpBufferData buf; printQueryOpt my_popt = pset.popt; FILE *fout; bool is_pipe; bool is_pager = false; bool started_txn = false; int ntuples; int fetch_count; char fetch_cmd[64]; instr_time before, after; int flush_error; *elapsed_msec = 0; /* initialize print options for partial table output */ my_popt.topt.start_table = true; my_popt.topt.stop_table = false; my_popt.topt.prior_records = 0; if (pset.timing) INSTR_TIME_SET_CURRENT(before); /* if we're not in a transaction, start one */ if (PQtransactionStatus(pset.db) == PQTRANS_IDLE) { results = PQexec(pset.db, "BEGIN"); OK = AcceptResult(results) && (PQresultStatus(results) == PGRES_COMMAND_OK); PQclear(results); if (!OK) return false; started_txn = true; } /* Send DECLARE CURSOR */ initPQExpBuffer(&buf); appendPQExpBuffer(&buf, "DECLARE _psql_cursor NO SCROLL CURSOR FOR\n%s", query); results = PQexec(pset.db, buf.data); OK = AcceptResult(results) && (PQresultStatus(results) == PGRES_COMMAND_OK); PQclear(results); termPQExpBuffer(&buf); if (!OK) goto cleanup; if (pset.timing) { INSTR_TIME_SET_CURRENT(after); INSTR_TIME_SUBTRACT(after, before); *elapsed_msec += INSTR_TIME_GET_MILLISEC(after); } /* * In \gset mode, we force the fetch count to be 2, so that we will throw * the appropriate error if the query returns more than one row. */ if (pset.gset_prefix) fetch_count = 2; else fetch_count = pset.fetch_count; snprintf(fetch_cmd, sizeof(fetch_cmd), "FETCH FORWARD %d FROM _psql_cursor", fetch_count); /* prepare to write output to \g argument, if any */ if (pset.gfname) { if (!openQueryOutputFile(pset.gfname, &fout, &is_pipe)) { OK = false; goto cleanup; } if (is_pipe) disable_sigpipe_trap(); } else { fout = pset.queryFout; is_pipe = false; /* doesn't matter */ } /* clear any pre-existing error indication on the output stream */ clearerr(fout); for (;;) { if (pset.timing) INSTR_TIME_SET_CURRENT(before); /* get fetch_count tuples at a time */ results = PQexec(pset.db, fetch_cmd); if (pset.timing) { INSTR_TIME_SET_CURRENT(after); INSTR_TIME_SUBTRACT(after, before); *elapsed_msec += INSTR_TIME_GET_MILLISEC(after); } if (PQresultStatus(results) != PGRES_TUPLES_OK) { /* shut down pager before printing error message */ if (is_pager) { ClosePager(fout); is_pager = false; } OK = AcceptResult(results); Assert(!OK); PQclear(results); break; } if (pset.gset_prefix) { /* StoreQueryTuple will complain if not exactly one row */ OK = StoreQueryTuple(results); PQclear(results); break; } ntuples = PQntuples(results); if (ntuples < fetch_count) { /* this is the last result set, so allow footer decoration */ my_popt.topt.stop_table = true; } else if (fout == stdout && !is_pager) { /* * If query requires multiple result sets, hack to ensure that * only one pager instance is used for the whole mess */ fout = PageOutput(INT_MAX, &(my_popt.topt)); is_pager = true; } printQuery(results, &my_popt, fout, is_pager, pset.logfile); PQclear(results); /* after the first result set, disallow header decoration */ my_popt.topt.start_table = false; my_popt.topt.prior_records += ntuples; /* * Make sure to flush the output stream, so intermediate results are 
* visible to the client immediately. We check the results because if * the pager dies/exits/etc, there's no sense throwing more data at * it. */ flush_error = fflush(fout); /* * Check if we are at the end, if a cancel was pressed, or if there * were any errors either trying to flush out the results, or more * generally on the output stream at all. If we hit any errors * writing things to the stream, we presume $PAGER has disappeared and * stop bothering to pull down more data. */ if (ntuples < fetch_count || cancel_pressed || flush_error || ferror(fout)) break; } if (pset.gfname) { /* close \g argument file/pipe */ if (is_pipe) { pclose(fout); restore_sigpipe_trap(); } else fclose(fout); } else if (is_pager) { /* close transient pager */ ClosePager(fout); } cleanup: if (pset.timing) INSTR_TIME_SET_CURRENT(before); /* * We try to close the cursor on either success or failure, but on failure * ignore the result (it's probably just a bleat about being in an aborted * transaction) */ results = PQexec(pset.db, "CLOSE _psql_cursor"); if (OK) { OK = AcceptResult(results) && (PQresultStatus(results) == PGRES_COMMAND_OK); } PQclear(results); if (started_txn) { results = PQexec(pset.db, OK ? "COMMIT" : "ROLLBACK"); OK &= AcceptResult(results) && (PQresultStatus(results) == PGRES_COMMAND_OK); PQclear(results); } if (pset.timing) { INSTR_TIME_SET_CURRENT(after); INSTR_TIME_SUBTRACT(after, before); *elapsed_msec += INSTR_TIME_GET_MILLISEC(after); } return OK; }
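/*
 * The DECLARE / FETCH FORWARD / CLOSE pattern that ExecQueryUsingCursor()
 * implements, reduced to a plain libpq sketch for fetching an arbitrarily
 * large result in fixed-size batches.  The cursor name, batch size and the
 * single-column output are placeholders.
 */
#include <stdio.h>
#include <libpq-fe.h>

static int
stream_query(PGconn *conn, const char *query)
{
	PGresult   *res;
	char		cmd[1024];
	int			ok;

	res = PQexec(conn, "BEGIN");
	ok = (PQresultStatus(res) == PGRES_COMMAND_OK);
	PQclear(res);
	if (!ok)
		return 0;

	snprintf(cmd, sizeof(cmd), "DECLARE my_cursor NO SCROLL CURSOR FOR %s", query);
	res = PQexec(conn, cmd);
	ok = (PQresultStatus(res) == PGRES_COMMAND_OK);
	PQclear(res);

	while (ok)
	{
		int			i, ntuples;

		res = PQexec(conn, "FETCH FORWARD 100 FROM my_cursor");
		if (PQresultStatus(res) != PGRES_TUPLES_OK)
		{
			fprintf(stderr, "fetch failed: %s", PQerrorMessage(conn));
			ok = 0;
			PQclear(res);
			break;
		}

		ntuples = PQntuples(res);
		for (i = 0; i < ntuples; i++)
			printf("%s\n", PQgetvalue(res, i, 0));	/* first column only */
		PQclear(res);

		if (ntuples < 100)			/* a short batch means the cursor is drained */
			break;
	}

	res = PQexec(conn, "CLOSE my_cursor");
	PQclear(res);
	res = PQexec(conn, ok ? "COMMIT" : "ROLLBACK");
	PQclear(res);
	return ok;
}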
int center_manage_updatedata(){ int i; PGconn *db_conn; PGresult *db_res; int db_count; int cacheid; center_jmod_info *jmod_info; int proid; int lang_flag; std::map<std::string,center_jmod_info*>::iterator jmod_it; center_pro_info *pro_info; std::vector<std::pair<int,int> > pro_list; if((db_conn = center_manage_conndb()) == NULL){ return -1; } db_res = PQexec(db_conn,"SELECT DISTINCT \"jmodname\" FROM \"mod\";"); if(PQresultStatus(db_res) != PGRES_TUPLES_OK){ center_manage_closedb(db_conn); return -1; } db_count = PQntuples(db_res); for(i = 0;i < db_count;i++){ jmod_info = new center_jmod_info(PQgetvalue(db_res,i,0),2); center_manage_jmodmap.insert(std::pair<std::string,center_jmod_info*>(jmod_info->name,jmod_info)); } PQclear(db_res); db_res = PQexec(db_conn,"SELECT \"proid\",\"cacheid\",\"lang\",\"jmodname\" FROM \"problem\" INNER JOIN \"mod\" ON (\"problem\".\"modid\"=\"mod\".\"modid\");"); if(PQresultStatus(db_res) != PGRES_TUPLES_OK){ center_manage_closedb(db_conn); return -1; } db_count = PQntuples(db_res); for(i = 0;i < db_count;i++){ sscanf(PQgetvalue(db_res,i,0),"%d",&proid); sscanf(PQgetvalue(db_res,i,1),"%d",&cacheid); sscanf(PQgetvalue(db_res,i,2),"%d",&lang_flag); if((jmod_it = center_manage_jmodmap.find(PQgetvalue(db_res,i,3))) == center_manage_jmodmap.end()){ continue; } if(manage_updatepro(proid,cacheid,jmod_it->second,lang_flag) == 1){ pro_list.push_back(std::make_pair(proid,cacheid)); printf("pro update %d %d\n",proid,cacheid); } } PQclear(db_res); if(!pro_list.empty()){ center_judge_updatepro(pro_list); } center_manage_closedb(db_conn); return 0; }
/* * pg_lock_status - produce a view with one row per held or awaited lock mode */ Datum pg_lock_status(PG_FUNCTION_ARGS) { FuncCallContext *funcctx; PG_Lock_Status *mystatus; LockData *lockData; if (SRF_IS_FIRSTCALL()) { TupleDesc tupdesc; MemoryContext oldcontext; /* create a function context for cross-call persistence */ funcctx = SRF_FIRSTCALL_INIT(); /* * switch to memory context appropriate for multiple function calls */ oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx); /* build tupdesc for result tuples */ /* this had better match pg_locks view in system_views.sql */ tupdesc = CreateTemplateTupleDesc(16, false); TupleDescInitEntry(tupdesc, (AttrNumber) 1, "locktype", TEXTOID, -1, 0); TupleDescInitEntry(tupdesc, (AttrNumber) 2, "database", OIDOID, -1, 0); TupleDescInitEntry(tupdesc, (AttrNumber) 3, "relation", OIDOID, -1, 0); TupleDescInitEntry(tupdesc, (AttrNumber) 4, "page", INT4OID, -1, 0); TupleDescInitEntry(tupdesc, (AttrNumber) 5, "tuple", INT2OID, -1, 0); TupleDescInitEntry(tupdesc, (AttrNumber) 6, "transactionid", XIDOID, -1, 0); TupleDescInitEntry(tupdesc, (AttrNumber) 7, "classid", OIDOID, -1, 0); TupleDescInitEntry(tupdesc, (AttrNumber) 8, "objid", OIDOID, -1, 0); TupleDescInitEntry(tupdesc, (AttrNumber) 9, "objsubid", INT2OID, -1, 0); TupleDescInitEntry(tupdesc, (AttrNumber) 10, "transaction", XIDOID, -1, 0); TupleDescInitEntry(tupdesc, (AttrNumber) 11, "pid", INT4OID, -1, 0); TupleDescInitEntry(tupdesc, (AttrNumber) 12, "mode", TEXTOID, -1, 0); TupleDescInitEntry(tupdesc, (AttrNumber) 13, "granted", BOOLOID, -1, 0); /* * These next columns are specific to GPDB */ TupleDescInitEntry(tupdesc, (AttrNumber) 14, "mppSessionId", INT4OID, -1, 0); TupleDescInitEntry(tupdesc, (AttrNumber) 15, "mppIsWriter", BOOLOID, -1, 0); TupleDescInitEntry(tupdesc, (AttrNumber) 16, "gp_segment_id", INT4OID, -1, 0); funcctx->tuple_desc = BlessTupleDesc(tupdesc); /* * Collect all the locking information that we will format and send * out as a result set. */ mystatus = (PG_Lock_Status *) palloc(sizeof(PG_Lock_Status)); funcctx->user_fctx = (void *) mystatus; mystatus->lockData = GetLockStatusData(); mystatus->currIdx = 0; mystatus->numSegLocks = 0; mystatus->numsegresults = 0; mystatus->segresults = NULL; /* * Seeing the locks just from the masterDB isn't enough to know what is locked, * or if there is a deadlock. That's because the segDBs also take locks. * Some locks show up only on the master, some only on the segDBs, and some on both. * * So, let's collect the lock information from all the segDBs. Sure, this means * there are a lot more rows coming back from pg_locks than before, since most locks * on the segDBs happen across all the segDBs at the same time. But not always, * so let's play it safe and get them all. */ if (Gp_role == GP_ROLE_DISPATCH) { int resultCount = 0; struct pg_result **results = NULL; StringInfoData buffer; StringInfoData errbuf; int i; initStringInfo(&buffer); /* * This query has to match the tupledesc we just made above. */ appendStringInfo(&buffer, "SELECT * FROM pg_lock_status() L " " (locktype text, database oid, relation oid, page int4, tuple int2," " transactionid xid, classid oid, objid oid, objsubid int2," " transaction xid, pid int4, mode text, granted boolean, " " mppSessionId int4, mppIsWriter boolean, gp_segment_id int4) "); initStringInfo(&errbuf); /* * Why dispatch something here, rather than do a UNION ALL in pg_locks view, and * a join to gp_dist_random('gp_id')? There are several important reasons. 
* * The union all method is much slower, and requires taking locks on gp_id. * More importantly, applications such as pgAdmin do queries of this view that * involve a correlated subqueries joining to other catalog tables, * which works if we do it this way, but fails * if the view includes the union all. That completely breaks the server status * display in pgAdmin. * * Why dispatch this way, rather than via SPI? There are several advantages. * First, it's easy to get "writer gang is busy" errors if we use SPI. * * Second, this should be much faster, as it doesn't require setting up * the interconnect, and doesn't need to touch any actual data tables to be * able to get the gp_segment_id. * * The downside is we get n result sets, where n == number of segDBs. * * It would be better yet if we sent a plan tree rather than a text string, * so the segDBs don't need to parse it. That would also avoid taking any relation locks * on the segDB to get this info (normally need to get an accessShareLock on pg_locks on the segDB * to make sure it doesn't go away during parsing). But the only safe way I know to do this * is to hand-build the plan tree, and I'm to lazy to do it right now. It's just a matter of * building a function scan node, and filling it in with our result set info (from the tupledesc). * * One thing to note: it's OK to join pg_locks with any catalog table or master-only table, * but joining to a distributed table will result in "writer gang busy: possible attempt to * execute volatile function in unsupported context" errors, because * the scan of the distributed table might already be running on the writer gang * when we want to dispatch this. * * This could be fixed by allocating a reader gang and dispatching to that, but the cost * of setting up a new gang is high, and I've never seen anyone need to join this to a * distributed table. * */ results = cdbdisp_dispatchRMCommand(buffer.data, true, &errbuf, &resultCount); if (errbuf.len > 0) ereport(ERROR, (errmsg("pg_lock internal error (gathered %d results from cmd '%s')", resultCount, buffer.data), errdetail("%s", errbuf.data))); /* * I don't think resultCount can ever be zero if errbuf isn't set. * But check to be sure. */ if (resultCount == 0) elog(ERROR, "pg_locks didn't get back any data from the segDBs"); for (i = 0; i < resultCount; i++) { /* * Any error here should have propagated into errbuf, so we shouldn't * ever see anything other that tuples_ok here. But, check to be * sure. */ if (PQresultStatus(results[i]) != PGRES_TUPLES_OK) { elog(ERROR,"pg_locks: resultStatus not tuples_Ok"); } else { /* * numSegLocks needs to be the total size we are returning to * the application. At the start of this loop, it has the count * for the masterDB locks. Add each of the segDB lock counts. */ mystatus->numSegLocks += PQntuples(results[i]); } } pfree(errbuf.data); mystatus->numsegresults = resultCount; /* * cdbdisp_dispatchRMCommand copies the result sets into our memory, which * will still exist on the subsequent calls. */ mystatus->segresults = results; MemoryContextSwitchTo(oldcontext); } } funcctx = SRF_PERCALL_SETUP(); mystatus = (PG_Lock_Status *) funcctx->user_fctx; lockData = mystatus->lockData; /* * This loop returns all the local lock data from the segment we are running on. 
*/ while (mystatus->currIdx < lockData->nelements) { PROCLOCK *proclock; LOCK *lock; PGPROC *proc; bool granted; LOCKMODE mode = 0; const char *locktypename; char tnbuf[32]; Datum values[16]; bool nulls[16]; HeapTuple tuple; Datum result; proclock = &(lockData->proclocks[mystatus->currIdx]); lock = &(lockData->locks[mystatus->currIdx]); proc = &(lockData->procs[mystatus->currIdx]); /* * Look to see if there are any held lock modes in this PROCLOCK. If * so, report, and destructively modify lockData so we don't report * again. */ granted = false; if (proclock->holdMask) { for (mode = 0; mode < MAX_LOCKMODES; mode++) { if (proclock->holdMask & LOCKBIT_ON(mode)) { granted = true; proclock->holdMask &= LOCKBIT_OFF(mode); break; } } } /* * If no (more) held modes to report, see if PROC is waiting for a * lock on this lock. */ if (!granted) { if (proc->waitLock == proclock->tag.myLock) { /* Yes, so report it with proper mode */ mode = proc->waitLockMode; /* * We are now done with this PROCLOCK, so advance pointer to * continue with next one on next call. */ mystatus->currIdx++; } else { /* * Okay, we've displayed all the locks associated with this * PROCLOCK, proceed to the next one. */ mystatus->currIdx++; continue; } } /* * Form tuple with appropriate data. */ MemSet(values, 0, sizeof(values)); MemSet(nulls, false, sizeof(nulls)); if (lock->tag.locktag_type <= LOCKTAG_ADVISORY) locktypename = LockTagTypeNames[lock->tag.locktag_type]; else { snprintf(tnbuf, sizeof(tnbuf), "unknown %d", (int) lock->tag.locktag_type); locktypename = tnbuf; } values[0] = CStringGetTextDatum(locktypename); switch (lock->tag.locktag_type) { case LOCKTAG_RELATION: case LOCKTAG_RELATION_EXTEND: case LOCKTAG_RELATION_RESYNCHRONIZE: values[1] = ObjectIdGetDatum(lock->tag.locktag_field1); values[2] = ObjectIdGetDatum(lock->tag.locktag_field2); nulls[3] = true; nulls[4] = true; nulls[5] = true; nulls[6] = true; nulls[7] = true; nulls[8] = true; break; case LOCKTAG_PAGE: values[1] = ObjectIdGetDatum(lock->tag.locktag_field1); values[2] = ObjectIdGetDatum(lock->tag.locktag_field2); values[3] = UInt32GetDatum(lock->tag.locktag_field3); nulls[4] = true; nulls[5] = true; nulls[6] = true; nulls[7] = true; nulls[8] = true; break; case LOCKTAG_TUPLE: values[1] = ObjectIdGetDatum(lock->tag.locktag_field1); values[2] = ObjectIdGetDatum(lock->tag.locktag_field2); values[3] = UInt32GetDatum(lock->tag.locktag_field3); values[4] = UInt16GetDatum(lock->tag.locktag_field4); nulls[5] = true; nulls[6] = true; nulls[7] = true; nulls[8] = true; break; case LOCKTAG_TRANSACTION: values[5] = TransactionIdGetDatum(lock->tag.locktag_field1); nulls[1] = true; nulls[2] = true; nulls[3] = true; nulls[4] = true; nulls[6] = true; nulls[7] = true; nulls[8] = true; break; case LOCKTAG_RELATION_APPENDONLY_SEGMENT_FILE: values[1] = ObjectIdGetDatum(lock->tag.locktag_field1); values[2] = ObjectIdGetDatum(lock->tag.locktag_field2); values[7] = ObjectIdGetDatum(lock->tag.locktag_field3); nulls[3] = true; nulls[4] = true; nulls[5] = true; nulls[6] = true; nulls[8] = true; break; case LOCKTAG_RESOURCE_QUEUE: values[1] = ObjectIdGetDatum(proc->databaseId); values[7] = ObjectIdGetDatum(lock->tag.locktag_field1); nulls[2] = true; nulls[3] = true; nulls[4] = true; nulls[5] = true; nulls[6] = true; nulls[8] = true; break; case LOCKTAG_OBJECT: case LOCKTAG_USERLOCK: case LOCKTAG_ADVISORY: default: /* treat unknown locktags like OBJECT */ values[1] = ObjectIdGetDatum(lock->tag.locktag_field1); values[6] = ObjectIdGetDatum(lock->tag.locktag_field2); values[7] = 
ObjectIdGetDatum(lock->tag.locktag_field3); values[8] = Int16GetDatum(lock->tag.locktag_field4); nulls[2] = true; nulls[3] = true; nulls[4] = true; nulls[5] = true; break; } values[9] = TransactionIdGetDatum(proc->xid); if (proc->pid != 0) values[10] = Int32GetDatum(proc->pid); else nulls[10] = true; values[11] = DirectFunctionCall1(textin, CStringGetDatum((char *) GetLockmodeName(LOCK_LOCKMETHOD(*lock), mode))); values[12] = BoolGetDatum(granted); values[13] = Int32GetDatum(proc->mppSessionId); values[14] = Int32GetDatum(proc->mppIsWriter); values[15] = Int32GetDatum(Gp_segment); tuple = heap_form_tuple(funcctx->tuple_desc, values, nulls); result = HeapTupleGetDatum(tuple); SRF_RETURN_NEXT(funcctx, result); } /* * This loop only executes on the masterDB and only in dispatch mode, because that * is the only time we dispatched to the segDBs. */ while (mystatus->currIdx >= lockData->nelements && mystatus->currIdx < lockData->nelements + mystatus->numSegLocks) { HeapTuple tuple; Datum result; Datum values[16]; bool nulls[16]; int i; int whichresultset = 0; int whichelement = mystatus->currIdx - lockData->nelements; int whichrow = whichelement; Assert(Gp_role == GP_ROLE_DISPATCH); /* * Because we have one result set per segDB (rather than one big result set with everything), * we need to figure out which result set we are on, and which row within that result set * we are returning. * * So, we walk through all the result sets and all the rows in each one, in order. */ while(whichrow >= PQntuples(mystatus->segresults[whichresultset])) { whichrow -= PQntuples(mystatus->segresults[whichresultset]); whichresultset++; if (whichresultset >= mystatus->numsegresults) break; } /* * If this condition is true, we have already sent everything back, * and we just want to do the SRF_RETURN_DONE */ if (whichresultset >= mystatus->numsegresults) break; mystatus->currIdx++; /* * Form tuple with appropriate data we got from the segDBs */ MemSet(values, 0, sizeof(values)); MemSet(nulls, false, sizeof(nulls)); /* * For each column, extract out the value (which comes out in text). * Convert it to the appropriate datatype to match our tupledesc, * and put that in values. 
* The columns look like this (from select statement earlier): * * " (locktype text, database oid, relation oid, page int4, tuple int2," * " transactionid xid, classid oid, objid oid, objsubid int2," * " transaction xid, pid int4, mode text, granted boolean, " * " mppSessionId int4, mppIsWriter boolean, gp_segment_id int4) ," */ values[0] = CStringGetTextDatum(PQgetvalue(mystatus->segresults[whichresultset], whichrow, 0)); values[1] = ObjectIdGetDatum(atoi(PQgetvalue(mystatus->segresults[whichresultset], whichrow, 1))); values[2] = ObjectIdGetDatum(atoi(PQgetvalue(mystatus->segresults[whichresultset], whichrow, 2))); values[3] = UInt32GetDatum(atoi(PQgetvalue(mystatus->segresults[whichresultset], whichrow, 3))); values[4] = UInt16GetDatum(atoi(PQgetvalue(mystatus->segresults[whichresultset], whichrow, 4))); values[5] = TransactionIdGetDatum(atoi(PQgetvalue(mystatus->segresults[whichresultset], whichrow, 5))); values[6] = ObjectIdGetDatum(atoi(PQgetvalue(mystatus->segresults[whichresultset], whichrow, 6))); values[7] = ObjectIdGetDatum(atoi(PQgetvalue(mystatus->segresults[whichresultset], whichrow, 7))); values[8] = UInt16GetDatum(atoi(PQgetvalue(mystatus->segresults[whichresultset], whichrow, 8))); values[9] = TransactionIdGetDatum(atoi(PQgetvalue(mystatus->segresults[whichresultset], whichrow, 9))); values[10] = UInt32GetDatum(atoi(PQgetvalue(mystatus->segresults[whichresultset], whichrow,10))); values[11] = CStringGetTextDatum(PQgetvalue(mystatus->segresults[whichresultset], whichrow,11)); values[12] = BoolGetDatum(strncmp(PQgetvalue(mystatus->segresults[whichresultset], whichrow,12),"t",1)==0); values[13] = Int32GetDatum(atoi(PQgetvalue(mystatus->segresults[whichresultset], whichrow,13))); values[14] = BoolGetDatum(strncmp(PQgetvalue(mystatus->segresults[whichresultset], whichrow,14),"t",1)==0); values[15] = Int32GetDatum(atoi(PQgetvalue(mystatus->segresults[whichresultset], whichrow,15))); /* * Copy the null info over. It should all match properly. */ for (i=0; i<16; i++) { nulls[i] = PQgetisnull(mystatus->segresults[whichresultset], whichrow, i); } tuple = heap_form_tuple(funcctx->tuple_desc, values, nulls); result = HeapTupleGetDatum(tuple); SRF_RETURN_NEXT(funcctx, result); } /* * if we dispatched to the segDBs, free up the memory holding the result sets. * Otherwise we might leak this memory each time we got called (does it automatically * get freed by the pool being deleted? Probably, but this is safer). */ if (mystatus->segresults != NULL) { int i; for (i = 0; i < mystatus->numsegresults; i++) PQclear(mystatus->segresults[i]); free(mystatus->segresults); } SRF_RETURN_DONE(funcctx); }
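/*
 * pg_lock_status() above follows the standard set-returning-function (SRF)
 * protocol: one-time setup under SRF_IS_FIRSTCALL(), per-call setup with
 * SRF_PERCALL_SETUP(), then SRF_RETURN_NEXT() / SRF_RETURN_DONE().  A minimal
 * sketch of that protocol, returning the integers 0 .. n-1, assuming it is
 * compiled as a loadable module and declared as
 * CREATE FUNCTION first_n_integers(int) RETURNS SETOF integer.
 */
#include "postgres.h"
#include "funcapi.h"

PG_MODULE_MAGIC;

PG_FUNCTION_INFO_V1(first_n_integers);

Datum
first_n_integers(PG_FUNCTION_ARGS)
{
	FuncCallContext *funcctx;

	if (SRF_IS_FIRSTCALL())
	{
		MemoryContext oldcontext;

		funcctx = SRF_FIRSTCALL_INIT();
		oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
		/* remember across calls how many rows we are supposed to emit */
		funcctx->max_calls = PG_GETARG_INT32(0);
		MemoryContextSwitchTo(oldcontext);
	}

	funcctx = SRF_PERCALL_SETUP();

	if (funcctx->call_cntr < funcctx->max_calls)
		SRF_RETURN_NEXT(funcctx, Int32GetDatum((int32) funcctx->call_cntr));
	else
		SRF_RETURN_DONE(funcctx);
}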
static void BaseBackup(void) { PGresult *res; char *sysidentifier; uint32 latesttli; uint32 starttli; char current_path[MAXPGPATH]; char escaped_label[MAXPGPATH]; int i; char xlogstart[64]; char xlogend[64]; int minServerMajor, maxServerMajor; int serverMajor; /* * Connect in replication mode to the server */ conn = GetConnection(); if (!conn) /* Error message already written in GetConnection() */ exit(1); /* * Check server version. BASE_BACKUP command was introduced in 9.1, so we * can't work with servers older than 9.1. */ minServerMajor = 901; maxServerMajor = PG_VERSION_NUM / 100; serverMajor = PQserverVersion(conn) / 100; if (serverMajor < minServerMajor || serverMajor > maxServerMajor) { const char *serverver = PQparameterStatus(conn, "server_version"); fprintf(stderr, _("%s: incompatible server version %s\n"), progname, serverver ? serverver : "'unknown'"); disconnect_and_exit(1); } /* * If WAL streaming was requested, also check that the server is new * enough for that. */ if (streamwal && !CheckServerVersionForStreaming(conn)) { /* Error message already written in CheckServerVersionForStreaming() */ disconnect_and_exit(1); } /* * Build contents of recovery.conf if requested */ if (writerecoveryconf) GenerateRecoveryConf(conn); /* * Run IDENTIFY_SYSTEM so we can get the timeline */ res = PQexec(conn, "IDENTIFY_SYSTEM"); if (PQresultStatus(res) != PGRES_TUPLES_OK) { fprintf(stderr, _("%s: could not send replication command \"%s\": %s"), progname, "IDENTIFY_SYSTEM", PQerrorMessage(conn)); disconnect_and_exit(1); } if (PQntuples(res) != 1 || PQnfields(res) != 3) { fprintf(stderr, _("%s: could not identify system: got %d rows and %d fields, expected %d rows and %d fields\n"), progname, PQntuples(res), PQnfields(res), 1, 3); disconnect_and_exit(1); } sysidentifier = pg_strdup(PQgetvalue(res, 0, 0)); latesttli = atoi(PQgetvalue(res, 0, 1)); PQclear(res); /* * Start the actual backup */ PQescapeStringConn(conn, escaped_label, label, sizeof(escaped_label), &i); snprintf(current_path, sizeof(current_path), "BASE_BACKUP LABEL '%s' %s %s %s %s", escaped_label, showprogress ? "PROGRESS" : "", includewal && !streamwal ? "WAL" : "", fastcheckpoint ? "FAST" : "", includewal ? "NOWAIT" : ""); if (PQsendQuery(conn, current_path) == 0) { fprintf(stderr, _("%s: could not send replication command \"%s\": %s"), progname, "BASE_BACKUP", PQerrorMessage(conn)); disconnect_and_exit(1); } /* * Get the starting xlog position */ res = PQgetResult(conn); if (PQresultStatus(res) != PGRES_TUPLES_OK) { fprintf(stderr, _("%s: could not initiate base backup: %s"), progname, PQerrorMessage(conn)); disconnect_and_exit(1); } if (PQntuples(res) != 1) { fprintf(stderr, _("%s: server returned unexpected response to BASE_BACKUP command; got %d rows and %d fields, expected %d rows and %d fields\n"), progname, PQntuples(res), PQnfields(res), 1, 2); disconnect_and_exit(1); } strcpy(xlogstart, PQgetvalue(res, 0, 0)); /* * 9.3 and later sends the TLI of the starting point. With older servers, * assume it's the same as the latest timeline reported by * IDENTIFY_SYSTEM. 
*/ if (PQnfields(res) >= 2) starttli = atoi(PQgetvalue(res, 0, 1)); else starttli = latesttli; PQclear(res); MemSet(xlogend, 0, sizeof(xlogend)); if (verbose && includewal) fprintf(stderr, _("transaction log start point: %s on timeline %u\n"), xlogstart, starttli); /* * Get the header */ res = PQgetResult(conn); if (PQresultStatus(res) != PGRES_TUPLES_OK) { fprintf(stderr, _("%s: could not get backup header: %s"), progname, PQerrorMessage(conn)); disconnect_and_exit(1); } if (PQntuples(res) < 1) { fprintf(stderr, _("%s: no data returned from server\n"), progname); disconnect_and_exit(1); } /* * Sum up the total size, for progress reporting */ totalsize = totaldone = 0; tablespacecount = PQntuples(res); for (i = 0; i < PQntuples(res); i++) { if (showprogress) totalsize += atol(PQgetvalue(res, i, 2)); /* * Verify tablespace directories are empty. Don't bother with the * first once since it can be relocated, and it will be checked before * we do anything anyway. */ if (format == 'p' && !PQgetisnull(res, i, 1)) verify_dir_is_empty_or_create(PQgetvalue(res, i, 1)); } /* * When writing to stdout, require a single tablespace */ if (format == 't' && strcmp(basedir, "-") == 0 && PQntuples(res) > 1) { fprintf(stderr, _("%s: can only write single tablespace to stdout, database has %d\n"), progname, PQntuples(res)); disconnect_and_exit(1); } /* * If we're streaming WAL, start the streaming session before we start * receiving the actual data chunks. */ if (streamwal) { if (verbose) fprintf(stderr, _("%s: starting background WAL receiver\n"), progname); StartLogStreamer(xlogstart, starttli, sysidentifier); } /* * Start receiving chunks */ for (i = 0; i < PQntuples(res); i++) { if (format == 't') ReceiveTarFile(conn, res, i); else ReceiveAndUnpackTarFile(conn, res, i); } /* Loop over all tablespaces */ if (showprogress) { progress_report(PQntuples(res), NULL); fprintf(stderr, "\n"); /* Need to move to next line */ } PQclear(res); /* * Get the stop position */ res = PQgetResult(conn); if (PQresultStatus(res) != PGRES_TUPLES_OK) { fprintf(stderr, _("%s: could not get transaction log end position from server: %s"), progname, PQerrorMessage(conn)); disconnect_and_exit(1); } if (PQntuples(res) != 1) { fprintf(stderr, _("%s: no transaction log end position returned from server\n"), progname); disconnect_and_exit(1); } strcpy(xlogend, PQgetvalue(res, 0, 0)); if (verbose && includewal) fprintf(stderr, "transaction log end point: %s\n", xlogend); PQclear(res); res = PQgetResult(conn); if (PQresultStatus(res) != PGRES_COMMAND_OK) { fprintf(stderr, _("%s: final receive failed: %s"), progname, PQerrorMessage(conn)); disconnect_and_exit(1); } if (bgchild > 0) { #ifndef WIN32 int status; int r; #else DWORD status; uint32 hi, lo; #endif if (verbose) fprintf(stderr, _("%s: waiting for background process to finish streaming ...\n"), progname); #ifndef WIN32 if (write(bgpipe[1], xlogend, strlen(xlogend)) != strlen(xlogend)) { fprintf(stderr, _("%s: could not send command to background pipe: %s\n"), progname, strerror(errno)); disconnect_and_exit(1); } /* Just wait for the background process to exit */ r = waitpid(bgchild, &status, 0); if (r == -1) { fprintf(stderr, _("%s: could not wait for child process: %s\n"), progname, strerror(errno)); disconnect_and_exit(1); } if (r != bgchild) { fprintf(stderr, _("%s: child %d died, expected %d\n"), progname, r, (int) bgchild); disconnect_and_exit(1); } if (!WIFEXITED(status)) { fprintf(stderr, _("%s: child process did not exit normally\n"), progname); disconnect_and_exit(1); } if 
(WEXITSTATUS(status) != 0) { fprintf(stderr, _("%s: child process exited with error %d\n"), progname, WEXITSTATUS(status)); disconnect_and_exit(1); } /* Exited normally, we're happy! */ #else /* WIN32 */ /* * On Windows, since we are in the same process, we can just store the * value directly in the variable, and then set the flag that says * it's there. */ if (sscanf(xlogend, "%X/%X", &hi, &lo) != 2) { fprintf(stderr, _("%s: could not parse transaction log location \"%s\"\n"), progname, xlogend); disconnect_and_exit(1); } xlogendptr = ((uint64) hi) << 32 | lo; InterlockedIncrement(&has_xlogendptr); /* First wait for the thread to exit */ if (WaitForSingleObjectEx((HANDLE) bgchild, INFINITE, FALSE) != WAIT_OBJECT_0) { _dosmaperr(GetLastError()); fprintf(stderr, _("%s: could not wait for child thread: %s\n"), progname, strerror(errno)); disconnect_and_exit(1); } if (GetExitCodeThread((HANDLE) bgchild, &status) == 0) { _dosmaperr(GetLastError()); fprintf(stderr, _("%s: could not get child thread exit status: %s\n"), progname, strerror(errno)); disconnect_and_exit(1); } if (status != 0) { fprintf(stderr, _("%s: child thread exited with error %u\n"), progname, (unsigned int) status); disconnect_and_exit(1); } /* Exited normally, we're happy */ #endif } /* Free the recovery.conf contents */ destroyPQExpBuffer(recoveryconfcontents); /* * End of copy data. Final result is already checked inside the loop. */ PQclear(res); PQfinish(conn); if (verbose) fprintf(stderr, "%s: base backup completed\n", progname); }
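/*
 * Both versions of BaseBackup() shown here hand the textual WAL end position
 * ("hi/lo" in hex, e.g. "0/3000158") to the background WAL receiver, and the
 * Windows branch folds it into a 64-bit location.  A small standalone sketch
 * of that conversion; XLogRecPtrLocal is a stand-in name used here instead of
 * the backend's XLogRecPtr type.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t XLogRecPtrLocal;

static int
parse_wal_location(const char *str, XLogRecPtrLocal *out)
{
	uint32_t	hi,
				lo;

	if (sscanf(str, "%X/%X", &hi, &lo) != 2)
		return -1;
	*out = ((XLogRecPtrLocal) hi) << 32 | lo;
	return 0;
}

/* Example: parse_wal_location("0/3000158", &ptr) yields 0x0000000003000158. */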
/* * gp_read_error_log * * Returns set of error log tuples. */ Datum gp_read_error_log(PG_FUNCTION_ARGS) { FuncCallContext *funcctx; ReadErrorLogContext *context; HeapTuple tuple; Datum result; /* * First call setup */ if (SRF_IS_FIRSTCALL()) { MemoryContext oldcontext; FILE *fp; text *relname; funcctx = SRF_FIRSTCALL_INIT(); relname = PG_GETARG_TEXT_P(0); oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx); context = palloc0(sizeof(ReadErrorLogContext)); funcctx->user_fctx = (void *) context; funcctx->tuple_desc = BlessTupleDesc(GetErrorTupleDesc()); /* * Though this function is usually executed on segment, we dispatch * the execution if it happens to be on QD, and combine the results * into one set. */ if (Gp_role == GP_ROLE_DISPATCH) { int resultCount = 0; PGresult **results = NULL; StringInfoData sql; StringInfoData errbuf; int i; initStringInfo(&sql); initStringInfo(&errbuf); /* * construct SQL */ appendStringInfo(&sql, "SELECT * FROM pg_catalog.gp_read_error_log(%s) ", quote_literal_internal(text_to_cstring(relname))); results = cdbdisp_dispatchRMCommand(sql.data, true, &errbuf, &resultCount); if (errbuf.len > 0) elog(ERROR, "%s", errbuf.data); Assert(resultCount > 0); for (i = 0; i < resultCount; i++) { if (PQresultStatus(results[i]) != PGRES_TUPLES_OK) elog(ERROR, "unexpected result from segment: %d", PQresultStatus(results[i])); context->numTuples += PQntuples(results[i]); } pfree(errbuf.data); pfree(sql.data); context->segResults = results; context->numSegResults = resultCount; } else { /* * In QE, read the error log. */ RangeVar *relrv; Oid relid; relrv = makeRangeVarFromNameList(textToQualifiedNameList(relname)); relid = RangeVarGetRelid(relrv, true); /* * If the relation has gone, silently return no tuples. */ if (OidIsValid(relid)) { AclResult aclresult; /* * Requires SELECT priv to read error log. */ aclresult = pg_class_aclcheck(relid, GetUserId(), ACL_SELECT); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, ACL_KIND_CLASS, relrv->relname); ErrorLogFileName(context->filename, MyDatabaseId, relid); fp = AllocateFile(context->filename, "r"); context->fp = fp; } } MemoryContextSwitchTo(oldcontext); if (Gp_role != GP_ROLE_DISPATCH && !context->fp) { pfree(context); SRF_RETURN_DONE(funcctx); } } funcctx = SRF_PERCALL_SETUP(); context = (ReadErrorLogContext *) funcctx->user_fctx; /* * Read error log, probably on segments. We don't check Gp_role, however, * in case master also wants to read the file. */ if (context->fp) { pg_crc32 crc, written_crc; tuple = ErrorLogRead(context->fp, &written_crc); /* * CRC check. */ if (HeapTupleIsValid(tuple)) { INIT_CRC32C(crc); COMP_CRC32C(crc, tuple->t_data, tuple->t_len); FIN_CRC32C(crc); if (!EQ_CRC32C(crc, written_crc)) { elog(LOG, "incorrect checksum in error log %s", context->filename); tuple = NULL; } } /* * If we found a valid tuple, return it. Otherwise, fall through * in the DONE routine. */ if (HeapTupleIsValid(tuple)) { /* * We need to set typmod for the executor to understand * its type we just blessed. */ HeapTupleHeaderSetTypMod(tuple->t_data, funcctx->tuple_desc->tdtypmod); result = HeapTupleGetDatum(tuple); SRF_RETURN_NEXT(funcctx, result); } } /* * If we got results from dispatch, return all the tuples. 
 */
	while (context->currentResult < context->numSegResults)
	{
		Datum		values[NUM_ERRORTABLE_ATTR];
		bool		isnull[NUM_ERRORTABLE_ATTR];
		PGresult   *segres = context->segResults[context->currentResult];
		int			row = context->currentRow;

		if (row >= PQntuples(segres))
		{
			context->currentRow = 0;
			context->currentResult++;
			continue;
		}
		context->currentRow++;

		MemSet(isnull, false, sizeof(isnull));

		values[0] = ResultToDatum(segres, row, 0, timestamptz_in, &isnull[0]);
		values[1] = ResultToDatum(segres, row, 1, textin, &isnull[1]);
		values[2] = ResultToDatum(segres, row, 2, textin, &isnull[2]);
		values[3] = ResultToDatum(segres, row, 3, int4in, &isnull[3]);
		values[4] = ResultToDatum(segres, row, 4, int4in, &isnull[4]);
		values[5] = ResultToDatum(segres, row, 5, textin, &isnull[5]);
		values[6] = ResultToDatum(segres, row, 6, textin, &isnull[6]);
		values[7] = ResultToDatum(segres, row, 7, byteain, &isnull[7]);

		tuple = heap_form_tuple(funcctx->tuple_desc, values, isnull);
		result = HeapTupleGetDatum(tuple);
		SRF_RETURN_NEXT(funcctx, result);
	}

	if (context->segResults != NULL)
	{
		int			i;

		for (i = 0; i < context->numSegResults; i++)
			PQclear(context->segResults[i]);

		/* XXX: better to copy to palloc'ed area */
		free(context->segResults);
	}

	/*
	 * Close the file, if we have opened it.
	 */
	if (context->fp != NULL)
	{
		FreeFile(context->fp);
		context->fp = NULL;
	}

	SRF_RETURN_DONE(funcctx);
}
int main(int argc, char *argv[]) { /* First, we need to take in input from the items file. */ if (argc != 6) { printf("Usage: workload total alpha beta gamma delta\n"); printf("The values for alpha, beta, gamma and delta need to be integers that sum to 100.\n"); exit(0); } int i, j, k, numrecs; // These are the parameters that come from the user. int total, nalpha, nbeta, ngamma, ndelta; // These represent thresholds. int talpha, tbeta, tgamma, tdelta; // These are derived parameters from the database. int alphacells, betacells, gammacells, deltacells; char **recs; PGconn *psql; AttributeInfo *head_alpha, *head_beta, *head_gamma, *head_delta; AttributeInfo *tail_alpha, *tail_beta, *tail_gamma, *tail_delta; AttributeInfo **alpha, **beta, **gamma, **delta; head_alpha = NULL; head_beta = NULL; head_gamma = NULL; head_delta = NULL; tail_alpha = NULL; tail_beta = NULL; tail_gamma = NULL; tail_delta = NULL; // Storing our parameters. total = atoi(argv[1]); nalpha = atoi(argv[2]); nbeta = atoi(argv[3]); ngamma = atoi(argv[4]); ndelta = atoi(argv[5]); // Establish thresholds for our RNG. tdelta = 100 - ndelta; tgamma = tdelta - ngamma; tbeta = tgamma - nbeta; talpha = 0; if (nalpha+nbeta+ngamma+ndelta != 100) { printf("The values for alpha, beta, gamma and delta need to be integers that sum to 100.\n"); exit(0); } // Seeding our RNG. srand(time(NULL)); // We start off by getting a recommender list. recs = recommenderList(&numrecs); printf("Numrecs: %d\n",numrecs); /* Connect to the database. */ psql = PQconnectdb("host = 'localhost' port = '5432' dbname = 'recathon'"); if (PQstatus(psql) != CONNECTION_OK) printf("bad conn\n"); printf("%s, %s, %s, %s, %s\n",PQdb(psql), PQuser(psql), PQpass(psql), PQhost(psql), PQport(psql)); if (psql == NULL) printf("connection failed\n"); // Next, we need to query the index of each recommender, to get the attribute information and // cell types. for (i = 0; i < numrecs; i++) { char *querystring, *celltype; PGresult *query; int rows, cols; AttributeInfo *newatt; querystring = (char*) malloc(1024*sizeof(char)); // Since we don't know all of the attributes, we need to request everything. sprintf(querystring,"select * from %sindex;",recs[i]); query = PQexec(psql,querystring); rows = PQntuples(query); cols = PQnfields(query); // A new AttributeInfo for each row. for (j = 0; j < rows; j++) { // Get query information. Cell type is attribute #8. Recommender-specific // attributes begin at #13. newatt = (AttributeInfo*) malloc(sizeof(AttributeInfo)); newatt->next = NULL; newatt->recname = (char*) malloc(128*sizeof(char)); sprintf(newatt->recname,"%s",recs[i]); newatt->numatts = cols - 12; newatt->attnames = (char**) malloc(newatt->numatts*sizeof(char*)); for (k = 0; k < newatt->numatts; k++) newatt->attnames[k] = (char*) malloc(64*sizeof(char)); newatt->attvalues = (char**) malloc(newatt->numatts*sizeof(char*)); for (k = 0; k < newatt->numatts; k++) newatt->attvalues[k] = (char*) malloc(64*sizeof(char)); celltype = PQgetvalue(query,j,7); if (strcmp(celltype,"Alpha") == 0) newatt->celltype = CELL_ALPHA; else if (strcmp(celltype,"Beta") == 0) newatt->celltype = CELL_BETA; else if (strcmp(celltype,"Gamma") == 0) newatt->celltype = CELL_GAMMA; else newatt->celltype = CELL_DELTA; // Get column information. for (k = 0; k < cols-12; k++) { sprintf(newatt->attnames[k],"%s",PQfname(query,k+12)); sprintf(newatt->attvalues[k],"%s",PQgetvalue(query,j,k+12)); } // With the item complete, we put it into the appropriate bucket. 
switch (newatt->celltype) { case CELL_ALPHA: if (!head_alpha) { head_alpha = newatt; tail_alpha = newatt; } else { tail_alpha->next = newatt; tail_alpha = newatt; } break; case CELL_BETA: if (!head_beta) { head_beta = newatt; tail_beta = newatt; } else { tail_beta->next = newatt; tail_beta = newatt; } break; case CELL_GAMMA: if (!head_gamma) { head_gamma = newatt; tail_gamma = newatt; } else { tail_gamma->next = newatt; tail_gamma = newatt; } break; default: if (!head_delta) { head_delta = newatt; tail_delta = newatt; } else { tail_delta->next = newatt; tail_delta = newatt; } break; } } PQclear(query); free(querystring); } // For easy randomization, we should flatten our AttributeInfo lists. alpha = flatten(head_alpha, &alphacells); beta = flatten(head_beta, &betacells); gamma = flatten(head_gamma, &gammacells); delta = flatten(head_delta, &deltacells); // DEBUG: loop through the lists of alpha/beta/gamma/delta cells and print info. if (DEBUG) { printf("--- ALPHA CELLS ---\n"); printCellList(alpha, alphacells); printf("--- BETA CELLS ---\n"); printCellList(beta, betacells); printf("--- GAMMA CELLS ---\n"); printCellList(gamma, gammacells); printf("--- DELTA CELLS ---\n"); printCellList(delta, deltacells); } // One more thing we need to do is obtain a list of users that will work for // each AttributeInfo. We can semi-randomize by sorting based on zip code. addUsers(alpha, alphacells, psql); addUsers(beta, betacells, psql); addUsers(gamma, gammacells, psql); addUsers(delta, deltacells, psql); // Now to issue the given number of queries, with the frequencies established // probabilistically. for (i = 0; i < total; i++) { int randnum, randatt, randuser, userid; recathon_cell celltype; bool valid = false; PGresult *workquery; char *qstring; AttributeInfo *queryatt; // It's possible one of our buckets will have nothing in it, so // we need to continue choosing until we get something valid. while (!valid) { // A RNG chooses which kind of cell we work with. randnum = rand() % 100; if (randnum < tbeta) { if (alphacells > 0) { valid = true; celltype = CELL_ALPHA; } } else if (randnum < tgamma) { if (betacells > 0) { valid = true; celltype = CELL_BETA; } } else if (randnum < tdelta) { if (gammacells > 0) { valid = true; celltype = CELL_GAMMA; } } else { if (deltacells > 0) { valid = true; celltype = CELL_DELTA; } } } // Depending on our cell type, we'll have a different set of possible // queries to issue; we can choose from the alpha, beta, gamma or delta // buckets. Which item we get is also random. switch (celltype) { case CELL_ALPHA: randatt = rand() % alphacells; queryatt = alpha[randatt]; break; case CELL_BETA: randatt = rand() % betacells; queryatt = beta[randatt]; break; case CELL_GAMMA: randatt = rand() % gammacells; queryatt = gamma[randatt]; break; default: randatt = rand() % deltacells; queryatt = delta[randatt]; break; } randuser = rand() % 10; userid = queryatt->valid_users[randuser]; qstring = (char*) malloc(1024*sizeof(char)); sprintf(qstring,"select itemid from %s recommend(10) userid=%d",queryatt->recname,userid); if (queryatt->numatts > 0) { strncat(qstring," and ",5); for (j = 0; j < queryatt->numatts; j++) { char addition[128]; sprintf(addition,"%s = '%s' ", queryatt->attnames[j],queryatt->attvalues[j]); strncat(qstring,addition,strlen(addition)); if (j+1 < queryatt->numatts) strncat(qstring,"and ",5); } } strncat(qstring,";",1); workquery = PQexec(psql,qstring); PQclear(workquery); free(qstring); } PQfinish(psql); }
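/*
 * Sketch (not part of the workload generator above): the queries here are
 * assembled with sprintf/strncat into a fixed 1024-byte buffer.  For plain
 * SQL, PQexecParams avoids both manual quoting and buffer-size concerns.
 * "ratings", "itemid" and "userid" are illustrative names only, and the
 * RECOMMEND syntax used above is Recathon-specific, so this is a general
 * libpq pattern rather than a drop-in replacement.
 */
#include <stdio.h>
#include <libpq-fe.h>

static int print_items_for_user(PGconn *psql, const char *userid)
{
	const char *params[1];
	PGresult   *res;
	int			i;

	params[0] = userid;
	res = PQexecParams(psql,
					   "SELECT itemid FROM ratings WHERE userid = $1 LIMIT 10",
					   1,		/* one parameter */
					   NULL,	/* let the server infer the parameter type */
					   params,
					   NULL, NULL,
					   0);		/* text-format results */
	if (PQresultStatus(res) != PGRES_TUPLES_OK)
	{
		fprintf(stderr, "query failed: %s", PQerrorMessage(psql));
		PQclear(res);
		return -1;
	}
	for (i = 0; i < PQntuples(res); i++)
		printf("%s\n", PQgetvalue(res, i, 0));
	PQclear(res);
	return 0;
}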
int main(int argc, char *argv[]) { static struct option long_options[] = { {"list", no_argument, NULL, 'l'}, {"host", required_argument, NULL, 'h'}, {"port", required_argument, NULL, 'p'}, {"username", required_argument, NULL, 'U'}, {"no-password", no_argument, NULL, 'w'}, {"password", no_argument, NULL, 'W'}, {"dbname", required_argument, NULL, 'd'}, {"echo", no_argument, NULL, 'e'}, {NULL, 0, NULL, 0} }; const char *progname; int optindex; int c; bool listlangs = false; const char *dbname = NULL; char *host = NULL; char *port = NULL; char *username = NULL; enum trivalue prompt_password = TRI_DEFAULT; bool echo = false; char *langname = NULL; char *p; PQExpBufferData sql; PGconn *conn; PGresult *result; progname = get_progname(argv[0]); set_pglocale_pgservice(argv[0], PG_TEXTDOMAIN("pgscripts")); handle_help_version_opts(argc, argv, "createlang", help); while ((c = getopt_long(argc, argv, "lh:p:U:wWd:e", long_options, &optindex)) != -1) { switch (c) { case 'l': listlangs = true; break; case 'h': host = optarg; break; case 'p': port = optarg; break; case 'U': username = optarg; break; case 'w': prompt_password = TRI_NO; break; case 'W': prompt_password = TRI_YES; break; case 'd': dbname = optarg; break; case 'e': echo = true; break; default: fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname); exit(1); } } if (argc - optind > 0) { if (listlangs) dbname = argv[optind++]; else { langname = argv[optind++]; if (argc - optind > 0) dbname = argv[optind++]; } } if (argc - optind > 0) { fprintf(stderr, _("%s: too many command-line arguments (first is \"%s\")\n"), progname, argv[optind]); fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname); exit(1); } if (dbname == NULL) { if (getenv("PGDATABASE")) dbname = getenv("PGDATABASE"); else if (getenv("PGUSER")) dbname = getenv("PGUSER"); else dbname = get_user_name(progname); } initPQExpBuffer(&sql); /* * List option */ if (listlangs) { printQueryOpt popt; static const bool translate_columns[] = {false, true}; conn = connectDatabase(dbname, host, port, username, prompt_password, progname); printfPQExpBuffer(&sql, "SELECT lanname as \"%s\", " "(CASE WHEN lanpltrusted THEN '%s' ELSE '%s' END) as \"%s\" " "FROM pg_catalog.pg_language WHERE lanispl;", gettext_noop("Name"), gettext_noop("yes"), gettext_noop("no"), gettext_noop("Trusted?")); result = executeQuery(conn, sql.data, progname, echo); memset(&popt, 0, sizeof(popt)); popt.topt.format = PRINT_ALIGNED; popt.topt.border = 1; popt.topt.start_table = true; popt.topt.stop_table = true; popt.topt.encoding = PQclientEncoding(conn); popt.title = _("Procedural Languages"); popt.translate_header = true; popt.translate_columns = translate_columns; printQuery(result, &popt, stdout, NULL); PQfinish(conn); exit(0); } if (langname == NULL) { fprintf(stderr, _("%s: missing required argument language name\n"), progname); fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname); exit(1); } for (p = langname; *p; p++) if (*p >= 'A' && *p <= 'Z') *p += ('a' - 'A'); conn = connectDatabase(dbname, host, port, username, prompt_password, progname); /* * Make sure the language isn't already installed */ printfPQExpBuffer(&sql, "SELECT oid FROM pg_catalog.pg_language WHERE lanname = '%s';", langname); result = executeQuery(conn, sql.data, progname, echo); if (PQntuples(result) > 0) { PQfinish(conn); fprintf(stderr, _("%s: language \"%s\" is already installed in database \"%s\"\n"), progname, langname, dbname); /* separate exit status for "already installed" */ 
exit(2); } PQclear(result); printfPQExpBuffer(&sql, "CREATE LANGUAGE \"%s\";\n", langname); if (echo) printf("%s", sql.data); result = PQexec(conn, sql.data); if (PQresultStatus(result) != PGRES_COMMAND_OK) { fprintf(stderr, _("%s: language installation failed: %s"), progname, PQerrorMessage(conn)); PQfinish(conn); exit(1); } PQclear(result); PQfinish(conn); exit(0); }
/* * get_rel_infos() * * gets the relinfos for all the user tables of the database referred * to by "db". * * NOTE: we assume that relations/entities with oids greater than * FirstNormalObjectId belong to the user */ static void get_rel_infos(ClusterInfo *cluster, DbInfo *dbinfo) { PGconn *conn = connectToServer(cluster, dbinfo->db_name); PGresult *res; RelInfo *relinfos; int ntups; int relnum; int num_rels = 0; char *nspname = NULL; char *relname = NULL; int i_spclocation, i_nspname, i_relname, i_oid, i_relfilenode; char query[QUERY_ALLOC]; /* * pg_largeobject contains user data that does not appear in pg_dumpall * --schema-only output, so we have to copy that system table heap and * index. We could grab the pg_largeobject oids from template1, but * it is easy to treat it as a normal table. * Order by oid so we can join old/new structures efficiently. */ snprintf(query, sizeof(query), "SELECT c.oid, n.nspname, c.relname, " " c.relfilenode, t.spclocation " "FROM pg_catalog.pg_class c JOIN pg_catalog.pg_namespace n " " ON c.relnamespace = n.oid " " LEFT OUTER JOIN pg_catalog.pg_tablespace t " " ON c.reltablespace = t.oid " "WHERE relkind IN ('r','t', 'i'%s) AND " " ((n.nspname NOT IN ('pg_catalog', 'information_schema', 'binary_upgrade') AND " " c.oid >= %u) " " OR (n.nspname = 'pg_catalog' AND " " relname IN ('pg_largeobject', 'pg_largeobject_loid_pn_index'%s) )) " /* we preserve pg_class.oid so we sort by it to match old/new */ "ORDER BY 1;", /* see the comment at the top of old_8_3_create_sequence_script() */ (GET_MAJOR_VERSION(old_cluster.major_version) <= 803) ? "" : ", 'S'", /* this oid allows us to skip system toast tables */ FirstNormalObjectId, /* does pg_largeobject_metadata need to be migrated? */ (GET_MAJOR_VERSION(old_cluster.major_version) <= 804) ? "" : ", 'pg_largeobject_metadata', 'pg_largeobject_metadata_oid_index'"); res = executeQueryOrDie(conn, query); ntups = PQntuples(res); relinfos = (RelInfo *) pg_malloc(sizeof(RelInfo) * ntups); i_oid = PQfnumber(res, "oid"); i_nspname = PQfnumber(res, "nspname"); i_relname = PQfnumber(res, "relname"); i_relfilenode = PQfnumber(res, "relfilenode"); i_spclocation = PQfnumber(res, "spclocation"); for (relnum = 0; relnum < ntups; relnum++) { RelInfo *curr = &relinfos[num_rels++]; const char *tblspace; curr->reloid = atooid(PQgetvalue(res, relnum, i_oid)); nspname = PQgetvalue(res, relnum, i_nspname); strlcpy(curr->nspname, nspname, sizeof(curr->nspname)); relname = PQgetvalue(res, relnum, i_relname); strlcpy(curr->relname, relname, sizeof(curr->relname)); curr->relfilenode = atooid(PQgetvalue(res, relnum, i_relfilenode)); tblspace = PQgetvalue(res, relnum, i_spclocation); /* if no table tablespace, use the database tablespace */ if (strlen(tblspace) == 0) tblspace = dbinfo->db_tblspace; strlcpy(curr->tablespace, tblspace, sizeof(curr->tablespace)); } PQclear(res); PQfinish(conn); dbinfo->rel_arr.rels = relinfos; dbinfo->rel_arr.nrels = num_rels; }
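/*
 * Sketch (not part of pg_upgrade): get_rel_infos() resolves result columns
 * by name with PQfnumber() instead of hard-coding positions, which keeps
 * the loop correct if the SELECT list is reordered.  The query below is a
 * simplified stand-in and the function name is hypothetical.
 */
#include <stdio.h>
#include <libpq-fe.h>

static void list_relations(PGconn *conn)
{
	PGresult   *res = PQexec(conn,
							 "SELECT n.nspname, c.relname "
							 "FROM pg_catalog.pg_class c "
							 "  JOIN pg_catalog.pg_namespace n ON c.relnamespace = n.oid "
							 "ORDER BY 1, 2");
	int			i_nspname, i_relname, row;

	if (PQresultStatus(res) != PGRES_TUPLES_OK)
	{
		fprintf(stderr, "query failed: %s", PQerrorMessage(conn));
		PQclear(res);
		return;
	}
	i_nspname = PQfnumber(res, "nspname");
	i_relname = PQfnumber(res, "relname");
	for (row = 0; row < PQntuples(res); row++)
		printf("%s.%s\n",
			   PQgetvalue(res, row, i_nspname),
			   PQgetvalue(res, row, i_relname));
	PQclear(res);
}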
/************************************************************************* * * Function: sql_query * * Purpose: Issue a query to the database * *************************************************************************/ static int sql_query(SQLSOCK * sqlsocket, SQL_CONFIG *config, char *querystr) { rlm_sql_postgres_sock *pg_sock = sqlsocket->conn; int numfields = 0; char *errorcode; char *errormsg; if (config->sqltrace) radlog(L_DBG,"rlm_sql_postgresql: query:\n%s", querystr); if (pg_sock->conn == NULL) { radlog(L_ERR, "rlm_sql_postgresql: Socket not connected"); return SQL_DOWN; } pg_sock->result = PQexec(pg_sock->conn, querystr); /* * Returns a PGresult pointer or possibly a null pointer. * A non-null pointer will generally be returned except in * out-of-memory conditions or serious errors such as inability * to send the command to the server. If a null pointer is * returned, it should be treated like a PGRES_FATAL_ERROR * result. */ if (!pg_sock->result) { radlog(L_ERR, "rlm_sql_postgresql: PostgreSQL Query failed Error: %s", PQerrorMessage(pg_sock->conn)); /* As this error COULD be a connection error OR an out-of-memory * condition return value WILL be wrong SOME of the time regardless! * Pick your poison.... */ return SQL_DOWN; } else { ExecStatusType status = PQresultStatus(pg_sock->result); radlog(L_DBG, "rlm_sql_postgresql: Status: %s", PQresStatus(status)); switch (status) { case PGRES_COMMAND_OK: /*Successful completion of a command returning no data.*/ /*affected_rows function only returns the number of affected rows of a command returning no data... */ pg_sock->affected_rows = affected_rows(pg_sock->result); radlog(L_DBG, "rlm_sql_postgresql: query affected rows = %i", pg_sock->affected_rows); return 0; break; case PGRES_TUPLES_OK: /*Successful completion of a command returning data (such as a SELECT or SHOW).*/ pg_sock->cur_row = 0; pg_sock->affected_rows = PQntuples(pg_sock->result); numfields = PQnfields(pg_sock->result); /*Check row storing functions..*/ radlog(L_DBG, "rlm_sql_postgresql: query affected rows = %i , fields = %i", pg_sock->affected_rows, numfields); return 0; break; case PGRES_BAD_RESPONSE: /*The server's response was not understood.*/ radlog(L_DBG, "rlm_sql_postgresql: Bad Response From Server!!"); return -1; break; case PGRES_NONFATAL_ERROR: /*A nonfatal error (a notice or warning) occurred. Possibly never returns*/ return -1; break; case PGRES_FATAL_ERROR: #if defined(PG_DIAG_SQLSTATE) && defined(PG_DIAG_MESSAGE_PRIMARY) /*A fatal error occurred.*/ errorcode = PQresultErrorField(pg_sock->result, PG_DIAG_SQLSTATE); errormsg = PQresultErrorField(pg_sock->result, PG_DIAG_MESSAGE_PRIMARY); radlog(L_DBG, "rlm_sql_postgresql: Error %s", errormsg); return check_fatal_error(errorcode); #endif break; default: /* FIXME: An unhandled error occurred.*/ /* PGRES_EMPTY_QUERY PGRES_COPY_OUT PGRES_COPY_IN */ return -1; break; } /* Note to self ... sql_store_result returns 0 anyway after setting the sqlsocket->affected_rows.. sql_num_fields returns 0 at worst case which means the check below has a really small chance to return false.. lets remove it then .. yuck!! */ /* } else { if ((sql_store_result(sqlsocket, config) == 0) && (sql_num_fields(sqlsocket, config) >= 0)) return 0; else return -1; } */ } return -1; }
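/*
 * Sketch (not part of the FreeRADIUS module above): the PGRES_FATAL_ERROR
 * branch reads the SQLSTATE and primary message out of the failed result.
 * The standalone helper below shows that inspection on its own; it assumes
 * a libpq that defines the PG_DIAG_* field codes, and the function name is
 * hypothetical.
 */
#include <stdio.h>
#include <libpq-fe.h>

static void report_error(const PGresult *res)
{
	const char *sqlstate = PQresultErrorField(res, PG_DIAG_SQLSTATE);
	const char *primary = PQresultErrorField(res, PG_DIAG_MESSAGE_PRIMARY);

	/* PQresultErrorField() may return NULL for fields the server omitted. */
	fprintf(stderr, "SQLSTATE=%s: %s\n",
			sqlstate ? sqlstate : "?????",
			primary ? primary : PQresultErrorMessage(res));
}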
/** \brief Add a new license to license_ref table Adds a license to license_ref table. @param licenseName Name of license @return rf_pk for success, 0 for failure */ FUNCTION long add2license_ref(char *licenseName) { PGresult *result; char query[myBUFSIZ]; char insert[myBUFSIZ]; char escLicName[myBUFSIZ]; char *specialLicenseText; long rf_pk; int len; int error; int numRows; // escape the name len = strlen(licenseName); PQescapeStringConn(gl.pgConn, escLicName, licenseName, len, &error); if (error) LOG_WARNING("Does license name %s have multibyte encoding?", licenseName) /* verify the license is not already in the table */ sprintf(query, "SELECT rf_pk FROM " LICENSE_REF_TABLE " where rf_shortname='%s'", escLicName); result = PQexec(gl.pgConn, query); if (fo_checkPQresult(gl.pgConn, result, query, __FILE__, __LINE__)) return 0; numRows = PQntuples(result); if (numRows) { rf_pk = atol(PQgetvalue(result, 0, 0)); PQclear(result); return rf_pk; } PQclear(result); /* Insert the new license */ specialLicenseText = "License by Nomos."; sprintf(insert, "insert into license_ref(rf_shortname, rf_text, rf_detector_type) values('%s', '%s', 2)", escLicName, specialLicenseText); result = PQexec(gl.pgConn, insert); // ignore duplicate constraint failure (23505), report others if ((result == 0) || ((PQresultStatus(result) != PGRES_COMMAND_OK) && (strncmp(PG_ERRCODE_UNIQUE_VIOLATION, PQresultErrorField(result, PG_DIAG_SQLSTATE), 5)))) { printf("ERROR: %s(%d): Nomos failed to add a new license. %s\n%s\n", __FILE__, __LINE__, PQresultErrorMessage(result), insert); PQclear(result); return (0); } PQclear(result); /* retrieve the new rf_pk */ result = PQexec(gl.pgConn, query); if (fo_checkPQresult(gl.pgConn, result, query, __FILE__, __LINE__)) return 0; numRows = PQntuples(result); if (numRows) rf_pk = atol(PQgetvalue(result, 0, 0)); else { printf("ERROR: %s:%s:%d Just inserted value is missing. On: %s\n", __FILE__, "add2license_ref()", __LINE__, query); PQclear(result); return (0); } PQclear(result); return (rf_pk); }
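/*
 * Sketch (not the FOSSology implementation): add2license_ref() inserts the
 * row and then re-runs the SELECT to learn the new rf_pk.  On servers new
 * enough to support INSERT ... RETURNING (8.2 and later) the extra round
 * trip can be dropped, and passing the name as a parameter removes the need
 * for PQescapeStringConn().  Table and column names follow the code above;
 * the helper name is hypothetical.
 */
#include <stdio.h>
#include <stdlib.h>
#include <libpq-fe.h>

static long insert_license(PGconn *conn, const char *name)
{
	const char *params[1];
	PGresult   *res;
	long		rf_pk = 0;

	params[0] = name;
	res = PQexecParams(conn,
					   "INSERT INTO license_ref (rf_shortname, rf_text, rf_detector_type) "
					   "VALUES ($1, 'License by Nomos.', 2) RETURNING rf_pk",
					   1, NULL, params, NULL, NULL, 0);
	if (PQresultStatus(res) == PGRES_TUPLES_OK && PQntuples(res) == 1)
		rf_pk = atol(PQgetvalue(res, 0, 0));
	else
		fprintf(stderr, "insert failed: %s", PQresultErrorMessage(res));
	PQclear(res);
	return rf_pk;
}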
/***************************************************** * \brief Read a natural block of raster band data *****************************************************/ CPLErr PostGISRasterTileRasterBand::IReadBlock(CPL_UNUSED int nBlockXOff, CPL_UNUSED int nBlockYOff, void * pImage) { CPLString osCommand; PGresult * poResult = NULL; int nWKBLength = 0; int nPixelSize = GDALGetDataTypeSize(eDataType)/8; PostGISRasterTileDataset * poRTDS = (PostGISRasterTileDataset *)poDS; // Get by PKID if (poRTDS->poRDS->pszPrimaryKeyName) { osCommand.Printf("select st_band(%s, %d) from %s.%s where " "%s = '%s'", poRTDS->poRDS->pszColumn, nBand, poRTDS->poRDS->pszSchema, poRTDS->poRDS->pszTable, poRTDS->poRDS->pszPrimaryKeyName, poRTDS->pszPKID); } // Get by upperleft else { CPLLocaleC oCLocale; // Force C locale to avoid commas instead of decimal points (for QGIS e.g.) osCommand.Printf("select st_band(%s, %d) from %s.%s where " "abs(ST_UpperLeftX(%s) - %.8f) < 1e-8 and abs(ST_UpperLeftY(%s) - %.8f) < 1e-8", poRTDS->poRDS->pszColumn, nBand, poRTDS->poRDS->pszSchema, poRTDS->poRDS->pszTable, poRTDS->poRDS->pszColumn, poRTDS->adfGeoTransform[GEOTRSFRM_TOPLEFT_X], poRTDS->poRDS->pszColumn, poRTDS->adfGeoTransform[GEOTRSFRM_TOPLEFT_Y]); } poResult = PQexec(poRTDS->poRDS->poConn, osCommand.c_str()); #ifdef DEBUG_QUERY CPLDebug("PostGIS_Raster", "PostGISRasterTileRasterBand::IReadBlock(): " "Query = \"%s\" --> number of rows = %d", osCommand.c_str(), poResult ? PQntuples(poResult) : 0 ); #endif if (poResult == NULL || PQresultStatus(poResult) != PGRES_TUPLES_OK || PQntuples(poResult) <= 0) { if (poResult) PQclear(poResult); ReportError(CE_Failure, CPLE_AppDefined, "Error getting block of data (upperpixel = %f, %f)", poRTDS->adfGeoTransform[GEOTRSFRM_TOPLEFT_X], poRTDS->adfGeoTransform[GEOTRSFRM_TOPLEFT_Y]); return CE_Failure; } // TODO: Check this if (bIsOffline) { CPLError(CE_Failure, CPLE_AppDefined, "This raster has outdb " "storage. This feature isn't still available"); PQclear(poResult); return CE_Failure; } /* Copy only data size, without payload */ int nExpectedDataSize = nBlockXSize * nBlockYSize * nPixelSize; GByte * pbyData = CPLHexToBinary(PQgetvalue(poResult, 0, 0), &nWKBLength); int nExpectedWKBLength = RASTER_HEADER_SIZE + BAND_SIZE(nPixelSize, nExpectedDataSize); CPLErr eRet = CE_None; if( nWKBLength != nExpectedWKBLength ) { CPLDebug("PostGIS_Raster", "nWKBLength=%d, nExpectedWKBLength=%d", nWKBLength, nExpectedWKBLength ); eRet = CE_Failure; } else { GByte * pbyDataToRead = (GByte*)GET_BAND_DATA(pbyData,1, nPixelSize, nExpectedDataSize); // Do byte-swapping if necessary */ int bIsLittleEndian = (pbyData[0] == 1); #ifdef CPL_LSB int bSwap = !bIsLittleEndian; #else int bSwap = bIsLittleEndian; #endif if( bSwap && nPixelSize > 1 ) { GDALSwapWords( pbyDataToRead, nPixelSize, nBlockXSize * nBlockYSize, nPixelSize ); } memcpy(pImage, pbyDataToRead, nExpectedDataSize); } CPLFree(pbyData); PQclear(poResult); return eRet; }
int msPOSTGRESQLJoinNext(joinObj *join) { msPOSTGRESQLJoinInfo *joininfo = join->joininfo; int i, length, row_count; char *sql, *columns; /* We need a connection, and a join value. */ if(!joininfo || !joininfo->conn) { msSetError(MS_JOINERR, "Join has not been connected.\n", "msPOSTGRESQLJoinNext()"); return MS_FAILURE; } if(!joininfo->from_value) { msSetError(MS_JOINERR, "Join has not been prepared.\n", "msPOSTGRESQLJoinNext()"); return MS_FAILURE; } /* Free the previous results. */ if(join->values) { msFreeCharArray(join->values, join->numitems); join->values = NULL; } /* We only need to execute the query if no results exist. */ if(!joininfo->query_result) { /* Write the list of column names. */ length = 0; for(i = 0; i < join->numitems; i++) { length += 8 + strlen(join->items[i]) + 2; } columns = (char *)malloc(length); if(!columns) { msSetError(MS_MEMERR, "Failure to malloc.\n", "msPOSTGRESQLJoinNext()"); return MS_FAILURE; } strcpy(columns, ""); for(i = 0; i < join->numitems; i++) { strcat(columns, "\""); strcat(columns, join->items[i]); strcat(columns, "\"::text"); if(i != join->numitems - 1) { strcat(columns, ", "); } } /* Create the query string. */ sql = (char *)malloc(26 + strlen(columns) + strlen(join->table) + strlen(join->to) + strlen(joininfo->from_value)); if(!sql) { msSetError(MS_MEMERR, "Failure to malloc.\n", "msPOSTGRESQLJoinNext()"); return MS_FAILURE; } sprintf(sql, "SELECT %s FROM %s WHERE %s = '%s'", columns, join->table, join->to, joininfo->from_value); if(joininfo->layer_debug) { msDebug("msPOSTGRESQLJoinNext(): executing %s.\n", sql); } free(columns); joininfo->query_result = PQexec(joininfo->conn, sql); if(!joininfo->query_result || PQresultStatus(joininfo->query_result) != PGRES_TUPLES_OK) { msSetError(MS_QUERYERR, "Error executing query %s: %s\n", "msPOSTGRESQLJoinNext()", sql, PQerrorMessage(joininfo->conn)); if(joininfo->query_result) { PQclear(joininfo->query_result); joininfo->query_result = NULL; } free(sql); return MS_FAILURE; } free(sql); } row_count = PQntuples(joininfo->query_result); /* see if we're done processing this set */ if(joininfo->row_num >= row_count) { return(MS_DONE); } if(joininfo->layer_debug) { msDebug("msPOSTGRESQLJoinNext(): fetching row %ld.\n", (long) joininfo->row_num); } /* Copy the resulting values into the joinObj. */ join->values = (char **)malloc(sizeof(char *) * join->numitems); for(i = 0; i < join->numitems; i++) { join->values[i] = msStrdup(PQgetvalue( joininfo->query_result, joininfo->row_num, i)); } joininfo->row_num++; return MS_SUCCESS; }
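/*
 * Sketch (not part of MapServer): the column list above is built by wrapping
 * each item in double quotes by hand.  libpq 9.0 and later provide
 * PQescapeIdentifier(), which also handles embedded quotes.  The helper
 * below is hypothetical and keeps a caller-supplied fixed buffer, mirroring
 * the style of the surrounding code.
 */
#include <string.h>
#include <libpq-fe.h>

static void append_quoted_columns(PGconn *conn, char *buf, size_t bufsize,
								  char **items, int numitems)
{
	int			i;

	buf[0] = '\0';
	for (i = 0; i < numitems; i++)
	{
		char	   *quoted = PQescapeIdentifier(conn, items[i], strlen(items[i]));

		if (quoted == NULL)
			return;				/* out of memory or encoding problem */
		if (strlen(buf) + strlen(quoted) + 3 < bufsize)
		{
			if (i > 0)
				strcat(buf, ", ");
			strcat(buf, quoted);
		}
		PQfreemem(quoted);
	}
}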
/*+++++++++++++++++++++++++ Main Program or Function +++++++++++++++*/ void SCIA_WR_SQL_CH4_TILE( PGconn *conn, const char *prodName, unsigned int num_rec, const struct imap_rec *rec ) { register unsigned int nr; register unsigned int affectedRows = 0u; char sql_query[SQL_STR_SIZE], cbuff[SQL_STR_SIZE]; char *pntr; int nrow, numChar, meta_id; long long tile_id; PGresult *res; /* * check if product is already in database */ (void) snprintf( sql_query, SQL_STR_SIZE, "SELECT pk_meta FROM %s WHERE name=\'%s\'", META_TBL_NAME, prodName ); res = PQexec( conn, sql_query ); if ( PQresultStatus( res ) != PGRES_TUPLES_OK ) { NADC_GOTO_ERROR( NADC_ERR_SQL, PQresultErrorMessage(res) ); } if ( (nrow = PQntuples( res )) == 0 ) { NADC_GOTO_ERROR( NADC_ERR_FATAL, prodName ); } pntr = PQgetvalue( res, 0, 0 ); meta_id = (int) strtol( pntr, (char **) NULL, 10 ); PQclear( res ); /* * Start a transaction block */ res = PQexec( conn, "BEGIN" ); if ( PQresultStatus( res ) != PGRES_COMMAND_OK ) NADC_GOTO_ERROR( NADC_ERR_SQL, PQresultErrorMessage(res) ); PQclear( res ); /* * insert all tiles in products */ for ( nr = 0; nr < num_rec; nr++ ) { /* obtain next value for serial pk_tile */ res = PQexec( conn, "SELECT nextval(\'tile_imap_ch4_pk_tile_seq\')" ); if ( PQresultStatus( res ) != PGRES_TUPLES_OK ) NADC_GOTO_ERROR( NADC_ERR_SQL, PQresultErrorMessage(res) ); pntr = PQgetvalue( res, 0, 0 ); tile_id = strtoll( pntr, (char **) NULL, 10 ); PQclear( res ); numChar = snprintf( sql_query, SQL_STR_SIZE, SQL_INSERT_TILE, TILE_TBL_NAME, tile_id, meta_id, rec[nr].jday, NINT(16 * rec[nr].meta.intg_time), rec[nr].meta.elev, rec[nr].ch4_vcd, rec[nr].ch4_error, rec[nr].co2_vcd, rec[nr].co2_error, rec[nr].ch4_vmr, rec[nr].lon_corner[0],rec[nr].lat_corner[0], rec[nr].lon_corner[1],rec[nr].lat_corner[1], rec[nr].lon_corner[2],rec[nr].lat_corner[2], rec[nr].lon_corner[3],rec[nr].lat_corner[3], rec[nr].lon_corner[0],rec[nr].lat_corner[0] ); (void) fprintf( stderr, "%s [%-d]\n", sql_query, numChar ); if ( numChar >= SQL_STR_SIZE ) NADC_RETURN_ERROR( NADC_ERR_STRLEN, "sql_query" ); res = PQexec( conn, sql_query ); if ( PQresultStatus( res ) != PGRES_COMMAND_OK ) { NADC_ERROR( NADC_ERR_SQL, PQresultErrorMessage(res) ); PQclear( res ); res = PQexec( conn, "ROLLBACK" ); if ( PQresultStatus( res ) != PGRES_COMMAND_OK ) NADC_ERROR( NADC_ERR_SQL, PQresultErrorMessage(res) ); goto done; } PQclear( res ); affectedRows += 1; } /* * end the transaction */ res = PQexec( conn, "COMMIT" ); if ( PQresultStatus( res ) != PGRES_COMMAND_OK ) NADC_ERROR( NADC_ERR_SQL, PQresultErrorMessage(res) ); done: PQclear( res ); (void) snprintf( cbuff, SQL_STR_SIZE, "affectedRows=%-u", affectedRows ); NADC_ERROR( NADC_ERR_NONE, cbuff ); }
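/*
 * Sketch (not part of the SCIA writer above): the per-tile INSERTs are
 * bracketed by BEGIN/COMMIT, with ROLLBACK on the first failure.  The two
 * hypothetical helpers below show that bracket with nothing but libpq.
 */
#include <stdio.h>
#include <libpq-fe.h>

/* Execute one command; return 1 on PGRES_COMMAND_OK, 0 otherwise. */
static int run_command(PGconn *conn, const char *sql)
{
	PGresult   *res = PQexec(conn, sql);
	int			ok = (PQresultStatus(res) == PGRES_COMMAND_OK);

	if (!ok)
		fprintf(stderr, "%s failed: %s", sql, PQresultErrorMessage(res));
	PQclear(res);
	return ok;
}

/* Run every statement inside one transaction; roll back on the first error. */
static int insert_all(PGconn *conn, const char **inserts, int n)
{
	int			i;

	if (!run_command(conn, "BEGIN"))
		return 0;
	for (i = 0; i < n; i++)
	{
		if (!run_command(conn, inserts[i]))
		{
			run_command(conn, "ROLLBACK");
			return 0;
		}
	}
	return run_command(conn, "COMMIT");
}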
//[-------------------------------------------------------] //[ Public virtual PLDatabase::DatabaseQueryResult functions ] //[-------------------------------------------------------] bool DatabaseQueryResult::IsEmpty() const { return !PQntuples(m_pPostgreSQLResult); }
int PQninstances(PortalBuffer *portal) { return(PQntuples(portal)); }
int main(int argc, char **argv) { PGconn *conn; PQExpBufferData sql; PGresult *res; PGresult *pkrel_res; PGresult *fkrel_res; char *fk_relname; char *fk_nspname; char *fk_attname; char *pk_relname; char *pk_nspname; int fk, pk; /* loop counters */ if (argc != 2) { fprintf(stderr, "Usage: %s database\n", argv[0]); exit(EXIT_FAILURE); } initPQExpBuffer(&sql); appendPQExpBuffer(&sql, "dbname=%s", argv[1]); conn = PQconnectdb(sql.data); if (PQstatus(conn) == CONNECTION_BAD) { fprintf(stderr, "connection error: %s\n", PQerrorMessage(conn)); exit(EXIT_FAILURE); } /* Get a list of relations that have OIDs */ printfPQExpBuffer(&sql, "%s", "SET search_path = public;" "SELECT c.relname, (SELECT nspname FROM " "pg_catalog.pg_namespace n WHERE n.oid = c.relnamespace) AS nspname " "FROM pg_catalog.pg_class c " "WHERE c.relkind = 'r' " "AND c.relhasoids " "ORDER BY nspname, c.relname" ); res = PQexec(conn, sql.data); if (!res || PQresultStatus(res) != PGRES_TUPLES_OK) { fprintf(stderr, "sql error: %s\n", PQerrorMessage(conn)); exit(EXIT_FAILURE); } pkrel_res = res; /* Get a list of columns of OID type (or any OID-alias type) */ printfPQExpBuffer(&sql, "%s", "SELECT c.relname, " "(SELECT nspname FROM pg_catalog.pg_namespace n WHERE n.oid = c.relnamespace) AS nspname, " "a.attname " "FROM pg_catalog.pg_class c, pg_catalog.pg_attribute a " "WHERE a.attnum > 0 AND c.relkind = 'r' " "AND a.attrelid = c.oid " "AND a.atttypid IN ('pg_catalog.oid'::regtype, " " 'pg_catalog.regclass'::regtype, " " 'pg_catalog.regoper'::regtype, " " 'pg_catalog.regoperator'::regtype, " " 'pg_catalog.regproc'::regtype, " " 'pg_catalog.regprocedure'::regtype, " " 'pg_catalog.regtype'::regtype, " " 'pg_catalog.regconfig'::regtype, " " 'pg_catalog.regdictionary'::regtype) " "ORDER BY nspname, c.relname, a.attnum" ); res = PQexec(conn, sql.data); if (!res || PQresultStatus(res) != PGRES_TUPLES_OK) { fprintf(stderr, "sql error: %s\n", PQerrorMessage(conn)); exit(EXIT_FAILURE); } fkrel_res = res; /* * For each column and each relation-having-OIDs, look to see if the * column contains any values matching entries in the relation. 
*/ for (fk = 0; fk < PQntuples(fkrel_res); fk++) { fk_relname = PQgetvalue(fkrel_res, fk, 0); fk_nspname = PQgetvalue(fkrel_res, fk, 1); fk_attname = PQgetvalue(fkrel_res, fk, 2); for (pk = 0; pk < PQntuples(pkrel_res); pk++) { pk_relname = PQgetvalue(pkrel_res, pk, 0); pk_nspname = PQgetvalue(pkrel_res, pk, 1); printfPQExpBuffer(&sql, "SELECT 1 " "FROM \"%s\".\"%s\" t1, " "\"%s\".\"%s\" t2 " "WHERE t1.\"%s\"::pg_catalog.oid = t2.oid " "LIMIT 1", fk_nspname, fk_relname, pk_nspname, pk_relname, fk_attname); res = PQexec(conn, sql.data); if (!res || PQresultStatus(res) != PGRES_TUPLES_OK) { fprintf(stderr, "sql error: %s\n", PQerrorMessage(conn)); exit(EXIT_FAILURE); } if (PQntuples(res) != 0) printf("Join %s.%s.%s => %s.%s.oid\n", fk_nspname, fk_relname, fk_attname, pk_nspname, pk_relname); PQclear(res); } } PQclear(fkrel_res); /* Now, do the same for referencing columns that are arrays */ /* Get a list of columns of OID-array type (or any OID-alias type) */ printfPQExpBuffer(&sql, "%s", "SELECT c.relname, " "(SELECT nspname FROM pg_catalog.pg_namespace n WHERE n.oid = c.relnamespace) AS nspname, " "a.attname " "FROM pg_catalog.pg_class c, pg_catalog.pg_attribute a " "WHERE a.attnum > 0 AND c.relkind = 'r' " "AND a.attrelid = c.oid " "AND a.atttypid IN ('pg_catalog.oid[]'::regtype, " " 'pg_catalog.regclass[]'::regtype, " " 'pg_catalog.regoper[]'::regtype, " " 'pg_catalog.regoperator[]'::regtype, " " 'pg_catalog.regproc[]'::regtype, " " 'pg_catalog.regprocedure[]'::regtype, " " 'pg_catalog.regtype[]'::regtype, " " 'pg_catalog.regconfig[]'::regtype, " " 'pg_catalog.regdictionary[]'::regtype) " "ORDER BY nspname, c.relname, a.attnum" ); res = PQexec(conn, sql.data); if (!res || PQresultStatus(res) != PGRES_TUPLES_OK) { fprintf(stderr, "sql error: %s\n", PQerrorMessage(conn)); exit(EXIT_FAILURE); } fkrel_res = res; /* * For each column and each relation-having-OIDs, look to see if the * column contains any values matching entries in the relation. */ for (fk = 0; fk < PQntuples(fkrel_res); fk++) { fk_relname = PQgetvalue(fkrel_res, fk, 0); fk_nspname = PQgetvalue(fkrel_res, fk, 1); fk_attname = PQgetvalue(fkrel_res, fk, 2); for (pk = 0; pk < PQntuples(pkrel_res); pk++) { pk_relname = PQgetvalue(pkrel_res, pk, 0); pk_nspname = PQgetvalue(pkrel_res, pk, 1); printfPQExpBuffer(&sql, "SELECT 1 " "FROM \"%s\".\"%s\" t1, " "\"%s\".\"%s\" t2 " "WHERE t2.oid = ANY(t1.\"%s\")" "LIMIT 1", fk_nspname, fk_relname, pk_nspname, pk_relname, fk_attname); res = PQexec(conn, sql.data); if (!res || PQresultStatus(res) != PGRES_TUPLES_OK) { fprintf(stderr, "sql error: %s\n", PQerrorMessage(conn)); exit(EXIT_FAILURE); } if (PQntuples(res) != 0) printf("Join %s.%s.%s []=> %s.%s.oid\n", fk_nspname, fk_relname, fk_attname, pk_nspname, pk_relname); PQclear(res); } } PQclear(fkrel_res); PQclear(pkrel_res); PQfinish(conn); termPQExpBuffer(&sql); exit(EXIT_SUCCESS); }
void PQdisplayTuples(const PGresult *res, FILE *fp, /* where to send the output */ int fillAlign, /* pad the fields with spaces */ const char *fieldSep, /* field separator */ int printHeader, /* display headers? */ int quiet ) { #define DEFAULT_FIELD_SEP " " int i, j; int nFields; int nTuples; int *fLength = NULL; if (fieldSep == NULL) fieldSep = DEFAULT_FIELD_SEP; /* Get some useful info about the results */ nFields = PQnfields(res); nTuples = PQntuples(res); if (fp == NULL) fp = stdout; /* Figure the field lengths to align to */ /* will be somewhat time consuming for very large results */ if (fillAlign) { fLength = (int *) malloc(nFields * sizeof(int)); if (!fLength) { fprintf(stderr, libpq_gettext("out of memory\n")); exit(1); } for (j = 0; j < nFields; j++) { fLength[j] = strlen(PQfname(res, j)); for (i = 0; i < nTuples; i++) { int flen = PQgetlength(res, i, j); if (flen > fLength[j]) fLength[j] = flen; } } } if (printHeader) { /* first, print out the attribute names */ for (i = 0; i < nFields; i++) { fputs(PQfname(res, i), fp); if (fillAlign) fill(strlen(PQfname(res, i)), fLength[i], ' ', fp); fputs(fieldSep, fp); } fprintf(fp, "\n"); /* Underline the attribute names */ for (i = 0; i < nFields; i++) { if (fillAlign) fill(0, fLength[i], '-', fp); fputs(fieldSep, fp); } fprintf(fp, "\n"); } /* next, print out the instances */ for (i = 0; i < nTuples; i++) { for (j = 0; j < nFields; j++) { fprintf(fp, "%s", PQgetvalue(res, i, j)); if (fillAlign) fill(strlen(PQgetvalue(res, i, j)), fLength[j], ' ', fp); fputs(fieldSep, fp); } fprintf(fp, "\n"); } if (!quiet) fprintf(fp, "\nQuery returned %d row%s.\n", PQntuples(res), (PQntuples(res) == 1) ? "" : "s"); fflush(fp); if (fLength) free(fLength); }
/* * pqSetenvPoll * * Polls the process of passing the values of a standard set of environment * variables to the backend. */ PostgresPollingStatusType pqSetenvPoll(PGconn *conn) { PGresult *res; if (conn == NULL || conn->status == CONNECTION_BAD) return PGRES_POLLING_FAILED; /* Check whether there are any data for us */ switch (conn->setenv_state) { /* These are reading states */ case SETENV_STATE_OPTION_WAIT: case SETENV_STATE_QUERY1_WAIT: case SETENV_STATE_QUERY2_WAIT: { /* Load waiting data */ int n = pqReadData(conn); if (n < 0) goto error_return; if (n == 0) return PGRES_POLLING_READING; break; } /* These are writing states, so we just proceed. */ case SETENV_STATE_OPTION_SEND: case SETENV_STATE_QUERY1_SEND: case SETENV_STATE_QUERY2_SEND: break; /* Should we raise an error if called when not active? */ case SETENV_STATE_IDLE: return PGRES_POLLING_OK; default: printfPQExpBuffer(&conn->errorMessage, libpq_gettext( "invalid setenv state %c, " "probably indicative of memory corruption\n" ), conn->setenv_state); goto error_return; } /* We will loop here until there is nothing left to do in this call. */ for (;;) { switch (conn->setenv_state) { case SETENV_STATE_OPTION_SEND: { /* * Send SET commands for stuff directed by Environment * Options. Note: we assume that SET commands won't start * transaction blocks, even in a 7.3 server with * autocommit off. */ char setQuery[100]; /* note length limit in * sprintf below */ if (conn->next_eo->envName) { const char *val; if ((val = getenv(conn->next_eo->envName))) { if (pg_strcasecmp(val, "default") == 0) sprintf(setQuery, "SET %s = DEFAULT", conn->next_eo->pgName); else sprintf(setQuery, "SET %s = '%.60s'", conn->next_eo->pgName, val); #ifdef CONNECTDEBUG fprintf(stderr, "Use environment variable %s to send %s\n", conn->next_eo->envName, setQuery); #endif if (!PQsendQuery(conn, setQuery)) goto error_return; conn->setenv_state = SETENV_STATE_OPTION_WAIT; } else conn->next_eo++; } else { /* No more options to send, so move on to querying */ conn->setenv_state = SETENV_STATE_QUERY1_SEND; } break; } case SETENV_STATE_OPTION_WAIT: { if (PQisBusy(conn)) return PGRES_POLLING_READING; res = PQgetResult(conn); if (res) { if (PQresultStatus(res) != PGRES_COMMAND_OK) { PQclear(res); goto error_return; } PQclear(res); /* Keep reading until PQgetResult returns NULL */ } else { /* Query finished, so send the next option */ conn->next_eo++; conn->setenv_state = SETENV_STATE_OPTION_SEND; } break; } case SETENV_STATE_QUERY1_SEND: { /* * Issue query to get information we need. Here we must * use begin/commit in case autocommit is off by default * in a 7.3 server. * * Note: version() exists in all protocol-2.0-supporting * backends. In 7.3 it would be safer to write * pg_catalog.version(), but we can't do that without * causing problems on older versions. 
*/ if (!PQsendQuery(conn, "begin; select version(); end")) goto error_return; conn->setenv_state = SETENV_STATE_QUERY1_WAIT; return PGRES_POLLING_READING; } case SETENV_STATE_QUERY1_WAIT: { if (PQisBusy(conn)) return PGRES_POLLING_READING; res = PQgetResult(conn); if (res) { char *val; if (PQresultStatus(res) == PGRES_COMMAND_OK) { /* ignore begin/commit command results */ PQclear(res); continue; } if (PQresultStatus(res) != PGRES_TUPLES_OK || PQntuples(res) != 1) { PQclear(res); goto error_return; } /* * Extract server version and save as if * ParameterStatus */ val = PQgetvalue(res, 0, 0); if (val && strncmp(val, "PostgreSQL ", 11) == 0) { char *ptr; /* strip off PostgreSQL part */ val += 11; /* * strip off platform part (scribbles on result, * naughty naughty) */ ptr = strchr(val, ' '); if (ptr) *ptr = '\0'; pqSaveParameterStatus(conn, "server_version", val); } PQclear(res); /* Keep reading until PQgetResult returns NULL */ } else { /* Query finished, move to next */ conn->setenv_state = SETENV_STATE_QUERY2_SEND; } break; } case SETENV_STATE_QUERY2_SEND: { const char *query; /* * pg_client_encoding does not exist in pre-7.2 servers. * So we need to be prepared for an error here. Do *not* * start a transaction block, except in 7.3 servers where * we need to prevent autocommit-off from starting a * transaction anyway. */ if (conn->sversion >= 70300 && conn->sversion < 70400) query = "begin; select pg_catalog.pg_client_encoding(); end"; else query = "select pg_client_encoding()"; if (!PQsendQuery(conn, query)) goto error_return; conn->setenv_state = SETENV_STATE_QUERY2_WAIT; return PGRES_POLLING_READING; } case SETENV_STATE_QUERY2_WAIT: { if (PQisBusy(conn)) return PGRES_POLLING_READING; res = PQgetResult(conn); if (res) { const char *val; if (PQresultStatus(res) == PGRES_COMMAND_OK) { /* ignore begin/commit command results */ PQclear(res); continue; } if (PQresultStatus(res) == PGRES_TUPLES_OK && PQntuples(res) == 1) { /* Extract client encoding and save it */ val = PQgetvalue(res, 0, 0); if (val && *val) /* null should not happen, but */ pqSaveParameterStatus(conn, "client_encoding", val); } else { /* * Error: presumably function not available, so * use PGCLIENTENCODING or SQL_ASCII as the * fallback. */ val = getenv("PGCLIENTENCODING"); if (val && *val) pqSaveParameterStatus(conn, "client_encoding", val); else pqSaveParameterStatus(conn, "client_encoding", "SQL_ASCII"); } PQclear(res); /* Keep reading until PQgetResult returns NULL */ } else { /* Query finished, so we're done */ conn->setenv_state = SETENV_STATE_IDLE; return PGRES_POLLING_OK; } break; } default: printfPQExpBuffer(&conn->errorMessage, libpq_gettext("invalid state %c, " "probably indicative of memory corruption\n"), conn->setenv_state); goto error_return; } } /* Unreachable */ error_return: conn->setenv_state = SETENV_STATE_IDLE; return PGRES_POLLING_FAILED; }
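/*
 * Sketch (not libpq internals): pqSetenvPoll() is a state machine built on
 * PQsendQuery() plus repeated polling.  From an ordinary application the
 * same non-blocking pattern looks roughly like the two hypothetical helpers
 * below; waiting for PQsocket(conn) to become readable (via select() or
 * poll()) is left to the caller.
 */
#include <stdio.h>
#include <libpq-fe.h>

/* Kick off a query without blocking; returns 1 if it was dispatched. */
static int send_async(PGconn *conn, const char *sql)
{
	if (!PQsendQuery(conn, sql))
	{
		fprintf(stderr, "send failed: %s", PQerrorMessage(conn));
		return 0;
	}
	return 1;
}

/* Call when the socket is readable; returns 1 while more polling is needed. */
static int drain_results(PGconn *conn)
{
	PGresult   *res;

	if (!PQconsumeInput(conn))
	{
		fprintf(stderr, "read failed: %s", PQerrorMessage(conn));
		return 0;
	}
	if (PQisBusy(conn))
		return 1;				/* not ready yet; wait for more data */

	while ((res = PQgetResult(conn)) != NULL)
	{
		if (PQresultStatus(res) == PGRES_TUPLES_OK)
			printf("%d row(s)\n", PQntuples(res));
		PQclear(res);
	}
	return 0;					/* query complete */
}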
/* * PQprint() * * Format results of a query for printing. * * PQprintOpt is a typedef (structure) that containes * various flags and options. consult libpq-fe.h for * details * * This function should probably be removed sometime since psql * doesn't use it anymore. It is unclear to what extent this is used * by external clients, however. */ void PQprint(FILE *fout, const PGresult *res, const PQprintOpt *po) { int nFields; nFields = PQnfields(res); if (nFields > 0) { /* only print rows with at least 1 field. */ int i, j; int nTups; int *fieldMax = NULL; /* in case we don't use them */ unsigned char *fieldNotNum = NULL; char *border = NULL; char **fields = NULL; const char **fieldNames; int fieldMaxLen = 0; int numFieldName; int fs_len = strlen(po->fieldSep); int total_line_length = 0; int usePipe = 0; char *pagerenv; #if defined(ENABLE_THREAD_SAFETY) && !defined(WIN32) sigset_t osigset; bool sigpipe_masked = false; bool sigpipe_pending; #endif #if !defined(ENABLE_THREAD_SAFETY) && !defined(WIN32) pqsigfunc oldsigpipehandler = NULL; #endif #ifdef TIOCGWINSZ struct winsize screen_size; #else struct winsize { int ws_row; int ws_col; } screen_size; #endif nTups = PQntuples(res); if (!(fieldNames = (const char **) calloc(nFields, sizeof(char *)))) { fprintf(stderr, libpq_gettext("out of memory\n")); exit(1); } if (!(fieldNotNum = (unsigned char *) calloc(nFields, 1))) { fprintf(stderr, libpq_gettext("out of memory\n")); exit(1); } if (!(fieldMax = (int *) calloc(nFields, sizeof(int)))) { fprintf(stderr, libpq_gettext("out of memory\n")); exit(1); } for (numFieldName = 0; po->fieldName && po->fieldName[numFieldName]; numFieldName++) ; for (j = 0; j < nFields; j++) { int len; const char *s = (j < numFieldName && po->fieldName[j][0]) ? po->fieldName[j] : PQfname(res, j); fieldNames[j] = s; len = s ? strlen(s) : 0; fieldMax[j] = len; len += fs_len; if (len > fieldMaxLen) fieldMaxLen = len; total_line_length += len; } total_line_length += nFields * strlen(po->fieldSep) + 1; if (fout == NULL) fout = stdout; if (po->pager && fout == stdout && isatty(fileno(stdin)) && isatty(fileno(stdout))) { /* * If we think there'll be more than one screen of output, try to * pipe to the pager program. 
*/ #ifdef TIOCGWINSZ if (ioctl(fileno(stdout), TIOCGWINSZ, &screen_size) == -1 || screen_size.ws_col == 0 || screen_size.ws_row == 0) { screen_size.ws_row = 24; screen_size.ws_col = 80; } #else screen_size.ws_row = 24; screen_size.ws_col = 80; #endif pagerenv = getenv("PAGER"); if (pagerenv != NULL && pagerenv[0] != '\0' && !po->html3 && ((po->expanded && nTups * (nFields + 1) >= screen_size.ws_row) || (!po->expanded && nTups * (total_line_length / screen_size.ws_col + 1) * (1 + (po->standard != 0)) >= screen_size.ws_row - (po->header != 0) * (total_line_length / screen_size.ws_col + 1) * 2 - (po->header != 0) * 2 /* row count and newline */ ))) { fout = popen(pagerenv, "w"); if (fout) { usePipe = 1; #ifndef WIN32 #ifdef ENABLE_THREAD_SAFETY if (pq_block_sigpipe(&osigset, &sigpipe_pending) == 0) sigpipe_masked = true; #else oldsigpipehandler = pqsignal(SIGPIPE, SIG_IGN); #endif /* ENABLE_THREAD_SAFETY */ #endif /* WIN32 */ } else fout = stdout; } } if (!po->expanded && (po->align || po->html3)) { if (!(fields = (char **) calloc(nFields * (nTups + 1), sizeof(char *)))) { fprintf(stderr, libpq_gettext("out of memory\n")); exit(1); } } else if (po->header && !po->html3) { if (po->expanded) { if (po->align) fprintf(fout, libpq_gettext("%-*s%s Value\n"), fieldMaxLen - fs_len, libpq_gettext("Field"), po->fieldSep); else fprintf(fout, libpq_gettext("%s%sValue\n"), libpq_gettext("Field"), po->fieldSep); } else { int len = 0; for (j = 0; j < nFields; j++) { const char *s = fieldNames[j]; fputs(s, fout); len += strlen(s) + fs_len; if ((j + 1) < nFields) fputs(po->fieldSep, fout); } fputc('\n', fout); for (len -= fs_len; len--; fputc('-', fout)); fputc('\n', fout); } } if (po->expanded && po->html3) { if (po->caption) fprintf(fout, "<center><h2>%s</h2></center>\n", po->caption); else fprintf(fout, "<center><h2>" "Query retrieved %d rows * %d fields" "</h2></center>\n", nTups, nFields); } for (i = 0; i < nTups; i++) { if (po->expanded) { if (po->html3) fprintf(fout, "<table %s><caption align=\"top\">%d</caption>\n", po->tableOpt ? po->tableOpt : "", i); else fprintf(fout, libpq_gettext("-- RECORD %d --\n"), i); } for (j = 0; j < nFields; j++) do_field(po, res, i, j, fs_len, fields, nFields, fieldNames, fieldNotNum, fieldMax, fieldMaxLen, fout); if (po->html3 && po->expanded) fputs("</table>\n", fout); } if (!po->expanded && (po->align || po->html3)) { if (po->html3) { if (po->header) { if (po->caption) fprintf(fout, "<table %s><caption align=\"top\">%s</caption>\n", po->tableOpt ? po->tableOpt : "", po->caption); else fprintf(fout, "<table %s><caption align=\"top\">" "Retrieved %d rows * %d fields" "</caption>\n", po->tableOpt ? po->tableOpt : "", nTups, nFields); } else fprintf(fout, "<table %s>", po->tableOpt ? po->tableOpt : ""); } if (po->header) border = do_header(fout, po, nFields, fieldMax, fieldNames, fieldNotNum, fs_len, res); for (i = 0; i < nTups; i++) output_row(fout, po, nFields, fields, fieldNotNum, fieldMax, border, i); free(fields); if (border) free(border); } if (po->header && !po->html3) fprintf(fout, "(%d row%s)\n\n", PQntuples(res), (PQntuples(res) == 1) ? 
"" : "s"); free(fieldMax); free(fieldNotNum); free((void *) fieldNames); if (usePipe) { #ifdef WIN32 _pclose(fout); #else pclose(fout); #ifdef ENABLE_THREAD_SAFETY /* we can't easily verify if EPIPE occurred, so say it did */ if (sigpipe_masked) pq_reset_sigpipe(&osigset, sigpipe_pending, true); #else pqsignal(SIGPIPE, oldsigpipehandler); #endif /* ENABLE_THREAD_SAFETY */ #endif /* WIN32 */ } if (po->html3 && !po->expanded) fputs("</table>\n", fout); } }
static int pgsql_stmt_get_column_meta(pdo_stmt_t *stmt, zend_long colno, zval *return_value) { pdo_pgsql_stmt *S = (pdo_pgsql_stmt*)stmt->driver_data; PGresult *res; char *q=NULL; ExecStatusType status; Oid table_oid; char *table_name=NULL; if (!S->result) { return FAILURE; } if (colno >= stmt->column_count) { return FAILURE; } array_init(return_value); add_assoc_long(return_value, "pgsql:oid", S->cols[colno].pgsql_type); table_oid = PQftable(S->result, colno); add_assoc_long(return_value, "pgsql:table_oid", table_oid); table_name = pdo_pgsql_translate_oid_to_table(table_oid, S->H->server); if (table_name) { add_assoc_string(return_value, "table", table_name); } switch (S->cols[colno].pgsql_type) { case BOOLOID: add_assoc_string(return_value, "native_type", BOOLLABEL); break; case BYTEAOID: add_assoc_string(return_value, "native_type", BYTEALABEL); break; case INT8OID: add_assoc_string(return_value, "native_type", INT8LABEL); break; case INT2OID: add_assoc_string(return_value, "native_type", INT2LABEL); break; case INT4OID: add_assoc_string(return_value, "native_type", INT4LABEL); break; case TEXTOID: add_assoc_string(return_value, "native_type", TEXTLABEL); break; case VARCHAROID: add_assoc_string(return_value, "native_type", VARCHARLABEL); break; case DATEOID: add_assoc_string(return_value, "native_type", DATELABEL); break; case TIMESTAMPOID: add_assoc_string(return_value, "native_type", TIMESTAMPLABEL); break; default: /* Fetch metadata from Postgres system catalogue */ spprintf(&q, 0, "SELECT TYPNAME FROM PG_TYPE WHERE OID=%u", S->cols[colno].pgsql_type); res = PQexec(S->H->server, q); efree(q); status = PQresultStatus(res); if (status == PGRES_TUPLES_OK && 1 == PQntuples(res)) { add_assoc_string(return_value, "native_type", PQgetvalue(res, 0, 0)); } PQclear(res); } return 1; }
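/*
 * Sketch (not part of pdo_pgsql): the fallback above round-trips to pg_type
 * for a readable type name.  The result descriptor itself already carries
 * the type and table OIDs, which PQftype()/PQftable() expose without another
 * query (as OIDs, not names).  The helper name is hypothetical.
 */
#include <stdio.h>
#include <libpq-fe.h>

static void show_column_meta(const PGresult *res)
{
	int			col,
				ncols = PQnfields(res);

	for (col = 0; col < ncols; col++)
		printf("column %d: name=%s type_oid=%u table_oid=%u\n",
			   col,
			   PQfname(res, col),
			   PQftype(res, col),
			   PQftable(res, col));
}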
void PQprintTuples(const PGresult *res, FILE *fout, /* output stream */ int PrintAttNames, /* print attribute names or not */ int TerseOutput, /* delimiter bars or not? */ int colWidth /* width of column, if 0, use variable width */ ) { int nFields; int nTups; int i, j; char formatString[80]; char *tborder = NULL; nFields = PQnfields(res); nTups = PQntuples(res); if (colWidth > 0) sprintf(formatString, "%%s %%-%ds", colWidth); else sprintf(formatString, "%%s %%s"); if (nFields > 0) { /* only print rows with at least 1 field. */ if (!TerseOutput) { int width; width = nFields * 14; tborder = malloc(width + 1); if (!tborder) { fprintf(stderr, libpq_gettext("out of memory\n")); exit(1); } for (i = 0; i <= width; i++) tborder[i] = '-'; tborder[i] = '\0'; fprintf(fout, "%s\n", tborder); } for (i = 0; i < nFields; i++) { if (PrintAttNames) { fprintf(fout, formatString, TerseOutput ? "" : "|", PQfname(res, i)); } } if (PrintAttNames) { if (TerseOutput) fprintf(fout, "\n"); else fprintf(fout, "|\n%s\n", tborder); } for (i = 0; i < nTups; i++) { for (j = 0; j < nFields; j++) { const char *pval = PQgetvalue(res, i, j); fprintf(fout, formatString, TerseOutput ? "" : "|", pval ? pval : ""); } if (TerseOutput) fprintf(fout, "\n"); else fprintf(fout, "|\n%s\n", tborder); } } if (tborder) free(tborder); }
static int c_psql_exec_query (c_psql_database_t *db, udb_query_t *q, udb_query_preparation_area_t *prep_area) { PGresult *res; c_psql_user_data_t *data; const char *host; char **column_names; char **column_values; int column_num; int rows_num; int status; int row, col; /* The user data may hold parameter information, but may be NULL. */ data = udb_query_get_user_data (q); /* Versions up to `3' don't know how to handle parameters. */ if (3 <= db->proto_version) res = c_psql_exec_query_params (db, q, data); else if ((NULL == data) || (0 == data->params_num)) res = c_psql_exec_query_noparams (db, q); else { log_err ("Connection to database \"%s\" does not support parameters " "(protocol version %d) - cannot execute query \"%s\".", db->database, db->proto_version, udb_query_get_name (q)); return -1; } column_names = NULL; column_values = NULL; #define BAIL_OUT(status) \ sfree (column_names); \ sfree (column_values); \ PQclear (res); \ return status if (PGRES_TUPLES_OK != PQresultStatus (res)) { log_err ("Failed to execute SQL query: %s", PQerrorMessage (db->conn)); log_info ("SQL query was: %s", udb_query_get_statement (q)); BAIL_OUT (-1); } rows_num = PQntuples (res); if (1 > rows_num) { BAIL_OUT (0); } column_num = PQnfields (res); column_names = (char **) calloc (column_num, sizeof (char *)); if (NULL == column_names) { log_err ("calloc failed."); BAIL_OUT (-1); } column_values = (char **) calloc (column_num, sizeof (char *)); if (NULL == column_values) { log_err ("calloc failed."); BAIL_OUT (-1); } for (col = 0; col < column_num; ++col) { /* Pointers returned by `PQfname' are freed by `PQclear' via * `BAIL_OUT'. */ column_names[col] = PQfname (res, col); if (NULL == column_names[col]) { log_err ("Failed to resolve name of column %i.", col); BAIL_OUT (-1); } } if (C_PSQL_IS_UNIX_DOMAIN_SOCKET (db->host) || (0 == strcmp (db->host, "localhost"))) host = hostname_g; else host = db->host; status = udb_query_prepare_result (q, prep_area, host, "postgresql", db->database, column_names, (size_t) column_num, db->interval); if (0 != status) { log_err ("udb_query_prepare_result failed with status %i.", status); BAIL_OUT (-1); } for (row = 0; row < rows_num; ++row) { for (col = 0; col < column_num; ++col) { /* Pointers returned by `PQgetvalue' are freed by `PQclear' via * `BAIL_OUT'. */ column_values[col] = PQgetvalue (res, row, col); if (NULL == column_values[col]) { log_err ("Failed to get value at (row = %i, col = %i).", row, col); break; } } /* check for an error */ if (col < column_num) continue; status = udb_query_handle_result (q, prep_area, column_values); if (status != 0) { log_err ("udb_query_handle_result failed with status %i.", status); } } /* for (row = 0; row < rows_num; ++row) */ udb_query_finish_result (q, prep_area); BAIL_OUT (0); #undef BAIL_OUT } /* c_psql_exec_query */
/****************************************************************************** * * * Function: zbx_db_vselect * * * * Purpose: execute a select statement * * * * Return value: data, NULL (on error) or (DB_RESULT)ZBX_DB_DOWN * * * ******************************************************************************/ DB_RESULT zbx_db_vselect(const char *fmt, va_list args) { char *sql = NULL; DB_RESULT result = NULL; double sec = 0; #if defined(HAVE_IBM_DB2) int i; SQLRETURN ret = SQL_SUCCESS; #elif defined(HAVE_ORACLE) sword err = OCI_SUCCESS; ub4 counter; #elif defined(HAVE_POSTGRESQL) char *error = NULL; #elif defined(HAVE_SQLITE3) int ret = FAIL; char *error = NULL; #endif if (0 != CONFIG_LOG_SLOW_QUERIES) sec = zbx_time(); sql = zbx_dvsprintf(sql, fmt, args); if (1 == txn_error) { zabbix_log(LOG_LEVEL_DEBUG, "ignoring query [txnlev:%d] [%s] within failed transaction", txn_level, sql); goto clean; } zabbix_log(LOG_LEVEL_DEBUG, "query [txnlev:%d] [%s]", txn_level, sql); #if defined(HAVE_IBM_DB2) result = zbx_malloc(result, sizeof(ZBX_IBM_DB2_RESULT)); memset(result, 0, sizeof(ZBX_IBM_DB2_RESULT)); /* allocate a statement handle */ if (SUCCEED != zbx_ibm_db2_success(ret = SQLAllocHandle(SQL_HANDLE_STMT, ibm_db2.hdbc, &result->hstmt))) goto error; /* directly execute the statement */ if (SUCCEED != zbx_ibm_db2_success(ret = SQLExecDirect(result->hstmt, (SQLCHAR *)sql, SQL_NTS))) goto error; /* identify the number of output columns */ if (SUCCEED != zbx_ibm_db2_success(ret = SQLNumResultCols(result->hstmt, &result->ncolumn))) goto error; if (0 == result->ncolumn) goto error; result->nalloc = 0; result->values = zbx_malloc(result->values, sizeof(char *) * result->ncolumn); result->values_cli = zbx_malloc(result->values_cli, sizeof(char *) * result->ncolumn); result->values_len = zbx_malloc(result->values_len, sizeof(SQLINTEGER) * result->ncolumn); for (i = 0; i < result->ncolumn; i++) { /* get the display size for a column */ if (SUCCEED != zbx_ibm_db2_success(ret = SQLColAttribute(result->hstmt, (SQLSMALLINT)(i + 1), SQL_DESC_DISPLAY_SIZE, NULL, 0, NULL, &result->values_len[i]))) { goto error; } result->values_len[i] += 1; /* '\0'; */ /* allocate memory to bind a column */ result->values_cli[i] = zbx_malloc(NULL, result->values_len[i]); result->nalloc++; /* bind columns to program variables, converting all types to CHAR */ if (SUCCEED != zbx_ibm_db2_success(ret = SQLBindCol(result->hstmt, (SQLSMALLINT)(i + 1), SQL_C_CHAR, result->values_cli[i], result->values_len[i], &result->values_len[i]))) { goto error; } } error: if (SUCCEED != zbx_ibm_db2_success(ret) || 0 == result->ncolumn) { zbx_ibm_db2_log_errors(SQL_HANDLE_DBC, ibm_db2.hdbc); zbx_ibm_db2_log_errors(SQL_HANDLE_STMT, result->hstmt); IBM_DB2free_result(result); result = (SQL_CD_TRUE == IBM_DB2server_status() ? 
NULL : (DB_RESULT)ZBX_DB_DOWN); } #elif defined(HAVE_MYSQL) if (NULL == conn) { zabbix_errlog(ERR_Z3003); result = NULL; } else { if (0 != mysql_query(conn, sql)) { zabbix_errlog(ERR_Z3005, mysql_errno(conn), mysql_error(conn), sql); switch (mysql_errno(conn)) { case CR_CONN_HOST_ERROR: case CR_SERVER_GONE_ERROR: case CR_CONNECTION_ERROR: case CR_SERVER_LOST: case ER_SERVER_SHUTDOWN: case ER_ACCESS_DENIED_ERROR: /* wrong user or password */ case ER_ILLEGAL_GRANT_FOR_TABLE: /* user without any privileges */ case ER_TABLEACCESS_DENIED_ERROR:/* user without some privilege */ case ER_UNKNOWN_ERROR: result = (DB_RESULT)ZBX_DB_DOWN; break; default: result = NULL; break; } } else result = mysql_store_result(conn); } #elif defined(HAVE_ORACLE) result = zbx_malloc(NULL, sizeof(ZBX_OCI_DB_RESULT)); memset(result, 0, sizeof(ZBX_OCI_DB_RESULT)); err = OCIHandleAlloc((dvoid *)oracle.envhp, (dvoid **)&result->stmthp, OCI_HTYPE_STMT, (size_t)0, (dvoid **)0); if (OCI_SUCCESS == err) { err = OCIStmtPrepare(result->stmthp, oracle.errhp, (text *)sql, (ub4)strlen((char *)sql), (ub4)OCI_NTV_SYNTAX, (ub4)OCI_DEFAULT); } if (OCI_SUCCESS == err) { err = OCIStmtExecute(oracle.svchp, result->stmthp, oracle.errhp, (ub4)0, (ub4)0, (CONST OCISnapshot *)NULL, (OCISnapshot *)NULL, OCI_COMMIT_ON_SUCCESS); } if (OCI_SUCCESS == err) { /* get the number of columns in the query */ err = OCIAttrGet((void *)result->stmthp, OCI_HTYPE_STMT, (void *)&result->ncolumn, (ub4 *)0, OCI_ATTR_PARAM_COUNT, oracle.errhp); } if (OCI_SUCCESS != err) goto error; assert(0 < result->ncolumn); result->values = zbx_malloc(NULL, result->ncolumn * sizeof(char *)); memset(result->values, 0, result->ncolumn * sizeof(char *)); for (counter = 1; OCI_SUCCESS == err && counter <= result->ncolumn; counter++) { OCIParam *parmdp = NULL; OCIDefine *defnp = NULL; ub4 char_semantics; ub2 col_width; /* request a parameter descriptor in the select-list */ err = OCIParamGet((void *)result->stmthp, OCI_HTYPE_STMT, oracle.errhp, (void **)&parmdp, (ub4)counter); if (OCI_SUCCESS == err) { /* retrieve the length semantics for the column */ char_semantics = 0; err = OCIAttrGet((void *)parmdp, (ub4)OCI_DTYPE_PARAM, (void *)&char_semantics, (ub4 *)0, (ub4)OCI_ATTR_CHAR_USED, (OCIError *)oracle.errhp); } if (OCI_SUCCESS == err) { col_width = 0; if (char_semantics) { /* retrieve the column width in characters */ err = OCIAttrGet((void *)parmdp, (ub4)OCI_DTYPE_PARAM, (void *)&col_width, (ub4 *)0, (ub4)OCI_ATTR_CHAR_SIZE, (OCIError *)oracle.errhp); } else { /* retrieve the column width in bytes */ err = OCIAttrGet((void *)parmdp, (ub4)OCI_DTYPE_PARAM, (void *)&col_width, (ub4 *)0, (ub4)OCI_ATTR_DATA_SIZE, (OCIError *)oracle.errhp); } } col_width++; result->values[counter - 1] = zbx_malloc(NULL, col_width); memset(result->values[counter - 1], 0, col_width); if (OCI_SUCCESS == err) { /* represent any data as characters */ err = OCIDefineByPos(result->stmthp, &defnp, oracle.errhp, counter, (dvoid *)result->values[counter - 1], col_width, SQLT_STR, (dvoid *)0, (ub2 *)0, (ub2 *)0, OCI_DEFAULT); } /* free cell descriptor */ OCIDescriptorFree(parmdp, OCI_DTYPE_PARAM); parmdp = NULL; } error: if (OCI_SUCCESS != err) { zabbix_errlog(ERR_Z3005, err, zbx_oci_error(err), sql); OCI_DBfree_result(result); result = (OCI_SERVER_NORMAL == OCI_DBserver_status() ? 
NULL : (DB_RESULT)ZBX_DB_DOWN); } #elif defined(HAVE_POSTGRESQL) result = zbx_malloc(NULL, sizeof(ZBX_PG_DB_RESULT)); result->pg_result = PQexec(conn, sql); result->values = NULL; result->cursor = 0; result->row_num = 0; if (NULL == result->pg_result) zabbix_errlog(ERR_Z3005, 0, "result is NULL", sql); if (PGRES_TUPLES_OK != PQresultStatus(result->pg_result)) { error = zbx_dsprintf(error, "%s:%s", PQresStatus(PQresultStatus(result->pg_result)), PQresultErrorMessage(result->pg_result)); zabbix_errlog(ERR_Z3005, 0, error, sql); zbx_free(error); PG_DBfree_result(result); result = (CONNECTION_OK == PQstatus(conn) ? NULL : (DB_RESULT)ZBX_DB_DOWN); } else /* init rownum */ result->row_num = PQntuples(result->pg_result); #elif defined(HAVE_SQLITE3) if (0 == txn_level && PHP_MUTEX_OK != php_sem_acquire(&sqlite_access)) { zabbix_log(LOG_LEVEL_CRIT, "ERROR: cannot create lock on SQLite3 database"); exit(FAIL); } result = zbx_malloc(NULL, sizeof(ZBX_SQ_DB_RESULT)); result->curow = 0; lbl_get_table: if (SQLITE_OK != (ret = sqlite3_get_table(conn,sql, &result->data, &result->nrow, &result->ncolumn, &error))) { if (SQLITE_BUSY == ret) goto lbl_get_table; zabbix_errlog(ERR_Z3005, 0, error, sql); sqlite3_free(error); SQ_DBfree_result(result); switch (ret) { case SQLITE_ERROR: /* SQL error or missing database; assuming SQL error, because if we are this far into execution, zbx_db_connect() was successful */ case SQLITE_NOMEM: /* a malloc() failed */ case SQLITE_MISMATCH: /* data type mismatch */ result = NULL; break; default: result = (DB_RESULT)ZBX_DB_DOWN; break; } } if (0 == txn_level) php_sem_release(&sqlite_access); #endif /* HAVE_SQLITE3 */ if (0 != CONFIG_LOG_SLOW_QUERIES) { sec = zbx_time() - sec; if (sec > (double)CONFIG_LOG_SLOW_QUERIES / 1000.0) zabbix_log(LOG_LEVEL_WARNING, "slow query: " ZBX_FS_DBL " sec, \"%s\"", sec, sql); } if (NULL == result && 0 < txn_level) { zabbix_log(LOG_LEVEL_DEBUG, "query [%s] failed, setting transaction as failed", sql); txn_error = 1; } clean: zbx_free(sql); return result; }