/*
 * Convert rows from internal to db API representation.
 * On a conversion failure the rows converted so far are freed
 * and a negative code is returned.
 */
static int dbt_convert_rows(db1_res_t* _r, dbt_result_p _dres)
{
	int n = 0;
	dbt_row_p it;

	if (_r == NULL || _dres == NULL) {
		LM_ERR("invalid parameter\n");
		return -1;
	}

	RES_ROW_N(_r) = _dres->nrrows;
	if (RES_ROW_N(_r) == 0)
		return 0;

	if (db_allocate_rows(_r) < 0) {
		LM_ERR("could not allocate rows");
		return -2;
	}

	/* walk the internal linked list and convert each row in place */
	for (it = _dres->rows; it != NULL; it = it->next) {
		if (dbt_convert_row(_r, &(RES_ROWS(_r)[n]), it) < 0) {
			LM_ERR("failed to convert row #%d\n", n);
			/* shrink to the rows actually converted, then release them */
			RES_ROW_N(_r) = n;
			db_free_rows(_r);
			return -4;
		}
		n++;
	}
	return 0;
}
/*
 * Convert rows from a Cassandra CQL result into the db API representation.
 *
 * FIX: the original unconditionally read res_cql_rows[0] to obtain the
 * column count, which is undefined behavior when the query returned no
 * rows. The empty result set is now handled before that access.
 *
 * Returns 0 on success, negative on allocation failure.
 */
int cql_convert_row(oac::CqlResult& _cql_res, db1_res_t* _r)
{
	std::vector<oac::CqlRow> res_cql_rows = _cql_res.rows;
	int rows_no = res_cql_rows.size();
	str col_val;

	RES_ROW_N(_r) = rows_no;
	if (rows_no == 0) {
		/* nothing to convert; avoid touching res_cql_rows[0] */
		return 0;
	}
	int cols_no = res_cql_rows[0].columns.size();

	if (db_allocate_rows(_r) < 0) {
		LM_ERR("Could not allocate rows.\n");
		return -1;
	}

	for (int ri = 0; ri < rows_no; ri++) {
		if (db_allocate_row(_r, &(RES_ROWS(_r)[ri])) != 0) {
			LM_ERR("Could not allocate row.\n");
			return -2;
		}

		/* complete the row with the columns */
		for (int col = 0; col < cols_no; col++) {
			col_val.s = (char*)res_cql_rows[ri].columns[col].value.c_str();
			col_val.len = strlen(col_val.s);

			/* column types were filled in when the result header was built */
			RES_ROWS(_r)[ri].values[col].type = RES_TYPES(_r)[col];
			cassa_convert_result_raw(&RES_ROWS(_r)[ri].values[col], &col_val);

			LM_DBG("Field index %d. %s = %s.\n", col,
					res_cql_rows[ri].columns[col].name.c_str(),
					res_cql_rows[ri].columns[col].value.c_str());
		}
	}
	return 0;
}
/**
 * Convert rows from mysql to db API representation.
 *
 * Row count is taken from the prepared-statement handle when the
 * connection uses prepared statements, otherwise from the stored result
 * set. Returns 0 on success, negative on error; on a mid-conversion
 * error the rows converted so far are freed.
 */
static inline int db_mysql_convert_rows(const db_con_t* _h, db_res_t* _r)
{
	int row;

	if ((!_h) || (!_r)) {
		LM_ERR("invalid parameter\n");
		return -1;
	}
	if (CON_HAS_PS(_h)) {
		RES_ROW_N(_r) = mysql_stmt_num_rows(CON_PS_STMT(_h));
	} else {
		RES_ROW_N(_r) = mysql_num_rows(CON_RESULT(_h));
	}
	if (!RES_ROW_N(_r)) {
		LM_DBG("no rows returned from the query\n");
		RES_ROWS(_r) = 0;
		return 0;
	}
	if (db_allocate_rows( _r, RES_ROW_N(_r))!=0) {
		LM_ERR("no private memory left\n");
		return -2;
	}
	for(row = 0; row < RES_ROW_N(_r); row++) {
		if (CON_HAS_PS(_h)) {
			/* NOTE(review): the mysql_stmt_fetch() return value is
			 * deliberately ignored (the error check below was left
			 * commented out), so a fetch error on a prepared statement
			 * goes unnoticed here -- confirm this is intentional */
			mysql_stmt_fetch(CON_PS_STMT(_h));
			//if(mysql_stmt_fetch(CON_PS_STMT(_h))!=1)
			//	LM_ERR("STMT ERR=%s\n",mysql_stmt_error(CON_PS_STMT(_h)));
		} else {
			CON_ROW(_h) = mysql_fetch_row(CON_RESULT(_h));
			if (!CON_ROW(_h)) {
				LM_ERR("driver error: %s\n", mysql_error(CON_CONNECTION(_h)));
				/* shrink to rows actually converted before freeing */
				RES_ROW_N(_r) = row;
				db_free_rows(_r);
				return -3;
			}
		}
		if (db_mysql_convert_row(_h, _r, &(RES_ROWS(_r)[row])) < 0) {
			LM_ERR("error while converting row #%d\n", row);
			RES_ROW_N(_r) = row;
			db_free_rows(_r);
			return -4;
		}
	}
	return 0;
}
/*!
 * \brief Convert rows from mysql to db API representation
 * \param _h database connection
 * \param _r database result set
 * \return 0 on success, negative on failure
 */
static inline int db_mysql_convert_rows(const db1_con_t* _h, db1_res_t* _r)
{
	int i;

	if (_h == NULL || _r == NULL) {
		LM_ERR("invalid parameter\n");
		return -1;
	}

	RES_ROW_N(_r) = mysql_num_rows(RES_RESULT(_r));
	if (RES_ROW_N(_r) == 0) {
		/* empty result set - nothing else to do */
		LM_DBG("no rows returned from the query\n");
		RES_ROWS(_r) = 0;
		return 0;
	}

	if (db_allocate_rows(_r) < 0) {
		LM_ERR("could not allocate rows");
		RES_ROW_N(_r) = 0;
		return -2;
	}

	/* fetch and convert one row at a time */
	for (i = 0; i < RES_ROW_N(_r); i++) {
		RES_ROW(_r) = mysql_fetch_row(RES_RESULT(_r));
		if (RES_ROW(_r) == NULL) {
			LM_ERR("driver error: %s\n", mysql_error(CON_CONNECTION(_h)));
			/* keep only the rows converted so far, then free them */
			RES_ROW_N(_r) = i;
			db_free_rows(_r);
			return -3;
		}
		if (db_mysql_convert_row(_h, _r, &(RES_ROWS(_r)[i])) < 0) {
			LM_ERR("error while converting row #%d\n", i);
			RES_ROW_N(_r) = i;
			db_free_rows(_r);
			return -4;
		}
	}
	return 0;
}
db_res_t * new_full_db_res(int rows, int cols) { db_res_t * res; int i; res = db_new_result(); if( res == NULL) { LM_ERR("Error allocating db result\n"); return NULL; } if( db_allocate_columns(res,cols) < 0) { LM_ERR("Error allocating db result columns\n"); pkg_free(res); return NULL; } res->col.n = cols; if( db_allocate_rows(res,rows) < 0 ) { LM_ERR("Error allocating db result rows\n"); db_free_columns( res ); pkg_free(res); return NULL; } res->n = rows; res->res_rows = rows; res->last_row = rows; for( i=0;i<rows;i++) res->rows[i].n = cols; return res; }
/*
 * Convert rows from internal to db API representation.
 *
 * Improvements: the row counter was misleadingly named 'col', and the
 * NULL check on DBT_CON_ROW(_h) immediately after assigning it a pointer
 * that the loop condition already proved non-NULL was unreachable dead
 * code; both are cleaned up. Behavior is otherwise unchanged.
 */
static int dbt_convert_rows(db_con_t* _h, db_res_t* _r)
{
	int row;
	dbt_row_p _rp = NULL;

	if (!_h || !_r) {
		LM_ERR("invalid parameter\n");
		return -1;
	}

	RES_ROW_N(_r) = DBT_CON_RESULT(_h)->nrrows;
	if (!RES_ROW_N(_r)) {
		return 0;
	}
	if (db_allocate_rows( _r, RES_ROW_N(_r))!=0) {
		LM_ERR("no private memory left\n");
		return -2;
	}

	row = 0;
	_rp = DBT_CON_RESULT(_h)->rows;
	while(_rp) {
		/* publish the current row on the connection so that
		 * dbt_convert_row() can read it */
		DBT_CON_ROW(_h) = _rp;
		if (dbt_convert_row(_h, _r, &(RES_ROWS(_r)[row])) < 0) {
			LM_ERR("failed to convert row #%d\n", row);
			/* shrink to rows actually converted, then release them */
			RES_ROW_N(_r) = row;
			db_free_rows(_r);
			return -4;
		}
		row++;
		_rp = _rp->next;
	}
	return 0;
}
/* * Query table for specified rows * _h: structure representing database connection * _k: key names * _op: operators * _v: values of the keys that must match * _c: column names to return * _n: number of key=values pairs to compare * _nc: number of columns to return * _o: order by the specified column */ int db_cassa_query(const db1_con_t* _h, const db_key_t* _k, const db_op_t* _op, const db_val_t* _v, const db_key_t* _c, int _n, int _nc, const db_key_t _o, db1_res_t** _r) { db1_res_t* db_res = 0; int rows_no; ColumnVecPtr cassa_result; dbcassa_table_p tbc; int seckey_len; if (!_h || !CON_TABLE(_h) || !_r) { LM_ERR("invalid parameter value\n"); return -1; } LM_DBG("query table=%s\n", _h->table->s); /** Construct and send the query to Cassandra Cluster **/ cassa_result = cassa_translate_query(_h, _k, _v, _c, _n, _nc, &rows_no); if(cassa_result.get() == NULL) { LM_ERR("Failed to query Cassandra cluster\n"); return -1; } /* compare the number of queried cols with the key cols*/ // if(no_kc + no_sec_kc < _n) { /* TODO */ /* filter manually for the rest of the values */ // } db_res = db_new_result(); if (!db_res) { LM_ERR("no memory left\n"); goto error; } RES_COL_N(db_res)= _nc; if(!db_allocate_columns(db_res, _nc) < 0) { LM_ERR("no more memory\n"); goto error; } tbc = dbcassa_db_get_table(&CON_CASSA(_h)->db_name, CON_TABLE(_h)); if(!tbc) { LM_ERR("table %.*s does not exist!\n", CON_TABLE(_h)->len, CON_TABLE(_h)->s); return -1; } /** Convert the result from Cassandra **/ /* fill in the columns name and type */ for(int col = 0; col < _nc; col++) { RES_NAMES(db_res)[col] = (str*)pkg_malloc(sizeof(str)); if (! 
RES_NAMES(db_res)[col]) { LM_ERR("no private memory left\n"); dbcassa_lock_release(tbc); RES_COL_N(db_res) = col; db_free_columns(db_res); goto error; } *RES_NAMES(db_res)[col] = *_c[col]; /* search the column in table schema to get the type */ dbcassa_column_p colp = cassa_search_col(tbc, _c[col]); if(!colp) { LM_ERR("No column with name [%.*s] found\n", _c[col]->len, _c[col]->s); dbcassa_lock_release(tbc); RES_COL_N(db_res) = col; db_free_columns(db_res); goto error; } RES_TYPES(db_res)[col] = colp->type; LM_DBG("RES_NAMES(%p)[%d]=[%.*s]\n", RES_NAMES(db_res)[col], col, RES_NAMES(db_res)[col]->len, RES_NAMES(db_res)[col]->s); } /* TODO if all columns asked - take from table schema */ seckey_len = tbc->seckey_len; dbcassa_lock_release(tbc); if(!cassa_result->size()) { LM_DBG("The query returned no result\n"); RES_ROW_N(db_res) = 0; goto done; } /* Initialize the row_slices vector for the case with one column and no secondary key */ if(rows_no == 1) { row_slices[0][0]= cassa_result->size(); row_slices[0][1]= 0; if(seckey_len) { /* if the table has a secondary key defined */ /* pass through the result once to see how many rows there are */ rows_no = cassa_result_separate_rows(*cassa_result); if(rows_no < 0) { LM_ERR("Wrong formated column names\n"); goto error; } } } RES_ROW_N(db_res) = rows_no; if (db_allocate_rows(db_res) < 0) { LM_ERR("could not allocate rows"); goto error; } for(int ri=0; ri < rows_no; ri++) { if (db_allocate_row(db_res, &(RES_ROWS(db_res)[ri])) != 0) { LM_ERR("could not allocate row"); goto error; } /* complete the row with the columns */ for(int col = 0; col< _nc; col++) { RES_ROWS(db_res)[ri].values[col].type = RES_TYPES(db_res)[col]; cassa_convert_result(_c[col], *cassa_result, (ri>0?row_slices[ri-1][0]:0), row_slices[ri][0], row_slices[ri][1], &RES_ROWS(db_res)[ri].values[col]); } } done: *_r = db_res; LM_DBG("Exited with success\n"); return 0; error: if(db_res) db_free_result(db_res); return -1; }
/*
 * Get rows and convert it from oracle to db API representation.
 *
 * Fetches rows one at a time under the connection's time limit; after
 * the first fetch the statement's current position is read to size the
 * result, and subsequent rows are fetched with OCI_FETCH_PRIOR while
 * the rows array is filled from the highest index down (--rcnt), so the
 * final ordering matches the fetch order. On any failure all converted
 * rows are freed and -3 (or -1 on allocation failure) is returned.
 */
static int get_rows(ora_con_t* con, db_res_t* _r, OCIStmt* _c, dmap_t* _d)
{
	ub4 rcnt;
	sword status;
	unsigned n = RES_COL_N(_r);

	/* restore the per-column buffer lengths before the first fetch */
	memcpy(_d->len, _d->ilen, sizeof(_d->len[0]) * n);

	// timelimited operation
	status = begin_timelimit(con, 0);
	if (status != OCI_SUCCESS) goto ora_err;
	do status = OCIStmtFetch2(_c, con->errhp, 1, OCI_FETCH_NEXT, 0,
		OCI_DEFAULT);
	while (wait_timelimit(con, status));
	if (done_timelimit(con, status)) goto stop_load;
	if (status != OCI_SUCCESS) {
		if (status != OCI_NO_DATA)
			goto ora_err;
		/* no data at all: report an empty result set */
		RES_ROW_N(_r) = 0;
		RES_ROWS(_r) = NULL;
		return 0;
	}

	/* NOTE(review): the current position after the first fetch is used
	 * as the total row count - presumably the fetch above positions at
	 * the last row; confirm against the statement's scroll setup */
	status = OCIAttrGet(_c, OCI_HTYPE_STMT, &rcnt, NULL,
		OCI_ATTR_CURRENT_POSITION, con->errhp);
	if (status != OCI_SUCCESS) goto ora_err;
	if (!rcnt) {
		LM_ERR("lastpos==0\n");
		goto stop_load;
	}

	RES_ROW_N(_r) = rcnt;
	if (db_allocate_rows( _r, rcnt)!=0) {
		LM_ERR("no private memory left\n");
		return -1;
	}

	while ( 1 ) {
		/* fill rows from the end toward the start */
		if (convert_row(_r, &RES_ROWS(_r)[--rcnt], _d) < 0) {
			LM_ERR("error convert row\n");
			goto stop_load;
		}
		if (!rcnt)
			return 0;

		/* reset buffer lengths clobbered by the previous conversion */
		memcpy(_d->len, _d->ilen, sizeof(_d->len[0]) * n);

		// timelimited operation
		status = begin_timelimit(con, 0);
		if (status != OCI_SUCCESS) goto ora_err;
		do status = OCIStmtFetch2(_c, con->errhp, 1, OCI_FETCH_PRIOR, 0,
			OCI_DEFAULT);
		while (wait_timelimit(con, status));
		if (done_timelimit(con, status)) goto stop_load;
		if (status != OCI_SUCCESS) break;
	}

ora_err:
	LM_ERR("driver: %s\n", db_oracle_error(con, status));
stop_load:
	db_free_rows(_r);
	RES_ROW_N(_r) = 0; /* TODO: skipped in db_res.c :) */
	return -3;
}
/*!
 * \brief Convert rows from mongodb to db API representation
 * \param _h database connection
 * \param _r database result set
 * \return 0 on success, negative on failure
 *
 * Rows are pulled from the mongoc cursor one document at a time; the
 * rows array starts at DB_MONGODB_ROWS_STEP entries (or 1 when the
 * cursor has no more documents) and grows in DB_MONGODB_ROWS_STEP
 * increments as needed. The final RES_ROW_N is the number of documents
 * actually converted.
 */
static int db_mongodb_convert_result(const db1_con_t* _h, db1_res_t* _r)
{
	int row;
	db_mongodb_result_t *mgres;
	const bson_t *itdoc;
	char *jstr;

	if ((!_h) || (!_r)) {
		LM_ERR("invalid parameter\n");
		return -1;
	}
	mgres = (db_mongodb_result_t*)RES_PTR(_r);
	if(!mgres->rdoc) {
		/* no first document fetched: empty result */
		mgres->nrcols = 0;
		return 0;
	}
	if(mgres->nrcols==0) {
		LM_DBG("no fields to return\n");
		return 0;
	}
	/* initial capacity: exactly one row if the cursor is exhausted,
	 * otherwise one growth step */
	if(!mongoc_cursor_more (mgres->cursor)) {
		RES_ROW_N(_r) = 1;
		mgres->maxrows = 1;
	} else {
		RES_ROW_N(_r) = DB_MONGODB_ROWS_STEP;
		mgres->maxrows = DB_MONGODB_ROWS_STEP;
	}
	if (db_allocate_rows(_r) < 0) {
		LM_ERR("could not allocate rows\n");
		RES_ROW_N(_r) = 0;
		return -2;
	}
	itdoc = mgres->rdoc;
	row = 0;
	do {
		/* grow the rows array when the capacity is reached */
		if(row >= RES_ROW_N(_r)) {
			if (db_reallocate_rows(_r,
						RES_ROW_N(_r)+DB_MONGODB_ROWS_STEP) < 0) {
				LM_ERR("could not reallocate rows\n");
				/* NOTE(review): allocated rows are not freed here -
				 * presumably the caller releases the result; confirm */
				return -2;
			}
			mgres->maxrows = RES_ROW_N(_r);
		}
		if(is_printable(L_DBG)) {
			jstr = bson_as_json (itdoc, NULL);
			LM_DBG("selected document: %s\n", jstr);
			bson_free (jstr);
		}
		if(db_mongodb_convert_bson(_h, _r, row, itdoc)) {
			LM_ERR("failed to convert bson at pos %d\n", row);
			return -1;
		}
		row++;
	} while (mongoc_cursor_more (mgres->cursor)
			&& mongoc_cursor_next (mgres->cursor, &itdoc));
	/* shrink the advertised count to the documents actually converted */
	RES_ROW_N(_r) = row;
	LM_DBG("retrieved number of rows: %d\n", row);
	return 0;
}
/*
 * Convert a Perl OpenSIPS::VDB::Result object into a C db_res_t.
 *
 * Reads the column definitions (name + type) from the Perl result, then
 * walks the optional "rows" array converting each Perl value object into
 * a db_val_t. String/blob payloads are copied into pkg memory (freed
 * later by the perlvdb free path). Returns 0 on success, -1 on a broken
 * result set or unsupported value type.
 */
int perlresult2dbres(SV *perlres, db_res_t **r)
{
	HV * result = NULL;
	SV *colarrayref = NULL;
	AV *colarray = NULL;
	SV *acol = NULL;
	int colcount = 0;
	SV *rowarrayref = NULL;
	AV *rowarray = NULL;
	int rowcount = 0;
	SV *arowref = NULL;
	AV *arow = NULL;
	int arowlen = 0;
	SV *aelement = NULL;
	SV *atypesv = 0;
	int atype = 0;
	SV *aval = NULL;
	char *charbuf;
	char *currentstring;
	int i, j;
	int retval = 0;
	STRLEN len;
	SV *d1; /* helper variables */

	/*db_val_t cur_val;*/ /* Abbreviation in "switch" below. The currently
	                         modified db result value. */

	/* the Perl object must derive from OpenSIPS::VDB::Result */
	if (!(SvROK(perlres) &&
			(sv_derived_from(perlres, "OpenSIPS::VDB::Result")))) {
		goto error;
	}

	result = (HV*)SvRV(perlres);

	/* Memory allocation for C side result structure */
	*r = db_new_result();
	/* Fetch column definitions */
	colarrayref = *hv_fetchs(result, PERL_VDB_COLDEFSMETHOD, 0);
	/* colarrayref = perlvdb_perlmethod(perlres, PERL_VDB_COLDEFSMETHOD,
				NULL, NULL, NULL, NULL); */
	if (!(SvROK(colarrayref))) goto error;
	colarray = (AV *)SvRV(colarrayref);
	/* SvREFCNT_dec(colarray); */
	if (!(SvTYPE(colarray) == SVt_PVAV)) goto error;

	colcount = av_len(colarray) + 1;
	RES_COL_N(*r) = colcount;
	/* NOTE(review): db_allocate_columns() return value is unchecked -
	 * an allocation failure here would crash below; confirm intent */
	db_allocate_columns(*r, colcount);

	/* reverse direction, as elements are removed by "SvREFCNT_dec" */
	for (i = colcount-1; i >= 0; i--) {
		acol = *av_fetch(colarray, i, 0);
		d1 = perlvdb_perlmethod(acol, PERL_VDB_TYPEMETHOD,
				NULL, NULL, NULL, NULL);
		if (!SvIOK(d1)) goto error;
		(*r)->col.types[i] = SvIV(d1);
		SvREFCNT_dec(d1);

		d1 = perlvdb_perlmethod(acol, PERL_VDB_NAMEMETHOD,
				NULL, NULL, NULL, NULL);
		if (!SvPOK(d1)) goto error;
		currentstring = SvPV(d1, len);
		charbuf = pkg_malloc(len+1);
		/* Column names buffers are freed in the perlvdb free function */
		strncpy(charbuf, currentstring, len+1);
		(*r)->col.names[i]->s = charbuf;
		(*r)->col.names[i]->len = strlen(charbuf);
		SvREFCNT_dec(d1);
	}

	/* no "rows" key means an empty result set */
	if(hv_exists(result, "rows", 4)){
		rowarrayref =(SV*) hv_fetchs(result, "rows", 0);
	}else{
		(*r)->n = 0;
		(*r)->res_rows = 0;
		(*r)->last_row = 0;
		goto end;
	}
	if(rowarrayref){
		rowarrayref = *((SV**)rowarrayref);
	}else{
		(*r)->n = 0;
		(*r)->res_rows = 0;
		(*r)->last_row = 0;
		goto end;
	}
	if (!(SvROK(rowarrayref))) { /* Empty result set */
		(*r)->n = 0;
		(*r)->res_rows = 0;
		(*r)->last_row = 0;
		goto end;
	}

	rowarray = (AV *)SvRV(rowarrayref);
	if (!(SvTYPE(rowarray) == SVt_PVAV)) goto error;

	rowcount = av_len(rowarray) + 1;
	(*r)->n = rowcount;
	(*r)->res_rows = rowcount;
	(*r)->last_row = rowcount;

	db_allocate_rows(*r, rowcount);
	/* (rows * (sizeof(db_row_t) + sizeof(db_val_t) * RES_COL_N(_res)) */
	/* LM_DBG("We got %d rows each row requres %d bytes because the row "
		"struct is %d and the values in that row take up %d. That is %d "
		"values each size is %d\n", rowcount,
		sizeof(db_row_t) + sizeof(db_val_t) * RES_COL_N(*r),
		sizeof(db_row_t), sizeof(db_val_t) * RES_COL_N(*r),
		RES_COL_N(*r), sizeof(db_val_t)); */

	for (i = 0; i < rowcount; i++) {
		arowref = *av_fetch(rowarray, i, 0);
		if (!SvROK(arowref)) goto error;
		arow = (AV *)SvRV(arowref);
		if (!(SvTYPE(colarray) == SVt_PVAV)) goto error;
		arowlen = av_len(arow) + 1;
		(*r)->rows[i].n = arowlen;
		for (j = 0; j < arowlen; j++) {
			aelement = *av_fetch(arow, j, 0);
#define cur_val (((*r)->rows)[i].values)[j]
			/*cur_val = (((*r)->rows)[i].values)[j];*/
			/* cur_val is just an "abbreviation" */

			/* non-Value elements become SQL NULLs */
			if (!(sv_isobject(aelement) &&
				sv_derived_from(aelement, PERL_CLASS_VALUE))) {
				cur_val.nul = 1;
				continue;
			}
			atypesv = *hv_fetchs((HV*)SvRV(aelement),
					PERL_VDB_TYPEMETHOD,0); /*aelement->{type} */
			atype = SvIV(atypesv);
			/*atypesv = perlvdb_perlmethod(aelement, PERL_VDB_TYPEMETHOD,
					NULL, NULL, NULL, NULL);*/
			aval = perlvdb_perlmethod(aelement, PERL_VDB_DATAMETHOD,
					NULL, NULL, NULL, NULL);
			(*r)->rows[i].values[j].type = atype;
			/* SvREFCNT_dec(atypesv); */

			if (!SvOK(aval)) {
				/* undef data -> NULL value */
				cur_val.nul = 1;
			} else {
				switch (atype) {
					case DB_INT:
						cur_val.val.int_val = SvIV(aval);
						cur_val.nul = 0;
						break;
					case DB_DOUBLE:
						cur_val.val.double_val = SvNV(aval);
						cur_val.nul = 0;
						break;
					case DB_STRING:
					case DB_STR:
						/* We dont support DB_STR for now.
						 * Set DB_STRING instead */
						cur_val.type = DB_STRING;
						currentstring = SvPV(aval, len);
						charbuf = pkg_malloc(len+1);
						strncpy(charbuf, currentstring, len+1);
						cur_val.val.string_val = charbuf;
						cur_val.nul = 0;
						break;
					case DB_DATETIME:
						cur_val.val.time_val = (time_t)SvIV(aval);
						cur_val.nul = 0;
						break;
					case DB_BLOB:
						currentstring = SvPV(aval, len);
						charbuf = pkg_malloc(len+1);
						strncpy(charbuf, currentstring, len+1);
						cur_val.val.blob_val.s = charbuf;
						cur_val.val.blob_val.len = len;
						cur_val.nul = 0;
						break;
					case DB_BITMAP:
						cur_val.val.bitmap_val = SvIV(aval);
						cur_val.nul = 0;
						break;
					default:
						LM_CRIT("cannot handle this data type.\n");
						return -1;
						break;
				}
			}
			SvREFCNT_dec(aval);
		}
	}

end:
	return retval;
error:
	LM_CRIT("broken result set. Exiting, leaving OpenSIPS in unknown state.\n");
	return -1;
}
/*
 * Convert rows from UNIXODBC to db API representation.
 *
 * Rows are fetched into a temporary buffer and collected in a linked
 * list first (the total row count is unknown until SQLFetch stops),
 * then converted into the result structure.
 *
 * FIX: when db_unixodbc_list_insert() failed, the rows already collected
 * in 'rowstart' were leaked; the list is now destroyed on that path as
 * on every other error path.
 */
static inline int db_unixodbc_convert_rows(const db1_con_t* _h, db1_res_t* _r)
{
	int i = 0, ret = 0;
	SQLSMALLINT columns;
	list* rows = NULL;
	list* rowstart = NULL;
	strn* temp_row = NULL;

	if((!_h) || (!_r)) {
		LM_ERR("invalid parameter\n");
		return -1;
	}
	SQLNumResultCols(CON_RESULT(_h), (SQLSMALLINT *)&columns);
	temp_row = (strn*)pkg_malloc( columns*sizeof(strn) );
	if(!temp_row) {
		LM_ERR("no private memory left\n");
		return -1;
	}
	while(SQL_SUCCEEDED(ret = SQLFetch(CON_RESULT(_h)))) {
		for(i=0; i < columns; i++) {
			SQLLEN indicator;
			ret = SQLGetData(CON_RESULT(_h), i+1, SQL_C_CHAR,
				temp_row[i].s, STRN_LEN, &indicator);
			if (SQL_SUCCEEDED(ret)) {
				if (indicator == SQL_NULL_DATA)
					strcpy(temp_row[i].s, "NULL");
			} else {
				LM_ERR("SQLGetData failed\n");
			}
		}
		if (db_unixodbc_list_insert(&rowstart, &rows, columns, temp_row) < 0) {
			LM_ERR("insert failed\n");
			pkg_free(temp_row);
			temp_row= NULL;
			/* FIX: release rows collected so far (previously leaked) */
			db_unixodbc_list_destroy(rowstart);
			return -5;
		}
		RES_ROW_N(_r)++;
	}
	/* free temporary row data */
	pkg_free(temp_row);
	CON_ROW(_h) = NULL;

	if (!RES_ROW_N(_r)) {
		RES_ROWS(_r) = 0;
		return 0;
	}
	if (db_allocate_rows(_r) != 0) {
		LM_ERR("could not allocate rows");
		db_unixodbc_list_destroy(rowstart);
		return -2;
	}

	/* convert the collected list into the result rows */
	i = 0;
	rows = rowstart;
	while(rows) {
		CON_ROW(_h) = rows->data;
		if (!CON_ROW(_h)) {
			LM_ERR("string null\n");
			RES_ROW_N(_r) = i;
			db_free_rows(_r);
			db_unixodbc_list_destroy(rowstart);
			return -3;
		}
		if (db_unixodbc_convert_row(_h, _r, &(RES_ROWS(_r)[i]),
				rows->lengths) < 0) {
			LM_ERR("converting row failed #%d\n", i);
			RES_ROW_N(_r) = i;
			db_free_rows(_r);
			db_unixodbc_list_destroy(rowstart);
			return -4;
		}
		i++;
		rows = rows->next;
	}
	db_unixodbc_list_destroy(rowstart);
	return 0;
}
/*
 * Convert rows from UNIXODBC to db API representation.
 *
 * Each fetched row is duplicated into a flat array sized row_n*columns
 * by db_unixodbc_dup_row(), then converted into the result structure.
 *
 * FIX: when db_unixodbc_dup_row() failed, the temporary fetch buffer
 * 'temp_row' was leaked; it is now freed on that path as on every
 * other exit from the fetch loop.
 */
static inline int db_unixodbc_convert_rows(const db_con_t* _h, db_res_t* _r)
{
	int row_n = 0, i = 0, ret = 0;
	SQLSMALLINT columns;
	strn* temp_row = NULL;
	str *rows = NULL;

	if((!_h) || (!_r)) {
		LM_ERR("invalid parameter\n");
		return -1;
	}
	SQLNumResultCols(CON_RESULT(_h), (SQLSMALLINT *)&columns);
	temp_row = (strn*)pkg_malloc( columns*sizeof(strn) );
	if(!temp_row) {
		LM_ERR("no private memory left\n");
		return E_OUT_OF_MEM;
	}
	while (SQL_SUCCEEDED(ret = SQLFetch(CON_RESULT(_h)))) {
		for(i=0; i < columns; i++) {
			SQLLEN indicator;
			ret = SQLGetData(CON_RESULT(_h), i+1, SQL_C_CHAR,
				temp_row[i].s, STRN_LEN, &indicator);
			if (SQL_SUCCEEDED(ret)) {
				if (indicator == SQL_NULL_DATA)
					strcpy(temp_row[i].s, "NULL");
			} else {
				LM_ERR("SQLGetData failed\n");
			}
		}
		rows = db_unixodbc_dup_row(temp_row, row_n, columns);
		if (!rows) {
			LM_ERR("no more pkg mem\n");
			/* FIX: release the fetch buffer (previously leaked here) */
			pkg_free(temp_row);
			return E_OUT_OF_MEM;
		}
		row_n++;
	}
	/* free temporary row data */
	pkg_free(temp_row);
	CON_ROW(_h) = NULL;

	RES_ROW_N(_r) = row_n;
	if (row_n == 0) {
		RES_ROWS(_r) = NULL;
		return 0;
	}
	if (db_allocate_rows(_r, row_n) != 0) {
		LM_ERR("no private memory left\n");
		return E_OUT_OF_MEM;
	}
	for (i = 0; i < row_n; i++) {
		/* each row occupies 'columns' consecutive entries in 'rows' */
		if (db_unixodbc_convert_row(&rows[i * columns], _r,
				&RES_ROWS(_r)[i]) < 0) {
			LM_ERR("converting row failed #%d\n", i);
			RES_ROW_N(_r) = 0;
			db_free_rows(_r);
			return -4;
		}
	}
	return 0;
}
/**
 * Convert rows from PostgreSQL to db API representation.
 *
 * Each column value is copied out of the PGresult into a pkg buffer,
 * converted via db_postgres_convert_row(), and then selectively freed:
 * STRING/STR values keep their buffers (their pointers were saved into
 * the db_val_t and are freed later by pg_free_row()), all other types
 * are freed immediately after conversion.
 */
int db_postgres_convert_rows(const db_con_t* _h, db_res_t* _r)
{
	char **row_buf, *s;
	int row, col, len;

	if (!_h || !_r) {
		LM_ERR("invalid parameter\n");
		return -1;
	}
	if (!RES_ROW_N(_r)) {
		LM_DBG("no rows returned from the query\n");
		RES_ROWS(_r) = 0;
		return 0;
	}
	/* Allocate an array of pointers per column to holds the string
	 * representation */
	len = sizeof(char *) * RES_COL_N(_r);
	row_buf = (char**)pkg_malloc(len);
	if (!row_buf) {
		LM_ERR("no private memory left\n");
		return -1;
	}
	LM_DBG("allocate for %d columns %d bytes in row buffer at %p\n",
		RES_COL_N(_r), len, row_buf);
	memset(row_buf, 0, len);

	if (db_allocate_rows( _r, RES_ROW_N(_r))!=0) {
		LM_ERR("no private memory left\n");
		return -2;
	}

	for(row=RES_LAST_ROW(_r); row<(RES_LAST_ROW(_r)+RES_ROW_N(_r)) ; row++) {
		for(col = 0; col < RES_COL_N(_r); col++) {
			/*
			 * The row data pointer returned by PQgetvalue points to
			 * storage that is part of the PGresult structure. One should
			 * not modify the data it points to, and one must explicitly
			 * copy the data into other storage if it is to be used past
			 * the lifetime of the PGresult structure itself.
			 */
			/*
			 * There's a weird bug (or just weird behavior) in the postgres
			 * API - if the result is a BLOB (like 'text') and is with
			 * zero length, we get a pointer to nowhere, which is not
			 * null-terminated. The fix for this is to check what does the
			 * DB think about the length and use that as a correction.
			 */
			if (PQgetisnull(CON_RESULT(_h), row, col) == 0) {
				/* not null value */
				if ( (len=PQgetlength(CON_RESULT(_h), row, col))==0 ) {
					s="";
					LM_DBG("PQgetvalue(%p,%d,%d)=[], zero len\n", _h,
						row,col);
				} else {
					s = PQgetvalue(CON_RESULT(_h), row, col);
					LM_DBG("PQgetvalue(%p,%d,%d)=[%.*s]\n", _h,
						row,col,len,s);
				}
				/* NOTE(review): on allocation failure below, previously
				 * allocated row_buf entries and row_buf itself are
				 * leaked - confirm whether this is acceptable on OOM */
				row_buf[col] = pkg_malloc(len+1);
				if (!row_buf[col]) {
					LM_ERR("no private memory left\n");
					return -1;
				}
				memset(row_buf[col], 0, len+1);
				LM_DBG("allocated %d bytes for row_buf[%d] at %p\n",
					len, col, row_buf[col]);
				strncpy(row_buf[col], s, len);
				LM_DBG("[%d][%d] Column[%.*s]=[%s]\n",
					row, col, RES_NAMES(_r)[col]->len,
					RES_NAMES(_r)[col]->s, row_buf[col]);
			}
		}

		/* ASSERT: row_buf contains an entire row in strings */
		if(db_postgres_convert_row(_h, _r,
				&(RES_ROWS(_r)[row - RES_LAST_ROW(_r)]), row_buf)<0){
			LM_ERR("failed to convert row #%d\n", row);
			RES_ROW_N(_r) = row - RES_LAST_ROW(_r);
			for (col = 0; col < RES_COL_N(_r); col++) {
				LM_DBG("freeing row_buf[%d] at %p\n", col, row_buf[col]);
				/* NOTE(review): only buffers whose first byte is NUL
				 * (empty strings) are freed here; non-empty buffers not
				 * consumed by the partial conversion appear to leak -
				 * confirm against the pg_convert_row ownership rules */
				if (row_buf[col] && !row_buf[col][0])
					pkg_free(row_buf[col]);
			}
			LM_DBG("freeing row buffer at %p\n", row_buf);
			pkg_free(row_buf);
			return -4;
		}
		/*
		 * pkg_free() must be done for the above allocations now that the
		 * row has been converted. During pg_convert_row (and subsequent
		 * pg_str2val) processing, data types that don't need to be
		 * converted (namely STRINGS and STR) have their addresses saved.
		 * These data types should not have their pkg_malloc() allocations
		 * freed here because they are still needed. However, some data
		 * types (ex: INT, DOUBLE) should have their pkg_malloc()
		 * allocations freed because during the conversion process, their
		 * converted values are saved in the union portion of the db_val_t
		 * structure. BLOB will be copied during PQunescape in str2val,
		 * thus it has to be freed here AND in pg_free_row().
		 *
		 * Warning: when the converted row is no longer needed, the data
		 * types whose addresses were saved in the db_val_t structure must
		 * be freed or a memory leak will happen. This processing should
		 * happen in the pg_free_row() subroutine. The caller of this
		 * routine should ensure that pg_free_rows(), pg_free_row() or
		 * pg_free_result() is eventually called.
		 */
		for (col = 0; col < RES_COL_N(_r); col++) {
			switch (RES_TYPES(_r)[col]) {
				case DB_STRING:
				case DB_STR:
					break;
				default:
					LM_DBG("freeing row_buf[%d] at %p\n", col,
						row_buf[col]);
					if (row_buf[col]) pkg_free(row_buf[col]);
			}
			/*
			 * The following housekeeping may not be technically required,
			 * but it is a good practice to NULL pointer fields that are
			 * no longer valid. Note that DB_STRING fields have not been
			 * pkg_free(). NULLing DB_STRING fields would normally not be
			 * good to do because a memory leak would occur. However, the
			 * pg_convert_row() routine has saved the DB_STRING pointer in
			 * the db_val_t structure. The db_val_t structure will
			 * eventually be used to pkg_free() the DB_STRING storage.
			 */
			row_buf[col] = (char *)NULL;
		}
	}

	LM_DBG("freeing row buffer at %p\n", row_buf);
	pkg_free(row_buf);
	row_buf = NULL;
	return 0;
}
/*!
 * \brief Convert rows from PostgreSQL to db API representation
 * \param _h database connection
 * \param _r result set
 * \return 0 on success, negative on error
 *
 * Unlike drivers that copy values, this variant points row_buf entries
 * directly into the PGresult-owned storage; NULL columns are left as
 * NULL pointers so later processing can tell them apart from empty
 * strings (mysql-like behaviour expected downstream).
 */
int db_postgres_convert_rows(const db1_con_t* _h, db1_res_t* _r)
{
	char **row_buf, *val;
	int r, c, bufsz;

	if (!_h || !_r) {
		LM_ERR("invalid parameter\n");
		return -1;
	}
	if (!RES_ROW_N(_r)) {
		LM_DBG("no rows returned from the query\n");
		RES_ROWS(_r) = 0;
		return 0;
	}

	/* one string pointer per column, reused for every row */
	bufsz = sizeof(char *) * RES_COL_N(_r);
	row_buf = (char**)pkg_malloc(bufsz);
	if (!row_buf) {
		LM_ERR("no private memory left\n");
		return -1;
	}
	LM_DBG("allocate for %d columns %d bytes in row buffer at %p\n",
		RES_COL_N(_r), bufsz, row_buf);

	if (db_allocate_rows(_r) < 0) {
		LM_ERR("could not allocate rows\n");
		LM_DBG("freeing row buffer at %p\n", row_buf);
		pkg_free(row_buf);
		return -2;
	}

	for (r = RES_LAST_ROW(_r); r < (RES_LAST_ROW(_r) + RES_ROW_N(_r)); r++) {
		/* reset row buf content */
		memset(row_buf, 0, bufsz);

		for (c = 0; c < RES_COL_N(_r); c++) {
			/* pointer into PGresult-owned storage: read-only, and only
			 * valid for the lifetime of the PGresult itself */
			val = PQgetvalue(CON_RESULT(_h), r, c);
			LM_DBG("PQgetvalue(%p,%d,%d)=[%s]\n", _h, r, c, val);

			/* an empty string may be NULL or a genuine empty string;
			 * keep NULL columns as NULL pointers */
			if (PQgetisnull(CON_RESULT(_h), r, c) == 0) {
				row_buf[c] = val;
				LM_DBG("[%d][%d] Column[%.*s]=[%s]\n", r, c,
					RES_NAMES(_r)[c]->len, RES_NAMES(_r)[c]->s,
					row_buf[c]);
			}
		}

		/* ASSERT: row_buf contains an entire row in strings */
		if (db_postgres_convert_row(_h, _r,
				&(RES_ROWS(_r)[r - RES_LAST_ROW(_r)]), row_buf) < 0) {
			LM_ERR("failed to convert row #%d\n", r);
			RES_ROW_N(_r) = r - RES_LAST_ROW(_r);
			LM_DBG("freeing row buffer at %p\n", row_buf);
			pkg_free(row_buf);
			db_free_rows(_r);
			return -4;
		}
	}

	LM_DBG("freeing row buffer at %p\n", row_buf);
	pkg_free(row_buf);
	row_buf = NULL;
	return 0;
}