static char *decode_bitstring_list(DICT_ERLANG *dict_erlang, const char *key,
                                   ei_x_buff *eip, int *index)
{
    int i;
    int arity;
    static VSTRING *result;

    /*
     * Decode the list header; an alias must expand to at least one
     * destination.
     */
    arity = decode_list(eip, index);
    if (arity < 0)
        return NULL;
    if (arity == 0) {
        msg_warn("found alias with no destinations");
        return NULL;
    }

#define INIT_VSTR(buf, len) \
    do { \
        if (buf == 0) \
            buf = vstring_alloc(len); \
        VSTRING_RESET(buf); \
        VSTRING_TERMINATE(buf); \
    } while (0)

    INIT_VSTR(result, 10);

    /*
     * Expand each bitstring element into the shared result buffer.
     */
    for (i = 0; i < arity; i++) {
        char *s = decode_bitstring(eip, index);

        if (s == NULL)
            return NULL;
        db_common_expand(dict_erlang->ctx, "%s", s, key, result, NULL);
        myfree(s);
    }
    return vstring_str(result);
}
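/*
 * The INIT_VSTR() idiom above recurs in every lookup routine below: the
 * result buffer is a static VSTRING that is allocated lazily on first use,
 * then reset and re-terminated on each call, so the string handed back to
 * the caller stays valid until the next lookup. A minimal sketch of the
 * same pattern, using only the vstring(3) calls already seen above; the
 * helper name copy_value() is hypothetical and for illustration only.
 */
static const char *copy_value(const char *value)
{
    static VSTRING *buf;                /* survives across calls */

    if (buf == 0)                       /* lazy, one-time allocation */
        buf = vstring_alloc(10);
    VSTRING_RESET(buf);                 /* forget the previous content */
    VSTRING_TERMINATE(buf);             /* keep the buffer null-terminated */
    vstring_strcpy(buf, value);         /* overwrite with the new value */
    return (vstring_str(buf));          /* valid until the next call */
}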
static const char *dict_sqlite_lookup(DICT *dict, const char *name)
{
    const char *myname = "dict_sqlite_lookup";
    DICT_SQLITE *dict_sqlite = (DICT_SQLITE *) dict;
    sqlite3_stmt *sql_stmt;
    const char *query_remainder;
    static VSTRING *query;
    static VSTRING *result;
    const char *retval;
    int expansion = 0;
    int status;
    int domain_rc;

    /*
     * In case of return without lookup (skipped key, etc.).
     */
    dict->error = 0;

    /*
     * Don't frustrate future attempts to make Postfix UTF-8 transparent.
     */
    if (!valid_utf_8(name, strlen(name))) {
        if (msg_verbose)
            msg_info("%s: %s: Skipping lookup of non-UTF-8 key '%s'",
                     myname, dict_sqlite->parser->name, name);
        return (0);
    }

    /*
     * Optionally fold the key. Folding may be enabled on the fly.
     */
    if (dict->flags & DICT_FLAG_FOLD_FIX) {
        if (dict->fold_buf == 0)
            dict->fold_buf = vstring_alloc(100);
        vstring_strcpy(dict->fold_buf, name);
        name = lowercase(vstring_str(dict->fold_buf));
    }

    /*
     * Apply the optional domain filter for email address lookups.
     */
    if ((domain_rc = db_common_check_domain(dict_sqlite->ctx, name)) == 0) {
        if (msg_verbose)
            msg_info("%s: %s: Skipping lookup of '%s'",
                     myname, dict_sqlite->parser->name, name);
        return (0);
    }
    if (domain_rc < 0)
        DICT_ERR_VAL_RETURN(dict, domain_rc, (char *) 0);

    /*
     * Expand the query and query the database.
     */
#define INIT_VSTR(buf, len) do { \
        if (buf == 0) \
            buf = vstring_alloc(len); \
        VSTRING_RESET(buf); \
        VSTRING_TERMINATE(buf); \
    } while (0)

    INIT_VSTR(query, 10);

    if (!db_common_expand(dict_sqlite->ctx, dict_sqlite->query,
                          name, 0, query, dict_sqlite_quote))
        return (0);
    if (msg_verbose)
        msg_info("%s: %s: Searching with query %s",
                 myname, dict_sqlite->parser->name, vstring_str(query));

    if (sqlite3_prepare_v2(dict_sqlite->db, vstring_str(query), -1,
                           &sql_stmt, &query_remainder) != SQLITE_OK)
        msg_fatal("%s: %s: SQL prepare failed: %s",
                  myname, dict_sqlite->parser->name,
                  sqlite3_errmsg(dict_sqlite->db));

    if (*query_remainder && msg_verbose)
        msg_info("%s: %s: Ignoring text at end of query: %s",
                 myname, dict_sqlite->parser->name, query_remainder);

    /*
     * Retrieve and expand the result(s).
     */
    INIT_VSTR(result, 10);
    while ((status = sqlite3_step(sql_stmt)) != SQLITE_DONE) {
        if (status == SQLITE_ROW) {
            if (db_common_expand(dict_sqlite->ctx, dict_sqlite->result_format,
                                 (char *) sqlite3_column_text(sql_stmt, 0),
                                 name, result, 0)
                && dict_sqlite->expansion_limit > 0
                && ++expansion > dict_sqlite->expansion_limit) {
                msg_warn("%s: %s: Expansion limit exceeded for key '%s'",
                         myname, dict_sqlite->parser->name, name);
                dict->error = DICT_ERR_RETRY;
                break;
            }
        }
        /* Fix 20100616 */
        else {
            msg_warn("%s: %s: SQL step failed for query '%s': %s",
                     myname, dict_sqlite->parser->name,
                     vstring_str(query), sqlite3_errmsg(dict_sqlite->db));
            dict->error = DICT_ERR_RETRY;
            break;
        }
    }

    /*
     * Clean up.
     */
    if (sqlite3_finalize(sql_stmt))
        msg_fatal("%s: %s: SQL finalize failed for query '%s': %s",
                  myname, dict_sqlite->parser->name,
                  vstring_str(query), sqlite3_errmsg(dict_sqlite->db));

    return ((dict->error == 0 && *(retval = vstring_str(result)) != 0) ?
            retval : 0);
}
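/*
 * A lookup method such as dict_sqlite_lookup() is normally reached through
 * the generic dict(3) interface rather than called directly. The sketch
 * below is a minimal illustration, assuming dict_open() and the lookup/close
 * method pointers from Postfix's dict(3) API; the map path and key are
 * hypothetical. Note how the caller must inspect dict->error to tell a
 * negative result (NULL with error == 0) from a soft error (DICT_ERR_RETRY).
 */
static void example_lookup(void)
{
    DICT *dict;
    const char *value;

    /* Hypothetical sqlite: map; DICT_FLAG_FOLD_FIX enables key folding. */
    dict = dict_open("sqlite:/etc/postfix/aliases.cf", O_RDONLY,
                     DICT_FLAG_FOLD_FIX);
    value = dict->lookup(dict, "user@example.com");
    if (value != 0)
        msg_info("found: %s", value);
    else if (dict->error == DICT_ERR_RETRY)
        msg_warn("temporary lookup error, try again later");
    else
        msg_info("not found");
    dict->close(dict);
}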
static const char *dict_mysql_lookup(DICT *dict, const char *name)
{
    const char *myname = "dict_mysql_lookup";
    DICT_MYSQL *dict_mysql = (DICT_MYSQL *) dict;
    MYSQL_RES *query_res;
    MYSQL_ROW row;
    static VSTRING *result;
    static VSTRING *query;
    int i;
    int j;
    int numrows;
    int expansion;
    const char *r;
    db_quote_callback_t quote_func = dict_mysql_quote;
    int domain_rc;

    dict->error = 0;

    /*
     * Optionally fold the key.
     */
    if (dict->flags & DICT_FLAG_FOLD_FIX) {
        if (dict->fold_buf == 0)
            dict->fold_buf = vstring_alloc(10);
        vstring_strcpy(dict->fold_buf, name);
        name = lowercase(vstring_str(dict->fold_buf));
    }

    /*
     * If there is a domain list for this map, then only search for
     * addresses in domains on the list. This can significantly reduce the
     * load on the server.
     */
    if ((domain_rc = db_common_check_domain(dict_mysql->ctx, name)) == 0) {
        if (msg_verbose)
            msg_info("%s: Skipping lookup of '%s'", myname, name);
        return (0);
    }
    if (domain_rc < 0)
        DICT_ERR_VAL_RETURN(dict, domain_rc, (char *) 0);

#define INIT_VSTR(buf, len) do { \
        if (buf == 0) \
            buf = vstring_alloc(len); \
        VSTRING_RESET(buf); \
        VSTRING_TERMINATE(buf); \
    } while (0)

    INIT_VSTR(query, 10);

    /*
     * Suppress the lookup if the query expansion is empty.
     *
     * This initial expansion is outside the context of any specific host
     * connection, we just want to check the key pre-requisites, so when
     * quoting happens separately for each connection, we don't bother with
     * quoting...
     */
#if defined(MYSQL_VERSION_ID) && MYSQL_VERSION_ID >= 40000
    quote_func = 0;
#endif
    if (!db_common_expand(dict_mysql->ctx, dict_mysql->query,
                          name, 0, query, quote_func))
        return (0);

    /* do the query - set dict->error & cleanup if there's an error */
    if ((query_res = plmysql_query(dict_mysql, name, query)) == 0) {
        dict->error = DICT_ERR_RETRY;
        return (0);
    }

    numrows = mysql_num_rows(query_res);
    if (msg_verbose)
        msg_info("%s: retrieved %d rows", myname, numrows);
    if (numrows == 0) {
        mysql_free_result(query_res);
        return 0;
    }
    INIT_VSTR(result, 10);

    for (expansion = i = 0; i < numrows && dict->error == 0; i++) {
        row = mysql_fetch_row(query_res);
        for (j = 0; j < mysql_num_fields(query_res); j++) {
            if (db_common_expand(dict_mysql->ctx, dict_mysql->result_format,
                                 row[j], name, result, 0)
                && dict_mysql->expansion_limit > 0
                && ++expansion > dict_mysql->expansion_limit) {
                msg_warn("%s: %s: Expansion limit exceeded for key: '%s'",
                         myname, dict_mysql->parser->name, name);
                dict->error = DICT_ERR_RETRY;
                break;
            }
        }
    }
    mysql_free_result(query_res);
    r = vstring_str(result);
    return ((dict->error == 0 && *r) ? r : 0);
}
static const char *dict_pgsql_lookup(DICT *dict, const char *name)
{
    const char *myname = "dict_pgsql_lookup";
    PGSQL_RES *query_res;
    DICT_PGSQL *dict_pgsql;
    PLPGSQL *pldb;
    static VSTRING *query;
    static VSTRING *result;
    int i;
    int j;
    int numrows;
    int numcols;
    int expansion;
    const char *r;
    int domain_rc;

    dict_pgsql = (DICT_PGSQL *) dict;
    pldb = dict_pgsql->pldb;

#define INIT_VSTR(buf, len) do { \
        if (buf == 0) \
            buf = vstring_alloc(len); \
        VSTRING_RESET(buf); \
        VSTRING_TERMINATE(buf); \
    } while (0)

    INIT_VSTR(query, 10);
    INIT_VSTR(result, 10);

    dict->error = 0;

    /*
     * Optionally fold the key.
     */
    if (dict->flags & DICT_FLAG_FOLD_FIX) {
        if (dict->fold_buf == 0)
            dict->fold_buf = vstring_alloc(10);
        vstring_strcpy(dict->fold_buf, name);
        name = lowercase(vstring_str(dict->fold_buf));
    }

    /*
     * If there is a domain list for this map, then only search for
     * addresses in domains on the list. This can significantly reduce the
     * load on the server.
     */
    if ((domain_rc = db_common_check_domain(dict_pgsql->ctx, name)) == 0) {
        if (msg_verbose)
            msg_info("%s: Skipping lookup of '%s'", myname, name);
        return (0);
    }
    if (domain_rc < 0)
        DICT_ERR_VAL_RETURN(dict, domain_rc, (char *) 0);

    /*
     * Suppress the actual lookup if the expansion is empty.
     *
     * This initial expansion is outside the context of any specific host
     * connection, we just want to check the key pre-requisites, so when
     * quoting happens separately for each connection, we don't bother with
     * quoting...
     */
    if (!db_common_expand(dict_pgsql->ctx, dict_pgsql->query,
                          name, 0, query, 0))
        return (0);

    /* do the query - set dict->error & cleanup if there's an error */
    if ((query_res = plpgsql_query(dict_pgsql, name, query,
                                   dict_pgsql->dbname,
                                   dict_pgsql->username,
                                   dict_pgsql->password)) == 0) {
        dict->error = DICT_ERR_RETRY;
        return 0;
    }

    numrows = PQntuples(query_res);
    if (msg_verbose)
        msg_info("%s: retrieved %d rows", myname, numrows);
    if (numrows == 0) {
        PQclear(query_res);
        return 0;
    }
    numcols = PQnfields(query_res);

    for (expansion = i = 0; i < numrows && dict->error == 0; i++) {
        for (j = 0; j < numcols; j++) {
            r = PQgetvalue(query_res, i, j);
            if (db_common_expand(dict_pgsql->ctx, dict_pgsql->result_format,
                                 r, name, result, 0)
                && dict_pgsql->expansion_limit > 0
                && ++expansion > dict_pgsql->expansion_limit) {
                msg_warn("%s: %s: Expansion limit exceeded for key: '%s'",
                         myname, dict_pgsql->parser->name, name);
                dict->error = DICT_ERR_RETRY;
                break;
            }
        }
    }
    PQclear(query_res);
    r = vstring_str(result);
    return ((dict->error == 0 && *r) ? r : 0);
}
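/*
 * The mysql and pgsql loops above share the same per-value step: expand one
 * column value into the shared result buffer, count it against the optional
 * expansion limit, and downgrade the lookup to a soft error once the limit
 * is exceeded. A condensed sketch of that step, assuming only the calls used
 * above; expand_one_value() and its parameter names are hypothetical and for
 * illustration only.
 */
static int expand_one_value(DICT *dict, void *ctx, const char *format,
                            const char *value, const char *key,
                            VSTRING *result, int limit, int *expansion)
{
    /* As used above, db_common_expand() is non-zero when it appended output. */
    if (db_common_expand(ctx, format, value, key, result, 0)
        && limit > 0 && ++(*expansion) > limit) {
        msg_warn("expansion limit exceeded for key: '%s'", key);
        dict->error = DICT_ERR_RETRY;   /* soft error: client may retry */
        return (-1);                    /* stop expanding further values */
    }
    return (0);                         /* keep going */
}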