/** Xlat to make calls to redis
 *
 */
static ssize_t redis_xlat(UNUSED TALLOC_CTX *ctx, char **out, size_t outlen,
			  void const *mod_inst, UNUSED void const *xlat_inst,
			  REQUEST *request, char const *fmt)
{
	rlm_redis_t const		*inst = mod_inst;
	fr_redis_conn_t			*conn;

	bool				read_only = false;
	uint8_t const			*key = NULL;
	size_t				key_len = 0;

	fr_redis_cluster_state_t	state;
	fr_redis_rcode_t		status;
	redisReply			*reply = NULL;
	int				s_ret;

	size_t				len;
	int				ret;

	char const			*p = fmt, *q;

	int				argc;
	char const			*argv[MAX_REDIS_ARGS];
	char				argv_buf[MAX_REDIS_COMMAND_LEN];

	if (p[0] == '-') {
		p++;
		read_only = true;
	}

	/*
	 *	Hack to allow querying against a specific node for testing
	 */
	if (p[0] == '@') {
		fr_socket_addr_t	node_addr;
		fr_pool_t		*pool;

		RDEBUG3("Overriding node selection");

		p++;
		q = strchr(p, ' ');
		if (!q) {
			REDEBUG("Found node specifier but no command, format is [-][@<host>[:port]] <redis command>");
			return -1;
		}

		if (fr_inet_pton_port(&node_addr.ipaddr, &node_addr.port, p, q - p, AF_UNSPEC, true, true) < 0) {
			RPEDEBUG("Failed parsing node address");
			return -1;
		}

		p = q + 1;

		if (fr_redis_cluster_pool_by_node_addr(&pool, inst->cluster, &node_addr, true) < 0) {
			RPEDEBUG("Failed locating cluster node");
			return -1;
		}

		conn = fr_pool_connection_get(pool, request);
		if (!conn) {
			REDEBUG("No connections available for cluster node");
			return -1;
		}

		argc = rad_expand_xlat(request, p, MAX_REDIS_ARGS, argv, false, sizeof(argv_buf), argv_buf);
		if (argc <= 0) {
			RPEDEBUG("Invalid command: %s", p);
		arg_error:
			fr_pool_connection_release(pool, request, conn);
			return -1;
		}
		if (argc >= (MAX_REDIS_ARGS - 1)) {
			RPEDEBUG("Too many parameters; increase MAX_REDIS_ARGS and recompile: %s", p);
			goto arg_error;
		}

		RDEBUG2("Executing command: %s", argv[0]);
		if (argc > 1) {
			RDEBUG2("With arguments");
			RINDENT();
			for (int i = 1; i < argc; i++) RDEBUG2("[%i] %s", i, argv[i]);
			REXDENT();
		}

		if (!read_only) {
			reply = redisCommandArgv(conn->handle, argc, argv, NULL);
			status = fr_redis_command_status(conn, reply);
		} else if (redis_command_read_only(&status, &reply, request, conn, argc, argv) == -2) {
			goto close_conn;
		}

		if (!reply) goto fail;

		switch (status) {
		case REDIS_RCODE_SUCCESS:
			goto reply_parse;

		case REDIS_RCODE_RECONNECT:
		close_conn:
			fr_pool_connection_close(pool, request, conn);
			ret = -1;
			goto finish;

		default:
		fail:
			fr_pool_connection_release(pool, request, conn);
			ret = -1;
			goto finish;
		}
	}

	/*
	 *	Normal node selection and execution based on key
	 */
	argc = rad_expand_xlat(request, p, MAX_REDIS_ARGS, argv, false, sizeof(argv_buf), argv_buf);
	if (argc <= 0) {
		RPEDEBUG("Invalid command: %s", p);
		ret = -1;
		goto finish;
	}

	if (argc >= (MAX_REDIS_ARGS - 1)) {
		RPEDEBUG("Too many parameters; increase MAX_REDIS_ARGS and recompile: %s", p);
		ret = -1;
		goto finish;
	}

	/*
	 *	If we've got multiple arguments, the second one is usually the key.
	 *	The Redis docs say commands should be analysed first to get key
	 *	positions, but this involves sending them to the server, which is
	 *	just as expensive as sending them to the wrong server and receiving
	 *	a redirect.
	 */
	if (argc > 1) {
		key = (uint8_t const *)argv[1];
		key_len = strlen((char const *)key);
	}

	for (s_ret = fr_redis_cluster_state_init(&state, &conn, inst->cluster, request, key, key_len, read_only);
	     s_ret == REDIS_RCODE_TRY_AGAIN;	/* Continue */
	     s_ret = fr_redis_cluster_state_next(&state, &conn, inst->cluster, request, status, &reply)) {
		RDEBUG2("Executing command: %s", argv[0]);
		if (argc > 1) {
			RDEBUG2("With arguments");
			RINDENT();
			for (int i = 1; i < argc; i++) RDEBUG2("[%i] %s", i, argv[i]);
			REXDENT();
		}

		if (!read_only) {
			reply = redisCommandArgv(conn->handle, argc, argv, NULL);
			status = fr_redis_command_status(conn, reply);
		} else if (redis_command_read_only(&status, &reply, request, conn, argc, argv) == -2) {
			state.close_conn = true;
		}
	}
	if (s_ret != REDIS_RCODE_SUCCESS) {
		ret = -1;
		goto finish;
	}
	if (!fr_cond_assert(reply)) {
		ret = -1;
		goto finish;
	}

reply_parse:
	switch (reply->type) {
	case REDIS_REPLY_INTEGER:
		ret = snprintf(*out, outlen, "%lld", reply->integer);
		break;

	case REDIS_REPLY_STATUS:
	case REDIS_REPLY_STRING:
		len = (((size_t)reply->len) >= outlen) ? outlen - 1 : (size_t)reply->len;
		memcpy(*out, reply->str, len);
		(*out)[len] = '\0';
		ret = reply->len;
		break;

	default:
		REDEBUG("Server returned non-value type \"%s\"",
			fr_int2str(redis_reply_types, reply->type, "<UNKNOWN>"));
		ret = -1;
		break;
	}

finish:
	fr_redis_reply_free(reply);

	return ret;
}
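/*
 *	Usage sketch (illustrative, not part of the module): based on the parsing at the
 *	top of redis_xlat(), the expansion accepts an optional '-' (read-only) prefix and
 *	an optional '@<host>[:port]' node override before the command.  Assuming the xlat
 *	is registered under the instance name "redis", unlang invocations would look like:
 *
 *		%{redis:GET user:%{User-Name}}		key-based cluster node selection
 *		%{redis:-GET user:%{User-Name}}		read-only, may be served by a slave
 *		%{redis:@127.0.0.1:30001 PING}		force execution on a specific node
 */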
/** Insert a new entry into the data store
 *
 * @copydetails cache_entry_insert_t
 */
static cache_status_t cache_entry_insert(UNUSED rlm_cache_config_t const *config, void *driver_inst,
					 REQUEST *request, UNUSED void *handle, const rlm_cache_entry_t *c)
{
	rlm_cache_redis_t		*driver = driver_inst;
	TALLOC_CTX			*pool;

	vp_map_t			*map;

	fr_redis_conn_t			*conn;
	fr_redis_cluster_state_t	state;
	fr_redis_rcode_t		status;
	redisReply			*reply = NULL;
	int				s_ret;

	static char const		command[] = "RPUSH";
	char const			**argv;
	size_t				*argv_len;
	char const			**argv_p;
	size_t				*argv_len_p;

	int				pipelined = 0;	/* How many commands pending in the pipeline */
	redisReply			*replies[5];	/* Should have the same number of elements as pipelined commands */
	size_t				reply_num = 0, i;

	char				*p;
	int				cnt;

	vp_tmpl_t			expires_value;
	vp_map_t			expires = {
						.op	= T_OP_SET,
						.lhs	= &driver->expires_attr,
						.rhs	= &expires_value,
					};

	vp_tmpl_t			created_value;
	vp_map_t			created = {
						.op	= T_OP_SET,
						.lhs	= &driver->created_attr,
						.rhs	= &created_value,
						.next	= &expires
					};

	/*
	 *	Encode the entry created date
	 */
	tmpl_init(&created_value, TMPL_TYPE_DATA, "<TEMP>", 6, T_BARE_WORD);
	created_value.tmpl_data_type = PW_TYPE_DATE;
	created_value.tmpl_data_length = sizeof(created_value.tmpl_data_value.date);
	created_value.tmpl_data_value.date = c->created;

	/*
	 *	Encode the entry expiry time
	 *
	 *	Although Redis objects expire on their own, we still need this
	 *	to ignore entries that were created before the last epoch.
	 */
	tmpl_init(&expires_value, TMPL_TYPE_DATA, "<TEMP>", 6, T_BARE_WORD);
	expires_value.tmpl_data_type = PW_TYPE_DATE;
	expires_value.tmpl_data_length = sizeof(expires_value.tmpl_data_value.date);
	expires_value.tmpl_data_value.date = c->expires;

	expires.next = c->maps;	/* Head of the list */

	for (cnt = 0, map = &created; map; cnt++, map = map->next);

	/*
	 *	The majority of serialized entries should be under 1k.
	 *
	 *	@todo We should really calculate this using some sort of moving average.
	 */
	pool = talloc_pool(request, 1024);
	if (!pool) return CACHE_ERROR;

	argv_p = argv = talloc_array(pool, char const *, (cnt * 3) + 2);	/* pair = 3 + cmd + key */
	argv_len_p = argv_len = talloc_array(pool, size_t, (cnt * 3) + 2);	/* pair = 3 + cmd + key */

	*argv_p++ = command;
	*argv_len_p++ = sizeof(command) - 1;

	*argv_p++ = (char const *)c->key;
	*argv_len_p++ = c->key_len;

	/*
	 *	Add the maps to the command string in reverse order
	 */
	for (map = &created; map; map = map->next) {
		if (fr_redis_tuple_from_map(pool, argv_p, argv_len_p, map) < 0) {
			REDEBUG("Failed encoding map as Redis K/V pair");
			talloc_free(pool);
			return CACHE_ERROR;
		}
		argv_p += 3;
		argv_len_p += 3;
	}

	RDEBUG3("Pipelining commands");
	RINDENT();

	for (s_ret = fr_redis_cluster_state_init(&state, &conn, driver->cluster, request, c->key, c->key_len, false);
	     s_ret == REDIS_RCODE_TRY_AGAIN;	/* Continue */
	     s_ret = fr_redis_cluster_state_next(&state, &conn, driver->cluster, request, status, &reply)) {
		/*
		 *	Start the transaction, as we need to set an expiry time too.
		 */
		if (c->expires > 0) {
			RDEBUG3("MULTI");
			if (redisAppendCommand(conn->handle, "MULTI") != REDIS_OK) {
			append_error:
				REXDENT();
				RERROR("Failed appending Redis command to output buffer: %s", conn->handle->errstr);
				talloc_free(pool);
				return CACHE_ERROR;
			}
			pipelined++;
		}

		if (RDEBUG_ENABLED3) {
			p = fr_asprint(request, (char const *)c->key, c->key_len, '\0');
			RDEBUG3("DEL \"%s\"", p);
			talloc_free(p);
		}

		if (redisAppendCommand(conn->handle, "DEL %b", c->key, c->key_len) != REDIS_OK) goto append_error;
		pipelined++;

		if (RDEBUG_ENABLED3) {
			RDEBUG3("argv command");
			RINDENT();
			for (i = 0; i < talloc_array_length(argv); i++) {
				p = fr_asprint(request, argv[i], argv_len[i], '\0');
				RDEBUG3("%s", p);
				talloc_free(p);
			}
			REXDENT();
		}
		redisAppendCommandArgv(conn->handle, talloc_array_length(argv), argv, argv_len);
		pipelined++;

		/*
		 *	Set the expiry time and close out the transaction.
		 */
		if (c->expires > 0) {
			if (RDEBUG_ENABLED3) {
				p = fr_asprint(request, (char const *)c->key, c->key_len, '\"');
				RDEBUG3("EXPIREAT \"%s\" %li", p, (long)c->expires);
				talloc_free(p);
			}
			if (redisAppendCommand(conn->handle, "EXPIREAT %b %i", c->key, c->key_len, c->expires) != REDIS_OK) goto append_error;
			pipelined++;

			RDEBUG3("EXEC");
			if (redisAppendCommand(conn->handle, "EXEC") != REDIS_OK) goto append_error;
			pipelined++;
		}
		REXDENT();

		reply_num = fr_redis_pipeline_result(&status, replies, sizeof(replies) / sizeof(*replies),
						     conn, pipelined);
		reply = replies[0];
	}
	talloc_free(pool);

	if (s_ret != REDIS_RCODE_SUCCESS) {
		RERROR("Failed inserting entry");
		return CACHE_ERROR;
	}

	RDEBUG3("Command results");
	RINDENT();
	for (i = 0; i < reply_num; i++) {
		fr_redis_reply_print(L_DBG_LVL_3, replies[i], request, i);
		fr_redis_reply_free(replies[i]);
	}
	REXDENT();

	return CACHE_OK;
}

/** Delete a cache entry from redis
 *
 * @copydetails cache_entry_expire_t
 */
static cache_status_t cache_entry_expire(UNUSED rlm_cache_config_t const *config, void *driver_inst,
					 REQUEST *request, UNUSED void *handle,
					 uint8_t const *key, size_t key_len)
{
	rlm_cache_redis_t		*driver = driver_inst;
	fr_redis_cluster_state_t	state;
	fr_redis_conn_t			*conn;
	fr_redis_rcode_t		status;
	redisReply			*reply = NULL;
	int				s_ret;

	for (s_ret = fr_redis_cluster_state_init(&state, &conn, driver->cluster, request, key, key_len, false);
	     s_ret == REDIS_RCODE_TRY_AGAIN;	/* Continue */
	     s_ret = fr_redis_cluster_state_next(&state, &conn, driver->cluster, request, status, &reply)) {
		reply = redisCommand(conn->handle, "DEL %b", key, key_len);
		status = fr_redis_command_status(conn, reply);
	}
	if (s_ret != REDIS_RCODE_SUCCESS) {
		RERROR("Failed expiring entry");
		fr_redis_reply_free(reply);
		return CACHE_ERROR;
	}
	rad_assert(reply);	/* clang scan */

	if (reply->type == REDIS_REPLY_INTEGER) {
		cache_status_t ret;

		ret = reply->integer ? CACHE_OK : CACHE_MISS;	/* Affected */
		fr_redis_reply_free(reply);
		return ret;
	}

	REDEBUG("Bad result type, expected integer, got %s",
		fr_int2str(redis_reply_types, reply->type, "<UNKNOWN>"));
	fr_redis_reply_free(reply);

	return CACHE_ERROR;
}

extern cache_driver_t rlm_cache_redis;
cache_driver_t rlm_cache_redis = {
	.name		= "rlm_cache_redis",
	.instantiate	= mod_instantiate,
	.inst_size	= sizeof(rlm_cache_redis_t),
	.free		= cache_entry_free,
	.find		= cache_entry_find,
	.insert		= cache_entry_insert,
	.expire		= cache_entry_expire,
};
/** Locate a cache entry in redis
 *
 * @copydetails cache_entry_find_t
 */
static cache_status_t cache_entry_find(rlm_cache_entry_t **out, UNUSED rlm_cache_config_t const *config,
				       void *driver_inst, REQUEST *request, UNUSED void *handle,
				       uint8_t const *key, size_t key_len)
{
	rlm_cache_redis_t		*driver = driver_inst;
	size_t				i;

	fr_redis_cluster_state_t	state;
	fr_redis_conn_t			*conn;
	fr_redis_rcode_t		status;
	redisReply			*reply = NULL;
	int				s_ret;

	vp_map_t			*head = NULL, **last = &head;
#ifdef HAVE_TALLOC_POOLED_OBJECT
	size_t				pool_size = 0;
#endif
	rlm_cache_entry_t		*c;

	for (s_ret = fr_redis_cluster_state_init(&state, &conn, driver->cluster, request, key, key_len, false);
	     s_ret == REDIS_RCODE_TRY_AGAIN;	/* Continue */
	     s_ret = fr_redis_cluster_state_next(&state, &conn, driver->cluster, request, status, &reply)) {
		/*
		 *	Grab all the data for this hash, should return an array
		 *	of alternating keys/values which we then convert into maps.
		 */
		if (RDEBUG_ENABLED3) {
			char *p;

			p = fr_asprint(NULL, (char const *)key, key_len, '"');
			RDEBUG3("LRANGE \"%s\" 0 -1", p);
			talloc_free(p);
		}
		reply = redisCommand(conn->handle, "LRANGE %b 0 -1", key, key_len);
		status = fr_redis_command_status(conn, reply);
	}
	if (s_ret != REDIS_RCODE_SUCCESS) {
		RERROR("Failed retrieving entry");
		fr_redis_reply_free(reply);
		return CACHE_ERROR;
	}
	rad_assert(reply);	/* clang scan */

	if (reply->type != REDIS_REPLY_ARRAY) {
		REDEBUG("Bad result type, expected array, got %s",
			fr_int2str(redis_reply_types, reply->type, "<UNKNOWN>"));
		fr_redis_reply_free(reply);
		return CACHE_ERROR;
	}

	RDEBUG3("Entry contains %zu elements", reply->elements);

	if (reply->elements == 0) {
		fr_redis_reply_free(reply);
		return CACHE_MISS;
	}

	if (reply->elements % 3) {
		REDEBUG("Invalid number of reply elements (%zu).  "
			"Reply must contain triplets of keys operators and values",
			reply->elements);
		fr_redis_reply_free(reply);
		return CACHE_ERROR;
	}

#ifdef HAVE_TALLOC_POOLED_OBJECT
	/*
	 *	We can get a pretty good idea of the required size of the pool
	 */
	for (i = 0; i < reply->elements; i += 3) {
		pool_size += sizeof(vp_map_t) + (sizeof(vp_tmpl_t) * 2);
		if (reply->element[i]->type == REDIS_REPLY_STRING) pool_size += reply->element[i]->len + 1;
	}

	/*
	 *	reply->elements gives us the number of chunks, as the maps are triplets, and there
	 *	are three chunks per map
	 */
	c = talloc_pooled_object(NULL, rlm_cache_entry_t, reply->elements, pool_size);
	memset(c, 0, sizeof(rlm_cache_entry_t));
#else
	c = talloc_zero(NULL, rlm_cache_entry_t);
#endif
	/*
	 *	Convert the key/value pairs back into maps
	 */
	for (i = 0; i < reply->elements; i += 3) {
		if (fr_redis_reply_to_map(c, last, request,
					  reply->element[i], reply->element[i + 1], reply->element[i + 2]) < 0) {
			talloc_free(c);
			fr_redis_reply_free(reply);
			return CACHE_ERROR;
		}
		last = &(*last)->next;
	}
	fr_redis_reply_free(reply);

	/*
	 *	Pull out the cache created date
	 */
	if ((head->lhs->tmpl_da->vendor == 0) && (head->lhs->tmpl_da->attr == PW_CACHE_CREATED)) {
		vp_map_t *map;

		c->created = head->rhs->tmpl_data_value.date;

		map = head;
		head = head->next;
		talloc_free(map);
	}

	/*
	 *	Pull out the cache expires date
	 */
	if ((head->lhs->tmpl_da->vendor == 0) && (head->lhs->tmpl_da->attr == PW_CACHE_EXPIRES)) {
		vp_map_t *map;

		c->expires = head->rhs->tmpl_data_value.date;

		map = head;
		head = head->next;
		talloc_free(map);
	}

	c->key = talloc_memdup(c, key, key_len);
	c->key_len = key_len;
	c->maps = head;
	*out = c;

	return CACHE_OK;
}
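/*
 *	Storage layout sketch (derived from cache_entry_insert() and cache_entry_find() above;
 *	attribute names and operator rendering are illustrative): each entry is a Redis list
 *	built with a single RPUSH, containing one <lhs> <op> <rhs> triplet per map, with the
 *	Cache-Created and Cache-Expires dates encoded as the first two triplets, e.g.:
 *
 *		RPUSH <key> "&control:Cache-Created" ":=" <date>
 *			    "&control:Cache-Expires" ":=" <date>
 *			    "&reply:Reply-Message" ":=" "Hello"
 *
 *	cache_entry_find() reads the whole list back with "LRANGE <key> 0 -1" and requires
 *	the element count to be a multiple of three.
 */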
/** Execute a script against Redis cluster
 *
 * Handles uploading the script to the server if required.
 *
 * @note All replies will be freed on error.
 *
 * @param[out] out		Where to write Redis reply object resulting from the command.
 * @param[in] request		The current request.
 * @param[in] cluster		configuration.
 * @param[in] key		to use to determine the cluster node.
 * @param[in] key_len		length of the key.
 * @param[in] wait_num		If > 0 wait until this many slaves have replicated the data
 *				from the last command.
 * @param[in] wait_timeout	How long to wait for slaves.
 * @param[in] digest		of script.
 * @param[in] script		to upload.
 * @param[in] cmd		EVALSHA command to execute.
 * @param[in] ...		Arguments for the eval command.
 * @return status of the command.
 */
static fr_redis_rcode_t ippool_script(redisReply **out, REQUEST *request, fr_redis_cluster_t *cluster,
				      uint8_t const *key, size_t key_len,
				      uint32_t wait_num, uint32_t wait_timeout,
				      char const digest[], char const *script,
				      char const *cmd, ...)
{
	fr_redis_conn_t			*conn;
	redisReply			*replies[5];	/* Must be equal to the maximum number of pipelined commands */
	size_t				reply_cnt = 0, i;

	fr_redis_cluster_state_t	state;
	fr_redis_rcode_t		s_ret, status;
	unsigned int			pipelined = 0;

	va_list				ap;

	*out = NULL;

#ifndef NDEBUG
	memset(replies, 0, sizeof(replies));
#endif

	va_start(ap, cmd);

	for (s_ret = fr_redis_cluster_state_init(&state, &conn, cluster, request, key, key_len, false);
	     s_ret == REDIS_RCODE_TRY_AGAIN;	/* Continue */
	     s_ret = fr_redis_cluster_state_next(&state, &conn, cluster, request, status, &replies[0])) {
		va_list	copy;

		RDEBUG3("Calling script 0x%s", digest);
		va_copy(copy, ap);	/* copy or segv */
		redisvAppendCommand(conn->handle, cmd, copy);
		va_end(copy);
		pipelined = 1;
		if (wait_num) {
			redisAppendCommand(conn->handle, "WAIT %i %i", wait_num, wait_timeout);
			pipelined++;
		}
		reply_cnt = fr_redis_pipeline_result(&pipelined, &status,
						     replies, sizeof(replies) / sizeof(*replies),
						     conn);
		if (status != REDIS_RCODE_NO_SCRIPT) continue;

		/*
		 *	Clear out the existing reply
		 */
		fr_redis_pipeline_free(replies, reply_cnt);

		/*
		 *	Last command failed with NOSCRIPT, this means
		 *	we have to send the Lua script up to the node
		 *	so it can be cached.
		 */
		RDEBUG3("Loading script 0x%s", digest);
		redisAppendCommand(conn->handle, "MULTI");
		redisAppendCommand(conn->handle, "SCRIPT LOAD %s", script);
		va_copy(copy, ap);	/* copy or segv */
		redisvAppendCommand(conn->handle, cmd, copy);
		va_end(copy);
		redisAppendCommand(conn->handle, "EXEC");
		pipelined = 4;
		if (wait_num) {
			redisAppendCommand(conn->handle, "WAIT %i %i", wait_num, wait_timeout);
			pipelined++;
		}

		reply_cnt = fr_redis_pipeline_result(&pipelined, &status,
						     replies, sizeof(replies) / sizeof(*replies),
						     conn);
		if (status == REDIS_RCODE_SUCCESS) {
			if (RDEBUG_ENABLED3) for (i = 0; i < reply_cnt; i++) {
				fr_redis_reply_print(L_DBG_LVL_3, replies[i], request, i);
			}

			if (replies[3]->type != REDIS_REPLY_ARRAY) {
				REDEBUG("Bad response to EXEC, expected array got %s",
					fr_int2str(redis_reply_types, replies[3]->type, "<UNKNOWN>"));
			error:
				fr_redis_pipeline_free(replies, reply_cnt);
				status = REDIS_RCODE_ERROR;
				goto finish;
			}
			if (replies[3]->elements != 2) {
				REDEBUG("Bad response to EXEC, expected 2 result elements, got %zu",
					replies[3]->elements);
				goto error;
			}
			if (replies[3]->element[0]->type != REDIS_REPLY_STRING) {
				REDEBUG("Bad response to SCRIPT LOAD, expected string got %s",
					fr_int2str(redis_reply_types, replies[3]->element[0]->type, "<UNKNOWN>"));
				goto error;
			}
			if (strcmp(replies[3]->element[0]->str, digest) != 0) {
				RWDEBUG("Incorrect SHA1 from SCRIPT LOAD, expected %s, got %s",
					digest, replies[3]->element[0]->str);
				goto error;
			}
		}
	}
	if (s_ret != REDIS_RCODE_SUCCESS) goto error;

	switch (reply_cnt) {
	case 2:	/* EVALSHA with wait */
		if (ippool_wait_check(request, wait_num, replies[1]) < 0) goto error;
		fr_redis_reply_free(&replies[1]);	/* Free the wait response */
		/* FALL-THROUGH */

	case 1:	/* EVALSHA */
		*out = replies[0];
		break;

	case 5:	/* LOADSCRIPT + EVALSHA + WAIT */
		if (ippool_wait_check(request, wait_num, replies[4]) < 0) goto error;
		fr_redis_reply_free(&replies[4]);	/* Free the wait response */
		/* FALL-THROUGH */

	case 4:	/* LOADSCRIPT + EVALSHA */
		fr_redis_reply_free(&replies[2]);	/* Free the queued cmd response */
		fr_redis_reply_free(&replies[1]);	/* Free the queued script load response */
		fr_redis_reply_free(&replies[0]);	/* Free the queued multi response */

		*out = replies[3]->element[1];
		replies[3]->element[1] = NULL;		/* Prevent double free */
		fr_redis_reply_free(&replies[3]);	/* This works because hiredis checks for NULL elements */
		break;

	case 0:
		break;
	}

finish:
	va_end(ap);

	return status;
}
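/*
 *	Call sketch (argument names are illustrative, not taken from this file): a caller
 *	passes a precomputed SHA1 digest together with the script body, so the EVALSHA can
 *	be retried transparently after a SCRIPT LOAD if the node replies NOSCRIPT:
 *
 *		status = ippool_script(&reply, request, inst->cluster,
 *				       key, key_len,
 *				       inst->wait_num, inst->wait_timeout,
 *				       lua_alloc_digest, lua_alloc,
 *				       "EVALSHA %s 1 %b", lua_alloc_digest, key, key_len);
 */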