/**
 * Free the HTTP message @m together with all skbs queued on it.
 *
 * NOTE(review): the previous version of this comment claimed the caller is
 * responsible for freeing @m->skb_list, but the loop below dequeues and
 * frees every skb itself — the comment was stale and has been corrected.
 */
void
tfw_http_msg_free(TfwHttpMsg *m)
{
	TFW_DBG("Free msg: %p\n", m);

	/* NULL is tolerated: the debug print above only logs the pointer. */
	if (!m)
		return;

	/* Detach the message from its connection to avoid a dangling ref. */
	if (m->conn && m->conn->msg == (TfwMsg *)m)
		m->conn->msg = NULL;

	while (1) {
		/*
		 * The skbs are passed to us by put_skb_to_msg() call,
		 * so we're responsible to free them.
		 */
		struct sk_buff *skb = ss_skb_dequeue(&m->msg.skb_list);
		if (!skb)
			break;
		TFW_DBG("free skb %p: truesize=%d sk=%p, destructor=%p"
			" users=%d type=%s\n",
			skb, skb->truesize, skb->sk, skb->destructor,
			atomic_read(&skb->users),
			m->conn && TFW_CONN_TYPE(m->conn) & Conn_Clnt
			? "Conn_Clnt"
			: m->conn && TFW_CONN_TYPE(m->conn) & Conn_Srv
			  ? "Conn_Srv" : "Unknown");
		kfree_skb(skb);
	}

	tfw_pool_free(m->pool);
}
/**
 * Emit one value for header field @t into the output buffer at *@p
 * (bounded by @end) and return the FUZZ_* flags describing its validity.
 *
 * For @n below the generator vector size a predefined value from
 * fld_data[t].vals is used; otherwise a random string is generated
 * (periodically an invalid one, unless @ctx->is_only_valid is set).
 */
static unsigned int
__add_field(TfwFuzzContext *ctx, int type, char **p, char *end, int t, int n)
{
	BUG_ON(t < 0);
	BUG_ON(t >= TRANSFER_ENCODING_NUM);
	BUG_ON(!fld_data[t].vals);

	if (n < gen_vector[t].size) {
		unsigned int r;
		FuzzMsg fmsg = fld_data[t].vals[n];

		add_string(p, end, fmsg.sval);

		/* A per-field hook may override the static flags. */
		if (fld_data[t].func)
			r = fld_data[t].func(ctx, type, t, n);
		else
			r = fmsg.flags;

		if (r & FUZZ_MSG_F_INVAL) {
			TFW_DBG("generate invalid field %d for header %d\n",
				n, t);
			r |= FUZZ_INVALID;
		}
		ctx->fld_flags[t] |= r;

		return r;
	} else {
		/*
		 * @v captures the write position BEFORE the random value is
		 * generated, so the strncpy() below copies exactly the
		 * generated characters.
		 */
		char *v = *p;
		int len = n * 256;
		unsigned int r;

		if (n % INVALID_FIELD_PERIOD || ctx->is_only_valid) {
			if (gen_vector[t].max_val_len)
				len = gen_vector[t].max_val_len;
			add_rand_string(p, end, len, gen_vector[t].a_val);
			r = FUZZ_VALID;
		} else {
			add_rand_string(p, end, len, gen_vector[t].a_inval);
			r = FUZZ_INVALID;
		}

		if (t == CONTENT_LENGTH && r == FUZZ_VALID) {
			/*
			 * NOTE(review): @len can be as large as n * 256 (or
			 * max_val_len); presumably ctx->content_length is
			 * sized to hold it plus the terminator — confirm
			 * against the TfwFuzzContext declaration.
			 */
			strncpy(ctx->content_length, v, len);
			ctx->content_length[len] = '\0';
		} else {
			ctx->content_length[0] = '\0';
		}
		if (r == FUZZ_INVALID)
			TFW_DBG("generate invalid random field for header %d\n",
				t);

		return r;
	}
}
/** * Create a file in the debugfs, also create parent directories if needed and * remove the old file if it exists. * * @param path Path to a file to be created. * The path is always treated relative to the Tempesta root * directory in the debugfs (see tfw_debugfs_root). * @param data A pointer to some data which is saved in 'file' and 'inode' * structures. It may be retrieved by any function in @fops as * file->private_data or file->f_inode->i_private (it is copied * into both places). * @param fops A set of functions that handle system calls on the created file. * * * The function creates a file in the debugfs, but does it in a robust way: * - the file is replaced if it already exists * - all parent directories are created if they don't exist * * Returns: An ERR_PTR if the file is not created. */ static struct dentry * create_with_parents(const char *path, void *data, const struct file_operations *fops) { size_t name_size; char *buf, *pos, *component; struct dentry *parent, *child; /* Copy the path to a temporary buffer where it can be modified. */ name_size = strlen(path) + 1; buf = kmalloc(name_size, GFP_KERNEL); BUG_ON(ZERO_OR_NULL_PTR(buf)); strlcpy(buf, path, name_size); /* Eat the leading slash to allow specify either /foo/bar or foo/bar */ pos = buf; if (*pos == '/') ++pos; /* Walk over the path and create non-existing directories. */ parent = tfw_debugfs_root; component = pos; do { if (*pos != '/') continue; *pos = '\0'; child = lookup_file(component, parent); if (!child) { child = debugfs_create_dir(component, parent); BUG_ON(!parent); } parent = child; component = pos + 1; } while (*(++pos)); /* Remove the file if it already exists. */ child = lookup_file(component, parent); if (child) { TFW_DBG("Removing already existing debugfs file: %s\n", path); debugfs_remove(child); } /* Create the actual file. 
*/ child = debugfs_create_file(component, S_IRWXU, parent, data, fops); if (IS_ERR_OR_NULL(child)) { int err = PTR_ERR(child); TFW_WARN("Can't create debugfs file: %s (%d)\n", path, err); } else { TFW_DBG("Created debugfs file: %s\n", path); } kfree(buf); return child; }
/**
 * Periodically emit several copies of header @t so that duplicate-header
 * handling gets exercised.  Duplicating a singular header renders the
 * message invalid.
 */
static unsigned int
__add_duplicates(TfwFuzzContext *ctx, int type, char **p, char *end, int t,
		 int n)
{
	unsigned int flags = FUZZ_VALID;
	int idx, saved_idx = 0;

	/* Only every DUPLICATES_PERIOD-th call actually emits duplicates. */
	if (ctx->curr_duplicates++ % DUPLICATES_PERIOD)
		return FUZZ_VALID;

	/* Never duplicate a singular header if only valid output is wanted. */
	if (ctx->is_only_valid && gen_vector[t].singular)
		return FUZZ_VALID;

	for (idx = 0; idx < ctx->curr_duplicates % MAX_DUPLICATES; ++idx) {
		/*
		 * With dissipation enabled, temporarily shift the value
		 * index so each duplicate carries a different value.
		 */
		if (gen_vector[t].dissipation) {
			saved_idx = ctx->i[t];
			ctx->i[t] = (ctx->i[t] + idx) % gen_vector[t].size;
		}

		flags |= __add_header(ctx, type, p, end, t, n);

		if (gen_vector[t].dissipation)
			ctx->i[t] = saved_idx;
	}

	if (gen_vector[t].singular && idx > 0) {
		TFW_DBG("generate duplicate for singular header %d\n", t);
		return FUZZ_INVALID;
	}

	return flags;
}
/**
 * Emit the full header line "<key> <value>[, <value>...]\r\n" for field @t,
 * appending @n extra comma-separated values, and record the header in
 * @ctx->hdr_flags.  Returns accumulated FUZZ_* validity flags.
 */
static unsigned int
__add_header(TfwFuzzContext *ctx, int type, char **p, char *end, int t, int n)
{
	unsigned int flags = 0, k;

	BUG_ON(t < 0);
	BUG_ON(t >= TRANSFER_ENCODING_NUM);
	BUG_ON(!fld_data[t].key);

	/* Header name followed by optional spaces and the first value. */
	add_string(p, end, fld_data[t].key);
	flags |= add_field(ctx, type, p, end, SPACES);
	flags |= add_field(ctx, type, p, end, t);

	/* Extra comma-separated values. */
	for (k = 0; k < n; ++k) {
		addch(p, end, ',');
		flags |= add_field(ctx, type, p, end, SPACES);
		flags |= __add_field(ctx, type, p, end, t,
				     (k * 256) % (gen_vector[t].size
						  + gen_vector[t].over));
	}

	add_string(p, end, "\r\n");

	if (flags & FUZZ_INVALID)
		TFW_DBG("generate invalid header %d\n", t);

	ctx->hdr_flags |= 1 << t;

	return flags;
}
/** * Find a client corresponding to the @sk. * * The returned TfwClient reference must be released via tfw_client_put() * when the @sk is closed. */ TfwClient * tfw_client_obtain(struct sock *sk) { int daddr_len; TfwAddr daddr; /* * TODO: currently there is one to one socket-client * mapping, which isn't appropriate since a client can * have more than one socket with the server. * * We need to look up a client by the socket and create * a new one only if it's really new. */ TfwClient *cli = kmem_cache_alloc(cli_cache, GFP_ATOMIC); if (!cli) return NULL; /* Derive client's IP address from @sk. */ if (ss_getpeername(sk, &daddr.sa, &daddr_len)) { kmem_cache_free(cli_cache, cli); return NULL; } tfw_peer_init((TfwPeer *)cli, &daddr); atomic_set(&cli->conn_users, 1); TFW_DBG("new client: cli=%p\n", cli); return cli; }
/*
 * End of the "sched_http_rules" section: by now the rule list must have
 * been allocated by the section-begin callback.
 */
static int
tfw_sched_http_cfg_finish_rules(TfwCfgSpec *cs)
{
	TFW_DBG("sched_http: finish sched_http_rules\n");
	BUG_ON(!tfw_sched_http_rules);
	return 0;
}
/*
 * Execute FSM @fsm_id on @skb at offset @off: mark its state slot as being
 * on the call stack, run the handler, then either drop the slot (FSM
 * finished) or clear the on-stack flag.
 */
static int
__gfsm_fsm_exec(TfwGState *st, int fsm_id, struct sk_buff *skb,
		unsigned int off)
{
	int ret, slot, unused;

	slot = __gfsm_fsm_lookup(st, fsm_id, &unused);
	st->curr = slot;
	BUG_ON(st->curr < 0);

	FSM_STATE(st) |= TFW_GFSM_ONSTACK;

	TFW_DBG("GFSM exec fsm %d, state %#x\n", fsm_id, st->states[slot]);

	ret = fsm_htbl[fsm_id](st->obj, skb, off);

	/* If current FSM finishes, remove its state. */
	if ((st->states[slot] & TFW_GFSM_STATE_MASK) == TFW_GFSM_STATE_LAST) {
		FSM_STATE(st) = BAD_STATE;
		st->curr = -1;
	} else {
		st->states[slot] &= ~TFW_GFSM_ONSTACK;
	}

	return ret;
}
/**
 * Tear down the server attached to socket @s: detach it from the
 * scheduler and from the connection, then run the saved destructor.
 */
void
tfw_destroy_server(struct sock *s)
{
	TfwConnection *conn = s->sk_user_data;
	TfwServer *srv;

	BUG_ON(!conn);
	srv = conn->hndl;

	/* The callback can be called twice, by our code and by Linux code. */
	if (unlikely(!srv))
		return;

	TFW_DBG("Destroy server socket %p\n", s);

	if (tfw_sched_del_srv(srv))
		TFW_WARN("Try to delete orphaned server from"
			 " requests scheduler");

	srv->sock = NULL;
	conn->hndl = NULL;

	/* FIXME clear the server references from all current sessions. */
#if 0
	kmem_cache_free(srv_cache, srv);
#endif

	conn->sk_destruct(s);
}
/*
 * Module teardown: all parsed rules must already be released, then the
 * scheduler and its configuration module are unregistered.
 */
void
tfw_sched_http_exit(void)
{
	TFW_DBG("sched_http: exit\n");

	BUG_ON(tfw_sched_http_rules);

	tfw_sched_unregister(&tfw_sched_http);
	tfw_cfg_mod_unregister(&tfw_sched_http_cfg_mod);
}
/**
 * Enter the "sched_http_rules" section: make sure the tfw_sched_http_rules
 * list exists so that the nested "match" entries have somewhere to go.
 */
static int
tfw_sched_http_cfg_begin_rules(TfwCfgSpec *cs, TfwCfgEntry *ce)
{
	TFW_DBG("sched_http: begin sched_http_rules\n");

	if (tfw_sched_http_rules)
		return 0;

	tfw_sched_http_rules = tfw_http_match_list_alloc();
	return tfw_sched_http_rules ? 0 : -ENOMEM;
}
/**
 * Move the FSM to new state @state and call all registered hooks for it.
 *
 * Iterates over all priorities for the current state of the top (current)
 * FSM and switches to the registered FSMs.
 *
 * Currently there are TFW_GFSM_WC_BMAP_SZ priorities (and each priority
 * has a 32-bit states bitmap), so we use this fact to speed up the
 * iteration.
 */
int
tfw_gfsm_move(TfwGState *st, unsigned short state, struct sk_buff *skb,
	      unsigned int off)
{
	int r = TFW_PASS, p, fsm;
	unsigned int *hooks = fsm_hooks_bm[FSM(st)];
	/*
	 * NOTE(review): `1 << state` is undefined behavior when
	 * state >= 32; presumably @state always fits the 32-bit
	 * per-priority bitmap mentioned above — confirm with the callers.
	 */
	unsigned long mask = 1 << state;

	TFW_DBG("GFSM move from %#x to %#x\n", FSM_STATE(st), state);

	/* Remember current FSM context. */
	SET_STATE(st, state);

	/* Start from highest priority. */
	for (p = TFW_GFSM_HOOK_PRIORITY_HIGH;
	     p < TFW_GFSM_HOOK_PRIORITY_NUM; ++p)
	{
		/*
		 * TODO Handle different priorities by ordering the hooks,
		 * rather than fixed priority levels to avoid spinning in vain.
		 */
		if (!(hooks[p] & mask))
			return TFW_PASS;

		/* Switch context to other FSM. */
		fsm = tfw_gfsm_switch(st, state, p);

		/*
		 * Don't execute the FSM handler which executed us;
		 * that FSM will just continue its processing when all other
		 * executed FSMs exit.
		 */
		if (FSM_STATE(st) & TFW_GFSM_ONSTACK)
			continue;

		switch (__gfsm_fsm_exec(st, fsm, skb, off)) {
		case TFW_BLOCK:
			return TFW_BLOCK;
		case TFW_POSTPONE:
			/*
			 * Postpone processing if at least one FSM
			 * needs more data.
			 */
			r = TFW_POSTPONE;
		}
	}

	return r;
}
/*
 * Find a connection for an outgoing HTTP request.
 *
 * The request is matched against the configured rules to pick a server
 * group; the group's own scheduler then picks a connection.  If the main
 * group yields nothing and a backup group is configured, it is tried next.
 */
static TfwConnection *
tfw_sched_http_sched_grp(TfwMsg *msg)
{
	TfwSchedHttpRule *rule;
	TfwSrvGroup *grp;
	TfwConnection *conn;

	if (!tfw_sched_http_rules || list_empty(&tfw_sched_http_rules->list))
		return NULL;

	rule = tfw_http_match_req_entry((TfwHttpReq *)msg,
					tfw_sched_http_rules,
					TfwSchedHttpRule, rule);
	if (unlikely(!rule)) {
		TFW_DBG("sched_http: No matching rule found.\n");
		return NULL;
	}

	grp = rule->main_sg;
	BUG_ON(!grp);
	TFW_DBG("sched_http: use server group: '%s'\n", grp->name);

	conn = grp->sched->sched_srv(msg, grp);

	if (unlikely(!conn && rule->backup_sg)) {
		grp = rule->backup_sg;
		TFW_DBG("sched_http: the main group is offline, use backup:"
			" '%s'\n", grp->name);
		conn = grp->sched->sched_srv(msg, grp);
	}

	if (unlikely(!conn))
		TFW_WARN("sched_http: Unable to select server from group"
			 " '%s'\n", grp->name);

	return conn;
}
static int start_listen_socks(void) { struct socket *sock; int i, r; FOR_EACH_SOCK(sock, i) { /* TODO adjust /proc/sys/net/core/somaxconn */ TFW_DBG("start listening on socket: sk=%p\n", sock->sk); r = sock->ops->listen(sock, LISTEN_SOCK_BACKLOG_LEN); if (r) { TFW_ERR("can't listen on front-end socket sk=%p (%d)\n", sock->sk, r); return r; } }
/**
 * Context switch from current FSM at state @state to next FSM.
 *
 * Looks up (or lazily creates) the state slot for the FSM registered at
 * hook priority @prio for the current state, and returns the id of the
 * FSM that was switched to.
 */
static int
tfw_gfsm_switch(TfwGState *st, int state, int prio)
{
	/* Index into the per-FSM hook table: priority row + state column. */
	int shift = prio * TFW_GFSM_PRIO_N + (state & TFW_GFSM_STATE_MASK);
	int fsm_next = fsm_hooks[FSM(st)][shift].fsm_id;
	/* The upper bits of @state carry the id of the current FSM. */
	int fsm_curr = state >> TFW_GFSM_FSM_SHIFT;
	int free_slot;

	TFW_DBG("GFSM switch from fsm %d at state %d to fsm %d at state %#x\n",
		fsm_curr, state, fsm_next, fsm_hooks[fsm_curr][shift].st0);

	st->curr = __gfsm_fsm_lookup(st, fsm_next, &free_slot);
	if (st->curr < 0) {
		/* Create new clear state for the next FSM. */
		BUG_ON(free_slot < 0);
		st->curr = free_slot;
		FSM_STATE(st) = fsm_hooks[fsm_curr][shift].st0;
	}

	return fsm_next;
}
/**
 * Create a TCP socket for @addr, bind it and register it as a listener
 * of the given @type; actual listening is started later.
 */
static int
add_listen_sock(TfwAddr *addr, int type)
{
	int ret;
	SsProto *sp;
	struct socket *sock;

	if (listen_socks_n == ARRAY_SIZE(listen_socks)) {
		TFW_ERR("maximum number of listen sockets (%d) is reached\n",
			listen_socks_n);
		return -ENOBUFS;
	}

	ret = sock_create_kern(addr->sa.sa_family, SOCK_STREAM, IPPROTO_TCP,
			       &sock);
	if (ret) {
		TFW_ERR("can't create socket (err: %d)\n", ret);
		return ret;
	}

	/* Allow binding to not-yet-configured addresses and address reuse. */
	inet_sk(sock->sk)->freebind = 1;
	sock->sk->sk_reuse = 1;

	ret = sock->ops->bind(sock, &addr->sa, tfw_addr_sa_len(addr));
	if (ret) {
		TFW_ERR_ADDR("can't bind to", addr);
		sock_release(sock);
		return ret;
	}

	sp = &protos[listen_socks_n];
	sp->type = type;
	BUG_ON(sp->listener);
	ss_tcp_set_listen(sock, sp);

	TFW_DBG("created front-end socket: sk=%p\n", sock->sk);

	BUG_ON(listen_socks[listen_socks_n]);
	listen_socks[listen_socks_n] = sock;
	++listen_socks_n;

	return 0;
}
/*
 * Module init: register the configuration module first, then the
 * scheduler; roll back the former if the latter fails.
 */
int
tfw_sched_http_init(void)
{
	int ret;

	TFW_DBG("sched_http: init\n");

	ret = tfw_cfg_mod_register(&tfw_sched_http_cfg_mod);
	if (ret) {
		TFW_ERR("sched_http: can't register configuration module\n");
		return ret;
	}

	ret = tfw_sched_register(&tfw_sched_http);
	if (ret) {
		TFW_ERR("sched_http: can't register scheduler module\n");
		tfw_cfg_mod_unregister(&tfw_sched_http_cfg_mod);
	}

	return ret;
}
/**
 * Create a listening front-end socket.
 *
 * @addr points to a sockaddr whose family determines the address length.
 */
static int
__open_listen_socket(SsProto *proto, void *addr)
{
	int ret;
	struct socket *sock;
	unsigned short af = *(unsigned short *)addr;
	unsigned short addr_len = (af == AF_INET)
				  ? sizeof(struct sockaddr_in)
				  : sizeof(struct sockaddr_in6);

	ret = sock_create_kern(af, SOCK_STREAM, IPPROTO_TCP, &sock);
	if (ret) {
		TFW_ERR("Can't create front-end listening socket (%d)\n", ret);
		return ret;
	}
	inet_sk(sock->sk)->freebind = 1;
	sock->sk->sk_reuse = 1;

	ret = sock->ops->bind(sock, (struct sockaddr *)addr, addr_len);
	if (ret) {
		TFW_ERR("Can't bind front-end listening socket (%d)\n", ret);
		goto err;
	}

	ss_tcp_set_listen(sock, proto);
	TFW_DBG("Created listening socket %p\n", sock->sk);

	/* TODO adjust /proc/sys/net/core/somaxconn */
	ret = sock->ops->listen(sock, 1024);
	if (ret) {
		TFW_ERR("Can't listen on front-end socket (%d)\n", ret);
		goto err;
	}

	return ret;
err:
	sock_release(sock);
	return ret;
}
/**
 * Append a message body to the output buffer at *@p (bounded by @end),
 * sized by the Content-Length value previously chosen for this message.
 *
 * Plain bodies are emitted directly; if Transfer-Encoding: chunked was
 * generated, the body is split across ctx->i[BODY_CHUNKS_NUM] + 1 chunks.
 * Periodically only half the promised bytes are emitted to produce
 * invalid messages (unless @ctx->is_only_valid is set).
 */
static unsigned int
add_body(TfwFuzzContext *ctx, char **p, char *end, int type)
{
	size_t len = 0, i, j;
	char *len_str;
	int err, ret = FUZZ_VALID;

	i = ctx->i[CONTENT_LENGTH];
	len_str = (i < gen_vector[CONTENT_LENGTH].size)
		  ? (content_len[i].flags & FUZZ_MSG_F_INVAL)
		    /*
		     * Generate content of arbitrary size for invalid
		     * Content-Length value.
		     */
		    ? "500"
		    : content_len[i].sval
		  : ctx->content_length;
	/*
	 * NOTE(review): kstrtoul() writes an unsigned long; passing &len
	 * (size_t) relies on size_t == unsigned long — true on the kernel
	 * targets this builds for, but worth confirming.
	 */
	err = kstrtoul(len_str, 10, &len);
	if (err) {
		TFW_ERR("error %d on getting content length from \"%s\""
			"(%lu)\n", err, len_str, i);
		return FUZZ_INVALID;
	}

	if (!(ctx->fld_flags[TRANSFER_ENCODING] & FUZZ_FLD_F_CHUNKED)) {
		/* Plain (non-chunked) body. */
		if (!ctx->is_only_valid && len && !(i % INVALID_BODY_PERIOD)) {
			/* Emit only half the promised bytes: invalid msg. */
			len /= 2;
			ret = FUZZ_INVALID;
			TFW_DBG("1/2 invalid body %lu\n", len);
		}
		add_rand_string(p, end, len, A_BODY);
	} else {
		int chunks = ctx->i[BODY_CHUNKS_NUM] + 1;
		size_t chlen, rem, step;

		BUG_ON(chunks <= 0);

		if (len > 0) {
			chlen = len / chunks;
			rem = len % chunks;
			for (j = 0; j < chunks; j++) {
				char buf[256];

				/* Fold the remainder into the first chunk. */
				step = chlen;
				if (rem) {
					step += rem;
					rem = 0;
				}

				/* Chunk-size line in hex, then the data. */
				snprintf(buf, sizeof(buf), "%zx", step);
				add_string(p, end, buf);
				add_string(p, end, "\r\n");
				if (!ctx->is_only_valid && step
				    && !(i % INVALID_BODY_PERIOD))
				{
					step /= 2;
					ret = FUZZ_INVALID;
					TFW_DBG("1/2 invalid chunked body %lu,"
						" chunks %d\n", len, chunks);
				}
				add_rand_string(p, end, step, A_BODY);
				add_string(p, end, "\r\n");
			}
		}
		/* Terminating zero-length chunk. */
		add_string(p, end, "0\r\n\r\n");
	}

	return ret;
}
/**
 * Handle a "match" entry within the "sched_http_rules" section, e.g.:
 *     sched_http_rules {
 *         match group1 uri prefix "/foo";
 *         match group2 host eq "example.com";
 *     }
 *
 * Syntax:
 *     match <srv_group> <field> <op> <arg> [backup=<srv_group>]
 * where <srv_group> references a "srv_group", <field> is an HTTP request
 * field, <op> a matching operator (eq, prefix, substr, etc), <arg> an
 * arbitrary string argument, and the optional backup= attribute names a
 * fallback server group.
 *
 * The callback resolves the group name(s), parses the rule and appends it
 * to the tfw_sched_http_rules list.
 */
static int
tfw_sched_http_cfg_handle_match(TfwCfgSpec *cs, TfwCfgEntry *e)
{
	int ret;
	size_t arg_len;
	TfwSchedHttpRule *rule;
	tfw_http_match_op_t op;
	tfw_http_match_fld_t field;
	tfw_http_match_arg_t type;
	TfwSrvGroup *main_sg, *backup_sg;
	const char *main_sg_str, *field_str, *op_str, *arg_str, *backup_sg_str;

	ret = tfw_cfg_check_val_n(e, 4);
	if (ret)
		return ret;

	main_sg_str = e->vals[0];
	field_str = e->vals[1];
	op_str = e->vals[2];
	arg_str = e->vals[3];
	backup_sg_str = tfw_cfg_get_attr(e, "backup", NULL);

	/* Resolve the mandatory main group and the optional backup group. */
	main_sg = tfw_sg_lookup(main_sg_str);
	if (!main_sg) {
		TFW_ERR("sched_http: srv_group is not found: '%s'\n",
			main_sg_str);
		return -EINVAL;
	}
	if (!backup_sg_str) {
		backup_sg = NULL;
	} else {
		backup_sg = tfw_sg_lookup(backup_sg_str);
		if (!backup_sg) {
			TFW_ERR("sched_http: backup srv_group is not found:"
				" '%s'\n", backup_sg_str);
			return -EINVAL;
		}
	}

	/* Translate the textual field and operator into enum values. */
	ret = tfw_cfg_map_enum(tfw_sched_http_cfg_field_enum, field_str,
			       &field);
	if (ret) {
		TFW_ERR("sched_http: invalid HTTP request field: '%s'\n",
			field_str);
		return -EINVAL;
	}
	ret = tfw_cfg_map_enum(tfw_sched_http_cfg_op_enum, op_str, &op);
	if (ret) {
		TFW_ERR("sched_http: invalid matching operator: '%s'\n",
			op_str);
		return -EINVAL;
	}

	arg_len = strlen(arg_str) + 1;
	type = tfw_sched_http_cfg_arg_tbl[field];
	rule = tfw_http_match_entry_new(tfw_sched_http_rules, TfwSchedHttpRule,
					rule, arg_len);
	if (!rule) {
		TFW_ERR("sched_http: can't allocate memory for parsed rule\n");
		return -ENOMEM;
	}

	TFW_DBG("sched_http: parsed rule: match"
		" '%s'=%p '%s'=%d '%s'=%d '%s'\n",
		main_sg_str, main_sg, field_str, field, op_str, op, arg_str);

	if (type == TFW_HTTP_MATCH_A_STR || type == TFW_HTTP_MATCH_A_WILDCARD) {
		tfw_http_match_rule_init(&rule->rule, field, op, type, arg_str);
	} else {
		BUG(); /* TODO: parsing of non-string matching rules */
	}

	rule->main_sg = main_sg;
	rule->backup_sg = backup_sg;

	return 0;
}