Example #1
0
/**
 * Build ce->resp and ce->resp->msg that it can be sent via TCP socket.
 *
 * Cache entry data is set as paged fragments of skb.
 * See do_tcp_sendpages() as reference.
 *
 * We return skbs in the cache entry response w/o setting any
 * network headers - tcp_transmit_skb() will do it for us.
 *
 * Returns 0 on success or -ENOMEM if the response message or one of
 * its skbs cannot be allocated. On failure the partially built response
 * is freed and ce->resp is reset to NULL.
 */
static int
tfw_cache_build_resp(TfwCacheEntry *ce)
{
	int f = 0;
	TdbVRec *trec = &ce->trec;
	char *data;
	struct sk_buff *skb = NULL;

	/*
	 * Allocated response won't be checked by any filters and
	 * is used for sending response data only, so don't initialize
	 * connection and GFSM fields.
	 */
	ce->resp = (TfwHttpResp *)tfw_http_msg_alloc(Conn_Srv);
	if (!ce->resp)
		return -ENOMEM;

	/* Deserialize offsets to pointers. */
	ce->key = TDB_PTR(db->hdr, (unsigned long)ce->key);
	ce->hdr_lens = TDB_PTR(db->hdr, (unsigned long)ce->hdr_lens);
	ce->hdrs = TDB_PTR(db->hdr, (unsigned long)ce->hdrs);
	ce->body = TDB_PTR(db->hdr, (unsigned long)ce->body);

	/*
	 * See tfw_cache_copy_resp(): ce->hdrs must lie within the data
	 * area of the first (embedded) record.
	 */
	BUG_ON((char *)(trec + 1) + trec->len <= ce->hdrs);

	/*
	 * Walk the TDB record chain and attach each chunk's data as a
	 * paged fragment. @data lags one step behind @trec: the first
	 * iteration uses ce->hdrs (inside the first record) while @trec
	 * already points to the next chunk.
	 *
	 * NOTE(review): the first iteration computes @size from the
	 * *next* record's bounds while @data points into the first
	 * record — this looks like cross-record pointer arithmetic;
	 * confirm against tfw_cache_copy_resp()'s layout guarantees.
	 */
	trec = TDB_PTR(db->hdr, TDB_DI2O(trec->chunk_next));
	for (data = ce->hdrs;
	     (long)trec != (long)db->hdr;
	     trec = TDB_PTR(db->hdr, TDB_DI2O(trec->chunk_next)),
		data = trec->data)
	{
		int off, size;

		if (!skb || f == MAX_SKB_FRAGS) {
			/* Protocol headers are placed in linear data only. */
			skb = alloc_skb(SKB_HDR_SZ, GFP_ATOMIC);
			if (!skb)
				goto err_skb;
			skb_reserve(skb, SKB_HDR_SZ);
			ss_skb_queue_tail(&ce->resp->msg.skb_list, skb);
			f = 0;
		}

		off = (unsigned long)data & ~PAGE_MASK;
		size = (char *)(trec + 1) + trec->len - data;

		/*
		 * NOTE(review): if @off + @size exceeds PAGE_SIZE, the
		 * fragment spans past the page returned by virt_to_page();
		 * presumably TDB chunks are page-bounded — verify.
		 */
		skb_fill_page_desc(skb, f, virt_to_page(data), off, size);

		++f;
	}

	return 0;
err_skb:
	/*
	 * Skbs already queued on the response are released together with
	 * the message. Reset ce->resp so no caller dereferences the
	 * freed message.
	 */
	tfw_http_msg_free((TfwHttpMsg *)ce->resp);
	ce->resp = NULL;
	return -ENOMEM;
}
Example #2
0
/**
 * Directly insert all skbs from @skb_list into @sk TCP write queue regardless
 * write buffer size. This allows directly forward modified packets without
 * copying. See do_tcp_sendpages() and tcp_sendmsg() in linux/net/ipv4/tcp.c.
 *
 * Can be called in softirq context as well as from kernel thread.
 *
 * If @pass_skb is true, ownership of the skbs moves to the transmission
 * path: @skb_list is emptied and, on error, the skbs are freed here —
 * the caller must not touch them afterwards. If @pass_skb is false, the
 * skbs are copied and @skb_list is left intact.
 *
 * Returns 0 on success, -ENOMEM if a copy fails, -EBUSY if the socket
 * cannot be scheduled for transmission.
 *
 * TODO use MSG_MORE until we reach end of message.
 */
int
ss_send(struct sock *sk, SsSkbList *skb_list, bool pass_skb)
{
	int r = 0;
	struct sk_buff *skb, *skb_copy;
	SsWork sw = {
		.sk	= sk,
		.action	= SS_SEND,
	};

	BUG_ON(!sk);
	BUG_ON(ss_skb_queue_empty(skb_list));
	SS_DBG("%s: cpu=%d sk=%p (cpu=%d) state=%s\n", __func__,
	       smp_processor_id(), sk, sk->sk_incoming_cpu,
	       ss_statename[sk->sk_state]);

	/*
	 * Remove the skbs from Tempesta lists if we won't use them,
	 * or copy them if they're going to be used by Tempesta during
	 * and after the transmission.
	 */
	if (pass_skb) {
		sw.skb_list = *skb_list;
		ss_skb_queue_head_init(skb_list);
	} else {
		ss_skb_queue_head_init(&sw.skb_list);
		for (skb = ss_skb_peek(skb_list); skb; skb = ss_skb_next(skb)) {
			/* tcp_transmit_skb() will clone the skb. */
			skb_copy = pskb_copy_for_clone(skb, GFP_ATOMIC);
			if (!skb_copy) {
				SS_WARN("Unable to copy an egress SKB.\n");
				r = -ENOMEM;
				goto err;
			}
			ss_skb_queue_tail(&sw.skb_list, skb_copy);
		}
	}

	/*
	 * Schedule the socket for TX softirq processing.
	 * Only part of @skb_list could be passed to send queue.
	 */
	if (ss_wq_push(&sw)) {
		SS_WARN("Cannot schedule socket %p for transmission\n", sk);
		r = -EBUSY;
		goto err;
	}

	return 0;
err:
	/*
	 * Free everything queued on sw.skb_list unconditionally: in the
	 * @pass_skb case the skbs were already detached from @skb_list
	 * above, so the caller cannot release them and skipping this
	 * would leak them.
	 */
	while ((skb = ss_skb_dequeue(&sw.skb_list)))
		kfree_skb(skb);
	return r;
}