static void
__cache_req_process_node(TfwHttpReq *req, unsigned long key,
			 void (*action)(TfwHttpReq *, TfwHttpResp *, void *),
			 void *data)
{
	TfwCacheEntry *ce;
	TfwHttpResp *resp = NULL;

	ce = tdb_lookup(db, key);
	if (!ce)
		goto finish_req_processing;
	/* TODO process collisions. */

	if (!ce->resp)
		if (tfw_cache_build_resp(ce))
			/*
			 * It seems we have the cache entry,
			 * but there are memory issues.
			 * Try to send the request to the backend in the hope
			 * that we have memory by the time we get an answer.
			 */
			goto finish_req_processing;
	/* The response is already assembled. */
	resp = ce->resp;

finish_req_processing:
	action(req, resp, data);
	tfw_http_msg_free((TfwHttpMsg *)req);
}
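/*
 * A hypothetical example of the @action callback expected by
 * __cache_req_process_node() above.  The name example_cache_action() and
 * the meaning of @data are illustrative assumptions only, not part of the
 * real Tempesta FW API: the callback gets the original request, the cached
 * response (or NULL on a cache miss) and the opaque @data pointer passed
 * through __cache_req_process_node().
 */
static void
example_cache_action(TfwHttpReq *req, TfwHttpResp *resp, void *data)
{
	if (!resp) {
		/* Cache miss: e.g. forward @req to a backend server. */
		return;
	}
	/* Cache hit: e.g. send @resp to the client described by @data. */
}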
void
test_req_free(TfwHttpReq *req)
{
	/*
	 * In tests we are stricter: we don't allow freeing a NULL pointer,
	 * to be sure of exactly what we are freeing and to catch bugs early.
	 */
	BUG_ON(!req);

	tfw_http_msg_free((TfwHttpMsg *)req);
}
/**
 * Build ce->resp and ce->resp->msg so that it can be sent via a TCP socket.
 *
 * Cache entry data is set as paged fragments of the skb.
 * See do_tcp_sendpages() as a reference.
 *
 * We return skbs in the cache entry response w/o setting any
 * network headers - tcp_transmit_skb() will do it for us.
 */
static int
tfw_cache_build_resp(TfwCacheEntry *ce)
{
	int f = 0;
	TdbVRec *trec = &ce->trec;
	char *data;
	struct sk_buff *skb = NULL;

	/*
	 * The allocated response won't be checked by any filters and
	 * is used for sending response data only, so don't initialize
	 * the connection and GFSM fields.
	 */
	ce->resp = (TfwHttpResp *)tfw_http_msg_alloc(Conn_Srv);
	if (!ce->resp)
		return -ENOMEM;

	/* Deserialize offsets to pointers. */
	ce->key = TDB_PTR(db->hdr, (unsigned long)ce->key);
	ce->hdr_lens = TDB_PTR(db->hdr, (unsigned long)ce->hdr_lens);
	ce->hdrs = TDB_PTR(db->hdr, (unsigned long)ce->hdrs);
	ce->body = TDB_PTR(db->hdr, (unsigned long)ce->body);

	/* See tfw_cache_copy_resp(). */
	BUG_ON((char *)(trec + 1) + trec->len <= ce->hdrs);

	trec = TDB_PTR(db->hdr, TDB_DI2O(trec->chunk_next));
	for (data = ce->hdrs;
	     (long)trec != (long)db->hdr;
	     trec = TDB_PTR(db->hdr, TDB_DI2O(trec->chunk_next)),
	     data = trec->data)
	{
		int off, size = trec->len;

		if (!skb || f == MAX_SKB_FRAGS) {
			/* Protocol headers are placed in linear data only. */
			skb = alloc_skb(SKB_HDR_SZ, GFP_ATOMIC);
			if (!skb)
				goto err_skb;
			skb_reserve(skb, SKB_HDR_SZ);
			ss_skb_queue_tail(&ce->resp->msg.skb_list, skb);
			f = 0;
		}

		off = (unsigned long)data & ~PAGE_MASK;
		size = (char *)(trec + 1) + trec->len - data;

		skb_fill_page_desc(skb, f, virt_to_page(data), off, size);
		++f;
	}

	return 0;
err_skb:
	tfw_http_msg_free((TfwHttpMsg *)ce->resp);
	return -ENOMEM;
}
void
tfw_cache_add(TfwHttpResp *resp, TfwHttpReq *req)
{
	TfwCWork *cw;
	TfwCacheEntry *ce, cdata = {{}};
	unsigned long key;
	size_t len = sizeof(cdata);

	if (!tfw_cfg.cache)
		goto out;

	key = tfw_cache_key_calc(req);

	/* TODO copy at least the first part of the URI here. */
	ce = (TfwCacheEntry *)tdb_entry_create(db, key, &cdata, &len);
	BUG_ON(len != sizeof(cdata));
	if (!ce)
		goto out;

	ce->resp = resp;

	/*
	 * We must write the entry key now because the request dies
	 * when the function finishes.
	 */
	if (tfw_cache_entry_key_copy(ce, req))
		goto out;

	cw = kmem_cache_alloc(c_cache, GFP_ATOMIC);
	if (!cw)
		goto out;
	INIT_WORK(&cw->work, tfw_cache_copy_resp);
	cw->cw_ce = ce;
	queue_work_on(tfw_cache_sched_work_cpu(numa_node_id()), cache_wq,
		      (struct work_struct *)cw);
out:
	/* Now we don't need the request and the response anymore. */
	tfw_http_msg_free((TfwHttpMsg *)req);
	tfw_http_msg_free((TfwHttpMsg *)resp);
}
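/*
 * A minimal sketch of the work item assumed by tfw_cache_add() above; the
 * real TfwCWork definition lives elsewhere in the Tempesta FW sources and
 * may differ.  The struct work_struct member must come first so that the
 * item can be cast to (struct work_struct *) for queue_work_on(), and
 * tfw_cache_copy_resp() is expected to have the standard work-queue
 * handler signature, void (*)(struct work_struct *).
 */
typedef struct {
	struct work_struct	work;
	TfwCacheEntry		*cw_ce;
} TfwCWork;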
void
test_resp_free(TfwHttpResp *resp)
{
	BUG_ON(!resp);

	tfw_http_msg_free((TfwHttpMsg *)resp);
}