/*
 * Store-client callback for an errormap header fetch.  When the entry
 * is alive, data arrived, the caller is still valid, and the reply is
 * a 200, hand the entry plus header size and Content-Length to the
 * registered callback; otherwise abort the fetch.  The node reference
 * is released on every path.
 */
static void
errorMapFetchHeaders(void *data, mem_node_ref nr, ssize_t size)
{
    ErrorMapState *state = data;
    HttpReply *reply;

    /* Same check order as before: entry liveness, data, caller validity */
    if (!EBIT_TEST(state->e->flags, ENTRY_ABORTED)
        && size != 0
        && cbdataValid(state->callback_data)) {
        reply = state->e->mem_obj->reply;
        if (reply->sline.status == HTTP_OK) {
            /* Send object to caller (cbdataValid verified above) */
            state->callback(state->e, reply->hdr_sz,
                httpHeaderGetSize(&reply->header, HDR_CONTENT_LENGTH),
                state->callback_data);
            errorMapFetchComplete(state);
            stmemNodeUnref(&nr);
            return;
        }
    }
    errorMapFetchAbort(state);
    stmemNodeUnref(&nr);
}
/*
 * Store-client callback delivering the first chunk of a peer-monitor
 * probe reply.  Records the HTTP status on pm->running; on a 200 it
 * accounts for any body bytes that arrived together with the headers
 * and schedules the body reader (peerMonitorFetchReply).  Any failure
 * (aborted entry, no data, peer gone, non-200) completes the cycle.
 *
 * Fix: removed the dead local 'buf', which was computed from the node
 * reference but never read.
 */
static void
peerMonitorFetchReplyHeaders(void *data, mem_node_ref nr, ssize_t size)
{
    PeerMonitor *pm = data;
    http_status status;
    HttpReply *reply;

    if (EBIT_TEST(pm->running.e->flags, ENTRY_ABORTED))
        goto completed;
    if (size <= 0)
        goto completed;
    if (!cbdataValid(pm->peer))
        goto completed;
    reply = pm->running.e->mem_obj->reply;
    assert(reply);
    status = reply->sline.status;
    pm->running.status = status;
    if (status != HTTP_OK)
        goto completed;
    /* Account for body bytes (if any) received alongside the headers */
    if (size > reply->hdr_sz) {
        pm->running.size = size - reply->hdr_sz;
        pm->running.offset = size;
    } else {
        pm->running.size = 0;
        pm->running.offset = reply->hdr_sz;
    }
    storeClientRef(pm->running.sc, pm->running.e, pm->running.offset,
        pm->running.offset, SM_PAGE_SIZE, peerMonitorFetchReply, pm);
    stmemNodeUnref(&nr);
    return;
  completed:
    /* We are fully done with this monitoring request. Clean up */
    stmemNodeUnref(&nr);
    peerMonitorCompleted(pm);
    return;
}
/*
 * Store-client callback that copies successive chunks of a peer's
 * cache-digest bitmap into pd->cd->mask, re-registering itself until
 * the whole mask has been received.  Both the store offset and the
 * mask offset advance by 'size' each call.
 *
 * Fix: the bounds assertion used '<', but the final chunk legitimately
 * fills the mask exactly (mask_offset + size == mask_size) — the
 * completion branch below even asserts that equality.  Changed to '<='.
 */
static void
peerDigestSwapInMask(void *data, mem_node_ref nr, ssize_t size)
{
    const char *buf = nr.node->data + nr.offset;
    DigestFetchState *fetch = data;
    PeerDigest *pd;

    assert(size <= nr.node->len - nr.offset);
    if (peerDigestFetchedEnough(fetch, size, "peerDigestSwapInMask")) {
        stmemNodeUnref(&nr);
        return;
    }
    pd = fetch->pd;
    assert(pd->cd && pd->cd->mask);
    /* Copy data into the peer digest mask */
    if (size > 0) {
        /* '<=': the last chunk may complete the mask exactly */
        assert(size + fetch->mask_offset <= pd->cd->mask_size);
        memcpy(pd->cd->mask + fetch->mask_offset, buf, size);
    }
    stmemNodeUnref(&nr);
    fetch->offset += size;
    fetch->mask_offset += size;
    if (fetch->mask_offset >= pd->cd->mask_size) {
        debug(72, 2) ("peerDigestSwapInMask: Done! Got %" PRINTF_OFF_T ", expected %d\n",
            fetch->mask_offset, pd->cd->mask_size);
        assert(fetch->mask_offset == pd->cd->mask_size);
        assert(peerDigestFetchedEnough(fetch, 0, "peerDigestSwapInMask"));
    } else {
        /* Ask for exactly the remainder of the mask */
        const size_t buf_sz = pd->cd->mask_size - fetch->mask_offset;
        assert(buf_sz > 0);
        storeClientRef(fetch->sc, fetch->entry, fetch->offset, fetch->offset,
            buf_sz, peerDigestSwapInMask, fetch);
    }
}
/*
 * Store-client callback that drains the body of a peer-monitor probe
 * reply.  Accumulates byte counts and requests the next page until EOF,
 * an error, or the peer becomes invalid, then completes the cycle.
 */
static void
peerMonitorFetchReply(void *data, mem_node_ref nr, ssize_t size)
{
    PeerMonitor *pm = data;

    if (size > 0 && cbdataValid(pm->peer)) {
        /* More body data: account for it and schedule the next read */
        pm->running.size += size;
        pm->running.offset += size;
        storeClientRef(pm->running.sc, pm->running.e, pm->running.offset,
            pm->running.offset, SM_PAGE_SIZE, peerMonitorFetchReply, pm);
    } else {
        /* EOF, error, or the peer went away: finish this monitor cycle */
        peerMonitorCompleted(pm);
    }
    stmemNodeUnref(&nr);
}
/*
 * Store-client callback used when the caller only wants the parsed
 * reply headers.  Passes the parsed HttpReply to the header callback,
 * or NULL if the read failed or the headers are not yet available.
 */
static void
storeClientCopyHeadersCB(void *data, mem_node_ref nr, ssize_t size)
{
    store_client *sc = data;
    HttpReply *reply = NULL;

    assert(sc->header_cbdata);
    assert(sc->header_callback);
    stmemNodeUnref(&nr);
    /* XXX should cbdata lock/unlock the cbdata? */
    if (size >= 0 && memHaveHeaders(sc->entry->mem_obj))
        reply = sc->entry->mem_obj->reply;
    sc->header_callback(sc->header_cbdata, reply);
}
/*
 * Store-client callback that accumulates the fixed-size digest control
 * block (cblock) into fetch->buf.  Once StoreDigestCBlockSize bytes are
 * buffered the cblock is validated via peerDigestSetCBlock(); on success
 * the fetch switches to peerDigestSwapInMask to read the bitmap, on
 * failure the fetch is aborted.  The node reference is released on every
 * path via the 'finish' label.
 */
static void
peerDigestSwapInCBlock(void *data, mem_node_ref nr, ssize_t size)
{
    const char *buf = nr.node->data + nr.offset;
    DigestFetchState *fetch = data;
    if (peerDigestFetchedEnough(fetch, size, "peerDigestSwapInCBlock"))
        goto finish;
    fetch->offset += size;
    /* Append this chunk to the cblock accumulation buffer */
    memcpy(fetch->buf + fetch->buf_used, buf, size);
    fetch->buf_used += size;
    if (fetch->buf_used >= StoreDigestCBlockSize) {
        PeerDigest *pd = fetch->pd;
        HttpReply *rep = fetch->entry->mem_obj->reply;
        assert(pd && rep);
        if (peerDigestSetCBlock(pd, fetch->buf)) {
            /* XXX: soon we will have variable header size */
            /* Rewind offset past any bytes buffered beyond the cblock */
            fetch->offset -= fetch->buf_used - StoreDigestCBlockSize;
            /* switch to CD buffer and fetch digest guts */
            memFree(fetch->buf, MEM_4K_BUF);
            fetch->buf = NULL;
            fetch->buf_used = 0;
            assert(pd->cd->mask);
            storeClientRef(fetch->sc, fetch->entry, fetch->offset,
                fetch->offset, pd->cd->mask_size, peerDigestSwapInMask, fetch);
        } else {
            peerDigestFetchAbort(fetch, "invalid digest cblock");
        }
    } else {
        /* need more data, do we have space? */
        if (fetch->buf_used >= SM_PAGE_SIZE)
            peerDigestFetchAbort(fetch, "digest cblock too big");
        else
            storeClientRef(fetch->sc, fetch->entry, fetch->offset,
                fetch->offset, SM_PAGE_SIZE - fetch->buf_used,
                peerDigestSwapInCBlock, fetch);
    }
  finish:
    stmemNodeUnref(&nr);
}
/*
 * This routine hasn't been optimised to take advantage of the
 * passed sc. Yet.
 */
/*
 * Detach a store client from entry 'e': remove it from the MemObject
 * client list, close any pending swap-in I/O, fire its pending callback
 * with -1 to signal unexpected termination, release its node reference,
 * and free the client.  Returns 1 if a client was unregistered, 0 if
 * there was nothing to do.  NOTE: the teardown order below (callback
 * before unref, unlock last) matters; do not reorder.
 */
int
storeClientUnregister(store_client * sc, StoreEntry * e, void *owner)
{
    MemObject *mem = e->mem_obj;
    if (sc == NULL)
        return 0;
    debug(20, 3) ("storeClientUnregister: called for '%s'\n", storeKeyText(e->hash.key));
#if STORE_CLIENT_LIST_DEBUG
    assert(sc == storeClientListSearch(e->mem_obj, owner));
#endif
    assert(sc->entry == e);
    if (mem->clients.head == NULL)
        return 0;
    dlinkDelete(&sc->node, &mem->clients);
    mem->nclients--;
    /* Entry fully received but not fully swapped out: resume swap-out */
    if (e->store_status == STORE_OK && e->swap_status != SWAPOUT_DONE)
        storeSwapOut(e);
    if (sc->swapin_sio) {
        storeClose(sc->swapin_sio);
        cbdataUnlock(sc->swapin_sio);
        sc->swapin_sio = NULL;
        statCounter.swap.ins++;
    }
    if (NULL != sc->new_callback) {
        /* callback with ssize = -1 to indicate unexpected termination */
        debug(20, 3) ("storeClientUnregister: store_client for %s has a callback\n", mem->url);
        storeClientCallback(sc, -1);
    }
    stmemNodeUnref(&sc->node_ref);
#if DELAY_POOLS
    delayUnregisterDelayIdPtr(&sc->delay_id);
#endif
    storeSwapOutMaintainMemObject(e);
    /* Last client gone: maybe abort the in-progress fetch */
    if (mem->nclients == 0)
        CheckQuickAbort(e);
    storeUnlockObject(sc->entry);
    sc->entry = NULL;
    cbdataFree(sc);
    return 1;
}
/* fetch headers from disk, pass on to SwapInCBlock */
/*
 * Store-client callback run once the cached digest reply headers are
 * available.  Verifies the cached status is 200, then positions the
 * fetch offset past the headers, allocates the cblock accumulation
 * buffer, and schedules peerDigestSwapInCBlock.  A non-200 cached
 * status aborts the fetch.
 */
static void
peerDigestSwapInHeaders(void *data, mem_node_ref nr, ssize_t size)
{
    DigestFetchState *fetch = data;
    HttpReply *reply;

    assert(size <= nr.node->len - nr.offset);

    if (!peerDigestFetchedEnough(fetch, size, "peerDigestSwapInHeaders")) {
        assert(!fetch->offset);
        reply = fetch->entry->mem_obj->reply;
        assert(reply);
        if (reply->sline.status == HTTP_OK) {
            /* Skip headers, then pull in the digest cblock */
            fetch->offset = reply->hdr_sz;
            fetch->buf = memAllocate(MEM_4K_BUF);
            storeClientRef(fetch->sc, fetch->entry, fetch->offset,
                fetch->offset, SM_PAGE_SIZE, peerDigestSwapInCBlock, fetch);
        } else {
            debug(72, 1) ("peerDigestSwapInHeaders: %s status %d got cached!\n",
                strBuf(fetch->pd->host), reply->sline.status);
            peerDigestFetchAbort(fetch, "internal status error");
        }
    }
    stmemNodeUnref(&nr);
}
/*
 * Store-client callback that parses a whois reply listing networks for
 * an AS number.  Copies the chunk into a local page buffer, splits it on
 * whitespace, and registers each complete token via asnAddNet().  A
 * token cut off at the end of the buffer is left for the next chunk:
 * 'seen' advances over everything received while 'offset' only advances
 * over fully-consumed tokens, so the remainder is re-read next time.
 */
static void
asHandleReply(void *data, mem_node_ref nr, ssize_t size)
{
    ASState *asState = data;
    StoreEntry *e = asState->entry;
    char *s;
    char *t;
    LOCAL_ARRAY(char, buf, SM_PAGE_SIZE);
    debug(53, 3) ("asHandleReply: Called with size=%ld\n", (long int) size);
    if (EBIT_TEST(e->flags, ENTRY_ABORTED)) {
        stmemNodeUnref(&nr);
        asStateFree(asState);
        return;
    }
    /* EOF after data was received: normal completion */
    if (size == 0 && e->mem_obj->inmem_hi > 0) {
        asStateFree(asState);
        stmemNodeUnref(&nr);
        return;
    } else if (size < 0) {
        /* read error */
        debug(53, 1) ("asHandleReply: Called with size=%ld\n", (long int) size);
        asStateFree(asState);
        stmemNodeUnref(&nr);
        return;
    } else if (HTTP_OK != e->mem_obj->reply->sline.status) {
        debug(53, 1) ("WARNING: AS %d whois request failed\n", asState->as_number);
        stmemNodeUnref(&nr);
        asStateFree(asState);
        return;
    }
    /* NOTE(review): strict '<' rejects an exactly-full page; looks like it
     * relies on the copied region always being NUL-terminated within buf —
     * confirm against the whois reader's chunking. */
    assert((nr.offset + size) < SM_PAGE_SIZE);
    memcpy(buf, nr.node->data + nr.offset, size);
    stmemNodeUnref(&nr);
    s = buf;
    /* Tokenize on whitespace; each complete token is a network for this AS */
    while (s - buf < size && *s != '\0') {
        while (*s && xisspace(*s))
            s++;
        for (t = s; *t; t++) {
            if (xisspace(*t))
                break;
        }
        if (*t == '\0') {
            /* oof, word should continue on next block */
            break;
        }
        *t = '\0';
        debug(53, 3) ("asHandleReply: AS# %s (%d)\n", s, asState->as_number);
        asnAddNet(s, asState->as_number);
        s = t + 1;
    }
    /* 'seen' covers all bytes received; 'offset' only fully-parsed bytes */
    asState->seen = asState->offset + size;
    asState->offset += (s - buf);
    debug(53, 3) ("asState->seen = %ld, asState->offset = %ld\n",
        (long int) asState->seen, (long int) asState->offset);
    if (e->store_status == STORE_PENDING) {
        debug(53, 3) ("asHandleReply: store_status == STORE_PENDING: %s\n", storeUrl(e));
        storeClientRef(asState->sc, e, asState->seen, asState->offset,
            SM_PAGE_SIZE, asHandleReply, asState);
    } else if (asState->seen < e->mem_obj->inmem_hi) {
        /* more data already in memory: keep parsing */
        debug(53, 3) ("asHandleReply: asState->seen < e->mem_obj->inmem_hi %s\n", storeUrl(e));
        storeClientRef(asState->sc, e, asState->seen, asState->offset,
            SM_PAGE_SIZE, asHandleReply, asState);
    } else {
        debug(53, 3) ("asHandleReply: Done: %s\n", storeUrl(e));
        asStateFree(asState);
    }
}
/*
 * Store-client callback that parses a peer's netdb exchange reply.
 * Each record is a sequence of (1-byte tag, fixed-size value) fields:
 * network address, RTT (ms, network order, x1000), hops (x1000).
 * Parsed records update the peer's netdb via netdbExchangeUpdatePeer().
 * 'used' tracks fully-consumed bytes, 'seen' all bytes received; a
 * partially-received record is re-read on the next callback.  At most
 * 20 records are processed per call to bound CPU use.
 *
 * Fix: the header-skip test was inverted ('size < hdr_sz').  That drove
 * 'size' negative and advanced 'p' out of bounds when the first chunk
 * was shorter than the headers, and discarded valid data otherwise.
 * It must advance past the headers when size >= hdr_sz and consume
 * everything when the headers are not yet complete.
 */
static void
netdbExchangeHandleReply(void *data, mem_node_ref nr, ssize_t size)
{
    const char *buf = nr.node->data + nr.offset;
    netdbExchangeState *ex = data;
    int rec_sz = 0;
    ssize_t o;
    struct in_addr addr;
    double rtt;
    double hops;
    const char *p;
    int j;
    HttpReply *rep;
    size_t hdr_sz;
    int nused = 0;
    assert(size <= nr.node->len - nr.offset);
    /* One record: tagged address + tagged RTT + tagged hop count */
    rec_sz = 0;
    rec_sz += 1 + sizeof(addr.s_addr);
    rec_sz += 1 + sizeof(int);
    rec_sz += 1 + sizeof(int);
    ex->seen = ex->used + size;
    debug(38, 3) ("netdbExchangeHandleReply: %d bytes\n", (int) size);
    if (!cbdataValid(ex->p)) {
        debug(38, 3) ("netdbExchangeHandleReply: Peer became invalid\n");
        netdbExchangeDone(ex);
        goto finish;
    }
    debug(38, 3) ("netdbExchangeHandleReply: for %s'\n", ex->p->name);
    p = buf;
    if (0 == ex->used) {
        /* skip reply headers */
        rep = ex->e->mem_obj->reply;
        hdr_sz = rep->hdr_sz;
        debug(38, 5) ("netdbExchangeHandleReply: hdr_sz = %ld\n", (long int) hdr_sz);
        debug(38, 3) ("netdbExchangeHandleReply: reply status %d\n", rep->sline.status);
        if (HTTP_OK != rep->sline.status) {
            netdbExchangeDone(ex);
            goto finish;
        }
        ex->used += hdr_sz;
        /* BUGFIX: condition was inverted ('size < hdr_sz').  Cast keeps
         * the comparison signed so a short ssize_t is not promoted. */
        if (size >= (ssize_t) hdr_sz) {
            size -= hdr_sz;
            p += hdr_sz;
        } else {
            size = 0;
        }
    }
    debug(38, 5) ("netdbExchangeHandleReply: start parsing loop, size = %ld\n",
        (long int) size);
    while (size >= rec_sz) {
        debug(38, 5) ("netdbExchangeHandleReply: in parsing loop, size = %ld\n",
            (long int) size);
        addr.s_addr = any_addr.s_addr;
        hops = rtt = 0.0;
        for (o = 0; o < rec_sz;) {
            switch ((int) *(p + o)) {
            case NETDB_EX_NETWORK:
                o++;
                xmemcpy(&addr.s_addr, p + o, sizeof(addr.s_addr));
                o += sizeof(addr.s_addr);
                break;
            case NETDB_EX_RTT:
                o++;
                xmemcpy(&j, p + o, sizeof(int));
                o += sizeof(int);
                rtt = (double) ntohl(j) / 1000.0;
                break;
            case NETDB_EX_HOPS:
                o++;
                xmemcpy(&j, p + o, sizeof(int));
                o += sizeof(int);
                hops = (double) ntohl(j) / 1000.0;
                break;
            default:
                debug(38, 1) ("netdbExchangeHandleReply: corrupt data, aborting\n");
                netdbExchangeDone(ex);
                goto finish;
            }
        }
        if (addr.s_addr != any_addr.s_addr && rtt > 0)
            netdbExchangeUpdatePeer(addr, ex->p, rtt, hops);
        assert(o == rec_sz);
        ex->used += rec_sz;
        size -= rec_sz;
        p += rec_sz;
        /*
         * This is a fairly cpu-intensive loop, break after adding
         * just a few
         */
        if (++nused == 20)
            break;
    }
    debug(38, 3) ("netdbExchangeHandleReply: used %d entries, (x %d bytes) == %d bytes total\n",
        nused, rec_sz, nused * rec_sz);
    debug(38, 3) ("netdbExchangeHandleReply: seen %ld, used %ld\n",
        (long int) ex->seen, (long int) ex->used);
    if (EBIT_TEST(ex->e->flags, ENTRY_ABORTED)) {
        debug(38, 3) ("netdbExchangeHandleReply: ENTRY_ABORTED\n");
        netdbExchangeDone(ex);
    } else if (ex->e->store_status == STORE_PENDING) {
        debug(38, 3) ("netdbExchangeHandleReply: STORE_PENDING\n");
        storeClientRef(ex->sc, ex->e, ex->seen, ex->used, SM_PAGE_SIZE,
            netdbExchangeHandleReply, ex);
    } else if (ex->seen < ex->e->mem_obj->inmem_hi) {
        debug(38, 3) ("netdbExchangeHandleReply: ex->e->mem_obj->inmem_hi\n");
        storeClientRef(ex->sc, ex->e, ex->seen, ex->used, SM_PAGE_SIZE,
            netdbExchangeHandleReply, ex);
    } else {
        debug(38, 3) ("netdbExchangeHandleReply: Done\n");
        netdbExchangeDone(ex);
    }
  finish:
    buf = NULL;
    stmemNodeUnref(&nr);
}
/*
 * Disk-read completion callback for the swap-in of an object's meta
 * header.  Unpacks the TLV swap metadata from the freshly-read page,
 * verifies it describes the object the client asked for (MD5 key, URL,
 * store URL, Vary headers), records the swap header size, and then
 * either delivers body bytes already present in this page or schedules
 * another disk read.  Any validation failure fails the client with -1.
 */
static void
storeClientReadHeader(void *data, const char *buf_unused, ssize_t len)
{
    static int md5_mismatches = 0;
    store_client *sc = data;
    StoreEntry *e = sc->entry;
    MemObject *mem = e->mem_obj;
    int swap_hdr_sz = 0;
    size_t body_sz;
    size_t copy_sz;
    tlv *tlv_list;
    tlv *t;
    char *cbuf;
    int swap_object_ok = 1;
    char *new_url = NULL;
    char *new_store_url = NULL;
    assert(sc->flags.disk_io_pending);
    sc->flags.disk_io_pending = 0;
    assert(sc->new_callback);
    assert(sc->node_ref.node);
    cbuf = sc->node_ref.node->data;
    debug(20, 3) ("storeClientReadHeader: len %d\n", (int) len);
    /* XXX update how much data in that mem page is active; argh this should be done in a storage layer */
    sc->node_ref.node->len = len;
    if (len < 0) {
        /* disk read error */
        debug(20, 3) ("storeClientReadHeader: %s\n", xstrerror());
        storeClientCallback(sc, len);
        return;
    }
    assert(len <= SM_PAGE_SIZE);
    tlv_list = storeSwapMetaUnpack(cbuf, &swap_hdr_sz);
    if (swap_hdr_sz > len) {
        /* oops, bad disk file? */
        debug(20, 1) ("WARNING: swapfile header too small\n");
        storeClientCallback(sc, -1);
        return;
    }
    if (tlv_list == NULL) {
        debug(20, 1) ("WARNING: failed to unpack meta data\n");
        storeClientCallback(sc, -1);
        return;
    }
    /*
     * Check the meta data and make sure we got the right object.
     */
    for (t = tlv_list; t && swap_object_ok; t = t->next) {
        switch (t->type) {
        case STORE_META_KEY:
            /* on-disk MD5 key must match the entry's key (unless private) */
            assert(t->length == SQUID_MD5_DIGEST_LENGTH);
            if (!EBIT_TEST(e->flags, KEY_PRIVATE) &&
                memcmp(t->value, e->hash.key, SQUID_MD5_DIGEST_LENGTH)) {
                debug(20, 2) ("storeClientReadHeader: swapin MD5 mismatch\n");
                debug(20, 2) ("\t%s\n", storeKeyText(t->value));
                debug(20, 2) ("\t%s\n", storeKeyText(e->hash.key));
                if (isPowTen(++md5_mismatches))
                    debug(20, 1) ("WARNING: %d swapin MD5 mismatches\n", md5_mismatches);
                swap_object_ok = 0;
            }
            break;
        case STORE_META_URL:
            new_url = xstrdup(t->value);
            break;
        case STORE_META_STOREURL:
            new_store_url = xstrdup(t->value);
            break;
        case STORE_META_OBJSIZE:
            break;
        case STORE_META_STD:
        case STORE_META_STD_LFS:
            break;
        case STORE_META_VARY_HEADERS:
            if (mem->vary_headers) {
                if (strcmp(mem->vary_headers, t->value) != 0)
                    swap_object_ok = 0;
            } else {
                /* Assume the object is OK.. remember the vary request headers */
                mem->vary_headers = xstrdup(t->value);
            }
            break;
        default:
            debug(20, 2) ("WARNING: got unused STORE_META type %d\n", t->type);
            break;
        }
    }
    /* Check url / store_url */
    do {
        if (new_url == NULL) {
            debug(20, 1) ("storeClientReadHeader: no URL!\n");
            swap_object_ok = 0;
            break;
        }
        /*
         * If we have a store URL then it must match the requested object URL.
         * The theory is that objects with a store URL have been normalised
         * and thus a direct access which didn't go via the rewrite framework
         * are illegal!
         */
        if (new_store_url) {
            if (NULL == mem->store_url)
                mem->store_url = new_store_url;         /* ownership moves to mem */
            else if (0 == strcasecmp(mem->store_url, new_store_url))
                (void) 0;       /* a match!
                                 */
            else {
                debug(20, 1) ("storeClientReadHeader: store URL mismatch\n");
                debug(20, 1) ("\t{%s} != {%s}\n", (char *) new_store_url, mem->store_url);
                swap_object_ok = 0;
                break;
            }
        }
        /* If we have no store URL then the request and the memory URL must match */
        if ((!new_store_url) && mem->url && strcasecmp(mem->url, new_url) != 0) {
            debug(20, 1) ("storeClientReadHeader: URL mismatch\n");
            debug(20, 1) ("\t{%s} != {%s}\n", (char *) new_url, mem->url);
            swap_object_ok = 0;
            break;
        }
    } while (0);
    storeSwapTLVFree(tlv_list);
    xfree(new_url);
    /* don't free new_store_url if its owned by the mem object now */
    if (mem->store_url != new_store_url)
        xfree(new_store_url);
    if (!swap_object_ok) {
        storeClientCallback(sc, -1);
        return;
    }
    mem->swap_hdr_sz = swap_hdr_sz;
    mem->object_sz = e->swap_file_sz - swap_hdr_sz;
    /*
     * If our last read got some data the client wants, then give
     * it to them, otherwise schedule another read.
     */
    body_sz = len - swap_hdr_sz;
    if (sc->copy_offset < body_sz) {
        /*
         * we have (part of) what they want
         */
        copy_sz = XMIN(sc->copy_size, body_sz);
        debug(20, 3) ("storeClientReadHeader: copying %d bytes of body\n",
            (int) copy_sz);
        debug(20, 8) ("sc %p; node_ref->node %p; data %p; copy size %d; data size %d\n",
            sc, sc->node_ref.node, sc->node_ref.node->data, (int) copy_sz, (int) len);
        /* shift body bytes over the swap header (regions overlap) */
        xmemmove(cbuf, cbuf + swap_hdr_sz, copy_sz);
        if (sc->copy_offset == 0 && len > 0 && memHaveHeaders(mem) == 0)
            (void) storeClientParseHeader(sc, cbuf, copy_sz);
        storeClientCallback(sc, copy_sz);
        return;
    }
    /*
     * we don't have what the client wants, but at least we now
     * know the swap header size.
     */
    /* Just in case there's a node here; free it */
    stmemNodeUnref(&sc->node_ref);
    storeClientFileRead(sc);
}
/*
 * Core dispatch for a pending store-client copy request: decide whether
 * to signal end-of-object, wait for more data, copy the wanted range
 * straight from the in-memory data header, or schedule a disk read
 * (opening the swap-in file first when necessary).
 */
static void
storeClientCopy3(StoreEntry * e, store_client * sc)
{
    MemObject *mem = e->mem_obj;
    ssize_t sz = -1;
    if (storeClientNoMoreToSend(e, sc)) {
        /* There is no more to send! */
        storeClientCallback(sc, 0);
        return;
    }
    if (e->store_status == STORE_PENDING && sc->seen_offset >= mem->inmem_hi) {
        /* client has already seen this, wait for more */
        debug(20, 3) ("storeClientCopy3: Waiting for more\n");
        /* If the read is backed off and all clients have seen all the data in
         * memory, re-poll the fd */
        if ((EBIT_TEST(e->flags, ENTRY_DEFER_READ)) &&
            (storeLowestMemReaderOffset(e) == mem->inmem_hi)) {
            debug(20, 3) ("storeClientCopy3: %s - clearing ENTRY_DEFER_READ\n", e->mem_obj->url);
            /* Clear the flag and re-poll the fd */
            storeResumeRead(e);
        }
        return;
    }
    /*
     * Slight weirdness here. We open a swapin file for any
     * STORE_DISK_CLIENT, even if we can copy the requested chunk
     * from memory in the next block. We must try to open the
     * swapin file before sending any data to the client side. If
     * we postpone the open, and then can not open the file later
     * on, the client loses big time. Its transfer just gets cut
     * off. Better to open it early (while the client side handler
     * is clientCacheHit) so that we can fall back to a cache miss
     * if needed.
     */
    if (STORE_DISK_CLIENT == sc->type && NULL == sc->swapin_sio) {
        debug(20, 3) ("storeClientCopy3: Need to open swap in file\n");
        /* gotta open the swapin file */
        if (storeTooManyDiskFilesOpen()) {
            /* yuck -- this causes a TCP_SWAPFAIL_MISS on the client side */
            storeClientCallback(sc, -1);
            return;
        } else if (!sc->flags.disk_io_pending) {
            /* Don't set store_io_pending here */
            storeSwapInStart(sc);
            if (NULL == sc->swapin_sio) {
                storeClientCallback(sc, -1);
                return;
            }
            /*
             * If the open succeeds we either copy from memory, or
             * schedule a disk read in the next block.
             */
        } else {
            debug(20, 1) ("WARNING: Averted multiple fd operation (1)\n");
            return;
        }
    }
    if (sc->copy_offset >= mem->inmem_lo && sc->copy_offset < mem->inmem_hi) {
        /* What the client wants is in memory */
        debug(20, 3) ("storeClientCopy3: Copying from memory\n");
        assert(sc->new_callback);
        assert(sc->node_ref.node == NULL);      /* We should never, ever have a node here; or we'd leak! */
        sz = stmemRef(&mem->data_hdr, sc->copy_offset, &sc->node_ref);
        if (EBIT_TEST(e->flags, RELEASE_REQUEST))
            storeSwapOutMaintainMemObject(e);
        storeClientCallback(sc, sz);
        return;
    }
    /* What the client wants is not in memory. Schedule a disk read */
    assert(STORE_DISK_CLIENT == sc->type);
    assert(!sc->flags.disk_io_pending);
    debug(20, 3) ("storeClientCopy3: reading from STORE\n");
    /* Just in case there's a node here; free it */
    stmemNodeUnref(&sc->node_ref);
    storeClientFileRead(sc);
}
/* wait for full http headers to be received */
/*
 * First store-client callback of a peer digest refresh.  Inspects the
 * reply status: on 304 the old cached digest entry is revalidated and
 * the fetch state is switched back to it (old_entry/old_sc take over);
 * on 200 any old entry is released so the new digest can replace it;
 * anything else aborts the fetch.  Then either stop (304 with an
 * in-memory digest already present) or start swapping in the selected
 * entry's headers.
 */
static void
peerDigestFetchReply(void *data, mem_node_ref nr, ssize_t size)
{
    const char *buf = nr.node->data + nr.offset;
    DigestFetchState *fetch = data;
    PeerDigest *pd = fetch->pd;
    http_status status;
    HttpReply *reply;
    assert(pd && buf);
    assert(!fetch->offset);
    assert(size <= nr.node->len - nr.offset);
    if (peerDigestFetchedEnough(fetch, size, "peerDigestFetchReply"))
        goto finish;
    reply = fetch->entry->mem_obj->reply;
    assert(reply);
    status = reply->sline.status;
    debug(72, 3) ("peerDigestFetchReply: %s status: %d, expires: %ld (%+d)\n",
        strBuf(pd->host), status,
        (long int) reply->expires, (int) (reply->expires - squid_curtime));
    /* this "if" is based on clientHandleIMSReply() */
    if (status == HTTP_NOT_MODIFIED) {
        request_t *r = NULL;
        /* our old entry is fine */
        assert(fetch->old_entry);
        if (!fetch->old_entry->mem_obj->request)
            fetch->old_entry->mem_obj->request = r =
                requestLink(fetch->entry->mem_obj->request);
        assert(fetch->old_entry->mem_obj->request);
        httpReplyUpdateOnNotModified(fetch->old_entry->mem_obj->reply, reply);
        storeTimestampsSet(fetch->old_entry);
        /* get rid of 304 reply */
        storeClientUnregister(fetch->sc, fetch->entry, fetch);
        storeUnlockObject(fetch->entry);
        /* And prepare to swap in old entry if needed */
        fetch->entry = fetch->old_entry;
        fetch->old_entry = NULL;
        fetch->sc = fetch->old_sc;
        fetch->old_sc = NULL;
        /* preserve request -- we need its size to update counters */
        /* requestUnlink(r); */
        /* fetch->entry->mem_obj->request = NULL; */
    } else if (status == HTTP_OK) {
        /* get rid of old entry if any */
        if (fetch->old_entry) {
            debug(72, 3) ("peerDigestFetchReply: got new digest, releasing old one\n");
            storeClientUnregister(fetch->old_sc, fetch->old_entry, fetch);
            storeReleaseRequest(fetch->old_entry);
            storeUnlockObject(fetch->old_entry);
            fetch->old_entry = NULL;
        }
    } else {
        /* some kind of a bug */
        peerDigestFetchAbort(fetch, httpStatusLineReason(&reply->sline));
        goto finish;
    }
    /* must have a ready-to-use store entry if we got here */
    /* can we stay with the old in-memory digest? */
    if (status == HTTP_NOT_MODIFIED && fetch->pd->cd)
        peerDigestFetchStop(fetch, "Not modified");
    else
        storeClientRef(fetch->sc, fetch->entry,         /* have to swap in */
            0, 0, SM_PAGE_SIZE, peerDigestSwapInHeaders, fetch);
  finish:
    stmemNodeUnref(&nr);
}