/*
 * storeLog: append one store.log transaction line for entry 'e'.
 * 'tag' indexes storeLogTags[] (e.g. SWAPOUT/RELEASE).  No-op when
 * store logging is not configured (storelog == NULL).
 */
void storeLog(int tag, const StoreEntry * e)
{
    MemObject *mem = e->mem_obj;
    HttpReply *reply;
    if (NULL == storelog)
        return;                 /* store.log disabled */
#if UNUSED_CODE
    if (EBIT_TEST(e->flags, ENTRY_DONT_LOG))
        return;
#endif
    if (mem != NULL) {
        if (mem->log_url == NULL) {
            /* should never happen; recover by falling back to the request URL */
            debug(20, 1) ("storeLog: NULL log_url for %s\n", mem->url);
            storeMemObjectDump(mem);
            mem->log_url = xstrdup(mem->url);
        }
        reply = mem->reply;
        /*
         * XXX Ok, where should we print the dir number here?
         * Because if we print it before the swap file number, it'll break
         * the existing log format.
         */
        logfilePrintf(storelog, "%9d.%03d %-7s %02d %08X %s %4d %9d %9d %9d %s %d/%d %s %s\n",
            (int) current_time.tv_sec,
            (int) current_time.tv_usec / 1000,
            storeLogTags[tag],
            e->swap_dirn,
            e->swap_filen,
            storeKeyText(e->hash.key),
            reply->sline.status,
            (int) reply->date,
            (int) reply->last_modified,
            (int) reply->expires,
            strLen(reply->content_type) ? strBuf(reply->content_type) : "unknown",
            reply->content_length,
            /* bytes actually received so far, minus the reply header */
            (int) (mem->inmem_hi - mem->reply->hdr_sz),
            RequestMethodStr[mem->method],
            mem->log_url);
    } else {
        /* no mem object. Most RELEASE cases */
        logfilePrintf(storelog, "%9d.%03d %-7s %02d %08X %s ? ? ? ? ?/? ?/? ? ?\n",
            (int) current_time.tv_sec,
            (int) current_time.tv_usec / 1000,
            storeLogTags[tag],
            e->swap_dirn,
            e->swap_filen,
            storeKeyText(e->hash.key));
    }
}
/*
 * storeLog: append one store.log transaction line for entry 'e'
 * (later variant: 64-bit-safe formats, line buffering, escaped URL).
 * 'tag' indexes storeLogTags[].  No-op when storelog is NULL.
 */
void storeLog(int tag, const StoreEntry * e)
{
    MemObject *mem = e->mem_obj;
    HttpReply *reply;
    if (NULL == storelog)
        return;                 /* store.log disabled */
#if UNUSED_CODE
    if (EBIT_TEST(e->flags, ENTRY_DONT_LOG))
        return;
#endif
    if (mem != NULL) {
        reply = mem->reply;
        /*
         * XXX Ok, where should we print the dir number here?
         * Because if we print it before the swap file number, it'll break
         * the existing log format.
         */
        /* logfileLineStart/End bracket the record so it is written atomically */
        logfileLineStart(storelog);
        logfilePrintf(storelog, "%9ld.%03d %-7s %02d %08X %s %4d %9ld %9ld %9ld %s %" PRINTF_OFF_T "/%" PRINTF_OFF_T " %s %s\n",
            (long int) current_time.tv_sec,
            (int) current_time.tv_usec / 1000,
            storeLogTags[tag],
            e->swap_dirn,
            e->swap_filen,
            storeKeyText(e->hash.key),
            reply->sline.status,
            (long int) reply->date,
            (long int) reply->last_modified,
            (long int) reply->expires,
            strLen(reply->content_type) ? strBuf(reply->content_type) : "unknown",
            reply->content_length,
            /* bytes received so far, excluding the reply header */
            mem->inmem_hi - mem->reply->hdr_sz,
            RequestMethods[mem->method].str,
            /* escape so log parsers are not confused by raw URL bytes */
            rfc1738_escape_unescaped(mem->url));
        logfileLineEnd(storelog);
    } else {
        /* no mem object. Most RELEASE cases */
        logfileLineStart(storelog);
        logfilePrintf(storelog, "%9ld.%03d %-7s %02d %08X %s ? ? ? ? ?/? ?/? ? ?\n",
            (long int) current_time.tv_sec,
            (int) current_time.tv_usec / 1000,
            storeLogTags[tag],
            e->swap_dirn,
            e->swap_filen,
            storeKeyText(e->hash.key));
        logfileLineEnd(storelog);
    }
}
/* starts swap out sequence for the digest */
static void storeDigestRewriteStart(void *datanotused)
{
    request_flags flags;
    char *url;
    StoreEntry *e;
    assert(store_digest);
    /* prevent overlapping if rewrite schedule is too tight */
    if (sd_state.rewrite_lock) {
        debug(71, 1) ("storeDigestRewriteStart: overlap detected, consider increasing rewrite period\n");
        return;
    }
    debug(71, 2) ("storeDigestRewriteStart: start rewrite #%d\n", sd_state.rewrite_count + 1);
    /* make new store entry */
    url = internalStoreUri("/squid-internal-periodic/", StoreDigestFileName);
    flags = null_request_flags;
    flags.cachable = 1;
    e = storeCreateEntry(url, flags, METHOD_GET);
    assert(e);
    /* rewrite_lock doubles as the "rewrite in progress" flag checked above */
    sd_state.rewrite_lock = cbdataAlloc(generic_cbdata);
    sd_state.rewrite_lock->data = e;
    debug(71, 3) ("storeDigestRewriteStart: url: %s key: %s\n", url, storeKeyText(e->hash.key));
    e->mem_obj->request = requestLink(urlParse(METHOD_GET, url));
    /* wait for rebuild (if any) to finish; resume is triggered elsewhere */
    if (sd_state.rebuild_lock) {
        debug(71, 2) ("storeDigestRewriteStart: waiting for rebuild to finish.\n");
        return;
    }
    storeDigestRewriteResume();
}
/* copy bytes requested by the client */
/*
 * Schedule a copy of up to 'size' bytes starting at 'copy_offset' into
 * 'buf'; 'callback(data, buf, nread)' fires when data is available.
 * 'seen_offset' is how far the client has already consumed.
 * Only one copy may be pending per store_client (asserted below).
 */
void storeClientCopy(store_client * sc, StoreEntry * e, squid_off_t seen_offset, squid_off_t copy_offset, size_t size, char *buf, STCB * callback, void *data)
{
    assert(!EBIT_TEST(e->flags, ENTRY_ABORTED));
    debug(20, 3) ("storeClientCopy: %s, seen %" PRINTF_OFF_T ", want %" PRINTF_OFF_T ", size %d, cb %p, cbdata %p\n",
        storeKeyText(e->hash.key),
        seen_offset,
        copy_offset,
        (int) size,
        callback,
        data);
    assert(sc != NULL);
#if STORE_CLIENT_LIST_DEBUG
    assert(sc == storeClientListSearch(e->mem_obj, data));
#endif
    assert(sc->callback == NULL);       /* no copy already pending */
    assert(sc->entry == e);
    sc->seen_offset = seen_offset;
    sc->callback = callback;
    sc->copy_buf = buf;
    sc->copy_size = size;
    sc->copy_offset = copy_offset;
    storeClientCopy2(e, sc);
}
/*
 * storeClientCopy2: dispatch a pending client copy, guarding against
 * reentrancy.  If a copy is already running for this sc, queue an event
 * instead of recursing; if forward headers are still awaited, defer.
 */
static void storeClientCopy2(StoreEntry * e, store_client * sc)
{
    if (sc->flags.copy_event_pending)
        return;                 /* event already queued */
    if (EBIT_TEST(e->flags, ENTRY_FWD_HDR_WAIT)) {
        debug(20, 5) ("storeClientCopy2: returning because ENTRY_FWD_HDR_WAIT set\n");
        return;
    }
    if (sc->flags.store_copying) {
        /* avoid recursion; retry from the event loop */
        sc->flags.copy_event_pending = 1;
        debug(20, 3) ("storeClientCopy2: Queueing storeClientCopyEvent()\n");
        eventAdd("storeClientCopyEvent", storeClientCopyEvent, sc, 0.0, 0);
        return;
    }
    cbdataLock(sc);             /* ick, prevent sc from getting freed */
    sc->flags.store_copying = 1;
    debug(20, 3) ("storeClientCopy2: %s\n", storeKeyText(e->hash.key));
    assert(sc->callback != NULL);
    /*
     * We used to check for ENTRY_ABORTED here. But there were some
     * problems. For example, we might have a slow client (or two) and
     * the server-side is reading far ahead and swapping to disk. Even
     * if the server-side aborts, we want to give the client(s)
     * everything we got before the abort condition occurred.
     */
    storeClientCopy3(e, sc);
    sc->flags.store_copying = 0;
    cbdataUnlock(sc);           /* ick, allow sc to be freed */
}
/*
 * storeSwapInStart: open the on-disk object for reading on behalf of
 * 'sc'.  On success sc->swapin_sio holds the (cbdata-locked) storeIO
 * handle; on any precondition failure it is left untouched.
 */
void storeSwapInStart(store_client * sc)
{
    StoreEntry *e = sc->entry;
    assert(e->mem_status == NOT_IN_MEMORY);
    if (!EBIT_TEST(e->flags, ENTRY_VALIDATED)) {
        /* We're still reloading and haven't validated this entry yet */
        return;
    }
    debug(20, 3) ("storeSwapInStart: called for %08X %s \n",
        e->swap_file_number, storeKeyText(e->key));
    if (e->swap_status != SWAPOUT_WRITING && e->swap_status != SWAPOUT_DONE) {
        /* object is not (or no longer) on disk */
        debug(20, 1) ("storeSwapInStart: bad swap_status (%s)\n",
            swapStatusStr[e->swap_status]);
        return;
    }
    if (e->swap_file_number < 0) {
        debug(20, 1) ("storeSwapInStart: swap_file_number < 0\n");
        return;
    }
    assert(e->mem_obj != NULL);
    debug(20, 3) ("storeSwapInStart: Opening fileno %08X\n",
        e->swap_file_number);
    sc->swapin_sio = storeOpen(e->swap_file_number, O_RDONLY,
        storeSwapInFileClosed, sc);
    cbdataLock(sc->swapin_sio); /* keep sio valid while this client reads */
}
/* copy bytes requested by the client */
/*
 * Schedule a copy of up to 'size' bytes starting at 'copy_offset' into
 * 'buf'; 'callback(data, buf, nread)' fires when data is available.
 * 'seen_offset' is how far the client has already consumed; 'data'
 * identifies the registered store_client.
 *
 * Fix: the original assigned sc->copy_offset twice (once before
 * seen_offset and again after copy_size); the redundant second
 * assignment has been removed.
 */
void storeClientCopy(StoreEntry * e, off_t seen_offset, off_t copy_offset, size_t size, char *buf, STCB * callback, void *data)
{
    store_client *sc;
    assert(!EBIT_TEST(e->flags, ENTRY_ABORTED));
    debug(20, 3) ("storeClientCopy: %s, seen %d, want %d, size %d, cb %p, cbdata %p\n",
        storeKeyText(e->key),
        (int) seen_offset,
        (int) copy_offset,
        (int) size,
        callback,
        data);
    sc = storeClientListSearch(e->mem_obj, data);
    assert(sc != NULL);
    assert(sc->callback == NULL);       /* no copy already pending */
    sc->copy_offset = copy_offset;
    sc->seen_offset = seen_offset;
    sc->callback = callback;
    sc->copy_buf = buf;
    sc->copy_size = size;
    storeClientCopy2(e, sc);
}
/* should we digest this entry? used by storeDigestAdd() */
/*
 * Returns 1 if 'e' belongs in the cache digest, 0 otherwise.  An entry
 * is rejected if it is uncachable, private, negatively cached, marked
 * for release, has a bad length, is too big, or will go stale before
 * the next digest rebuild.
 */
static int storeDigestAddable(const StoreEntry * e)
{
    /* add some stats! XXX */
    debug(71, 6) ("storeDigestAddable: checking entry, key: %s\n",
        storeKeyText(e->hash.key));
    /* check various entry flags (mimics storeCheckCachable XXX) */
    if (!EBIT_TEST(e->flags, ENTRY_CACHABLE)) {
        debug(71, 6) ("storeDigestAddable: NO: not cachable\n");
        return 0;
    }
    if (EBIT_TEST(e->flags, KEY_PRIVATE)) {
        debug(71, 6) ("storeDigestAddable: NO: private key\n");
        return 0;
    }
    if (EBIT_TEST(e->flags, ENTRY_NEGCACHED)) {
        debug(71, 6) ("storeDigestAddable: NO: negative cached\n");
        return 0;
    }
    if (EBIT_TEST(e->flags, RELEASE_REQUEST)) {
        debug(71, 6) ("storeDigestAddable: NO: release requested\n");
        return 0;
    }
    if (e->store_status == STORE_OK && EBIT_TEST(e->flags, ENTRY_BAD_LENGTH)) {
        debug(71, 6) ("storeDigestAddable: NO: wrong content-length\n");
        return 0;
    }
    /* do not digest huge objects */
    if (e->swap_file_sz > Config.Store.maxObjectSize) {
        debug(71, 6) ("storeDigestAddable: NO: too big\n");
        return 0;
    }
    /* still here? check staleness */
    /* Note: We should use the time of the next rebuild, not (cur_time+period) */
    if (refreshCheckDigest(e, Config.digest.rebuild_period)) {
        debug(71, 6) ("storeDigestAddable: entry expires within %d secs, ignoring\n",
            (int) Config.digest.rebuild_period);
        return 0;
    }
    /*
     * idea: how about also skipping very fresh (thus, potentially
     * unstable) entries? Should be configurable through
     * cd_refresh_pattern, of course.
     */
    /*
     * idea: skip objects that are going to be purged before the next
     * update.
     */
#if OLD_UNUSED_CODE
    /* This code isn't applicable anymore, we can't fix it atm either :( */
    if ((squid_curtime + Config.digest.rebuild_period) - e->lastref > storeExpiredReferenceAge())
        return 0;
#endif
    return 1;
}
/* * Key generation function to implement the LRU policy. Normally * one would not do this with a heap -- use the linked list instead. * For testing and performance characterization it was useful. * Don't use it unless you are trying to compare performance among * heap-based replacement policies... */ heap_key HeapKeyGen_StoreEntry_LRU(void *entry, double age) { StoreEntry *e = entry; debug(81, 3) ("HeapKeyGen_StoreEntry_LRU: %s age=%f lastref=%f\n", storeKeyText(e->hash.key), age, (double) e->lastref); if (e->mem_obj && e->mem_obj->url) debug(81, 3) ("HeapKeyGen_StoreEntry_LRU: url=%s\n", e->mem_obj->url); return (heap_key) e->lastref; }
/* generates a "unique" boundary string for multipart responses
 * the caller is responsible for cleaning the string */
/* Boundary format: "<appname>:<store-key-hex>". */
String httpHdrRangeBoundaryStr(clientHttpRequest * http)
{
    String b = StringNull;
    const char *key;
    assert(http);
    /* application name prefix */
    stringAppend(&b, full_appname_string, strlen(full_appname_string));
    stringAppend(&b, ":", 1);
    /* store key makes the boundary unique per entry */
    key = storeKeyText(http->entry->key);
    stringAppend(&b, key, strlen(key));
    return b;
}
/*
 * icmpHandleSourcePing: process a source-ping echo received over ICMP.
 * 'buf' carries an icp_common_t header followed by the URL; the cache
 * key is reconstructed from the URL and request number, then handed to
 * the normal ICP reply path.
 */
static void icmpHandleSourcePing(const struct sockaddr_in *from, const char *buf)
{
    const cache_key *key;
    icp_common_t header;
    const char *url;
    /* copy out the header; buf may not be suitably aligned */
    xmemcpy(&header, buf, sizeof(icp_common_t));
    url = buf + sizeof(icp_common_t);
    key = icpGetCacheKey(url, (int) header.reqnum);
    debug(37, 3) ("icmpHandleSourcePing: from %s, key '%s'\n",
        inet_ntoa(from->sin_addr), storeKeyText(key));
    /* call neighborsUdpAck even if ping_status != PING_WAITING */
    neighborsUdpAck(key, &header, from);
}
/*
 * storeUnregister: detach the store_client identified by 'data' from
 * entry 'e'.  Fires the pending callback with size -1 (unexpected
 * termination) if one is registered.  Returns 1 if a client was
 * removed, 0 if none matched.
 */
int storeUnregister(StoreEntry * e, void *data)
{
    MemObject *mem = e->mem_obj;
    store_client *sc;
    store_client **S;
    STCB *callback;
    if (mem == NULL)
        return 0;
    debug(20, 3) ("storeUnregister: called for '%s'\n", storeKeyText(e->key));
    /* find the client in the singly-linked list, keeping the link slot */
    for (S = &mem->clients; (sc = *S) != NULL; S = &(*S)->next) {
        if (sc->callback_data == data)
            break;
    }
    if (sc == NULL)
        return 0;
    if (sc == mem->clients) {
        /*
         * If we are unregistering the _first_ client for this
         * entry, then we have to reset the client FD to -1.
         */
        mem->fd = -1;
    }
    *S = sc->next;              /* unlink */
    mem->nclients--;
    sc->flags.disk_io_pending = 0;
    if (e->store_status == STORE_OK && e->swap_status != SWAPOUT_DONE)
        storeSwapOut(e);
    if (sc->swapin_sio) {
        storeClose(sc->swapin_sio);
        sc->swapin_sio = NULL;
    }
    if ((callback = sc->callback) != NULL) {
        /* callback with ssize = -1 to indicate unexpected termination */
        debug(20, 3) ("storeUnregister: store_client for %s has a callback\n",
            mem->url);
        sc->callback = NULL;    /* clear before invoking to avoid re-entry */
        if (cbdataValid(sc->callback_data))
            callback(sc->callback_data, sc->copy_buf, -1);
    }
#if DELAY_POOLS
    delayUnregisterDelayIdPtr(&sc->delay_id);
#endif
    cbdataUnlock(sc->callback_data);    /* we're done with it now */
    cbdataFree(sc);
    assert(e->lock_count > 0);
    if (mem->nclients == 0)
        CheckQuickAbort(e);     /* last client gone: maybe abort the fetch */
    return 1;
}
/* * Key generation function to implement the GDS-Frequency policy. * Similar to Greedy Dual-Size Hits policy, but adds aging of * documents to prevent pollution. Maximizes object hit rate by * keeping more small, popular objects in cache. Achieves lower * byte hit rate than LFUDA because there are fewer large objects * in cache. * * This version implements a tie-breaker based upon recency * (e->lastref): for objects that have the same reference count * the most recent object wins (gets a higher key value). * * Note: this does not properly handle when the aging factor * gets so huge that the added value is outside of the * precision of double. However, Squid has to stay up * for quite a extended period of time (number of requests) * for this to become a problem. (estimation is 10^8 cache * turnarounds) */ heap_key HeapKeyGen_StoreEntry_GDSF(void *entry, double age) { StoreEntry *e = entry; heap_key key; double size = e->swap_file_sz ? (double) e->swap_file_sz : 1.0; double tie = (e->lastref > 1) ? (1.0 / e->lastref) : 1.0; key = age + ((double) e->refcount / size) - tie; debug(81, 3) ("HeapKeyGen_StoreEntry_GDSF: %s size=%f refcnt=%d lastref=%ld age=%f tie=%f -> %f\n", storeKeyText(e->hash.key), size, (int) e->refcount, (long int) e->lastref, age, tie, key); if (e->mem_obj && e->mem_obj->url) debug(81, 3) ("HeapKeyGen_StoreEntry_GDSF: url=%s\n", e->mem_obj->url); return key; }
/*
 * neighborsHtcpReply: handle an HTCP reply from 'from' for cache key
 * 'key'.  Updates peer liveness/RTT stats, then dispatches the reply
 * to the waiting entry's ping_reply_callback.  Replies for missing,
 * already-dispatched, unlocked, or non-waiting entries are counted as
 * ignored.
 *
 * Fix: the original called storeGet(key) twice -- once in the
 * initializer (result discarded) and again in the NULL test.  The
 * redundant second lookup has been removed.
 */
void neighborsHtcpReply(const cache_key * key, htcpReplyData * htcp, const struct sockaddr_in *from)
{
    StoreEntry *e = storeGet(key);
    MemObject *mem = NULL;
    peer *p;
    peer_t ntype = PEER_NONE;
    debug(15, 6) ("neighborsHtcpReply: %s %s\n",
        htcp->hit ? "HIT" : "MISS", storeKeyText(key));
    if (NULL != e)
        mem = e->mem_obj;
    if ((p = whichPeer(from)))
        neighborAliveHtcp(p, mem, htcp);
    /* Does the entry exist? */
    if (NULL == e) {
        debug(12, 3) ("neighborsHtcpReply: Cache key '%s' not found\n",
            storeKeyText(key));
        neighborCountIgnored(p);
        return;
    }
    /* check if someone is already fetching it */
    if (EBIT_TEST(e->flags, ENTRY_DISPATCHED)) {
        debug(15, 3) ("neighborsHtcpReply: '%s' already being fetched.\n",
            storeKeyText(key));
        neighborCountIgnored(p);
        return;
    }
    if (mem == NULL) {
        debug(15, 2) ("Ignoring reply for missing mem_obj: %s\n",
            storeKeyText(key));
        neighborCountIgnored(p);
        return;
    }
    if (e->ping_status != PING_WAITING) {
        debug(15, 2) ("neighborsHtcpReply: Entry %s is not PING_WAITING\n",
            storeKeyText(key));
        neighborCountIgnored(p);
        return;
    }
    if (e->lock_count == 0) {
        debug(12, 1) ("neighborsHtcpReply: '%s' has no locks\n",
            storeKeyText(key));
        neighborCountIgnored(p);
        return;
    }
    if (p) {
        ntype = neighborType(p, mem->request);
        neighborUpdateRtt(p, mem);
    }
    if (ignoreMulticastReply(p, mem)) {
        neighborCountIgnored(p);
        return;
    }
    debug(15, 3) ("neighborsHtcpReply: e = %p\n", e);
    mem->ping_reply_callback(p, ntype, PROTO_HTCP, htcp, mem->ircb_data);
}
/*
 * storeDigestDel: remove 'entry' from the cache digest (public keys
 * only).  Tracks 'lost' deletions -- entries that should have been in
 * the digest but were not found.  Compiled out without cache digests.
 */
void storeDigestDel(const StoreEntry * entry)
{
#if USE_CACHE_DIGESTS
    if (!Config.onoff.digest_generation) {
        return;                 /* digest generation disabled by config */
    }
    assert(entry && store_digest);
    debug(71, 6) ("storeDigestDel: checking entry, key: %s\n",
        storeKeyText(entry->hash.key));
    /* only public keys are ever added, so only they need deleting */
    if (!EBIT_TEST(entry->flags, KEY_PRIVATE)) {
        if (!cacheDigestTest(store_digest, entry->hash.key)) {
            /* was never (or no longer) in the digest: account as lost */
            sd_stats.del_lost_count++;
            debug(71, 6) ("storeDigestDel: lost entry, key: %s url: %s\n",
                storeKeyText(entry->hash.key), storeUrl(entry));
        } else {
            sd_stats.del_count++;
            cacheDigestDel(store_digest, entry->hash.key);
            debug(71, 6) ("storeDigestDel: deled entry, key: %s\n",
                storeKeyText(entry->hash.key));
        }
    }
#endif
}
/*
 * cacheDigestHashKey: derive the four digest bit positions for 'key'.
 * The 16-byte MD5 key is split into four 32-bit words; each word,
 * normalized to network byte order, is reduced modulo the digest's bit
 * count.  Results are stored in the file-scope hashed_keys[] array.
 */
static void cacheDigestHashKey(const CacheDigest * cd, const cache_key * key)
{
    const unsigned int bit_count = cd->mask_size * 8;
    unsigned int tmp_keys[4];
    /* we must memcpy to ensure alignment */
    xmemcpy(tmp_keys, key, sizeof(tmp_keys));
    hashed_keys[0] = htonl(tmp_keys[0]) % bit_count;
    hashed_keys[1] = htonl(tmp_keys[1]) % bit_count;
    hashed_keys[2] = htonl(tmp_keys[2]) % bit_count;
    hashed_keys[3] = htonl(tmp_keys[3]) % bit_count;
    debug(70, 9) ("cacheDigestHashKey: %s -(%d)-> %d %d %d %d\n",
        storeKeyText(key), bit_count,
        hashed_keys[0], hashed_keys[1], hashed_keys[2], hashed_keys[3]);
}
static void storeDigestAdd(const StoreEntry * entry) { assert(entry && store_digest); if (storeDigestAddable(entry)) { sd_stats.add_count++; if (cacheDigestTest(store_digest, entry->hash.key)) sd_stats.add_coll_count++; cacheDigestAdd(store_digest, entry->hash.key); debug(71, 6) ("storeDigestAdd: added entry, key: %s\n", storeKeyText(entry->hash.key)); } else { sd_stats.rej_count++; if (cacheDigestTest(store_digest, entry->hash.key)) sd_stats.rej_coll_count++; } }
/*
 * This routine hasn't been optimised to take advantage of the
 * passed sc. Yet.
 */
/*
 * Detach store_client 'sc' from entry 'e'.  Fires the pending
 * callback with size -1 (unexpected termination) if one is set.
 * Returns 1 if the client was removed, 0 otherwise.
 */
int storeUnregister(store_client * sc, StoreEntry * e, void *data)
{
    MemObject *mem = e->mem_obj;
#if STORE_CLIENT_LIST_DEBUG
    assert(sc == storeClientListSearch(e->mem_obj, data));
#endif
    if (mem == NULL)
        return 0;
    debug(20, 3) ("storeUnregister: called for '%s'\n",
        storeKeyText(e->hash.key));
    if (sc == NULL)
        return 0;
    if (mem->clients.head == NULL)
        return 0;
    dlinkDelete(&sc->node, &mem->clients);
    mem->nclients--;
    if (e->store_status == STORE_OK && e->swap_status != SWAPOUT_DONE)
        storeSwapOut(e);
    if (sc->swapin_sio) {
        /* close and release our reference on the swap-in handle */
        storeClose(sc->swapin_sio);
        cbdataUnlock(sc->swapin_sio);
        sc->swapin_sio = NULL;
        statCounter.swap.ins++;
    }
    if (NULL != sc->callback) {
        /* callback with ssize = -1 to indicate unexpected termination */
        debug(20, 3) ("storeUnregister: store_client for %s has a callback\n",
            mem->url);
        storeClientCallback(sc, -1);
    }
#if DELAY_POOLS
    delayUnregisterDelayIdPtr(&sc->delay_id);
#endif
    cbdataUnlock(sc->callback_data);    /* we're done with it now */
    /*assert(!sc->flags.disk_io_pending); */
    cbdataFree(sc);
    assert(e->lock_count > 0);
    storeSwapOutMaintainMemObject(e);
    if (mem->nclients == 0)
        CheckQuickAbort(e);     /* last client gone: maybe abort the fetch */
    return 1;
}
/* Call handlers waiting for data to be appended to E. */
/*
 * Walks the entry's client list (snapshotting 'next' first, since a
 * callback may unregister its client) and re-drives the copy state
 * machine for every client with a valid pending callback.
 */
void InvokeHandlers(StoreEntry * e)
{
    int i = 0;
    MemObject *mem = e->mem_obj;
    store_client *sc;
    store_client *nx = NULL;
    assert(mem->clients != NULL || mem->nclients == 0);
    debug(20, 3) ("InvokeHandlers: %s\n", storeKeyText(e->key));
    /* walk the entire list looking for valid callbacks */
    for (sc = mem->clients; sc; sc = nx) {
        nx = sc->next;          /* grab next before sc can be freed */
        debug(20, 3) ("InvokeHandlers: checking client #%d\n", i++);
        if (sc->callback_data == NULL)
            continue;
        if (sc->callback == NULL)
            continue;           /* no copy pending for this client */
        storeClientCopy2(e, sc);
    }
}
/* * Key generation function to implement the LFU-DA policy (Least * Frequently Used with Dynamic Aging). Similar to classical LFU * but with aging to handle turnover of the popular document set. * Maximizes byte hit rate by keeping more currently popular objects * in cache regardless of size. Achieves lower hit rate than GDS * because there are more large objects in cache (so less room for * smaller popular objects). * * This version implements a tie-breaker based upon recency * (e->lastref): for objects that have the same reference count * the most recent object wins (gets a higher key value). * * Note: this does not properly handle when the aging factor * gets so huge that the added value is outside of the * precision of double. However, Squid has to stay up * for quite a extended period of time (number of requests) * for this to become a problem. (estimation is 10^8 cache * turnarounds) */ heap_key HeapKeyGen_StoreEntry_LFUDA(void *entry, double age) { StoreEntry *e = entry; heap_key key; double tie; if (e->lastref <= 0) tie = 0.0; else if (squid_curtime <= e->lastref) tie = 0.0; else tie = 1.0 - exp((double) (e->lastref - squid_curtime) / 86400.0); key = age + (double) e->refcount - tie; debug(81, 3) ("HeapKeyGen_StoreEntry_LFUDA: %s refcnt=%d lastref=%ld age=%f tie=%f -> %f\n", storeKeyText(e->hash.key), (int) e->refcount, (long int) e->lastref, age, tie, key); if (e->mem_obj && e->mem_obj->url) debug(81, 3) ("HeapKeyGen_StoreEntry_LFUDA: url=%s\n", e->mem_obj->url); return (double) key; }
/*
 * This routine hasn't been optimised to take advantage of the
 * passed sc. Yet.
 */
/*
 * Detach store_client 'sc' from entry 'e' and destroy it.  Fires the
 * pending callback with size -1 (unexpected termination) if one is
 * set, releases the swap-in handle and the entry lock.  Returns 1 if
 * the client was removed, 0 otherwise.
 */
int storeClientUnregister(store_client * sc, StoreEntry * e, void *owner)
{
    MemObject *mem = e->mem_obj;
    if (sc == NULL)
        return 0;
    debug(20, 3) ("storeClientUnregister: called for '%s'\n",
        storeKeyText(e->hash.key));
#if STORE_CLIENT_LIST_DEBUG
    assert(sc == storeClientListSearch(e->mem_obj, owner));
#endif
    assert(sc->entry == e);
    if (mem->clients.head == NULL)
        return 0;
    dlinkDelete(&sc->node, &mem->clients);
    mem->nclients--;
    if (e->store_status == STORE_OK && e->swap_status != SWAPOUT_DONE)
        storeSwapOut(e);
    if (sc->swapin_sio) {
        /* close and release our reference on the swap-in handle */
        storeClose(sc->swapin_sio);
        cbdataUnlock(sc->swapin_sio);
        sc->swapin_sio = NULL;
        statCounter.swap.ins++;
    }
    if (NULL != sc->new_callback) {
        /* callback with ssize = -1 to indicate unexpected termination */
        debug(20, 3) ("storeClientUnregister: store_client for %s has a callback\n",
            mem->url);
        storeClientCallback(sc, -1);
    }
    stmemNodeUnref(&sc->node_ref);      /* drop in-memory data reference */
#if DELAY_POOLS
    delayUnregisterDelayIdPtr(&sc->delay_id);
#endif
    storeSwapOutMaintainMemObject(e);
    if (mem->nclients == 0)
        CheckQuickAbort(e);     /* last client gone: maybe abort the fetch */
    storeUnlockObject(sc->entry);
    sc->entry = NULL;
    cbdataFree(sc);
    return 1;
}
/*
 * An entry written to the swap log MUST have the following
 * properties.
 * 1. It MUST be a public key. It does no good to log
 * a public ADD, change the key, then log a private
 * DEL. So we need to log a DEL before we change a
 * key from public to private.
 * 2. It MUST have a valid (> -1) swap_file_number.
 */
void storeDirSwapLog(const StoreEntry * e, int op)
{
    /* high bits of swap_file_number select the cache_dir */
    int dirn = e->swap_file_number >> SWAP_DIR_SHIFT;
    SwapDir *sd;
    assert(dirn < Config.cacheSwap.n_configured);
    assert(!EBIT_TEST(e->flags, KEY_PRIVATE));
    assert(e->swap_file_number >= 0);
    /*
     * icons and such; don't write them to the swap log
     */
    if (EBIT_TEST(e->flags, ENTRY_SPECIAL))
        return;
    assert(op > SWAP_LOG_NOP && op < SWAP_LOG_MAX);
    debug(20, 3) ("storeDirSwapLog: %s %s %08X\n",
        swap_log_op_str[op],
        storeKeyText(e->key),
        e->swap_file_number);
    /* delegate the actual write to the owning cache_dir's logger */
    sd = &Config.cacheSwap.swapDirs[dirn];
    sd->log.write(sd, e, op);
}
/* Call handlers waiting for data to be appended to E. */
/*
 * dlink-list variant: walks the entry's client list (snapshotting
 * 'next' first, since a callback may unregister its client) and
 * re-drives the copy state machine for each client with a pending
 * callback and no disk I/O in flight.
 */
void InvokeHandlers(StoreEntry * e)
{
    int i = 0;
    MemObject *mem = e->mem_obj;
    store_client *sc;
    dlink_node *nx = NULL;
    dlink_node *node;
    debug(20, 3) ("InvokeHandlers: %s\n", storeKeyText(e->hash.key));
    /* walk the entire list looking for valid callbacks */
    for (node = mem->clients.head; node; node = nx) {
        sc = node->data;
        nx = node->next;        /* grab next before sc can be freed */
        debug(20, 3) ("InvokeHandlers: checking client #%d\n", i++);
        if (sc->callback == NULL)
            continue;           /* no copy pending for this client */
        if (sc->flags.disk_io_pending)
            continue;           /* disk read already in flight */
        storeClientCopy2(e, sc);
    }
}
/* copy bytes requested by the client */
/*
 * Schedule a copy of up to 'size' bytes starting at 'copy_offset' into
 * 'buf'; 'callback(data, buf, nread)' fires when data is available.
 * This variant also takes a cbdata lock on 'data' and kicks swapout
 * when reads are being deferred.
 */
void storeClientCopy(store_client * sc, StoreEntry * e, squid_off_t seen_offset, squid_off_t copy_offset, size_t size, char *buf, STCB * callback, void *data)
{
    debug(20, 3) ("storeClientCopy: %s, seen %" PRINTF_OFF_T ", want %" PRINTF_OFF_T ", size %d, cb %p, cbdata %p\n",
        storeKeyText(e->hash.key),
        seen_offset,
        copy_offset,
        (int) size,
        callback,
        data);
    assert(sc != NULL);
#if STORE_CLIENT_LIST_DEBUG
    assert(sc == storeClientListSearch(e->mem_obj, data));
#endif
    assert(sc->callback == NULL);       /* no copy already pending */
    assert(sc->entry == e);
    sc->seen_offset = seen_offset;
    sc->callback = callback;
    sc->callback_data = data;
    cbdataLock(sc->callback_data);      /* hold until the callback fires */
    sc->copy_buf = buf;
    sc->copy_size = size;
    sc->copy_offset = copy_offset;
    /* If the read is being deferred, run swapout in case this client has the
     * lowest seen_offset. storeSwapOut() frees the memory and clears the
     * ENTRY_DEFER_READ bit if necessary */
    if (EBIT_TEST(e->flags, ENTRY_DEFER_READ)) {
        storeSwapOut(e);
    }
    storeClientCopy2(e, sc);
}
/*
 * storeClientCopy2: drive one pending client copy to completion or to
 * a wait state.  Handles reentrancy (queues an event if already
 * copying), serves data from memory when possible, otherwise opens or
 * continues a disk swap-in.  The registered callback is invoked with
 * the byte count, 0 at end-of-object, or -1 on failure.
 */
static void storeClientCopy2(StoreEntry * e, store_client * sc)
{
    STCB *callback = sc->callback;
    MemObject *mem = e->mem_obj;
    size_t sz;
    if (sc->flags.copy_event_pending)
        return;                 /* event already queued */
    if (EBIT_TEST(e->flags, ENTRY_FWD_HDR_WAIT)) {
        debug(20, 5) ("storeClientCopy2: returning because ENTRY_FWD_HDR_WAIT set\n");
        return;
    }
    if (sc->flags.store_copying) {
        /* avoid recursion; retry from the event loop */
        sc->flags.copy_event_pending = 1;
        debug(20, 3) ("storeClientCopy2: Queueing storeClientCopyEvent()\n");
        eventAdd("storeClientCopyEvent", storeClientCopyEvent, sc, 0.0, 0);
        return;
    }
    cbdataLock(sc);             /* ick, prevent sc from getting freed */
    sc->flags.store_copying = 1;
    debug(20, 3) ("storeClientCopy2: %s\n", storeKeyText(e->key));
    assert(callback != NULL);
    /*
     * We used to check for ENTRY_ABORTED here. But there were some
     * problems. For example, we might have a slow client (or two) and
     * the server-side is reading far ahead and swapping to disk. Even
     * if the server-side aborts, we want to give the client(s)
     * everything we got before the abort condition occurred.
     */
    if (storeClientNoMoreToSend(e, sc)) {
        /* There is no more to send! */
        sc->flags.disk_io_pending = 0;
        sc->callback = NULL;    /* consume the callback before invoking */
        callback(sc->callback_data, sc->copy_buf, 0);
    } else if (e->store_status == STORE_PENDING && sc->seen_offset >= mem->inmem_hi) {
        /* client has already seen this, wait for more */
        debug(20, 3) ("storeClientCopy2: Waiting for more\n");
    } else if (sc->copy_offset >= mem->inmem_lo && sc->copy_offset < mem->inmem_hi) {
        /* What the client wants is in memory */
        debug(20, 3) ("storeClientCopy2: Copying from memory\n");
        sz = stmemCopy(&mem->data_hdr, sc->copy_offset, sc->copy_buf, sc->copy_size);
        sc->flags.disk_io_pending = 0;
        sc->callback = NULL;
        callback(sc->callback_data, sc->copy_buf, sz);
    } else if (sc->swapin_sio == NULL) {
        debug(20, 3) ("storeClientCopy2: Need to open swap in file\n");
        assert(sc->type == STORE_DISK_CLIENT);
        /* gotta open the swapin file */
        if (storeTooManyDiskFilesOpen()) {
            /* yuck -- this causes a TCP_SWAPFAIL_MISS on the client side */
            sc->callback = NULL;
            callback(sc->callback_data, sc->copy_buf, -1);
        } else if (!sc->flags.disk_io_pending) {
            sc->flags.disk_io_pending = 1;
            storeSwapInStart(sc);
            if (NULL == sc->swapin_sio) {
                /* open failed; report error to the client */
                sc->flags.disk_io_pending = 0;
                sc->callback = NULL;
                callback(sc->callback_data, sc->copy_buf, -1);
            } else {
                storeClientFileRead(sc);
            }
        } else {
            debug(20, 2) ("storeClientCopy2: Averted multiple fd operation\n");
        }
    } else {
        debug(20, 3) ("storeClientCopy: reading from STORE\n");
        assert(sc->type == STORE_DISK_CLIENT);
        if (!sc->flags.disk_io_pending) {
            sc->flags.disk_io_pending = 1;
            storeClientFileRead(sc);
        } else {
            debug(20, 2) ("storeClientCopy2: Averted multiple fd operation\n");
        }
    }
    sc->flags.store_copying = 0;
    cbdataUnlock(sc);           /* ick, allow sc to be freed */
}
/* ask store for a digest */
/*
 * Build and launch an HTTP fetch of peer pd->peer's cache digest.
 * Creates the request, a DigestFetchState, a fresh private store
 * entry (plus a locked handle on any stale cached copy for IMS), and
 * hands the transfer to the forwarding layer.  Reply bytes arrive via
 * peerDigestFetchReply.
 */
static void peerDigestRequest(PeerDigest * pd)
{
    peer *p = pd->peer;
    StoreEntry *e, *old_e;
    char *url;
    const cache_key *key;
    request_t *req;
    DigestFetchState *fetch = NULL;
    pd->req_result = NULL;
    pd->flags.requested = 1;
    /* compute future request components */
    if (p->digest_url)
        url = xstrdup(p->digest_url);   /* explicitly configured digest URL */
    else
        url = internalRemoteUri(p->host, p->http_port,
            "/squid-internal-periodic/", StoreDigestFileName);
    key = storeKeyPublic(url, METHOD_GET);
    debug(72, 2) ("peerDigestRequest: %s key: %s\n", url, storeKeyText(key));
    req = urlParse(METHOD_GET, url);
    assert(req);
    /* add custom headers */
    assert(!req->header.len);
    httpHeaderPutStr(&req->header, HDR_ACCEPT, StoreDigestMimeStr);
    httpHeaderPutStr(&req->header, HDR_ACCEPT, "text/html");
    if (p->login)
        xstrncpy(req->login, p->login, MAX_LOGIN_SZ);
    /* create fetch state structure */
    fetch = memAllocate(MEM_DIGEST_FETCH_STATE);
    cbdataAdd(fetch, memFree, MEM_DIGEST_FETCH_STATE);
    fetch->request = requestLink(req);
    fetch->pd = pd;
    fetch->offset = 0;
    /* update timestamps */
    fetch->start_time = squid_curtime;
    pd->times.requested = squid_curtime;
    pd_last_req_time = squid_curtime;
    req->flags.cachable = 1;
    /* the rest is based on clientProcessExpired() */
    req->flags.refresh = 1;
    old_e = fetch->old_entry = storeGet(key);
    if (old_e) {
        /* keep the stale copy around so an IMS 304 can revalidate it */
        debug(72, 5) ("peerDigestRequest: found old entry\n");
        storeLockObject(old_e);
        storeCreateMemObject(old_e, url, url);
        storeClientListAdd(old_e, fetch);
    }
    e = fetch->entry = storeCreateEntry(url, url, req->flags, req->method);
    assert(EBIT_TEST(e->flags, KEY_PRIVATE));
    storeClientListAdd(e, fetch);
    /* set lastmod to trigger IMS request if possible */
    if (old_e)
        e->lastmod = old_e->lastmod;
    /* push towards peer cache */
    debug(72, 3) ("peerDigestRequest: forwarding to fwdStart...\n");
    fwdStart(-1, e, req);
    cbdataLock(fetch);
    cbdataLock(fetch->pd);
    storeClientCopy(e, 0, 0, 4096, memAllocate(MEM_4K_BUF),
        peerDigestFetchReply, fetch);
}
/* I should attach these records to the entry. We take the first
 * hit we get our wait until everyone misses. The timeout handler
 * call needs to nip this shopping list or call one of the misses.
 *
 * If a hit process is already started, then sobeit
 */
/*
 * Process one ICP reply for cache key 'key' from 'from'.  Updates peer
 * liveness/RTT, filters stale or unsolicited replies, then dispatches
 * by opcode to the waiting entry's ping_reply_callback.
 */
void neighborsUdpAck(const cache_key * key, icp_common_t * header, const struct sockaddr_in *from)
{
    peer *p = NULL;
    StoreEntry *entry;
    MemObject *mem = NULL;
    peer_t ntype = PEER_NONE;
    const char *opcode_d;
    icp_opcode opcode = (icp_opcode) header->opcode;
    debug(15, 6) ("neighborsUdpAck: opcode %d '%s'\n",
        (int) opcode, storeKeyText(key));
    if (NULL != (entry = storeGet(key)))
        mem = entry->mem_obj;
    if ((p = whichPeer(from)))
        neighborAlive(p, mem, header);
    if (opcode > ICP_END)
        return;                 /* unknown opcode: drop silently */
    opcode_d = icp_opcode_str[opcode];
    if (p)
        neighborUpdateRtt(p, mem);
    /* Does the entry exist? */
    if (NULL == entry) {
        debug(12, 3) ("neighborsUdpAck: Cache key '%s' not found\n",
            storeKeyText(key));
        neighborCountIgnored(p);
        return;
    }
    /* check if someone is already fetching it */
    if (EBIT_TEST(entry->flags, ENTRY_DISPATCHED)) {
        debug(15, 3) ("neighborsUdpAck: '%s' already being fetched.\n",
            storeKeyText(key));
        neighborCountIgnored(p);
        return;
    }
    if (mem == NULL) {
        debug(15, 2) ("Ignoring %s for missing mem_obj: %s\n",
            opcode_d, storeKeyText(key));
        neighborCountIgnored(p);
        return;
    }
    if (entry->ping_status != PING_WAITING) {
        debug(15, 2) ("neighborsUdpAck: Late %s for %s\n",
            opcode_d, storeKeyText(key));
        neighborCountIgnored(p);
        return;
    }
    if (entry->lock_count == 0) {
        debug(12, 1) ("neighborsUdpAck: '%s' has no locks\n",
            storeKeyText(key));
        neighborCountIgnored(p);
        return;
    }
    debug(15, 3) ("neighborsUdpAck: %s for '%s' from %s \n",
        opcode_d, storeKeyText(key), p ? p->name : "source");
    if (p) {
        ntype = neighborType(p, mem->request);
    }
    /* dispatch by opcode */
    if (ignoreMulticastReply(p, mem)) {
        neighborCountIgnored(p);
    } else if (opcode == ICP_MISS) {
        if (p == NULL) {
            neighborIgnoreNonPeer(from, opcode);
        } else {
            mem->ping_reply_callback(p, ntype, PROTO_ICP, header, mem->ircb_data);
        }
    } else if (opcode == ICP_HIT) {
        if (p == NULL) {
            neighborIgnoreNonPeer(from, opcode);
        } else {
            header->opcode = ICP_HIT;
            mem->ping_reply_callback(p, ntype, PROTO_ICP, header, mem->ircb_data);
        }
    } else if (opcode == ICP_DECHO) {
        /* dumb-cache echo: only valid from a PARENT */
        if (p == NULL) {
            neighborIgnoreNonPeer(from, opcode);
        } else if (ntype == PEER_SIBLING) {
            debug_trap("neighborsUdpAck: Found non-ICP cache as SIBLING\n");
            debug_trap("neighborsUdpAck: non-ICP neighbors must be a PARENT\n");
        } else {
            mem->ping_reply_callback(p, ntype, PROTO_ICP, header, mem->ircb_data);
        }
    } else if (opcode == ICP_SECHO) {
        /* source echo should come from the origin, not a peer */
        if (p) {
            debug(15, 1) ("Ignoring SECHO from neighbor %s\n", p->name);
            neighborCountIgnored(p);
#if ALLOW_SOURCE_PING
        } else if (Config.onoff.source_ping) {
            mem->ping_reply_callback(NULL, ntype, PROTO_ICP, header, mem->ircb_data);
#endif
        } else {
            debug(15, 1) ("Unsolicited SECHO from %s\n", inet_ntoa(from->sin_addr));
        }
    } else if (opcode == ICP_DENIED) {
        if (p == NULL) {
            neighborIgnoreNonPeer(from, opcode);
        } else if (p->stats.pings_acked > 100) {
            /* auto-disable a peer that denies (almost) everything */
            if (100 * p->icp.counts[ICP_DENIED] / p->stats.pings_acked > 95) {
                debug(15, 0) ("95%% of replies from '%s' are UDP_DENIED\n", p->name);
                debug(15, 0) ("Disabling '%s', please check your configuration.\n", p->name);
                neighborRemove(p);
                p = NULL;
            } else {
                neighborCountIgnored(p);
            }
        }
    } else if (opcode == ICP_MISS_NOFETCH) {
        mem->ping_reply_callback(p, ntype, PROTO_ICP, header, mem->ircb_data);
    } else {
        debug(15, 0) ("neighborsUdpAck: Unexpected ICP reply: %s\n", opcode_d);
    }
}
/*
 * neighborsUdpPing: send ICP (or HTCP) queries for 'entry' to all
 * eligible configured peers, starting from the round-robin position
 * 'first_ping'.
 *
 * On return, *exprep holds the number of replies to expect and
 * *timeout the ping timeout in milliseconds (derived from peer RTTs
 * unless overridden by Config.Timeout.icp_query).  The reply handler
 * 'callback'/'callback_data' is stored on the entry's MemObject for
 * neighborsUdpAck to invoke.
 *
 * Returns the number of peers actually pinged (0 if no peers are
 * configured).  Fatal error if the ICP socket is not open.
 */
int
neighborsUdpPing(request_t * request, StoreEntry * entry, IRCB * callback, void *callback_data, int *exprep, int *timeout)
{
    const char *url = storeUrl(entry);
    MemObject *mem = entry->mem_obj;
    peer *p = NULL;
    int i;
    int reqnum = 0;
    int flags;
    icp_common_t *query;
    int queries_sent = 0;
    int peers_pinged = 0;
    int parent_timeout = 0, parent_exprep = 0;
    int sibling_timeout = 0, sibling_exprep = 0;
    int mcast_timeout = 0, mcast_exprep = 0;
    if (Config.peers == NULL)
	return 0;
    if (theOutIcpConnection < 0)
	fatal("neighborsUdpPing: There is no ICP socket!");
    assert(entry->swap_status == SWAPOUT_NONE);
    mem->start_ping = current_time;
    mem->ping_reply_callback = callback;
    mem->ircb_data = callback_data;
    reqnum = icpSetCacheKey(entry->hash.key);
    /* Walk all peers once, starting at first_ping and wrapping at the
     * end of the list (round-robin fairness across requests). */
    for (i = 0, p = first_ping; i++ < Config.npeers; p = p->next) {
	if (p == NULL)
	    p = Config.peers;
	debug(15, 5) ("neighborsUdpPing: Peer %s\n", p->name);
	if (!peerWouldBePinged(p, request))
	    continue;		/* next peer */
	peers_pinged++;
	debug(15, 4) ("neighborsUdpPing: pinging peer %s for '%s'\n",
	    p->name, url);
	if (p->type == PEER_MULTICAST)
	    mcastSetTtl(theOutIcpConnection, p->mcast.ttl);
	debug(15, 3) ("neighborsUdpPing: key = '%s'\n",
	    storeKeyText(entry->hash.key));
	debug(15, 3) ("neighborsUdpPing: reqnum = %d\n", reqnum);
#if USE_HTCP
	if (p->options.htcp && !p->options.htcp_only_clr) {
	    debug(15, 3) ("neighborsUdpPing: sending HTCP query\n");
	    htcpQuery(entry, request, p);
	} else
#endif
	if (p->icp.port == echo_port) {
	    /* Peer is really just a UDP echo service: use DECHO so the
	     * reflected message is recognisable. */
	    debug(15, 4) ("neighborsUdpPing: Looks like a dumb cache, send DECHO ping\n");
	    echo_hdr.reqnum = reqnum;
	    query = icpCreateMessage(ICP_DECHO, 0, url, reqnum, 0);
	    icpUdpSend(theOutIcpConnection, &p->in_addr, query, LOG_ICP_QUERY, 0);
	} else {
	    flags = 0;
	    /* SRC_RTT flag only exists in ICP v2 */
	    if (Config.onoff.query_icmp)
		if (p->icp.version == ICP_VERSION_2)
		    flags |= ICP_FLAG_SRC_RTT;
	    query = icpCreateMessage(ICP_QUERY, flags, url, reqnum, 0);
	    icpUdpSend(theOutIcpConnection, &p->in_addr, query, LOG_ICP_QUERY, 0);
	}
	queries_sent++;
	p->stats.pings_sent++;
	/* Accumulate expected-reply counts and RTT sums per peer class;
	 * used below to derive *exprep and *timeout. */
	if (p->type == PEER_MULTICAST) {
	    mcast_exprep += p->mcast.n_replies_expected;
	    mcast_timeout += (p->stats.rtt * p->mcast.n_replies_expected);
	} else if (neighborUp(p)) {
	    /* its alive, expect a reply from it */
	    if (neighborType(p, request) == PEER_PARENT) {
		parent_exprep++;
		parent_timeout += p->stats.rtt;
	    } else {
		sibling_exprep++;
		sibling_timeout += p->stats.rtt;
	    }
	} else {
	    /* Neighbor is dead; ping it anyway, but don't expect a reply */
	    /* log it once at the threshold */
	    if (p->stats.logged_state == PEER_ALIVE) {
		debug(15, 1) ("Detected DEAD %s: %s\n",
		    neighborTypeStr(p), p->name);
		p->stats.logged_state = PEER_DEAD;
	    }
	}
	p->stats.last_query = squid_curtime;
	/*
	 * keep probe_start == 0 for a multicast peer,
	 * so neighborUp() never says this peer is dead.
	 */
	if ((p->type != PEER_MULTICAST) && (p->stats.probe_start == 0))
	    p->stats.probe_start = squid_curtime;
    }
    /* advance the round-robin start position for the next request */
    if ((first_ping = first_ping->next) == NULL)
	first_ping = Config.peers;
#if ALLOW_SOURCE_PING
    /* only do source_ping if we have neighbors */
    if (Config.npeers) {
	const ipcache_addrs *ia = NULL;
	struct sockaddr_in to_addr;
	char *host = request->host;
	if (!Config.onoff.source_ping) {
	    debug(15, 6) ("neighborsUdpPing: Source Ping is disabled.\n");
	} else if ((ia = ipcache_gethostbyname(host, 0))) {
	    debug(15, 6) ("neighborsUdpPing: Source Ping: to %s for '%s'\n",
		host, url);
	    echo_hdr.reqnum = reqnum;
	    if (icmp_sock != -1) {
		icmpSourcePing(ia->in_addrs[ia->cur], &echo_hdr, url);
	    } else {
		/* no ICMP socket: fall back to a UDP SECHO to the
		 * origin's echo port */
		to_addr.sin_family = AF_INET;
		to_addr.sin_addr = ia->in_addrs[ia->cur];
		to_addr.sin_port = htons(echo_port);
		query = icpCreateMessage(ICP_SECHO, 0, url, reqnum, 0);
		icpUdpSend(theOutIcpConnection, &to_addr, query, LOG_ICP_QUERY, 0);
	    }
	} else {
	    debug(15, 6) ("neighborsUdpPing: Source Ping: unknown host: %s\n",
		host);
	}
    }
#endif
    /*
     * How many replies to expect?
     */
    *exprep = parent_exprep + sibling_exprep + mcast_exprep;
    /*
     * If there is a configured timeout, use it
     */
    if (Config.Timeout.icp_query)
	*timeout = Config.Timeout.icp_query;
    else {
	/* Derive the timeout from average measured RTT (x2 for slack),
	 * preferring parents, then multicast, then siblings. */
	if (*exprep > 0) {
	    if (parent_exprep)
		*timeout = 2 * parent_timeout / parent_exprep;
	    else if (mcast_exprep)
		*timeout = 2 * mcast_timeout / mcast_exprep;
	    else
		*timeout = 2 * sibling_timeout / sibling_exprep;
	} else
	    *timeout = 2000;	/* 2 seconds */
	if (Config.Timeout.icp_query_max)
	    if (*timeout > Config.Timeout.icp_query_max)
		*timeout = Config.Timeout.icp_query_max;
	if (*timeout < Config.Timeout.icp_query_min)
	    *timeout = Config.Timeout.icp_query_min;
    }
    return peers_pinged;
}
/*
 * storeClientReadHeader: completion handler for the disk read of a
 * swapped-in object's metadata header.
 *
 * Unpacks the TLV swap metadata from 'buf', verifies it matches the
 * entry being swapped in (MD5 key, store URL, Vary headers), records
 * the header size and object size on the MemObject, and then either
 * hands the client any body bytes already present in the buffer or
 * schedules another disk read.
 *
 * 'data' is the store_client; 'len' is the number of bytes read, or
 * negative on I/O error (propagated to the client callback).
 *
 * Fix: the early-return path for a too-small swapfile header
 * previously leaked the TLV list returned by storeSwapMetaUnpack();
 * it is now freed before returning.
 */
static void
storeClientReadHeader(void *data, const char *buf, ssize_t len)
{
    static int md5_mismatches = 0;
    store_client *sc = data;
    StoreEntry *e = sc->entry;
    MemObject *mem = e->mem_obj;
    int swap_hdr_sz = 0;
    size_t body_sz;
    size_t copy_sz;
    tlv *tlv_list;
    tlv *t;
    int swap_object_ok = 1;
    char *new_url = NULL;
    char *new_store_url = NULL;
    assert(sc->flags.disk_io_pending);
    sc->flags.disk_io_pending = 0;
    assert(sc->callback != NULL);
    debug(20, 3) ("storeClientReadHeader: len %d\n", (int) len);
    if (len < 0) {
	debug(20, 3) ("storeClientReadHeader: %s\n", xstrerror());
	storeClientCallback(sc, len);
	return;
    }
    tlv_list = storeSwapMetaUnpack(buf, &swap_hdr_sz);
    if (swap_hdr_sz > len) {
	/* oops, bad disk file? */
	debug(20, 1) ("WARNING: swapfile header too small\n");
	/* BUG FIX: free the TLV list on this early return; it was
	 * previously leaked (storeSwapTLVFree is NULL-safe). */
	storeSwapTLVFree(tlv_list);
	storeClientCallback(sc, -1);
	return;
    }
    if (tlv_list == NULL) {
	debug(20, 1) ("WARNING: failed to unpack meta data\n");
	storeClientCallback(sc, -1);
	return;
    }
    /*
     * Check the meta data and make sure we got the right object.
     */
    for (t = tlv_list; t && swap_object_ok; t = t->next) {
	switch (t->type) {
	case STORE_META_KEY:
	    /* the on-disk MD5 key must match the entry's key unless
	     * the entry key is private (not yet public) */
	    assert(t->length == SQUID_MD5_DIGEST_LENGTH);
	    if (!EBIT_TEST(e->flags, KEY_PRIVATE) &&
		memcmp(t->value, e->hash.key, SQUID_MD5_DIGEST_LENGTH)) {
		debug(20, 2) ("storeClientReadHeader: swapin MD5 mismatch\n");
		debug(20, 2) ("\t%s\n", storeKeyText(t->value));
		debug(20, 2) ("\t%s\n", storeKeyText(e->hash.key));
		if (isPowTen(++md5_mismatches))
		    debug(20, 1) ("WARNING: %d swapin MD5 mismatches\n",
			md5_mismatches);
		swap_object_ok = 0;
	    }
	    break;
	case STORE_META_URL:
	    new_url = xstrdup(t->value);
	    break;
	case STORE_META_STOREURL:
	    new_store_url = xstrdup(t->value);
	    break;
	case STORE_META_OBJSIZE:
	    break;
	case STORE_META_STD:
	case STORE_META_STD_LFS:
	    break;
	case STORE_META_VARY_HEADERS:
	    if (mem->vary_headers) {
		if (strcmp(mem->vary_headers, t->value) != 0)
		    swap_object_ok = 0;
	    } else {
		/* Assume the object is OK.. remember the vary request headers */
		mem->vary_headers = xstrdup(t->value);
	    }
	    break;
	default:
	    debug(20, 2) ("WARNING: got unused STORE_META type %d\n",
		t->type);
	    break;
	}
    }
    /* Check url / store_url */
    do {
	if (new_url == NULL) {
	    debug(20, 1) ("storeClientReadHeader: no URL!\n");
	    swap_object_ok = 0;
	    break;
	}
	/*
	 * If we have a store URL then it must match the requested object URL.
	 * The theory is that objects with a store URL have been normalised
	 * and thus a direct access which didn't go via the rewrite framework
	 * are illegal!
	 */
	if (new_store_url) {
	    if (NULL == mem->store_url)
		mem->store_url = new_store_url;	/* ownership transfers to mem */
	    else if (0 == strcasecmp(mem->store_url, new_store_url))
		(void) 0;	/* a match! */
	    else {
		debug(20, 1) ("storeClientReadHeader: store URL mismatch\n");
		debug(20, 1) ("\t{%s} != {%s}\n", (char *) new_store_url, mem->store_url);
		swap_object_ok = 0;
		break;
	    }
	}
	/* If we have no store URL then the request and the memory URL must match */
	/*
	 * if ((!new_store_url) && mem->url && strcasecmp(mem->url, new_url) != 0) {
	 * debug(20, 1) ("storeClientReadHeader: URL mismatch\n");
	 * debug(20, 1) ("\t{%s} != {%s}\n", (char *) new_url, mem->url);
	 * swap_object_ok = 0;
	 * break;
	 * }
	 */
    } while (0);
    storeSwapTLVFree(tlv_list);
    xfree(new_url);
    /* don't free new_store_url if its owned by the mem object now */
    if (mem->store_url != new_store_url)
	xfree(new_store_url);
    if (!swap_object_ok) {
	storeClientCallback(sc, -1);
	return;
    }
    mem->swap_hdr_sz = swap_hdr_sz;
    mem->object_sz = e->swap_file_sz - swap_hdr_sz;
    /*
     * If our last read got some data the client wants, then give
     * it to them, otherwise schedule another read.
     */
    body_sz = len - swap_hdr_sz;
    if (sc->copy_offset < body_sz) {
	/*
	 * we have (part of) what they want
	 */
	copy_sz = XMIN(sc->copy_size, body_sz);
	debug(20, 3) ("storeClientReadHeader: copying %d bytes of body\n",
	    (int) copy_sz);
	/* shift the body down over the swap header in the copy buffer */
	xmemmove(sc->copy_buf, sc->copy_buf + swap_hdr_sz, copy_sz);
	if (sc->copy_offset == 0 && len > 0 && memHaveHeaders(mem) == 0)
	    httpReplyParse(mem->reply, sc->copy_buf,
		headersEnd(sc->copy_buf, copy_sz));
	storeClientCallback(sc, copy_sz);
	return;
    }
    /*
     * we don't have what the client wants, but at least we now
     * know the swap header size.
     */
    storeClientFileRead(sc);
}
/*
 * storeClientReadHeader: completion handler for the disk read of a
 * swapped-in object's metadata header.
 *
 * Unpacks the TLV swap metadata from 'buf', verifies it matches the
 * entry being swapped in (MD5 key, URL, Vary headers), records the
 * header size and object size on the MemObject, and then either hands
 * the client any body bytes already present in the buffer or schedules
 * another disk read.
 *
 * 'data' is the store_client; 'len' is the number of bytes read, or
 * negative on I/O error (propagated to the client callback).
 *
 * Fix: the early-return path for a too-small swapfile header
 * previously leaked the TLV list returned by storeSwapMetaUnpack();
 * it is now freed before returning.
 */
static void
storeClientReadHeader(void *data, const char *buf, ssize_t len)
{
    static int md5_mismatches = 0;
    store_client *sc = data;
    StoreEntry *e = sc->entry;
    MemObject *mem = e->mem_obj;
    int swap_hdr_sz = 0;
    size_t body_sz;
    size_t copy_sz;
    tlv *tlv_list;
    tlv *t;
    int swap_object_ok = 1;
    assert(sc->flags.disk_io_pending);
    sc->flags.disk_io_pending = 0;
    assert(sc->callback != NULL);
    debug(20, 3) ("storeClientReadHeader: len %d\n", (int) len);
    if (len < 0) {
	debug(20, 3) ("storeClientReadHeader: %s\n", xstrerror());
	storeClientCallback(sc, len);
	return;
    }
    tlv_list = storeSwapMetaUnpack(buf, &swap_hdr_sz);
    if (swap_hdr_sz > len) {
	/* oops, bad disk file? */
	debug(20, 1) ("WARNING: swapfile header too small\n");
	/* BUG FIX: free the TLV list on this early return; it was
	 * previously leaked (storeSwapTLVFree is NULL-safe). */
	storeSwapTLVFree(tlv_list);
	storeClientCallback(sc, -1);
	return;
    }
    if (tlv_list == NULL) {
	debug(20, 1) ("WARNING: failed to unpack meta data\n");
	storeClientCallback(sc, -1);
	return;
    }
    /*
     * Check the meta data and make sure we got the right object.
     */
    for (t = tlv_list; t && swap_object_ok; t = t->next) {
	switch (t->type) {
	case STORE_META_KEY:
	    /* the on-disk MD5 key must match the entry's key unless
	     * the entry key is private (not yet public) */
	    assert(t->length == MD5_DIGEST_CHARS);
	    if (!EBIT_TEST(e->flags, KEY_PRIVATE) &&
		memcmp(t->value, e->hash.key, MD5_DIGEST_CHARS)) {
		debug(20, 2) ("storeClientReadHeader: swapin MD5 mismatch\n");
		debug(20, 2) ("\t%s\n", storeKeyText(t->value));
		debug(20, 2) ("\t%s\n", storeKeyText(e->hash.key));
		if (isPowTen(++md5_mismatches))
		    debug(20, 1) ("WARNING: %d swapin MD5 mismatches\n",
			md5_mismatches);
		swap_object_ok = 0;
	    }
	    break;
	case STORE_META_URL:
	    if (NULL == mem->url)
		(void) 0;	/* can't check */
	    else if (0 == strcasecmp(mem->url, t->value))
		(void) 0;	/* a match! */
	    else {
		debug(20, 1) ("storeClientReadHeader: URL mismatch\n");
		debug(20, 1) ("\t{%s} != {%s}\n", (char *) t->value, mem->url);
		swap_object_ok = 0;
		break;
	    }
	    break;
	case STORE_META_STD:
	case STORE_META_STD_LFS:
	    break;
	case STORE_META_VARY_HEADERS:
	    if (mem->vary_headers) {
		if (strcmp(mem->vary_headers, t->value) != 0)
		    swap_object_ok = 0;
	    } else {
		/* Assume the object is OK.. remember the vary request headers */
		mem->vary_headers = xstrdup(t->value);
	    }
	    break;
	default:
	    debug(20, 2) ("WARNING: got unused STORE_META type %d\n",
		t->type);
	    break;
	}
    }
    storeSwapTLVFree(tlv_list);
    if (!swap_object_ok) {
	storeClientCallback(sc, -1);
	return;
    }
    mem->swap_hdr_sz = swap_hdr_sz;
    mem->object_sz = e->swap_file_sz - swap_hdr_sz;
    /*
     * If our last read got some data the client wants, then give
     * it to them, otherwise schedule another read.
     */
    body_sz = len - swap_hdr_sz;
    if (sc->copy_offset < body_sz) {
	/*
	 * we have (part of) what they want
	 */
	copy_sz = XMIN(sc->copy_size, body_sz);
	debug(20, 3) ("storeClientReadHeader: copying %d bytes of body\n",
	    (int) copy_sz);
	/* shift the body down over the swap header in the copy buffer */
	xmemmove(sc->copy_buf, sc->copy_buf + swap_hdr_sz, copy_sz);
	/* sline.status == 0 means the reply headers were not parsed yet */
	if (sc->copy_offset == 0 && len > 0 && mem->reply->sline.status == 0)
	    httpReplyParse(mem->reply, sc->copy_buf,
		headersEnd(sc->copy_buf, copy_sz));
	storeClientCallback(sc, copy_sz);
	return;
    }
    /*
     * we don't have what the client wants, but at least we now
     * know the swap header size.
     */
    storeClientFileRead(sc);
}