/* Start fetching the configured error-map replacement object for this reply */
int
errorMapStart(const errormap * map, request_t * client_req, HttpReply * reply, const char *aclname, ERRMAPCB * callback, void *callback_data)
{
    char squid_error[100];
    int len = 0;
    const char *errorUrl;
    ErrorMapState *state;
    const char *tmp;
    http_status status;
    request_t *req;
    HttpHeaderPos hdrpos;
    HttpHeaderEntry *hdr;

    if (!client_req || !reply)
        return 0;
    status = reply->sline.status;
    tmp = httpHeaderGetStr(&reply->header, HDR_X_SQUID_ERROR);
    squid_error[0] = '\0';
    if (tmp) {
        xstrncpy(squid_error, tmp, sizeof(squid_error));
        len = strcspn(squid_error, " ");
    }
    squid_error[len] = '\0';
    errorUrl = getErrorMap(map, status, squid_error, aclname);
    if (!errorUrl)
        return 0;
    req = urlParse(urlMethodGetKnownByCode(METHOD_GET), (char *) errorUrl);
    if (!req) {
        debug(0, 0) ("errorMapStart: Invalid error URL '%s'\n", errorUrl);
        return 0;
    }
    req->urlgroup = xstrdup("error");
    state = cbdataAlloc(ErrorMapState);
    state->req = requestLink(req);
    state->e = storeCreateEntry(errorUrl, req->flags, req->method);
    state->sc = storeClientRegister(state->e, state);
    state->callback = callback;
    state->callback_data = callback_data;
    cbdataLock(callback_data);
    /* clone the selected client request headers onto the error-fetch request */
    hdrpos = HttpHeaderInitPos;
    while ((hdr = httpHeaderGetEntry(&client_req->header, &hdrpos)) != NULL) {
        if (CBIT_TEST(client_headers, hdr->id))
            httpHeaderAddClone(&req->header, hdr);
    }
    /* ... and the selected headers from the server reply */
    hdrpos = HttpHeaderInitPos;
    while ((hdr = httpHeaderGetEntry(&reply->header, &hdrpos)) != NULL) {
        if (CBIT_TEST(server_headers, hdr->id))
            httpHeaderAddClone(&req->header, hdr);
    }
    httpHeaderPutInt(&req->header, HDR_X_ERROR_STATUS, (int) reply->sline.status);
    httpHeaderPutStr(&req->header, HDR_X_REQUEST_URI, urlCanonical(client_req));
    fwdStart(-1, state->e, req);
    storeClientRef(state->sc, state->e, 0, 0, SM_PAGE_SIZE, errorMapFetchHeaders, state);
    return 1;
}
/* Start a whois fetch of the networks announced by AS number 'as' */
static void
asnCacheStart(int as)
{
    LOCAL_ARRAY(char, asres, 4096);
    StoreEntry *e;
    request_t *req;
    ASState *asState;
    method_t *method_get;

    method_get = urlMethodGetKnownByCode(METHOD_GET);
    asState = cbdataAlloc(ASState);
    debug(53, 3) ("asnCacheStart: AS %d\n", as);
    snprintf(asres, 4096, "whois://%s/!gAS%d", Config.as_whois_server, as);
    asState->as_number = as;
    req = urlParse(method_get, asres);
    assert(NULL != req);
    asState->request = requestLink(req);
    if ((e = storeGetPublic(asres, method_get)) == NULL) {
        e = storeCreateEntry(asres, null_request_flags, method_get);
        asState->sc = storeClientRegister(e, asState);
        fwdStart(-1, e, asState->request);
    } else {
        storeLockObject(e);
        asState->sc = storeClientRegister(e, asState);
    }
    asState->entry = e;
    asState->seen = 0;
    asState->offset = 0;
    storeClientRef(asState->sc, e, asState->seen, asState->offset, SM_PAGE_SIZE,
        asHandleReply, asState);
}
void
netdbExchangeStart(void *data)
{
#if USE_ICMP
    peer *p = data;
    char *uri;
    netdbExchangeState *ex;
    method_t *method_get;

    CBDATA_INIT_TYPE(netdbExchangeState);
    ex = cbdataAlloc(netdbExchangeState);
    cbdataLock(p);
    ex->p = p;
    uri = internalRemoteUri(p->host, p->http_port, "/squid-internal-dynamic/", "netdb");
    debug(38, 3) ("netdbExchangeStart: Requesting '%s'\n", uri);
    assert(NULL != uri);
    method_get = urlMethodGetKnownByCode(METHOD_GET);
    ex->r = urlParse(method_get, uri);
    if (NULL == ex->r) {
        debug(38, 1) ("netdbExchangeStart: Bad URI %s\n", uri);
        return;
    }
    requestLink(ex->r);
    assert(NULL != ex->r);
    httpBuildVersion(&ex->r->http_ver, 1, 0);
    ex->e = storeCreateEntry(uri, null_request_flags, method_get);
    assert(NULL != ex->e);
    ex->sc = storeClientRegister(ex->e, ex);
    storeClientRef(ex->sc, ex->e, ex->seen, ex->used, SM_PAGE_SIZE,
        netdbExchangeHandleReply, ex);
    ex->r->flags.loopdetect = 1;        /* cheat! -- force direct */
    if (p->login)
        xstrncpy(ex->r->login, p->login, MAX_LOGIN_SZ);
    fwdStart(-1, ex->e, ex->r);
#endif
}
static void
peerDigestSwapInCBlock(void *data, mem_node_ref nr, ssize_t size)
{
    const char *buf = nr.node->data + nr.offset;
    DigestFetchState *fetch = data;

    if (peerDigestFetchedEnough(fetch, size, "peerDigestSwapInCBlock"))
        goto finish;
    fetch->offset += size;
    memcpy(fetch->buf + fetch->buf_used, buf, size);
    fetch->buf_used += size;
    if (fetch->buf_used >= StoreDigestCBlockSize) {
        PeerDigest *pd = fetch->pd;
        HttpReply *rep = fetch->entry->mem_obj->reply;
        assert(pd && rep);
        if (peerDigestSetCBlock(pd, fetch->buf)) {
            /* XXX: soon we will have variable header size */
            fetch->offset -= fetch->buf_used - StoreDigestCBlockSize;
            /* switch to CD buffer and fetch digest guts */
            memFree(fetch->buf, MEM_4K_BUF);
            fetch->buf = NULL;
            fetch->buf_used = 0;
            assert(pd->cd->mask);
            storeClientRef(fetch->sc, fetch->entry, fetch->offset, fetch->offset,
                pd->cd->mask_size, peerDigestSwapInMask, fetch);
        } else {
            peerDigestFetchAbort(fetch, "invalid digest cblock");
        }
    } else {
        /* need more data, do we have space? */
        if (fetch->buf_used >= SM_PAGE_SIZE)
            peerDigestFetchAbort(fetch, "digest cblock too big");
        else
            storeClientRef(fetch->sc, fetch->entry, fetch->offset, fetch->offset,
                SM_PAGE_SIZE - fetch->buf_used, peerDigestSwapInCBlock, fetch);
    }
  finish:
    stmemNodeUnref(&nr);
}
/*
 * This is the eventual API which store clients should use to fetch the headers.
 */
void
storeClientCopyHeaders(store_client * sc, StoreEntry * e, STHCB * callback, void *callback_data)
{
    sc->header_callback = callback;
    sc->header_cbdata = callback_data;
    /* This kicks off either the memory read, waiting for the data to appear, or the disk read */
    storeClientRef(sc, e, 0, 0, SM_PAGE_SIZE, storeClientCopyHeadersCB, sc);
}
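/*
 * Hypothetical usage sketch (not part of the original sources): how a store
 * client might drive storeClientCopyHeaders() instead of parsing mem_obj->reply
 * itself the way peerMonitorRequest() and peerDigestRequest() below do.  The
 * exampleFetch* names and state structure are invented for illustration, and
 * the STHCB callback signature (void *data, HttpReply *rep) is assumed from
 * how storeClientCopyHeadersCB invokes sc->header_callback; treat this as a
 * sketch, not as the library's documented API.
 */
#if 0                           /* illustration only, never compiled */
typedef struct {
    StoreEntry *e;
    store_client *sc;
    request_t *req;
} ExampleFetchState;

static void
exampleFetchHeadersDone(void *data, HttpReply * rep)
{
    ExampleFetchState *state = data;
    if (rep == NULL || rep->sline.status != HTTP_OK) {
        /* headers missing or unusable; tear the fetch down */
        storeClientUnregister(state->sc, state->e, state);
        storeUnlockObject(state->e);
        requestUnlink(state->req);
        cbdataFree(state);
        return;
    }
    /* headers are parsed into rep; body reads would continue from rep->hdr_sz */
}

static void
exampleFetchStart(const char *url)
{
    ExampleFetchState *state;
    request_t *req;

    CBDATA_INIT_TYPE(ExampleFetchState);
    state = cbdataAlloc(ExampleFetchState);
    req = urlParse(urlMethodGetKnownByCode(METHOD_GET), (char *) url);
    assert(req);
    state->req = requestLink(req);
    state->e = storeCreateEntry(url, req->flags, req->method);
    state->sc = storeClientRegister(state->e, state);
    fwdStart(-1, state->e, req);
    /* ask for the headers; exampleFetchHeadersDone runs once they are available */
    storeClientCopyHeaders(state->sc, state->e, exampleFetchHeadersDone, state);
}
#endif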
static void
peerMonitorFetchReply(void *data, mem_node_ref nr, ssize_t size)
{
    PeerMonitor *pm = data;

    if (size <= 0 || !cbdataValid(pm->peer)) {
        peerMonitorCompleted(pm);
    } else {
        pm->running.size += size;
        pm->running.offset += size;
        storeClientRef(pm->running.sc, pm->running.e, pm->running.offset,
            pm->running.offset, SM_PAGE_SIZE, peerMonitorFetchReply, pm);
    }
    stmemNodeUnref(&nr);
}
static void
peerMonitorFetchReplyHeaders(void *data, mem_node_ref nr, ssize_t size)
{
    PeerMonitor *pm = data;
    const char *buf = NULL;
    http_status status;
    HttpReply *reply;

    if (EBIT_TEST(pm->running.e->flags, ENTRY_ABORTED))
        goto completed;
    if (size <= 0)
        goto completed;
    if (!cbdataValid(pm->peer))
        goto completed;
    buf = nr.node->data + nr.offset;
    reply = pm->running.e->mem_obj->reply;
    assert(reply);
    status = reply->sline.status;
    pm->running.status = status;
    if (status != HTTP_OK)
        goto completed;
    if (size > reply->hdr_sz) {
        pm->running.size = size - reply->hdr_sz;
        pm->running.offset = size;
    } else {
        pm->running.size = 0;
        pm->running.offset = reply->hdr_sz;
    }
    storeClientRef(pm->running.sc, pm->running.e, pm->running.offset,
        pm->running.offset, SM_PAGE_SIZE, peerMonitorFetchReply, pm);
    stmemNodeUnref(&nr);
    return;
  completed:
    /* We are fully done with this monitoring request. Clean up */
    stmemNodeUnref(&nr);
    peerMonitorCompleted(pm);
    return;
}
static void
peerMonitorRequest(void *data)
{
    PeerMonitor *pm = data;
    char *url;
    request_t *req;

    if (!cbdataValid(pm->peer)) {
        cbdataFree(pm);
        return;
    }
    url = pm->peer->monitor.url;
    if (!url) {
        cbdataFree(pm);
        return;
    }
    req = urlParse(urlMethodGetKnownByCode(METHOD_GET), url);
    if (!req) {
        debug(DBG, 1) ("peerMonitorRequest: Failed to parse URL '%s' for cache_peer %s\n", url, pm->peer->name);
        cbdataFree(pm);
        return;
    }
    pm->last_probe = squid_curtime;
    pm->running.timeout_set = 1;
    eventAdd(pm->name, peerMonitorTimeout, pm,
        (double) (pm->peer->monitor.timeout ? pm->peer->monitor.timeout : pm->peer->monitor.interval), 0);
    httpHeaderPutStr(&req->header, HDR_ACCEPT, "*/*");
    httpHeaderPutStr(&req->header, HDR_USER_AGENT, full_appname_string);
    if (pm->peer->login)
        xstrncpy(req->login, pm->peer->login, MAX_LOGIN_SZ);
    pm->running.req = requestLink(req);
    pm->running.e = storeCreateEntry(url, req->flags, req->method);
    pm->running.sc = storeClientRegister(pm->running.e, pm);
    if (pm->peer->options.monitor_direct)
        fwdStartPeer(NULL, pm->running.e, pm->running.req);
    else
        fwdStartPeer(pm->peer, pm->running.e, pm->running.req);
    storeClientRef(pm->running.sc, pm->running.e, 0, 0, SM_PAGE_SIZE,
        peerMonitorFetchReplyHeaders, pm);
    return;
}
static void
peerDigestSwapInMask(void *data, mem_node_ref nr, ssize_t size)
{
    const char *buf = nr.node->data + nr.offset;
    DigestFetchState *fetch = data;
    PeerDigest *pd;

    assert(size <= nr.node->len - nr.offset);
    if (peerDigestFetchedEnough(fetch, size, "peerDigestSwapInMask")) {
        stmemNodeUnref(&nr);
        return;
    }
    pd = fetch->pd;
    assert(pd->cd && pd->cd->mask);
    /* Copy data into the peer digest mask */
    if (size > 0) {
        /* the final chunk may fill the mask exactly, so allow equality here */
        assert(size + fetch->mask_offset <= pd->cd->mask_size);
        memcpy(pd->cd->mask + fetch->mask_offset, buf, size);
    }
    stmemNodeUnref(&nr);
    fetch->offset += size;
    fetch->mask_offset += size;
    if (fetch->mask_offset >= pd->cd->mask_size) {
        debug(72, 2) ("peerDigestSwapInMask: Done! Got %" PRINTF_OFF_T ", expected %d\n",
            fetch->mask_offset, pd->cd->mask_size);
        assert(fetch->mask_offset == pd->cd->mask_size);
        assert(peerDigestFetchedEnough(fetch, 0, "peerDigestSwapInMask"));
    } else {
        const size_t buf_sz = pd->cd->mask_size - fetch->mask_offset;
        assert(buf_sz > 0);
        storeClientRef(fetch->sc, fetch->entry, fetch->offset, fetch->offset,
            buf_sz, peerDigestSwapInMask, fetch);
    }
}
/* fetch headers from disk, pass on to SwapInCBlock */
static void
peerDigestSwapInHeaders(void *data, mem_node_ref nr, ssize_t size)
{
    DigestFetchState *fetch = data;

    assert(size <= nr.node->len - nr.offset);
    if (peerDigestFetchedEnough(fetch, size, "peerDigestSwapInHeaders"))
        goto finish;
    assert(!fetch->offset);
    assert(fetch->entry->mem_obj->reply);
    if (fetch->entry->mem_obj->reply->sline.status != HTTP_OK) {
        debug(72, 1) ("peerDigestSwapInHeaders: %s status %d got cached!\n",
            strBuf(fetch->pd->host), fetch->entry->mem_obj->reply->sline.status);
        peerDigestFetchAbort(fetch, "internal status error");
        goto finish;
    }
    fetch->offset = fetch->entry->mem_obj->reply->hdr_sz;
    fetch->buf = memAllocate(MEM_4K_BUF);
    storeClientRef(fetch->sc, fetch->entry, fetch->offset, fetch->offset,
        SM_PAGE_SIZE, peerDigestSwapInCBlock, fetch);
  finish:
    stmemNodeUnref(&nr);
}
static void
asHandleReply(void *data, mem_node_ref nr, ssize_t size)
{
    ASState *asState = data;
    StoreEntry *e = asState->entry;
    char *s;
    char *t;
    LOCAL_ARRAY(char, buf, SM_PAGE_SIZE);

    debug(53, 3) ("asHandleReply: Called with size=%ld\n", (long int) size);
    if (EBIT_TEST(e->flags, ENTRY_ABORTED)) {
        stmemNodeUnref(&nr);
        asStateFree(asState);
        return;
    }
    if (size == 0 && e->mem_obj->inmem_hi > 0) {
        asStateFree(asState);
        stmemNodeUnref(&nr);
        return;
    } else if (size < 0) {
        debug(53, 1) ("asHandleReply: Called with size=%ld\n", (long int) size);
        asStateFree(asState);
        stmemNodeUnref(&nr);
        return;
    } else if (HTTP_OK != e->mem_obj->reply->sline.status) {
        debug(53, 1) ("WARNING: AS %d whois request failed\n", asState->as_number);
        stmemNodeUnref(&nr);
        asStateFree(asState);
        return;
    }
    assert((nr.offset + size) < SM_PAGE_SIZE);
    memcpy(buf, nr.node->data + nr.offset, size);
    stmemNodeUnref(&nr);
    s = buf;
    while (s - buf < size && *s != '\0') {
        while (*s && xisspace(*s))
            s++;
        for (t = s; *t; t++) {
            if (xisspace(*t))
                break;
        }
        if (*t == '\0') {
            /* oof, word should continue on next block */
            break;
        }
        *t = '\0';
        debug(53, 3) ("asHandleReply: AS# %s (%d)\n", s, asState->as_number);
        asnAddNet(s, asState->as_number);
        s = t + 1;
    }
    asState->seen = asState->offset + size;
    asState->offset += (s - buf);
    debug(53, 3) ("asState->seen = %ld, asState->offset = %ld\n",
        (long int) asState->seen, (long int) asState->offset);
    if (e->store_status == STORE_PENDING) {
        debug(53, 3) ("asHandleReply: store_status == STORE_PENDING: %s\n", storeUrl(e));
        storeClientRef(asState->sc, e, asState->seen, asState->offset, SM_PAGE_SIZE,
            asHandleReply, asState);
    } else if (asState->seen < e->mem_obj->inmem_hi) {
        debug(53, 3) ("asHandleReply: asState->seen < e->mem_obj->inmem_hi %s\n", storeUrl(e));
        storeClientRef(asState->sc, e, asState->seen, asState->offset, SM_PAGE_SIZE,
            asHandleReply, asState);
    } else {
        debug(53, 3) ("asHandleReply: Done: %s\n", storeUrl(e));
        asStateFree(asState);
    }
}
static void
netdbExchangeHandleReply(void *data, mem_node_ref nr, ssize_t size)
{
    const char *buf = nr.node->data + nr.offset;
    netdbExchangeState *ex = data;
    int rec_sz = 0;
    ssize_t o;
    struct in_addr addr;
    double rtt;
    double hops;
    const char *p;
    int j;
    HttpReply *rep;
    size_t hdr_sz;
    int nused = 0;

    assert(size <= nr.node->len - nr.offset);
    rec_sz = 0;
    rec_sz += 1 + sizeof(addr.s_addr);
    rec_sz += 1 + sizeof(int);
    rec_sz += 1 + sizeof(int);
    ex->seen = ex->used + size;
    debug(38, 3) ("netdbExchangeHandleReply: %d bytes\n", (int) size);
    if (!cbdataValid(ex->p)) {
        debug(38, 3) ("netdbExchangeHandleReply: Peer became invalid\n");
        netdbExchangeDone(ex);
        goto finish;
    }
    debug(38, 3) ("netdbExchangeHandleReply: for '%s'\n", ex->p->name);
    p = buf;
    if (0 == ex->used) {
        /* skip reply headers */
        rep = ex->e->mem_obj->reply;
        hdr_sz = rep->hdr_sz;
        debug(38, 5) ("netdbExchangeHandleReply: hdr_sz = %ld\n", (long int) hdr_sz);
        debug(38, 3) ("netdbExchangeHandleReply: reply status %d\n", rep->sline.status);
        if (HTTP_OK != rep->sline.status) {
            netdbExchangeDone(ex);
            goto finish;
        }
        ex->used += hdr_sz;
        if (size >= hdr_sz) {
            size -= hdr_sz;
            p += hdr_sz;
        } else {
            size = 0;
        }
    }
    debug(38, 5) ("netdbExchangeHandleReply: start parsing loop, size = %ld\n", (long int) size);
    while (size >= rec_sz) {
        debug(38, 5) ("netdbExchangeHandleReply: in parsing loop, size = %ld\n", (long int) size);
        addr.s_addr = any_addr.s_addr;
        hops = rtt = 0.0;
        for (o = 0; o < rec_sz;) {
            switch ((int) *(p + o)) {
            case NETDB_EX_NETWORK:
                o++;
                xmemcpy(&addr.s_addr, p + o, sizeof(addr.s_addr));
                o += sizeof(addr.s_addr);
                break;
            case NETDB_EX_RTT:
                o++;
                xmemcpy(&j, p + o, sizeof(int));
                o += sizeof(int);
                rtt = (double) ntohl(j) / 1000.0;
                break;
            case NETDB_EX_HOPS:
                o++;
                xmemcpy(&j, p + o, sizeof(int));
                o += sizeof(int);
                hops = (double) ntohl(j) / 1000.0;
                break;
            default:
                debug(38, 1) ("netdbExchangeHandleReply: corrupt data, aborting\n");
                netdbExchangeDone(ex);
                goto finish;
            }
        }
        if (addr.s_addr != any_addr.s_addr && rtt > 0)
            netdbExchangeUpdatePeer(addr, ex->p, rtt, hops);
        assert(o == rec_sz);
        ex->used += rec_sz;
        size -= rec_sz;
        p += rec_sz;
        /*
         * This is a fairly cpu-intensive loop, break after adding
         * just a few
         */
        if (++nused == 20)
            break;
    }
    debug(38, 3) ("netdbExchangeHandleReply: used %d entries, (x %d bytes) == %d bytes total\n",
        nused, rec_sz, nused * rec_sz);
    debug(38, 3) ("netdbExchangeHandleReply: seen %ld, used %ld\n",
        (long int) ex->seen, (long int) ex->used);
    if (EBIT_TEST(ex->e->flags, ENTRY_ABORTED)) {
        debug(38, 3) ("netdbExchangeHandleReply: ENTRY_ABORTED\n");
        netdbExchangeDone(ex);
    } else if (ex->e->store_status == STORE_PENDING) {
        debug(38, 3) ("netdbExchangeHandleReply: STORE_PENDING\n");
        storeClientRef(ex->sc, ex->e, ex->seen, ex->used, SM_PAGE_SIZE,
            netdbExchangeHandleReply, ex);
    } else if (ex->seen < ex->e->mem_obj->inmem_hi) {
        debug(38, 3) ("netdbExchangeHandleReply: ex->e->mem_obj->inmem_hi\n");
        storeClientRef(ex->sc, ex->e, ex->seen, ex->used, SM_PAGE_SIZE,
            netdbExchangeHandleReply, ex);
    } else {
        debug(38, 3) ("netdbExchangeHandleReply: Done\n");
        netdbExchangeDone(ex);
    }
  finish:
    buf = NULL;
    stmemNodeUnref(&nr);
}
/* wait for full http headers to be received */
static void
peerDigestFetchReply(void *data, mem_node_ref nr, ssize_t size)
{
    const char *buf = nr.node->data + nr.offset;
    DigestFetchState *fetch = data;
    PeerDigest *pd = fetch->pd;
    http_status status;
    HttpReply *reply;

    assert(pd && buf);
    assert(!fetch->offset);
    assert(size <= nr.node->len - nr.offset);
    if (peerDigestFetchedEnough(fetch, size, "peerDigestFetchReply"))
        goto finish;
    reply = fetch->entry->mem_obj->reply;
    assert(reply);
    status = reply->sline.status;
    debug(72, 3) ("peerDigestFetchReply: %s status: %d, expires: %ld (%+d)\n",
        strBuf(pd->host), status,
        (long int) reply->expires, (int) (reply->expires - squid_curtime));
    /* this "if" is based on clientHandleIMSReply() */
    if (status == HTTP_NOT_MODIFIED) {
        request_t *r = NULL;
        /* our old entry is fine */
        assert(fetch->old_entry);
        if (!fetch->old_entry->mem_obj->request)
            fetch->old_entry->mem_obj->request = r =
                requestLink(fetch->entry->mem_obj->request);
        assert(fetch->old_entry->mem_obj->request);
        httpReplyUpdateOnNotModified(fetch->old_entry->mem_obj->reply, reply);
        storeTimestampsSet(fetch->old_entry);
        /* get rid of 304 reply */
        storeClientUnregister(fetch->sc, fetch->entry, fetch);
        storeUnlockObject(fetch->entry);
        /* And prepare to swap in old entry if needed */
        fetch->entry = fetch->old_entry;
        fetch->old_entry = NULL;
        fetch->sc = fetch->old_sc;
        fetch->old_sc = NULL;
        /* preserve request -- we need its size to update counters */
        /* requestUnlink(r); */
        /* fetch->entry->mem_obj->request = NULL; */
    } else if (status == HTTP_OK) {
        /* get rid of old entry if any */
        if (fetch->old_entry) {
            debug(72, 3) ("peerDigestFetchReply: got new digest, releasing old one\n");
            storeClientUnregister(fetch->old_sc, fetch->old_entry, fetch);
            storeReleaseRequest(fetch->old_entry);
            storeUnlockObject(fetch->old_entry);
            fetch->old_entry = NULL;
        }
    } else {
        /* some kind of a bug */
        peerDigestFetchAbort(fetch, httpStatusLineReason(&reply->sline));
        goto finish;
    }
    /* must have a ready-to-use store entry if we got here */
    /* can we stay with the old in-memory digest? */
    if (status == HTTP_NOT_MODIFIED && fetch->pd->cd)
        peerDigestFetchStop(fetch, "Not modified");
    else
        storeClientRef(fetch->sc, fetch->entry,         /* have to swap in */
            0, 0, SM_PAGE_SIZE, peerDigestSwapInHeaders, fetch);
  finish:
    stmemNodeUnref(&nr);
}
/* ask store for a digest */
static void
peerDigestRequest(PeerDigest * pd)
{
    peer *p = pd->peer;
    StoreEntry *e, *old_e;
    char *url;
    const cache_key *key;
    request_t *req;
    DigestFetchState *fetch = NULL;

    pd->req_result = NULL;
    pd->flags.requested = 1;
    /* compute future request components */
    if (p->digest_url)
        url = xstrdup(p->digest_url);
    else
        url = internalRemoteUri(p->host, p->http_port, "/squid-internal-periodic/",
            StoreDigestFileName);
    req = urlParse(urlMethodGetKnownByCode(METHOD_GET), url);
    assert(req);
    key = storeKeyPublicByRequest(req);
    debug(72, 2) ("peerDigestRequest: %s key: %s\n", url, storeKeyText(key));
    /* add custom headers */
    assert(!req->header.len);
    httpHeaderPutStr(&req->header, HDR_ACCEPT, StoreDigestMimeStr);
    httpHeaderPutStr(&req->header, HDR_ACCEPT, "text/html");
    if (p->login)
        xstrncpy(req->login, p->login, MAX_LOGIN_SZ);
    /* create fetch state structure */
    CBDATA_INIT_TYPE(DigestFetchState);
    fetch = cbdataAlloc(DigestFetchState);
    fetch->request = requestLink(req);
    fetch->pd = pd;
    fetch->offset = 0;
    /* update timestamps */
    fetch->start_time = squid_curtime;
    pd->times.requested = squid_curtime;
    pd_last_req_time = squid_curtime;
    req->flags.cachable = 1;
    /* the rest is based on clientProcessExpired() */
    req->flags.refresh = 1;
    old_e = fetch->old_entry = storeGet(key);
    if (old_e) {
        debug(72, 5) ("peerDigestRequest: found old entry\n");
        storeLockObject(old_e);
        storeCreateMemObject(old_e, url);
        fetch->old_sc = storeClientRegister(old_e, fetch);
    }
    e = fetch->entry = storeCreateEntry(url, req->flags, req->method);
    assert(EBIT_TEST(e->flags, KEY_PRIVATE));
    fetch->sc = storeClientRegister(e, fetch);
    /* set lastmod to trigger IMS request if possible */
    if (old_e)
        e->lastmod = old_e->lastmod;
    /* push towards peer cache */
    debug(72, 3) ("peerDigestRequest: forwarding to fwdStart...\n");
    fwdStart(-1, e, req);
    cbdataLock(fetch->pd);
    storeClientRef(fetch->sc, e, 0, 0, SM_PAGE_SIZE, peerDigestFetchReply, fetch);
}