static void
storeClientCopy2(StoreEntry * e, store_client * sc)
{
    if (sc->flags.copy_event_pending)
	return;
    if (EBIT_TEST(e->flags, ENTRY_FWD_HDR_WAIT)) {
	debug(20, 5) ("storeClientCopy2: returning because ENTRY_FWD_HDR_WAIT set\n");
	return;
    }
    if (sc->flags.store_copying) {
	sc->flags.copy_event_pending = 1;
	debug(20, 3) ("storeClientCopy2: Queueing storeClientCopyEvent()\n");
	eventAdd("storeClientCopyEvent", storeClientCopyEvent, sc, 0.0, 0);
	return;
    }
    cbdataLock(sc);		/* ick, prevent sc from getting freed */
    sc->flags.store_copying = 1;
    debug(20, 3) ("storeClientCopy2: %s\n", storeKeyText(e->hash.key));
    assert(sc->callback != NULL);
    /*
     * We used to check for ENTRY_ABORTED here.  But there were some
     * problems.  For example, we might have a slow client (or two) and
     * the server-side is reading far ahead and swapping to disk.  Even
     * if the server-side aborts, we want to give the client(s)
     * everything we got before the abort condition occurred.
     */
    storeClientCopy3(e, sc);
    sc->flags.store_copying = 0;
    cbdataUnlock(sc);		/* ick, allow sc to be freed */
}
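The flag pair above is a re-entrancy guard: a call that arrives while a copy is in progress is deferred by scheduling a zero-delay event instead of recursing, and copy_event_pending keeps that event from being queued twice. Below is a minimal self-contained sketch of the defer-instead-of-recurse pattern; all names are illustrative, and a one-slot queue stands in for Squid's real event machinery.

#include <stdio.h>

struct worker {
    int busy;                   /* analogue of flags.store_copying */
    int event_pending;          /* analogue of flags.copy_event_pending */
};

static struct worker *queued;   /* one-slot stand-in for the event queue */
static int rounds;

static void do_work(struct worker *w);

static void
step(struct worker *w)
{
    if (w->event_pending)
        return;                 /* a deferred call is already queued */
    if (w->busy) {
        w->event_pending = 1;   /* defer instead of re-entering */
        queued = w;
        return;
    }
    w->busy = 1;
    do_work(w);                 /* may call step() recursively */
    w->busy = 0;
}

static void
do_work(struct worker *w)
{
    printf("working, round %d\n", rounds);
    if (rounds++ < 3)
        step(w);                /* this re-entrant call is deferred, not run */
}

int
main(void)
{
    struct worker w = { 0, 0 };
    step(&w);
    while (queued) {            /* drain the fake event queue */
        struct worker *next = queued;
        queued = NULL;
        next->event_pending = 0;
        step(next);
    }
    return 0;
}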
Example #2
void
authenticateProxyUserCacheCleanup(void *datanotused)
{
    /*
     * We walk the hash by username as that is the unique key we use.
     * For big hashes we could consider stepping through the cache, 100/200
     * entries at a time. Let's see how it flies first.
     */
    auth_user_hash_pointer *usernamehash;
    auth_user_t *auth_user;
    char *username = NULL;
    debug(29, 3) ("authenticateProxyUserCacheCleanup: Cleaning the user cache now\n");
    debug(29, 3) ("authenticateProxyUserCacheCleanup: Current time: %ld\n", (long int) current_time.tv_sec);
    hash_first(proxy_auth_username_cache);
    while ((usernamehash = ((auth_user_hash_pointer *) hash_next(proxy_auth_username_cache)))) {
	auth_user = usernamehash->auth_user;
	username = authenticateUserUsername(auth_user);

	/* if we need to have independent expiry clauses, insert a module call
	 * here */
	debug(29, 4) ("authenticateProxyUserCacheCleanup: Cache entry:\n\tType: %d\n\tUsername: %s\n\texpires: %ld\n\treferences: %ld\n", auth_user->auth_type, username, (long int) (auth_user->expiretime + Config.authenticateTTL), (long int) auth_user->references);
	if (auth_user->expiretime + Config.authenticateTTL <= current_time.tv_sec) {
	    debug(29, 5) ("authenticateProxyUserCacheCleanup: Removing user %s from cache due to timeout.\n", username);
	    /* the minus 1 accounts for the cache lock */
	    if ((authenticateAuthUserInuse(auth_user) - 1))
		debug(29, 4) ("authenticateProxyUserCacheCleanup: this cache entry has expired AND has a non-zero ref count.\n");
	    else
		authenticateAuthUserUnlock(auth_user);
	}
    }
    debug(29, 3) ("authenticateProxyUserCacheCleanup: Finished cleaning the user cache.\n");
    eventAdd("User Cache Maintenance", authenticateProxyUserCacheCleanup, NULL, Config.authenticateGCInterval, 1);
}
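This cleanup deletes entries from the very table it is walking; that is only safe because the walk has already moved past an entry before it can be freed, the same discipline the hand-rolled link_next dance makes explicit in clientdbGC() and storeCleanup() further down. A minimal self-contained sketch of the save-next-then-maybe-free pattern (illustrative types, not Squid's hash API):

#include <stdio.h>
#include <stdlib.h>

struct entry {
    struct entry *next;
    long expires;
};

/* Remove expired entries from one bucket chain.  The next pointer is
 * saved *before* the current entry may be freed. */
static void
expire_bucket(struct entry **head, long now)
{
    struct entry **prev = head;
    struct entry *e, *next;
    for (e = *head; e != NULL; e = next) {
        next = e->next;         /* save before a possible free() */
        if (e->expires <= now) {
            *prev = next;       /* unlink, then free */
            free(e);
        } else {
            prev = &e->next;
        }
    }
}

int
main(void)
{
    struct entry *head = NULL, *e;
    long t;
    for (t = 0; t < 5; t++) {
        e = malloc(sizeof(*e));
        e->expires = t;
        e->next = head;
        head = e;
    }
    expire_bucket(&head, 2);    /* drops the entries expiring at 0, 1 and 2 */
    for (e = head; e != NULL; e = e->next)
        printf("kept entry expiring at %ld\n", e->expires);
    return 0;
}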
Example #3
static void
authenticateDigestNonceCacheCleanup(void *data)
{
    /*
     * We walk the hash by nonceb64 as that is the unique key we
     * use.  For big hash tables we could consider stepping through
     * the cache, 100/200 entries at a time. Let's see how it flies
     * first.
     */
    digest_nonce_h *nonce;
    debug(29, 3) ("authenticateDigestNonceCacheCleanup: Cleaning the nonce cache now\n");
    debug(29, 3) ("authenticateDigestNonceCacheCleanup: Current time: %ld\n",
	(long int) current_time.tv_sec);
    hash_first(digest_nonce_cache);
    while ((nonce = ((digest_nonce_h *) hash_next(digest_nonce_cache)))) {
	debug(29, 3) ("authenticateDigestNonceCacheCleanup: nonce entry  : %p '%s'\n", nonce, (char *) nonce->hash.key);
	debug(29, 4) ("authenticateDigestNonceCacheCleanup: Creation time: %ld\n", (long int) nonce->noncedata.creationtime);
	if (authDigestNonceIsStale(nonce)) {
	    debug(29, 4) ("authenticateDigestNonceCacheCleanup: Removing nonce %s from cache due to timeout.\n", (char *) nonce->hash.key);
	    assert(nonce->flags.incache);
	    /* invalidate nonce so future requests fail */
	    nonce->flags.valid = 0;
	    /* if it is tied to an auth_user, remove the tie */
	    authDigestNonceUserUnlink(nonce);
	    authDigestNoncePurge(nonce);
	}
    }
    debug(29, 3) ("authenticateDigestNonceCacheCleanup: Finished cleaning the nonce cache.\n");
    if (authenticateDigestActive())
	eventAdd("Digest none cache maintenance", authenticateDigestNonceCacheCleanup, NULL, digestConfig->nonceGCInterval, 1);
}
static void
storeNullDirInit(SwapDir * sd)
{
    store_dirs_rebuilding++;
    eventAdd("storeNullDirRebuildComplete", storeNullDirRebuildComplete,
	NULL, 0.0, 1);
}
Example #5
void AsyncSocket::reconnectLoop()
{
	while (true)
	{
		try
		{
			connect();
			return;
		}
		catch (SocketException *e)
		{
			eventAdd(SocketEvent::TYPE_CONNECTION_FAILED, e->getDescription());
			eventAdd(SocketEvent::TYPE_WAITING_RECONNECT);
			sleep(myReconnectTimeout);
		}
	}
}
Example #6
/* registers next digest verification */
static void
peerDigestSetCheck(PeerDigest * pd, time_t delay)
{
    eventAdd("peerDigestCheck", peerDigestCheck, pd, (double) delay, 1);
    pd->times.next_check = squid_curtime + delay;
    debug(72, 3) ("peerDigestSetCheck: will check peer %s in %d secs\n",
	strBuf(pd->host), delay);
}
Example #7
static void
storeCleanup(void *datanotused)
{
    static int bucketnum = -1;
    static int validnum = 0;
    static int store_errors = 0;
    int validnum_start;
    StoreEntry *e;
    hash_link *link_ptr = NULL;
    hash_link *link_next = NULL;
    int limit = opt_foreground_rebuild ? 1 << 30 : 500;
    validnum_start = validnum;

    while (validnum - validnum_start < limit) {
	if (++bucketnum >= store_hash_buckets) {
	    debug(20, 1) ("  Completed Validation Procedure\n");
	    debug(20, 1) ("  Validated %d Entries\n", validnum);
	    debug(20, 1) ("  store_swap_size = %dk\n", store_swap_size);
	    store_dirs_rebuilding--;
	    assert(0 == store_dirs_rebuilding);
	    if (opt_store_doublecheck)
		assert(store_errors == 0);
	    if (store_digest)
		storeDigestNoteStoreReady();
	    return;
	}
	link_next = hash_get_bucket(store_table, bucketnum);
	while (NULL != (link_ptr = link_next)) {
	    link_next = link_ptr->next;
	    e = (StoreEntry *) link_ptr;
	    if (EBIT_TEST(e->flags, ENTRY_VALIDATED))
		continue;
	    /*
	     * Calling storeRelease() has no effect because we're
	     * still in 'store_rebuilding' state
	     */
	    if (e->swap_filen < 0)
		continue;
	    if (opt_store_doublecheck)
		if (storeCleanupDoubleCheck(e))
		    store_errors++;
	    EBIT_SET(e->flags, ENTRY_VALIDATED);
	    /*
	     * Only set the file bit if we know it's a valid entry;
	     * otherwise, set it in the validation procedure
	     */
	    storeDirUpdateSwapSize(&Config.cacheSwap.swapDirs[e->swap_dirn], e->swap_file_sz, 1);
	    /* Get rid of private objects. Not useful */
	    if (EBIT_TEST(e->flags, KEY_PRIVATE))
		storeRelease(e);
	    if ((++validnum & 0x3FFFF) == 0)
		debug(20, 1) ("  %7d Entries Validated so far.\n", validnum);
	}
    }
    eventAdd("storeCleanup", storeCleanup, NULL, 0.0, 1);
}
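storeCleanup() is the canonical incremental walk: process a bounded slice per callback (500 entries, or effectively unlimited under foreground rebuild), keep the cursor in static variables, and reschedule yourself with a zero-delay eventAdd so the main loop can service I/O between slices. A minimal self-contained sketch of that slice-and-reschedule shape, again with a one-slot queue standing in for the real event machinery:

#include <stdio.h>

#define TOTAL 2200
#define LIMIT  500              /* entries processed per callback */

typedef void EVH(void *);

static EVH *pending_func;       /* one-slot stand-in for eventAdd() */
static void *pending_arg;

static void
event_add(EVH * func, void *arg)
{
    pending_func = func;
    pending_arg = arg;
}

static void
cleanup_step(void *unused)
{
    static int done = 0;
    int start = done;
    while (done < TOTAL && done - start < LIMIT)
        done++;                 /* "validate" one entry */
    if (done < TOTAL) {
        printf("validated %d so far, rescheduling\n", done);
        event_add(cleanup_step, NULL);  /* zero-delay reschedule */
    } else {
        printf("completed validation of %d entries\n", done);
    }
}

int
main(void)
{
    event_add(cleanup_step, NULL);
    while (pending_func) {      /* the dispatch loop */
        EVH *f = pending_func;
        pending_func = NULL;
        f(pending_arg);
    }
    return 0;
}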
Example #8
static void
idnsTickleQueue(void)
{
    if (event_queued)
	return;
    if (NULL == lru_list.tail)
	return;
    eventAdd("idnsCheckQueue", idnsCheckQueue, NULL, 1.0, 1);
    event_queued = 1;
}
Example #9
void
delayInitDelayData(unsigned short pools)
{
    if (!pools)
	return;
    delay_data = xcalloc(pools, sizeof(*delay_data));
    memory_used += sizeof(*delay_data);
    eventAdd("delayPoolsUpdate", delayPoolsUpdate, NULL, 1.0, 1);
    delay_id_ptr_hash = hash_create(delayIdPtrHashCmp, 256, delayIdPtrHash);
}
Example #10
void
authenticateInitUserCache(void)
{
    if (!proxy_auth_username_cache) {
	/* First time around, 7921 should be big enough */
	proxy_auth_username_cache =
	    hash_create((HASHCMP *) strcmp, 7921, hash_string);
	assert(proxy_auth_username_cache);
	eventAdd("User Cache Maintenance", authenticateProxyUserCacheCleanup, NULL, Config.authenticateGCInterval, 1);
    }
}
Example #11
static void
peerCountMcastPeersSchedule(peer * p, time_t when)
{
    if (p->mcast.flags.count_event_pending)
	return;
    eventAdd("peerCountMcastPeersStart",
	peerCountMcastPeersStart,
	p,
	(double) when, 1);
    p->mcast.flags.count_event_pending = 1;
}
Example #12
static void
authenticateDigestNonceSetup(void)
{
    if (!digest_nonce_pool)
	digest_nonce_pool = memPoolCreate("Digest Scheme nonce's", sizeof(digest_nonce_h));
    if (!digest_nonce_cache) {
	digest_nonce_cache = hash_create((HASHCMP *) strcmp, 7921, hash_string);
	assert(digest_nonce_cache);
	eventAdd("Digest none cache maintenance", authenticateDigestNonceCacheCleanup, NULL, digestConfig->nonceGCInterval, 1);
    }
}
Example #13
static void
clientdbGC(void *unused)
{
    static int bucket = 0;
    hash_link *link_next;

    link_next = hash_get_bucket(client_table, bucket++);
    while (link_next != NULL) {
	ClientInfo *c = (ClientInfo *) link_next;
	int age = squid_curtime - c->last_seen;
	link_next = link_next->next;
	if (c->n_established)
	    continue;

	if (age < 24 * 3600 && c->Http.n_requests > 100)
	    continue;
	if (age < 4 * 3600 && (c->Http.n_requests > 10 || c->Icp.n_requests > 10))
	    continue;
	if (age < 5 * 60 && (c->Http.n_requests > 1 || c->Icp.n_requests > 1))
	    continue;
	if (age < 60)
	    continue;
	hash_remove_link(client_table, &c->hash);
	clientdbFreeItem(c);
	statCounter.client_http.clients--;
	cleanup_removed++;
    }

    if (bucket < CLIENT_DB_HASH_SIZE)
	eventAdd("client_db garbage collector", clientdbGC, NULL, 0.15, 0);
    else {
	bucket = 0;
	cleanup_running = 0;
	max_clients = statCounter.client_http.clients * 3 / 2;
	if (!cleanup_scheduled) {
	    cleanup_scheduled = 1;
	    eventAdd("client_db garbage collector", clientdbScheduledGC, NULL, 3 * 3600, 0);
	}
	debug(49, 2) ("clientdbGC: Removed %d entries\n", cleanup_removed);
    }
}
Example #14
/* called by Rewrite to push Rebuild forward */
static void
storeDigestRebuildResume(void)
{
    assert(sd_state.rebuild_lock);
    assert(!sd_state.rewrite_lock);
    sd_state.rebuild_offset = 0;
    /* resize or clear */
    if (!storeDigestResize())
	cacheDigestClear(store_digest);		/* not clean()! */
    memset(&sd_stats, 0, sizeof(sd_stats));
    eventAdd("storeDigestRebuildStep", storeDigestRebuildStep, NULL, 0.0, 1);
}
Example #15
void
start_announce(void *datanotused)
{
    void *junk;
    if (0 == Config.onoff.announce)
	return;
    if (theOutIcpConnection < 0)
	return;
    cbdataAdd(junk = xmalloc(1), cbdataXfree, 0);
    ipcache_nbgethostbyname(Config.Announce.host, send_announce, junk);
    eventAdd("send_announce", start_announce, NULL, (double) Config.Announce.period, 1);
}
Example #16
/* same as eventAdd but adds a random offset within +-1/3 of delta_ish */
void
eventAddIsh(const char *name, EVH * func, void *arg, double delta_ish, int weight)
{
    if (delta_ish >= 3.0) {
	const double two_third = (2.0 * delta_ish) / 3.0;
	delta_ish = two_third + (drand48() * two_third);
	/*
	 * I'm sure drand48() isn't portable.  Tell me what function
	 * you have that returns a random double value in the range 0,1.
	 */
    }
    eventAdd(name, func, arg, delta_ish, weight);
}
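To make the arithmetic concrete: with delta_ish = 60, two_third is 40 and the result is 40 + drand48() * 40, i.e. uniform in [40, 80), which is 60 plus or minus one third as the comment promises. As for the portability question in that comment, a uniform double in [0, 1) can be built from plain ISO C rand(); a hedged sketch:

#include <stdio.h>
#include <stdlib.h>

/* Uniform double in [0, 1).  Coarser than drand48() (RAND_MAX may be
 * as small as 32767) but strictly ISO C. */
static double
drand01(void)
{
    return (double) rand() / ((double) RAND_MAX + 1.0);
}

int
main(void)
{
    double delta_ish = 60.0;
    const double two_third = (2.0 * delta_ish) / 3.0;   /* 40.0 */
    delta_ish = two_third + drand01() * two_third;      /* in [40, 80) */
    printf("jittered delay: %.2f\n", delta_ish);
    return 0;
}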
Example #17
/* finishes swap out sequence for the digest; schedules next rebuild */
static void
storeDigestRebuildFinish(void)
{
    assert(sd_state.rebuild_lock);
    sd_state.rebuild_lock = 0;
    sd_state.rebuild_count++;
    debug(71, 2) ("storeDigestRebuildFinish: done.\n");
    eventAdd("storeDigestRebuildStart", storeDigestRebuildStart, NULL, (double)
	Config.digest.rebuild_period, 1);
    /* resume pending Rewrite if any */
    if (sd_state.rewrite_lock)
	storeDigestRewriteResume();
}
Example #18
static void
peerMonitorCompleted(PeerMonitor * pm)
{
    int state = PEER_ALIVE;
    peer *p = pm->peer;
    storeClientUnregister(pm->running.sc, pm->running.e, pm);
    storeUnlockObject(pm->running.e);
    requestUnlink(pm->running.req);
    memFree(pm->running.buf, MEM_4K_BUF);
    if (pm->running.timeout_set) {
	eventDelete(peerMonitorTimeout, pm);
	pm->running.timeout_set = 0;
    }
    if (!cbdataValid(pm->peer)) {
	cbdataFree(pm);
	return;
    }
    /* Figure out if the response was OK or not */
    if (pm->running.status != HTTP_OK) {
	debug(DBG, 1) ("peerMonitor %s: Failed, status != 200 (%d)\n",
	    p->name, pm->running.status);
	state = PEER_DEAD;
    } else if (pm->running.size < p->monitor.min) {
	debug(DBG, 1) ("peerMonitor %s: Failed, reply size %d < min %d\n",
	    p->name, pm->running.size, p->monitor.min);
	state = PEER_DEAD;
    } else if (pm->running.size > p->monitor.max && p->monitor.max > 0) {
	debug(DBG, 1) ("peerMonitor %s: Failed, reply size %d > max %d\n",
	    p->name, pm->running.size, p->monitor.max);
	state = PEER_DEAD;
    } else {
	debug(DBG, 2) ("peerMonitor %s: OK\n", p->name);
    }
    p->monitor.state = state;
    if (state != p->stats.logged_state) {
	switch (state) {
	case PEER_ALIVE:
	    debug(DBG, 1) ("Detected REVIVED %s: %s\n",
		neighborTypeStr(p), p->name);
	    peerClearRR();
	    break;
	case PEER_DEAD:
	    debug(DBG, 1) ("Detected DEAD %s: %s\n",
		neighborTypeStr(p), p->name);
	    break;
	}
	p->stats.logged_state = state;
    }
    memset(&pm->running, 0, sizeof(pm->running));
    eventAdd(pm->name, peerMonitorRequest, pm, (double) (pm->last_probe + pm->peer->monitor.interval - current_dtime), 1);
}
Example #19
static void
wccpHereIam(void *voidnotused)
{
    debug(80, 6) ("wccpHereIam: Called\n");

    wccp_here_i_am.id = wccp_i_see_you.id;
    send(theOutWccpConnection,
	&wccp_here_i_am,
	sizeof(wccp_here_i_am),
	0);

    if (!eventFind(wccpHereIam, NULL))
	eventAdd("wccpHereIam", wccpHereIam, NULL, 10.0, 1);
}
Example #20
void
wccpInit(void)
{
    debug(80, 5) ("wccpInit: Called\n");
    memset(&wccp_here_i_am, '\0', sizeof(wccp_here_i_am));
    wccp_here_i_am.type = htonl(WCCP_HERE_I_AM);
    wccp_here_i_am.version = htonl(Config.Wccp.version);
    wccp_here_i_am.revision = htonl(WCCP_REVISION);
    change = 0;
    last_assign_buckets_change = 0;
    if (Config.Wccp.router.s_addr != any_addr.s_addr)
	if (!eventFind(wccpHereIam, NULL))
	    eventAdd("wccpHereIam", wccpHereIam, NULL, 5.0, 1);
}
Example #21
/* 
 * void eventAddIsh(const char *name, EVH *func, void *arg, time_t delta_ish)
 *
 * Input: Name of event, function to call, arguments to pass, and frequency
 *	  of the event.
 * Output: None
 * Side Effects: Adds the event to the event list within +- 1/3 of the
 *	         specified frequency.
 */
void
eventAddIsh(const char *name, EVH * func, void *arg, time_t delta_ish)
{
	if(delta_ish >= 3.0)
	{
		const time_t two_third = (2 * delta_ish) / 3;
		delta_ish = two_third + ((rand() % 1000) * two_third) / 1000;
		/*
		 * XXX I hate the above magic, I don't even know if it's right.
		 * Grr. -- adrian
		 */
	}
	eventAdd(name, func, arg, delta_ish);
}
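Despite the XXX, the integer magic above is right: (rand() % 1000) picks a value in [0, 999], so ((rand() % 1000) * two_third) / 1000 lands in [0, two_third), and the sum stays within the same two-thirds-to-four-thirds window as the drand48() version in example #16, only with millesimal granularity.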
Example #22
File: balloc.c Project: mdharris/ircd
/*! \brief Opens /dev/zero and saves the file handle for
 * future allocations.
 */
void
initBlockHeap(void)
{
#ifdef HAVE_MMAP
#ifndef MAP_ANON
  int zero_fd = open("/dev/zero", O_RDWR);

  if (zero_fd < 0)
    outofmemory();
  fd_open(&dpfd, zero_fd, 0, "Anonymous mmap()");
#endif
  eventAdd("heap_garbage_collection", &heap_garbage_collection, NULL, 119);
#endif
}
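initBlockHeap() keeps a /dev/zero descriptor only on platforms whose mmap() lacks MAP_ANON; mapping /dev/zero is the classic fallback for anonymous zero-filled memory. A minimal sketch of the two paths (POSIX mmap assumed; error handling kept thin):

#include <fcntl.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

/* Zero-filled anonymous pages: prefer MAP_ANON, fall back to a
 * private mapping of /dev/zero where the flag is missing. */
static void *
anon_pages(size_t len)
{
    void *p;
#ifdef MAP_ANON
    p = mmap(NULL, len, PROT_READ | PROT_WRITE,
        MAP_PRIVATE | MAP_ANON, -1, 0);
#else
    int fd = open("/dev/zero", O_RDWR);
    if (fd < 0)
        return NULL;
    p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
    close(fd);                  /* the mapping outlives the descriptor */
#endif
    return p == MAP_FAILED ? NULL : p;
}

int
main(void)
{
    char *buf = anon_pages(4096);
    return (buf != NULL && buf[0] == 0) ? 0 : 1;    /* pages arrive zeroed */
}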
Example #23
static ClientInfo *
clientdbAdd(struct in_addr addr)
{
    ClientInfo *c;
    c = memAllocate(MEM_CLIENT_INFO);
    c->hash.key = xstrdup(xinet_ntoa(addr));
    c->addr = addr;
    hash_join(client_table, &c->hash);
    statCounter.client_http.clients++;
    if ((statCounter.client_http.clients > max_clients) && !cleanup_running && cleanup_scheduled < 2) {
	cleanup_scheduled++;
	eventAdd("client_db garbage collector", clientdbScheduledGC, NULL, 90, 0);
    }
    return c;
}
Example #24
void 
connect_server()
{
  struct Client *client = make_client(NULL);
  struct Server *server = make_server(client);
  struct Module *protomod;
  char modes[MODEBUFLEN+1] = "";
  int i, j = 0;

  protomod = find_module(Connect.protocol, NO);
  if(protomod == NULL)
  {
    ilog(L_CRIT, "Unable to connect to uplink, protocol module %s not found.",
        Connect.protocol);
    services_die("Connect error", NO);
  }

  ServerModeList = (struct ModeList *)modsym(protomod->handle, "ModeList");
  for(i = 0; ServerModeList[i].letter != '\0'; i++)
  {
    modes[j++] = ServerModeList[i].letter;
    if(j > MODEBUFLEN)
      break;
  }
  modes[j] = '\0';

  ilog(L_DEBUG, "Loaded server mode list %p %s %d", ServerModeList, modes, j);

  strlcpy(server->pass, Connect.password, sizeof(server->pass));
  strlcpy(client->name, Connect.name, sizeof(client->name));
  strlcpy(client->host, Connect.host, sizeof(client->host));

  SetConnecting(client);
  client->from = client;
    
  dlinkAdd(client, &client->node, &global_client_list);

  if(comm_open(&server->fd, AF_INET, SOCK_STREAM, 0, NULL) < 0)
  {
    ilog(L_CRIT, "connect_server: Could not open socket");
    exit(1);
  }

  comm_connect_tcp(&server->fd, Connect.host, Connect.port,
      NULL, 0, serv_connect_callback, client, AF_INET, CONNECTTIMEOUT);

  eventAdd("Server connection check", try_reconnect, NULL, 60);
}
Example #25
/* meta data recreated from disk image in swap directory */
void
storeRebuildComplete(struct _store_rebuild_data *dc)
{
    double dt;
    counts.objcount += dc->objcount;
    counts.expcount += dc->expcount;
    counts.scancount += dc->scancount;
    counts.clashcount += dc->clashcount;
    counts.dupcount += dc->dupcount;
    counts.cancelcount += dc->cancelcount;
    counts.invalid += dc->invalid;
    counts.badflags += dc->badflags;
    counts.bad_log_op += dc->bad_log_op;
    counts.zero_object_sz += dc->zero_object_sz;
    /*
     * When store_dirs_rebuilding == 1, it means we are done reading
     * or scanning all cache_dirs.  Now report the stats and start
     * the validation (storeCleanup()) thread.
     */
    if (store_dirs_rebuilding > 1)
	return;
    dt = tvSubDsec(rebuild_start, current_time);
    debug(20, 1) ("Finished rebuilding storage from disk.\n");
    debug(20, 1) ("  %7d Entries scanned\n", counts.scancount);
    debug(20, 1) ("  %7d Invalid entries.\n", counts.invalid);
    debug(20, 1) ("  %7d With invalid flags.\n", counts.badflags);
    debug(20, 1) ("  %7d Objects loaded.\n", counts.objcount);
    debug(20, 1) ("  %7d Objects expired.\n", counts.expcount);
    debug(20, 1) ("  %7d Objects cancelled.\n", counts.cancelcount);
    debug(20, 1) ("  %7d Duplicate URLs purged.\n", counts.dupcount);
    debug(20, 1) ("  %7d Swapfile clashes avoided.\n", counts.clashcount);
    debug(20, 1) ("  Took %3.1f seconds (%6.1f objects/sec).\n", dt,
	(double) counts.objcount / (dt > 0.0 ? dt : 1.0));
    debug(20, 1) ("Beginning Validation Procedure\n");
    eventAdd("storeCleanup", storeCleanup, NULL, 0.0, 1);
    safe_free(RebuildProgress);

    //Kim Taehee added start
    //initializing YouTubeChunkTable
    youtubeTable.head = NULL;
    youtubeTable.size = 0;

    readChunkFromTableFile();
    //Kim Taehee added end
}
Example #26
void AsyncSocket::run()
{
//cout << "AsyncSocket::run - Thread started\n";
	// If connection is already up then we should not connect again
	if (!isConnected())
	{
		// If we have chosen not to use automatic reconnect, do not start the reconnect loop
		if (myReconnectTimeout == 0)
		{
			// Connect to somewhere
			connect();
		}
		else
		{
			// Start the reconnect loop
			reconnectLoop();
		}
	}

	bool loop = true;
	try
	{
		while (loop)
		{
			// If we have triggered a forced reconnect do it here
			if (myForceReconnect)
			{
				myForceReconnect = false;
				reconnectLoop();
			}

			// Receive data
			loop = receiveData();
		}
	}
	catch (SocketException *e)
	{
		cout << "DEBUG: socket got an exception: " << e->getDescription() << endl;
		// Something bad happened and we cannot continue
		eventAdd(SocketEvent::TYPE_CONNECTION_DIED, e->getDescription());
	}

//cout << "AsyncSocket::run - Thread end?\n";
	// Clean up socket if we would want to restart
	silentClose();
}
Example #27
static void
peerCountMcastPeersStart(void *data)
{
    peer *p = data;
    ps_state *psstate;
    StoreEntry *fake;
    MemObject *mem;
    icp_common_t *query;
    int reqnum;
    method_t *method_get;
    LOCAL_ARRAY(char, url, MAX_URL);
    assert(p->type == PEER_MULTICAST);
    method_get = urlMethodGetKnownByCode(METHOD_GET);
    p->mcast.flags.count_event_pending = 0;
    snprintf(url, MAX_URL, "http://%s/", inet_ntoa(p->in_addr.sin_addr));
    fake = storeCreateEntry(url, null_request_flags, method_get);
    psstate = cbdataAlloc(ps_state);
    psstate->request = requestLink(urlParse(method_get, url));
    psstate->entry = fake;
    psstate->callback = NULL;
    psstate->callback_data = p;
    cbdataLock(psstate->callback_data);
    psstate->ping.start = current_time;
    mem = fake->mem_obj;
    mem->request = requestLink(psstate->request);
    mem->start_ping = current_time;
    mem->ping_reply_callback = peerCountHandleIcpReply;
    mem->ircb_data = psstate;
    mcastSetTtl(theOutIcpConnection, p->mcast.ttl);
    p->mcast.id = mem->id;
    reqnum = icpSetCacheKey(fake->hash.key);
    query = icpCreateMessage(ICP_QUERY, 0, url, reqnum, 0);
    icpUdpSend(theOutIcpConnection,
	&p->in_addr,
	query,
	LOG_ICP_QUERY,
	0);
    fake->ping_status = PING_WAITING;
    eventAdd("peerCountMcastPeersDone",
	peerCountMcastPeersDone,
	psstate,
	Config.Timeout.mcast_icp_query / 1000.0, 1);
    p->mcast.flags.counting = 1;
    peerCountMcastPeersSchedule(p, MCAST_COUNT_RATE);
}
Example #28
static void m3u8Event(void *args)
{
    if(!is_m3u8_prefetch_alive()){
        debug(207,3)("m3u8_prefetch dont run,now starting it!\n");
        enter_suid();

        int cid = fork();
        if (cid == 0) {
            int ret = execl("/usr/local/squid/bin/m3u8_prefetch", "m3u8_prefetch","15101",(char *)0);
            if (ret < 0) {
                debug(207,3)("(m3u8) --> execl error : %s\n",xstrerror());
            }
            exit(-1);
        }
        leave_suid();
    }

    eventAdd("m3u8Event", m3u8Event, NULL, 30, 0);	
}
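One caveat in the snippet above: the parent never waits for the child it forks, so each restart attempt leaves a zombie until something collects it. A hedged sketch of the non-blocking reap such a periodic event could perform on entry (assuming POSIX waitpid; the prefetcher check and event plumbing are the snippet's own):

#include <sys/types.h>
#include <sys/wait.h>

/* Collect any exited children without blocking; suitable to call from
 * a periodic event like m3u8Event() above. */
static void
reap_children(void)
{
    int status;
    while (waitpid(-1, &status, WNOHANG) > 0)
        continue;               /* one zombie reaped; check for more */
}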
Example #29
static void
peerMonitorRequest(void *data)
{
	PeerMonitor *pm = data;
	char *url;
	request_t *req;

	if (!cbdataValid(pm->peer))
	{
		cbdataFree(pm);
		return;
	}
	url = pm->peer->monitor.url;
	if (!url)
	{
		cbdataFree(pm);
		return;
	}
	req = urlParse(METHOD_GET, url);
	if (!req)
	{
		debug(DBG, 1) ("peerMonitorRequest: Failed to parse URL '%s' for cache_peer %s\n", url, pm->peer->name);
		cbdataFree(pm);
		return;
	}
	pm->last_probe = squid_curtime;
	pm->running.timeout_set = 1;
	eventAdd(pm->name, peerMonitorTimeout, pm, (double) (pm->peer->monitor.timeout ? pm->peer->monitor.timeout : pm->peer->monitor.interval), 0);

	httpHeaderPutStr(&req->header, HDR_ACCEPT, "*/*");
	httpHeaderPutStr(&req->header, HDR_USER_AGENT, full_appname_string);
	if (pm->peer->login)
		xstrncpy(req->login, pm->peer->login, MAX_LOGIN_SZ);
	pm->running.req = requestLink(req);
	pm->running.e = storeCreateEntry(url, req->flags, req->method);
	pm->running.sc = storeClientRegister(pm->running.e, pm);
	pm->running.buf = memAllocate(MEM_4K_BUF);
	fwdStartPeer(pm->peer, pm->running.e, pm->running.req);
	storeClientCopy(pm->running.sc, pm->running.e, 0, 0, 4096, pm->running.buf, peerMonitorFetchReplyHeaders, pm);
	return;
}
Example #30
/* swaps out one digest "chunk" per invocation; schedules next swap out */
static void
storeDigestSwapOutStep(void *data)
{
    StoreEntry *e;
    int chunk_size = Config.digest.swapout_chunk_size;
    assert(data == sd_state.rewrite_lock);
    e = (StoreEntry *) ((generic_cbdata *) data)->data;
    assert(e);
    /* _add_ check that nothing bad happened while we were waiting @?@ @?@ */
    if (sd_state.rewrite_offset + chunk_size > store_digest->mask_size)
	chunk_size = store_digest->mask_size - sd_state.rewrite_offset;
    storeAppend(e, store_digest->mask + sd_state.rewrite_offset, chunk_size);
    debug(71, 3) ("storeDigestSwapOutStep: size: %d offset: %d chunk: %d bytes\n",
	store_digest->mask_size, sd_state.rewrite_offset, chunk_size);
    sd_state.rewrite_offset += chunk_size;
    /* are we done ? */
    if (sd_state.rewrite_offset >= store_digest->mask_size)
	storeDigestRewriteFinish(e);
    else
	eventAdd("storeDigestSwapOutStep", storeDigestSwapOutStep, data, 0.0, 1);
}