Example #1
static int ares_waitperform(struct connectdata *conn, int timeout_ms)
{
  struct SessionHandle *data = conn->data;
  int nfds;
  int bitmask;
  int socks[ARES_GETSOCK_MAXNUM];
  struct pollfd pfd[ARES_GETSOCK_MAXNUM];
  int m;
  int i;
  int num;

  bitmask = ares_getsock(data->state.areschannel, socks, ARES_GETSOCK_MAXNUM);

  for(i=0; i < ARES_GETSOCK_MAXNUM; i++) {
    pfd[i].events = 0;
    m=0;
    if(ARES_GETSOCK_READABLE(bitmask, i)) {
      pfd[i].fd = socks[i];
      pfd[i].events |= POLLRDNORM|POLLIN;
      m=1;
    }
    if(ARES_GETSOCK_WRITABLE(bitmask, i)) {
      pfd[i].fd = socks[i];
      pfd[i].events |= POLLWRNORM|POLLOUT;
      m=1;
    }
    pfd[i].revents=0;
    if(!m)
      break;
  }
  num = i;

  if(num)
    nfds = Curl_poll(pfd, num, timeout_ms);
  else
    nfds = 0;

  if(!nfds)
    /* Call ares_process() unconditionally here, even if we simply timed out
       above, as otherwise the c-ares name resolution won't time out! */
    ares_process_fd(data->state.areschannel, ARES_SOCKET_BAD, ARES_SOCKET_BAD);
  else {
    /* move through the descriptors and ask for processing on them */
    for(i=0; i < num; i++)
      ares_process_fd(data->state.areschannel,
                      pfd[i].revents & (POLLRDNORM|POLLIN)?
                      pfd[i].fd:ARES_SOCKET_BAD,
                      pfd[i].revents & (POLLWRNORM|POLLOUT)?
                      pfd[i].fd:ARES_SOCKET_BAD);
  }
  return nfds;
}
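A minimal, self-contained sketch (not taken from curl) of how a caller could derive the timeout_ms argument for a helper like the one above: ares_timeout() clamps the wait to c-ares' next internal retry deadline, so poll() never sleeps past it. The function name next_timeout_ms() is illustrative.

#include <ares.h>
#include <sys/time.h>

/* Return how many milliseconds the caller may sleep without overshooting
 * either c-ares' next internal timeout or the caller-supplied cap. */
static int next_timeout_ms(ares_channel channel, int max_ms)
{
  struct timeval max, tv, *tvp;
  max.tv_sec = max_ms / 1000;
  max.tv_usec = (max_ms % 1000) * 1000;
  tvp = ares_timeout(channel, &max, &tv);
  return (int)(tvp->tv_sec * 1000 + tvp->tv_usec / 1000);
}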
Example #2
int ares_getsock(ares_channel channel,
                 int *s,
                 int numsocks) /* size of the 'socks' array */
{
  struct server_state *server;
  int i;
  int sockindex=0;
  int bitmap = 0;
  unsigned int setbits = 0xffffffff;

  ares_socket_t *socks = (ares_socket_t *)s;

  /* Are there any active queries? */
  int active_queries = !ares__is_list_empty(&(channel->all_queries));

  for (i = 0;
       (i < channel->nservers) && (sockindex < ARES_GETSOCK_MAXNUM);
       i++)
    {
      server = &channel->servers[i];
      /* We only need to register interest in UDP sockets if we have
       * outstanding queries.
       */
      if (active_queries && server->udp_socket != ARES_SOCKET_BAD)
        {
          if(sockindex >= numsocks)
            break;
          socks[sockindex] = server->udp_socket;
          bitmap |= ARES_GETSOCK_READABLE(setbits, sockindex);
          sockindex++;
        }
      /* We always register for TCP events, because we want to know
       * when the other side closes the connection, so we don't waste
       * time trying to use a broken connection.
       */
      if (server->tcp_socket != ARES_SOCKET_BAD)
        {
          if(sockindex >= numsocks)
            break;
          socks[sockindex] = server->tcp_socket;
          bitmap |= ARES_GETSOCK_READABLE(setbits, sockindex);

          if (server->qhead && active_queries)
            /* then the tcp socket is also writable! */
            bitmap |= ARES_GETSOCK_WRITABLE(setbits, sockindex);

          sockindex++;
        }
    }
  return bitmap;
}
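For reference, the bitmap built above packs the readable flags into the low ARES_GETSOCK_MAXNUM bits and the writable flags into the next ARES_GETSOCK_MAXNUM bits (ARES_GETSOCK_MAXNUM is 16). The decoding macros in ares.h are defined essentially as:

#define ARES_GETSOCK_READABLE(bits, num) ((bits) & (1 << (num)))
#define ARES_GETSOCK_WRITABLE(bits, num) \
  ((bits) & (1 << ((num) + ARES_GETSOCK_MAXNUM)))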
static PyObject *
Channel_func_getsock(Channel *self)
{
    int i, bitmask;
    ares_socket_t socks[ARES_GETSOCK_MAXNUM];
    PyObject *tpl, *rfds, *wfds, *item;

    CHECK_CHANNEL(self);

    tpl = PyTuple_New(2);
    rfds = PyList_New(0);
    wfds = PyList_New(0);
    if (!tpl || !rfds || !wfds) {
        PyErr_NoMemory();
        Py_XDECREF(tpl);
        Py_XDECREF(rfds);
        Py_XDECREF(wfds);
        return NULL;
    }

    bitmask = ares_getsock(self->channel, socks, ARES_GETSOCK_MAXNUM);
    for(i=0; i < ARES_GETSOCK_MAXNUM; i++) {
        if(ARES_GETSOCK_READABLE(bitmask, i)) {
            item = PyInt_FromLong((long)socks[i]);
            PyList_Append(rfds, item);
            Py_DECREF(item);
        }
        if(ARES_GETSOCK_WRITABLE(bitmask, i)) {
            item = PyInt_FromLong((long)socks[i]);
            PyList_Append(wfds, item);
            Py_DECREF(item);
        }
    }

    PyTuple_SET_ITEM(tpl, 0, rfds);
    PyTuple_SET_ITEM(tpl, 1, wfds);
    return tpl;
}
Example #4
bool LLAres::process(U64 timeout)
{
	if (!gAPRPoolp)
	{
		ll_init_apr();
	}

	ares_socket_t socks[ARES_GETSOCK_MAXNUM];
	apr_pollfd_t aprFds[ARES_GETSOCK_MAXNUM];
	apr_int32_t nsds = 0;	
	int nactive = 0;
	int bitmask;

	bitmask = ares_getsock(chan_, socks, ARES_GETSOCK_MAXNUM);

	if (bitmask == 0)
	{
		return nsds > 0;
	}

	apr_status_t status;
	LLAPRPool pool;
	status = pool.getStatus() ;
	ll_apr_assert_status(status);

	for (int i = 0; i < ARES_GETSOCK_MAXNUM; i++)
	{
		if (ARES_GETSOCK_READABLE(bitmask, i))
		{
			aprFds[nactive].reqevents = APR_POLLIN | APR_POLLERR;
		}
		else if (ARES_GETSOCK_WRITABLE(bitmask, i))
		{
			aprFds[nactive].reqevents = APR_POLLOUT | APR_POLLERR;
		} else {
			continue;
		}

		apr_socket_t *aprSock = NULL;

		status = apr_os_sock_put(&aprSock, (apr_os_sock_t *) &socks[i], pool.getAPRPool());
		if (status != APR_SUCCESS)
		{
			ll_apr_warn_status(status);
			return nsds > 0;
		}

		aprFds[nactive].desc.s = aprSock;
		aprFds[nactive].desc_type = APR_POLL_SOCKET;
		aprFds[nactive].p = pool.getAPRPool();
		aprFds[nactive].rtnevents = 0;
		aprFds[nactive].client_data = &socks[i];

		nactive++;
	}

	if (nactive > 0)
	{
		status = apr_poll(aprFds, nactive, &nsds, timeout);

		if (status != APR_SUCCESS && status != APR_TIMEUP)
		{
			ll_apr_warn_status(status);
		}

		for (int i = 0; i < nactive; i++)
		{
			int evts = aprFds[i].rtnevents;
			int ifd = (evts & (APR_POLLIN | APR_POLLERR))
				? *((int *) aprFds[i].client_data) : ARES_SOCKET_BAD;
			int ofd = (evts & (APR_POLLOUT | APR_POLLERR))
				? *((int *) aprFds[i].client_data) : ARES_SOCKET_BAD;
					
			ares_process_fd(chan_, ifd, ofd);
		}
	}

	return nsds > 0;
}
Example #5
/// Waits for replies to outstanding DNS queries on the specified channel.
void DnsCachedResolver::wait_for_replies(DnsChannel* channel)
{
  // Wait until the expected number of results has been returned.
  while (channel->pending_queries > 0)
  {
    // Call into ares to get details of the sockets it's using.
    ares_socket_t scks[ARES_GETSOCK_MAXNUM];
    int rw_bits = ares_getsock(channel->channel, scks, ARES_GETSOCK_MAXNUM);

    // Translate these sockets into pollfd structures.
    int num_fds = 0;
    struct pollfd fds[ARES_GETSOCK_MAXNUM];
    for (int fd_idx = 0; fd_idx < ARES_GETSOCK_MAXNUM; fd_idx++)
    {
      struct pollfd* fd = &fds[fd_idx];
      fd->fd = scks[fd_idx];
      fd->events = 0;
      fd->revents = 0;
      if (ARES_GETSOCK_READABLE(rw_bits, fd_idx))
      {
        fd->events |= POLLRDNORM | POLLIN;
      }
      if (ARES_GETSOCK_WRITABLE(rw_bits, fd_idx))
      {
        fd->events |= POLLWRNORM | POLLOUT;
      }
      if (fd->events != 0)
      {
        num_fds++;
      }
    }

    // Calculate the timeout.
    struct timeval tv;
    tv.tv_sec = 0;
    tv.tv_usec = 0;
    (void)ares_timeout(channel->channel, NULL, &tv);

    // Wait for events on these file descriptors.
    if (poll(fds, num_fds, tv.tv_sec * 1000 + tv.tv_usec / 1000) != 0)
    {
      // We got at least one event, so find which file descriptor(s) this was on.
      for (int fd_idx = 0; fd_idx < num_fds; fd_idx++)
      {
        struct pollfd* fd = &fds[fd_idx];
        if (fd->revents != 0)
        {
          // Call into ares to notify it of the event.  The interface requires
          // that we pass separate file descriptors for read and write events
          // or ARES_SOCKET_BAD if no event has occurred.
          ares_process_fd(channel->channel,
                          fd->revents & (POLLRDNORM | POLLIN) ? fd->fd : ARES_SOCKET_BAD,
                          fd->revents & (POLLWRNORM | POLLOUT) ? fd->fd : ARES_SOCKET_BAD);
        }
      }
    }
    else
    {
      // No events, so just call into ares with no file descriptor to let it handle timeouts.
      ares_process_fd(channel->channel, ARES_SOCKET_BAD, ARES_SOCKET_BAD);
    }
  }
}
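For comparison, the classic wait loop from the c-ares documentation uses ares_fds()/ares_process() with select() rather than ares_getsock()/poll(); a minimal sketch, assuming the channel already has queries outstanding:

#include <ares.h>
#include <sys/select.h>

/* Block until every outstanding query on the channel has completed. */
static void wait_until_done(ares_channel channel)
{
  for (;;)
  {
    fd_set readers, writers;
    struct timeval tv, *tvp;

    FD_ZERO(&readers);
    FD_ZERO(&writers);
    int nfds = ares_fds(channel, &readers, &writers);
    if (nfds == 0)
    {
      break;  // No pending queries are left.
    }
    tvp = ares_timeout(channel, NULL, &tv);
    select(nfds, &readers, &writers, NULL, tvp);
    ares_process(channel, &readers, &writers);
  }
}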
void *auth_mgr_adns_handler_epoll(void *arg)
{
    //struct timeval *tvp, tv, tv_copy;
    ares_socket_t dns_client_fds[DNS_MAX_SERVERS] = {0};
    struct epoll_event ev, events[DNS_MAX_EVENTS];
    int i,bitmask,nfds, epollfd, timeout, fd_count, ret;

    UNUSED_ARGUMENT(arg);
    memset(dns_client_fds, 0, sizeof(dns_client_fds));

    memset((char *)&ev, 0, sizeof(struct epoll_event));
    memset((char *)&events[0], 0, sizeof(events));

    epollfd = epoll_create(DNS_MAX_SERVERS);
    if (epollfd < 0) {
        DBG_LOG(SEVERE, MOD_AUTHMGR, "epoll_create() error");
        assert(0);
    }

    prctl(PR_SET_NAME, "nvsd-adns", 0, 0, 0);

    /* Infinite loop to process all the DNS responses. Each channel can handle
     * up to 16 name servers in round-robin fashion, with one socket dedicated
     * to each name server. So if there are 2 name servers in /etc/resolv.conf,
     * c-ares will assign 2 fds to the channel. */
    while(1)
    {
        nfds=0;
        bitmask=0;
        for (i =0; i < DNS_MAX_SERVERS ; i++) {
            if (dns_client_fds[i] > 0) {
                if (epoll_ctl(epollfd, EPOLL_CTL_DEL, dns_client_fds[i], NULL) < 0) {
                    //not a serious problem, strange that we should hit this case
                    continue;
                }
            }
        }
        memset(dns_client_fds, 0, sizeof(dns_client_fds));
        pthread_mutex_lock(&cares_mutex);
        bitmask = ares_getsock(channel, dns_client_fds, DNS_MAX_SERVERS);
        for (i =0; i < DNS_MAX_SERVERS ; i++) {
            if (dns_client_fds[i] > 0) {
                ev.events = 0;
                if (ARES_GETSOCK_READABLE(bitmask, i)) {
                    ev.events |= EPOLLIN;
                }
                if (ARES_GETSOCK_WRITABLE(bitmask, i)) {
                    ev.events |= EPOLLOUT;
                }
                ev.data.fd = dns_client_fds[i];
                if (epoll_ctl(epollfd, EPOLL_CTL_ADD, dns_client_fds[i], &ev) < 0) {
                    if(errno == EEXIST) {
                        nfds++;
                        continue;
                    }
                    DBG_LOG(SEVERE, MOD_AUTHMGR, "failed to add fd %d to epoll: %s\n",
                            dns_client_fds[i], strerror(errno));
                    continue;
                }
                nfds++;
            }
        }
        if(nfds==0)
        {
            pthread_cond_wait(&cares_cond, &cares_mutex);
            pthread_mutex_unlock(&cares_mutex);
            continue;
        }
        //tvp = ares_timeout(channel, NULL, &tv);
        //memcpy(&tv_copy, tvp, sizeof(struct timeval));
        pthread_mutex_unlock(&cares_mutex);
        //timeout = (tv_copy.tv_sec)*1000;//millisecs
        timeout = 1000;//millisecs
        /*********************************************************
        The default timeout was 5 seconds with 4 retries. The
        timeout algorithm that c-ares adopts doubles the timeout
        on every retry, so a single query can block for up to
        ~75 secs (~5+~10+~20+~40). To avoid such a long block in
        our wait, the timeout was reduced to 3 secs and retries
        to 2, so the maximum blocking time is limited to ~9 secs.
        The changes are in ares_init.
        *********************************************************/
        fd_count = epoll_wait(epollfd, events, DNS_MAX_EVENTS, timeout);
        if (fd_count < 0) {
            DBG_LOG(SEVERE, MOD_AUTHMGR, "epoll_wait failed:%s", strerror(errno));
            continue;
        }
        pthread_mutex_lock(&cares_mutex);
        if (fd_count > 0) {
            for (i = 0; i < fd_count; ++i) {
                ares_process_fd(channel,
                                ((events[i].events) & (EPOLLIN) ?
                                 events[i].data.fd:ARES_SOCKET_BAD),
                                ((events[i].events) & (EPOLLOUT)?
                                 events[i].data.fd:ARES_SOCKET_BAD));
            }
        } else {
            ares_process_fd(channel, ARES_SOCKET_BAD, ARES_SOCKET_BAD);
        }
        if (channel_ready == 0 ) {
            ares_destroy(channel);
            //re-init the channel to get a new port
            ret = ares_init_options(&channel, &options, optmask);
            if (ret != ARES_SUCCESS) {
                DBG_LOG(SEVERE, MOD_AUTHMGR,"ares_init: %d %s", ret, ares_strerror(ret));
                assert(0);
            }
            channel_ready = 1;
            AO_fetch_and_add1(&glob_dns_channel_reinit);
        }
        pthread_mutex_unlock(&cares_mutex);
    }
}
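The EEXIST branch above keeps the previous epoll registration even though the socket's read/write interest may have changed since it was added. A minimal sketch (illustrative helper, not from the original source) of refreshing the event mask with EPOLL_CTL_MOD in that case:

#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <sys/epoll.h>

/* Register fd with epollfd, or update its event mask if it is already
 * registered (EPOLL_CTL_ADD fails with EEXIST). Returns 0 on success. */
static int epoll_add_or_mod(int epollfd, int fd, uint32_t event_mask)
{
    struct epoll_event ev;
    memset(&ev, 0, sizeof(ev));
    ev.events = event_mask;
    ev.data.fd = fd;
    if (epoll_ctl(epollfd, EPOLL_CTL_ADD, fd, &ev) == 0) {
        return 0;
    }
    if (errno == EEXIST) {
        return epoll_ctl(epollfd, EPOLL_CTL_MOD, fd, &ev);
    }
    return -1;
}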