static void 
ngx_tcp_check_clean_event(ngx_tcp_check_peer_conf_t *peer_conf) 
{
    ngx_connection_t            *c;

    c = peer_conf->pc.connection;

    ngx_log_debug2(NGX_LOG_DEBUG_TCP, c->log, 0, 
            "tcp check clean event: index:%d, fd: %d", 
            peer_conf->index, c->fd);

    ngx_close_connection(c);

    if (peer_conf->check_timeout_ev.timer_set) {
        ngx_del_timer(&peer_conf->check_timeout_ev);
    }

    peer_conf->state = NGX_TCP_CHECK_ALL_DONE;

    if (peer_conf->check_data != NULL && peer_conf->reinit) {
        peer_conf->reinit(peer_conf);
    }

    ngx_spinlock(&peer_conf->shm->lock, ngx_pid, 1024);

    peer_conf->shm->owner = NGX_INVALID_PID;

    ngx_spinlock_unlock(&peer_conf->shm->lock);
}
void 
ngx_tcp_check_get_peer(ngx_uint_t index) 
{
    ngx_tcp_check_peer_conf_t     *peer_conf;

    if (check_peers_ctx == NULL || index >= check_peers_ctx->peers.nelts) {
        return;
    }

    peer_conf = check_peers_ctx->peers.elts;

    ngx_spinlock(&peer_conf[index].shm->lock, ngx_pid, 1024);

    peer_conf[index].shm->business++;
    peer_conf[index].shm->access_count++;

    ngx_spinlock_unlock(&peer_conf[index].shm->lock);
}
void 
ngx_tcp_check_free_peer(ngx_uint_t index) 
{
    ngx_tcp_check_peer_conf_t     *peer_conf;

    if (check_peers_ctx == NULL || index >= check_peers_ctx->peers.nelts) {
        return;
    }

    peer_conf = check_peers_ctx->peers.elts;

    ngx_spinlock(&peer_conf[index].shm->lock, ngx_pid, 1024);

    if (peer_conf[index].shm->business > 0) {
        peer_conf[index].shm->business--;
    }

    ngx_spinlock_unlock(&peer_conf[index].shm->lock);
}
//After a task has been processed, the epoll notification read event calls this function.
//ngx_notify tells the main thread that the task is finished; ngx_thread_pool_handler is executed by the main thread, i.e. when the worker's cycle {} returns from epoll_wait, not by a thread from the pool.
static void
ngx_thread_pool_handler(ngx_event_t *ev)
{
    ngx_event_t        *event;
    ngx_thread_task_t  *task;

    ngx_log_debug0(NGX_LOG_DEBUG_CORE, ev->log, 0, "thread pool handler");

    ngx_spinlock(&ngx_thread_pool_done_lock, 1, 2048);

    /* Is there a problem here?
        If the pool threads finish their tasks quickly while the main process is briefly stalled
        in epoll_wait, the ngx_notify event may not be noticed right away, so by the time it is
        handled many completed tasks may have piled up on ngx_thread_pool_done
        (see ngx_thread_pool_cycle).
        But doesn't this take only the task at the head of the queue, dropping the rest?

        No: every task on the detached list is executed in the while loop below.
     */

    task = ngx_thread_pool_done.first;
    ngx_thread_pool_done.first = NULL;
    //the tail points back at the head; the head is now NULL, i.e. the queue is empty again
    ngx_thread_pool_done.last = &ngx_thread_pool_done.first;

    ngx_unlock(&ngx_thread_pool_done_lock);

    while (task) { //walk the detached list and run every task that was on ngx_thread_pool_done
        ngx_log_debug1(NGX_LOG_DEBUG_CORE, ev->log, 0,
                       "run completion handler for task #%ui", task->id);

        event = &task->event;
        task = task->next;

        event->complete = 1;
        event->active = 0;

      /* For a small file the read completes in a single task; for the handlers involved see
        ngx_http_cache_thread_handler, ngx_http_copy_thread_handler and ngx_thread_read.

        For a large download the first pass also goes through those functions, but since at most
        32768 bytes are read at a time the file has to be read several times: each time a pool
        thread finishes a task it triggers ngx_notify, control comes back here, and the next
        chunk is read.
        */
        event->handler(event); //should event->handler be checked for NULL here? compare ngx_thread_pool_destroy
    }
}
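
/*
 * To make the detach-then-drain pattern above concrete, here is a simplified,
 * standalone model (not nginx code) of the same queue layout, where "last" is a
 * pointer to the "next" field of the final node.  Every name below
 * (done_queue_t, enqueue, drain, ...) is hypothetical and only for illustration;
 * in nginx the detach step runs under ngx_thread_pool_done_lock.
 */
#include <stdio.h>
#include <stdlib.h>

typedef struct node_s  node_t;

struct node_s {
    int      id;
    node_t  *next;
};

typedef struct {
    node_t   *first;  /* head of the list, NULL when empty               */
    node_t  **last;   /* address of the "next" field of the last node,   */
                      /* or &first when the list is empty                */
} done_queue_t;

static void
enqueue(done_queue_t *q, node_t *n)
{
    n->next = NULL;
    *q->last = n;          /* link after the current tail            */
    q->last = &n->next;    /* the tail is now the new node           */
}

static void
drain(done_queue_t *q)
{
    node_t  *task, *next;

    /* detach the whole list in O(1), exactly as ngx_thread_pool_handler does */
    task = q->first;
    q->first = NULL;
    q->last = &q->first;

    /* every detached task is processed, not just the head */
    while (task) {
        next = task->next;
        printf("run completion handler for task #%d\n", task->id);
        free(task);
        task = next;
    }
}

int
main(void)
{
    int            i;
    node_t        *n;
    done_queue_t   q = { NULL, &q.first };

    for (i = 1; i <= 3; i++) {
        n = malloc(sizeof(node_t));
        n->id = i;
        enqueue(&q, n);
    }

    drain(&q);    /* prints completion lines for tasks #1, #2 and #3 */
    return 0;
}
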
static void 
ngx_tcp_check_begin_handler(ngx_event_t *event) 
{
    ngx_tcp_check_peer_conf_t     *peer_conf;
    ngx_tcp_upstream_srv_conf_t   *uscf;

    if (ngx_tcp_check_need_exit()) {
        return;
    }

    peer_conf = event->data;
    uscf = peer_conf->conf;

    ngx_add_timer(event, uscf->check_interval/2);

    /* This process is processing the event now. */
    if (peer_conf->shm->owner == ngx_pid) {
        return;
    }

    ngx_log_debug4(NGX_LOG_DEBUG_TCP, event->log, 0, 
                   "tcp check begin handler index:%ud, owner: %d, "
                   "ngx_pid: %ud, time:%d", 
                   peer_conf->index, peer_conf->shm->owner, ngx_pid, 
                   (ngx_current_msec - peer_conf->shm->access_time));

    ngx_spinlock(&peer_conf->shm->lock, ngx_pid, 1024);

    if (((ngx_current_msec - peer_conf->shm->access_time) >= uscf->check_interval) && 
            peer_conf->shm->owner == NGX_INVALID_PID)
    {
        peer_conf->shm->owner = ngx_pid;
    }

    ngx_spinlock_unlock(&peer_conf->shm->lock);

    if (peer_conf->shm->owner == ngx_pid) {
        ngx_tcp_check_connect_handler(event);
    }
}
static void ngx_http_healthcheck_try_for_ownership(ngx_event_t *event) {
    ngx_http_healthcheck_status_t * stat;
    ngx_int_t                       i_own_it;

    stat = event->data;
    if (ngx_terminate || ngx_exiting || ngx_quit) {
      ngx_http_healthcheck_clear_events(stat->health_ev.log);
      return;
    }

    i_own_it = 0;
    //  nxg_time_update(0, 0);
    //  Spinlock.  So don't own for a long time!
    //  Use spinlock so two worker processes don't try to healthcheck the same
    //  peer
    ngx_spinlock(&stat->shm->lock, ngx_pid, 1024);
    if (stat->shm->owner == ngx_pid) {
        i_own_it = 1;
    } else if (ngx_current_msec - stat->shm->action_time >=
               (stat->conf->health_delay + stat->conf->health_timeout) * 3) {
        stat->shm->owner = ngx_pid;
        stat->shm->action_time = ngx_current_msec;
        stat->state = NGX_HEALTH_WAITING;
        ngx_http_healthcheck_begin_healthcheck(&stat->health_ev);
        i_own_it = 1;
    }
    if (!ngx_atomic_cmp_set(&stat->shm->lock, ngx_pid, 0)) {
        ngx_log_error(NGX_LOG_CRIT, event->log, 0,
          "healthcheck: spinlock didn't work.  Should be %P, but isn't",
          ngx_pid);

        stat->shm->lock = 0;
    }
    if (!i_own_it) {
      // Try again for ownership later in case the guy that DOES own it dies or
      // something
        ngx_add_timer(&stat->ownership_ev, 5000);
    }
}
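
/*
 * A minimal standalone sketch (not nginx code) of the owner-checked release used
 * above: ngx_spinlock() is called with ngx_pid as the lock value, so releasing
 * with a compare-and-swap from "my pid" back to 0 doubles as an ownership
 * assertion, which is what the ngx_atomic_cmp_set() call verifies.  The names
 * lock_t, lock(), unlock_checked() and the use of C11 atomics are assumptions
 * made for illustration only.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

typedef atomic_long  lock_t;

static void
lock(lock_t *l, long value)
{
    long  expected = 0;

    /* spin until the lock is free, then claim it with our value (the pid) */
    while (!atomic_compare_exchange_weak(l, &expected, value)) {
        expected = 0;    /* CAS rewrote "expected" on failure; reset it */
    }
}

static int
unlock_checked(lock_t *l, long value)
{
    long  expected = value;

    /* succeeds only if the lock still holds our value */
    if (atomic_compare_exchange_strong(l, &expected, 0)) {
        return 1;
    }

    /* someone else cleared or overwrote the lock: report and force-clear */
    fprintf(stderr, "unlock: expected owner %ld, found %ld\n", value, expected);
    atomic_store(l, 0);
    return 0;
}

int
main(void)
{
    lock_t  l = 0;
    long    pid = (long) getpid();

    lock(&l, pid);
    /* ... critical section ... */
    return unlock_checked(&l, pid) ? 0 : 1;
}
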
static void
ngx_http_check_begin_handler(ngx_event_t *event)
{
    ngx_msec_t                          interval;
    ngx_http_check_peer_t              *peer;
    ngx_http_check_peers_t             *peers;
    ngx_http_check_peers_shm_t         *peers_shm;
    ngx_http_upstream_check_srv_conf_t *ucscf;

    if (ngx_http_check_need_exit()) {
        return;
    }

    peers = check_peers_ctx;
    if (peers == NULL) {
        return;
    }

    peers_shm = peers->peers_shm;
    if (peers_shm == NULL) {
        return;
    }

    peer = event->data;
    ucscf = peer->conf;

    ngx_add_timer(event, ucscf->check_interval/2);

    /* This process is processing this peer now. */
    if ((peer->shm->owner == ngx_pid) ||
        (peer->pc.connection != NULL) ||
        (peer->check_timeout_ev.timer_set)) {

        return;
    }

    interval = ngx_current_msec - peer->shm->access_time;
    ngx_log_debug5(NGX_LOG_DEBUG_HTTP, event->log, 0,
                   "http check begin handler index: %ud, owner: %P, "
                   "ngx_pid: %P, interval: %M, check_interval: %M",
                   peer->index, peer->shm->owner,
                   ngx_pid, interval,
                   ucscf->check_interval);

    ngx_spinlock(&peer->shm->lock, ngx_pid, 1024);

    if (peers_shm->generation != ngx_http_check_shm_generation) {
        ngx_spinlock_unlock(&peer->shm->lock);
        return;
    }

    if ((interval >= ucscf->check_interval)
            && peer->shm->owner == NGX_INVALID_PID)
    {
        peer->shm->owner = ngx_pid;
    }
    else if (interval >= (ucscf->check_interval << 4)) {
        /* If the check peer has been untouched for 2^4 (16) times
         * the check interval, take it over anyway.
         * The checking process may have disappeared in some
         * circumstances, and then the clean event would never
         * be triggered. */
        peer->shm->owner = ngx_pid;
        peer->shm->access_time = ngx_current_msec;
    }

    ngx_spinlock_unlock(&peer->shm->lock);

    if (peer->shm->owner == ngx_pid) {
        ngx_http_check_connect_handler(event);
    }
}
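
/*
 * A standalone sketch of the ownership decision above (not the module's code):
 * a worker claims the peer either when the normal interval has elapsed and the
 * record is unowned, or when the record has been untouched for 16x
 * (check_interval << 4) the interval, which recovers peers whose owning worker
 * died before the clean event could reset the shared state.  can_claim_peer()
 * and the plain long types are illustrative; INVALID_PID mirrors NGX_INVALID_PID.
 */
#include <stdio.h>

#define INVALID_PID  (-1L)

static int
can_claim_peer(long now, long access_time, long check_interval, long owner)
{
    long  interval = now - access_time;

    if (interval >= check_interval && owner == INVALID_PID) {
        return 1;                                   /* normal claim           */
    }

    if (interval >= (check_interval << 4)) {
        return 1;                                   /* stale owner: take over */
    }

    return 0;
}

int
main(void)
{
    /* unowned peer, one interval elapsed: claim it */
    printf("%d\n", can_claim_peer(1000, 0, 1000, INVALID_PID));   /* 1 */

    /* pid 42 still owns it and only two intervals have passed: leave it */
    printf("%d\n", can_claim_peer(2000, 0, 1000, 42));            /* 0 */

    /* pid 42 never released it; after 16 intervals take it over anyway */
    printf("%d\n", can_claim_peer(17000, 0, 1000, 42));           /* 1 */

    return 0;
}
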
//ngx_thread_pool_cycle should be read together with ngx_thread_task_post
static void *
ngx_thread_pool_cycle(void *data)
{
    ngx_thread_pool_t *tp = data; //one of these structures corresponds to one thread_pool configuration block

    int                 err;
    sigset_t            set;
    ngx_thread_task_t  *task;

#if 0
    ngx_time_update();
#endif

    ngx_log_debug1(NGX_LOG_DEBUG_CORE, tp->log, 0,
                   "thread in pool \"%V\" started", &tp->name);

    sigfillset(&set);

    sigdelset(&set, SIGILL);
    sigdelset(&set, SIGFPE);
    sigdelset(&set, SIGSEGV);
    sigdelset(&set, SIGBUS);

    err = pthread_sigmask(SIG_BLOCK, &set, NULL);
    if (err) {
        ngx_log_error(NGX_LOG_ALERT, tp->log, err, "pthread_sigmask() failed");
        return NULL;
    }

    /*
    The main thread normally queues work with ngx_thread_task_post and the threads in the pool execute it.
    The main thread id is the same as the process id, while the pool threads have different ids, e.g.:
    2016/01/07 12:38:01[               ngx_thread_task_post,   280][yangya  [debug] 20090#20090: ngx add task to thread, task id:183
    in "20090#20090" the first number is the process id and the second is the main thread id; they are equal.
    */
    for ( ;; ) { //after a task completes, control returns here and the loop continues
        if (ngx_thread_mutex_lock(&tp->mtx, tp->log) != NGX_OK) {
            return NULL;
        }

        /* the number may become negative */
        tp->waiting--;

        //if the queue already holds tasks, they are executed directly without waiting for a cond signal in the while loop
        while (tp->queue.first == NULL) { //the task queue is empty: wait on the condition variable (read together with ngx_thread_task_post)
            //woken when a task is added: ngx_thread_task_post -> ngx_thread_cond_signal
            if (ngx_thread_cond_wait(&tp->cond, &tp->mtx, tp->log) //returns only after ngx_thread_cond_signal
                != NGX_OK)
            {
                (void) ngx_thread_mutex_unlock(&tp->mtx, tp->log);
                return NULL;
            }
        }

        //take the task at the head of the queue, then execute it
        task = tp->queue.first;
        tp->queue.first = task->next;

        if (tp->queue.first == NULL) {
            tp->queue.last = &tp->queue.first;
        }

        //the section above is locked because the queue is shared: pool threads take tasks from it while the main thread posts new ones
        if (ngx_thread_mutex_unlock(&tp->mtx, tp->log) != NGX_OK) {
            return NULL;
        }

#if 0
        ngx_time_update();
#endif

        ngx_log_debug2(NGX_LOG_DEBUG_CORE, tp->log, 0,
                       "run task #%ui in thread pool name:\"%V\"",
                       task->id, &tp->name);

        task->handler(task->ctx, tp->log); //each task has its own ctx, so no lock is needed here

        ngx_log_debug2(NGX_LOG_DEBUG_CORE, tp->log, 0,
                       "complete task #%ui in thread pool name: \"%V\"",
                       task->id, &tp->name);

        task->next = NULL;

        ngx_spinlock(&ngx_thread_pool_done_lock, 1, 2048);

        //append the task to the tail of the done queue; repeated appends chain new tasks after the earlier ones, with first pointing at the first task and last at the tail
        *ngx_thread_pool_done.last = task;
        ngx_thread_pool_done.last = &task->next;

        ngx_unlock(&ngx_thread_pool_done_lock);

//ngx_notify tells the main thread that this task is finished; ngx_thread_pool_handler is executed by the main thread (the worker's cycle {} returning from epoll_wait), not by a pool thread
        (void) ngx_notify(ngx_thread_pool_handler); 
    }
}
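
/*
 * For context, a minimal sketch of the producer side that pairs with this
 * consumer loop: a module allocates a task, fills in the worker handler (runs
 * in a pool thread) and the completion event handler (runs back in the worker
 * process once ngx_notify fires), then posts it.  my_task_ctx_t,
 * my_worker_handler, my_done_handler, my_post_task and the "default" pool name
 * are hypothetical; only ngx_thread_pool_get(), ngx_thread_task_alloc() and
 * ngx_thread_task_post() are the real nginx thread pool API.
 */
#include <ngx_config.h>
#include <ngx_core.h>
#include <ngx_event.h>
#include <ngx_thread_pool.h>

typedef struct {
    int  arg;       /* input for the worker thread            */
    int  result;    /* output read by the completion handler  */
} my_task_ctx_t;

static void
my_worker_handler(void *data, ngx_log_t *log)
{
    my_task_ctx_t  *ctx = data;

    /* runs in a thread of the pool: blocking work belongs here */
    ctx->result = ctx->arg * 2;
}

static void
my_done_handler(ngx_event_t *ev)
{
    my_task_ctx_t  *ctx = ev->data;

    /* runs in the worker process, invoked from ngx_thread_pool_handler() */
    ngx_log_error(NGX_LOG_INFO, ev->log, 0, "task result: %d", ctx->result);
}

static ngx_int_t
my_post_task(ngx_cycle_t *cycle, ngx_pool_t *pool, ngx_log_t *log)
{
    ngx_str_t            name = ngx_string("default");
    ngx_thread_pool_t   *tp;
    ngx_thread_task_t   *task;
    my_task_ctx_t       *ctx;

    tp = ngx_thread_pool_get(cycle, &name);
    if (tp == NULL) {
        return NGX_ERROR;
    }

    /* the task and its ctx are allocated together from the given pool */
    task = ngx_thread_task_alloc(pool, sizeof(my_task_ctx_t));
    if (task == NULL) {
        return NGX_ERROR;
    }

    ctx = task->ctx;
    ctx->arg = 21;

    task->handler = my_worker_handler;         /* executed by a pool thread     */
    task->event.handler = my_done_handler;     /* executed after ngx_notify()   */
    task->event.data = ctx;
    task->event.log = log;

    return ngx_thread_task_post(tp, task);     /* wakes a pool thread via the cond */
}
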