Example #1
void
ngx_time_sigsafe_update(void)
{
    u_char          *p, *p2;
    ngx_tm_t         tm;
    time_t           sec;
    ngx_time_t      *tp;
    struct timeval   tv;

    if (!ngx_trylock(&ngx_time_lock)) {
        return;
    }

    ngx_gettimeofday(&tv);

    sec = tv.tv_sec;

    tp = &cached_time[slot];

    if (tp->sec == sec) {
        ngx_unlock(&ngx_time_lock);
        return;
    }

    if (slot == NGX_TIME_SLOTS - 1) {
        slot = 0;
    } else {
        slot++;
    }

    tp = &cached_time[slot];

    tp->sec = 0;

    ngx_gmtime(sec + cached_gmtoff * 60, &tm);

    p = &cached_err_log_time[slot][0];

    (void) ngx_sprintf(p, "%4d/%02d/%02d %02d:%02d:%02d",
                       tm.ngx_tm_year, tm.ngx_tm_mon,
                       tm.ngx_tm_mday, tm.ngx_tm_hour,
                       tm.ngx_tm_min, tm.ngx_tm_sec);

    p2 = &cached_syslog_time[slot][0];

    (void) ngx_sprintf(p2, "%s %2d %02d:%02d:%02d",
                       months[tm.ngx_tm_mon - 1], tm.ngx_tm_mday,
                       tm.ngx_tm_hour, tm.ngx_tm_min, tm.ngx_tm_sec);

    ngx_memory_barrier();

    ngx_cached_err_log_time.data = p;
    ngx_cached_syslog_time.data = p2;

    ngx_unlock(&ngx_time_lock);
}
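
ngx_time_sigsafe_update() never blocks its readers: it writes the new strings into the next slot of a small ring of buffers and swings the published pointers only after ngx_memory_barrier(), so a reader still holding the old pointer always sees a complete string. A minimal standalone sketch of that publish-after-barrier pattern, using C11 release/acquire atomics in place of nginx's primitives (the names below are illustrative, not nginx API):

#include <stdatomic.h>
#include <stdio.h>
#include <string.h>

#define SLOTS  64                       /* like NGX_TIME_SLOTS: old slots stay readable for a while */

static char            cached[SLOTS][32];
static _Atomic(char *) published;       /* readers only ever load this pointer */

/* writer: fill an unused slot, then publish it; the release store plays the
 * role of ngx_memory_barrier() followed by the plain pointer assignment */
static void
publish_string(unsigned *slot, const char *text)
{
    *slot = (*slot + 1) % SLOTS;                     /* rotate to the next slot */
    strncpy(cached[*slot], text, sizeof(cached[0]) - 1);
    atomic_store_explicit(&published, cached[*slot], memory_order_release);
}

/* reader: a single acquire load, no lock, so a reader is never blocked */
static const char *
current_string(void)
{
    return atomic_load_explicit(&published, memory_order_acquire);
}

int
main(void)
{
    unsigned  slot = 0;

    publish_string(&slot, "1970/01/01 00:00:00");
    publish_string(&slot, "1970/01/01 00:00:01");
    printf("%s\n", current_string());                /* prints the latest string */
    return 0;
}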
//After a task has been processed, the notify read event reported by epoll calls this function.
//ngx_notify tells the main thread that the task is finished; ngx_thread_pool_handler runs in the main thread, i.e. it is executed when the process cycle{} returns from epoll_wait, not by a pool thread.
static void
ngx_thread_pool_handler(ngx_event_t *ev)
{
    ngx_event_t        *event;
    ngx_thread_task_t  *task;

    ngx_log_debug0(NGX_LOG_DEBUG_CORE, ev->log, 0, "thread pool handler");

    ngx_spinlock(&ngx_thread_pool_done_lock, 1, 2048);

    /*
     * Is there a problem here? If the pool threads finish tasks quickly while
     * the main process is briefly held up in epoll_wait, the epoll event raised
     * by ngx_notify is not seen immediately, so by the time it is handled many
     * completed tasks may have piled up on ngx_thread_pool_done (see
     * ngx_thread_pool_cycle). It may look as if only the task at the head of
     * the queue is taken here and the rest are dropped, but in fact the whole
     * list is detached and every task on it is run in the while loop below.
     */

    task = ngx_thread_pool_done.first;
    ngx_thread_pool_done.first = NULL;
    /* point the tail back at the head, which is now NULL: the done queue is empty again */
    ngx_thread_pool_done.last = &ngx_thread_pool_done.first;

    ngx_unlock(&ngx_thread_pool_done_lock);

    while (task) { /* run the completion handler of every task detached from ngx_thread_pool_done above */
        ngx_log_debug1(NGX_LOG_DEBUG_CORE, ev->log, 0,
                       "run completion handler for task #%ui", task->id);

        event = &task->event;
        task = task->next;

        event->complete = 1;
        event->active = 0;

        /*
         * For a small file a single read is enough; for the handlers involved
         * see ngx_http_cache_thread_handler, ngx_http_copy_thread_handler and
         * ngx_thread_read. For a large download the first pass also goes
         * through those functions, but since at most 32768 bytes are read at a
         * time the file must be read repeatedly: each time a pool thread
         * finishes a task it triggers ngx_notify through epoll, and execution
         * comes back here to continue reading.
         */
        event->handler(event); /* should event->handler be checked for NULL here? compare ngx_thread_pool_destroy */
    }
}
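
The event->handler called above is the completion callback installed by whichever module posted the task. A rough sketch of that posting side, using the public thread-pool API declared in ngx_thread_pool.h (ngx_thread_task_alloc and ngx_thread_task_post are real functions, but the my_* names and the ctx layout are invented for illustration; this assumes nginx built with --with-threads):

#include <ngx_config.h>
#include <ngx_core.h>
#include <ngx_event.h>
#include <ngx_thread_pool.h>

typedef struct {
    ssize_t  result;                     /* whatever the worker produces */
} my_task_ctx_t;

static void
my_task_handler(void *data, ngx_log_t *log)   /* runs in a pool thread */
{
    my_task_ctx_t  *ctx = data;

    ctx->result = 0;                     /* ... blocking work (e.g. file I/O) ... */
}

static void
my_task_completion(ngx_event_t *ev)      /* runs in the event loop, dispatched */
{                                        /* by ngx_thread_pool_handler() above */
    my_task_ctx_t  *ctx = ev->data;

    (void) ctx;                          /* ... resume request processing ...  */
}

static ngx_int_t
my_post_task(ngx_thread_pool_t *tp, ngx_pool_t *pool)
{
    ngx_thread_task_t  *task;

    /* allocates the task plus sizeof(my_task_ctx_t) bytes reachable via task->ctx */
    task = ngx_thread_task_alloc(pool, sizeof(my_task_ctx_t));
    if (task == NULL) {
        return NGX_ERROR;
    }

    task->handler = my_task_handler;            /* executed by a pool thread       */
    task->event.handler = my_task_completion;   /* executed after ngx_notify fires */
    task->event.data = task->ctx;

    return ngx_thread_task_post(tp, task);
}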
Example #3
static void
ngx_thread_pool_handler(ngx_event_t *ev)
{
    ngx_event_t        *event;
    ngx_thread_task_t  *task;
    ngx_log_debug0(NGX_LOG_DEBUG_CORE, ev->log, 0, "thread pool handler");
    ngx_spinlock(&ngx_thread_pool_done_lock, 1, 2048);
    task = ngx_thread_pool_done.first;
    ngx_thread_pool_done.first = NULL;
    ngx_thread_pool_done.last = &ngx_thread_pool_done.first;
    ngx_unlock(&ngx_thread_pool_done_lock);
    while (task)
    {
        ngx_log_debug1(NGX_LOG_DEBUG_CORE, ev->log, 0,
                       "run completion handler for task #%ui", task->id);
        event = &task->event;
        task = task->next;
        event->complete = 1;
        event->active = 0;
        event->handler(event);
    }
}
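
Both versions of ngx_thread_pool_handler drain the done queue by grabbing first and resetting last back to &first. The queue is an intrusive singly linked list with a pointer-to-pointer tail, which makes append and detach O(1) with no special case for an empty list. A tiny standalone sketch of the idiom (generic names, not nginx code):

#include <stdio.h>

typedef struct node_s  node_t;

struct node_s {
    node_t  *next;
    int      value;
};

typedef struct {
    node_t   *first;   /* head of the list                         */
    node_t  **last;    /* address of the next pointer to fill next */
} queue_t;

/* append: identical code path for empty and non-empty queues */
static void
enqueue(queue_t *q, node_t *n)
{
    n->next = NULL;
    *q->last = n;      /* either q->first or the previous tail's ->next */
    q->last = &n->next;
}

/* detach the whole list in O(1), exactly what ngx_thread_pool_handler does */
static node_t *
drain(queue_t *q)
{
    node_t  *head = q->first;

    q->first = NULL;
    q->last = &q->first;
    return head;
}

int
main(void)
{
    queue_t  q;
    node_t   a = { NULL, 1 }, b = { NULL, 2 };

    q.first = NULL;
    q.last = &q.first;

    enqueue(&q, &a);
    enqueue(&q, &b);

    for (node_t *n = drain(&q); n; n = n->next) {
        printf("%d\n", n->value);       /* prints 1 then 2 */
    }

    return 0;
}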
Example #4
ngx_int_t
ngx_event_thread_process_posted(ngx_cycle_t *cycle)
{
    ngx_event_t  *ev;

    for ( ;; ) {

        ev = (ngx_event_t *) ngx_posted_events;

        for ( ;; ) {

            ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                          "posted event %p", ev);

            if (ev == NULL) {
                return NGX_OK;
            }

            if (ngx_trylock(ev->lock) == 0) {

                ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                               "posted event %p is busy", ev);

                ev = ev->next;
                continue;
            }

            if (ev->lock != ev->own_lock) {
                if (*(ev->own_lock)) {
                    ngx_log_error(NGX_LOG_ALERT, cycle->log, 0,
                             "the own lock of the posted event %p is busy", ev);
                    ngx_unlock(ev->lock);
                    ev = ev->next;
                    continue;
                }
                *(ev->own_lock) = 1;
            }

            ngx_delete_posted_event(ev);

            ev->locked = 1;

            ev->ready |= ev->posted_ready;
            ev->timedout |= ev->posted_timedout;
            ev->pending_eof |= ev->posted_eof;
#if (NGX_HAVE_KQUEUE)
            ev->kq_errno |= ev->posted_errno;
#endif
            if (ev->posted_available) {
                ev->available = ev->posted_available;
            }

            ev->posted_ready = 0;
            ev->posted_timedout = 0;
            ev->posted_eof = 0;
#if (NGX_HAVE_KQUEUE)
            ev->posted_errno = 0;
#endif
            ev->posted_available = 0;

            ngx_mutex_unlock(ngx_posted_events_mutex);

            ev->handler(ev);

            ngx_mutex_lock(ngx_posted_events_mutex);

            if (ev->locked) {
                ngx_unlock(ev->lock);

                if (ev->lock != ev->own_lock) {
                    ngx_unlock(ev->own_lock);
                }
            }

            ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                           "posted event %p is done", ev);

            break;
        }
    }
}
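
The ngx_trylock, ngx_spinlock and ngx_unlock calls that appear throughout these snippets are thin wrappers around an atomic compare-and-swap on an ngx_atomic_t word. A roughly equivalent standalone sketch with C11 atomics (illustrative only, not the nginx implementation):

#include <stdatomic.h>
#include <sched.h>                      /* sched_yield() */

typedef atomic_ulong  lock_t;           /* stands in for ngx_atomic_t */

/* one attempt to take the lock: 1 on success, 0 if it was busy
 * (roughly what ngx_trylock() does via ngx_atomic_cmp_set) */
static int
try_lock(lock_t *lock)
{
    unsigned long  expected = 0;

    return atomic_compare_exchange_strong(lock, &expected, 1);
}

/* spin until acquired; "spin" bounds the busy-wait between yields,
 * similar in spirit to ngx_spinlock(lock, value, spin) */
static void
spin_lock(lock_t *lock, unsigned long spin)
{
    for ( ;; ) {
        for (unsigned long n = 0; n < spin; n++) {
            if (try_lock(lock)) {
                return;
            }
        }

        sched_yield();                  /* let other threads run, then retry */
    }
}

static void
unlock(lock_t *lock)
{
    atomic_store(lock, 0);              /* like ngx_unlock(): clear the word */
}

In this reading, ngx_spinlock(&ngx_thread_pool_done_lock, 1, 2048) in the examples corresponds to spin_lock(&lock, 2048) in the sketch.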
Example #5
void
ngx_time_update(void)
{
    u_char          *p0, *p1, *p2, *p3;
    ngx_tm_t         tm, gmt;
    time_t           sec;
    ngx_uint_t       msec;
    ngx_time_t      *tp;
    struct timeval   tv;

    if (!ngx_trylock(&ngx_time_lock)) {
        return;
    }

    ngx_gettimeofday(&tv);

    sec = tv.tv_sec;
    msec = tv.tv_usec / 1000;

    ngx_current_msec = (ngx_msec_t) sec * 1000 + msec;

    tp = &cached_time[slot];

    if (tp->sec == sec) {
        tp->msec = msec;
        ngx_unlock(&ngx_time_lock);
        return;
    }

    if (slot == NGX_TIME_SLOTS - 1) {
        slot = 0;
    } else {
        slot++;
    }

    tp = &cached_time[slot];

    tp->sec = sec;
    tp->msec = msec;

    ngx_gmtime(sec, &gmt);


    p0 = &cached_http_time[slot][0];

    (void) ngx_sprintf(p0, "%s, %02d %s %4d %02d:%02d:%02d GMT",
                       week[gmt.ngx_tm_wday], gmt.ngx_tm_mday,
                       months[gmt.ngx_tm_mon - 1], gmt.ngx_tm_year,
                       gmt.ngx_tm_hour, gmt.ngx_tm_min, gmt.ngx_tm_sec);

#if (NGX_HAVE_GETTIMEZONE)

    tp->gmtoff = ngx_gettimezone();
    ngx_gmtime(sec + tp->gmtoff * 60, &tm);

#elif (NGX_HAVE_GMTOFF)

    ngx_localtime(sec, &tm);
    cached_gmtoff = (ngx_int_t) (tm.ngx_tm_gmtoff / 60);
    tp->gmtoff = cached_gmtoff;

#else

    ngx_localtime(sec, &tm);
    cached_gmtoff = ngx_timezone(tm.ngx_tm_isdst);
    tp->gmtoff = cached_gmtoff;

#endif


    p1 = &cached_err_log_time[slot][0];

    (void) ngx_sprintf(p1, "%4d/%02d/%02d %02d:%02d:%02d",
                       tm.ngx_tm_year, tm.ngx_tm_mon,
                       tm.ngx_tm_mday, tm.ngx_tm_hour,
                       tm.ngx_tm_min, tm.ngx_tm_sec);


    p2 = &cached_http_log_time[slot][0];

    (void) ngx_sprintf(p2, "%02d/%s/%d:%02d:%02d:%02d %c%02d%02d",
                       tm.ngx_tm_mday, months[tm.ngx_tm_mon - 1],
                       tm.ngx_tm_year, tm.ngx_tm_hour,
                       tm.ngx_tm_min, tm.ngx_tm_sec,
                       tp->gmtoff < 0 ? '-' : '+',
                       ngx_abs(tp->gmtoff / 60), ngx_abs(tp->gmtoff % 60));

    p3 = &cached_http_log_iso8601[slot][0];

    (void) ngx_sprintf(p3, "%4d-%02d-%02dT%02d:%02d:%02d%c%02d:%02d",
                       tm.ngx_tm_year, tm.ngx_tm_mon,
                       tm.ngx_tm_mday, tm.ngx_tm_hour,
                       tm.ngx_tm_min, tm.ngx_tm_sec,
                       tp->gmtoff < 0 ? '-' : '+',
                       ngx_abs(tp->gmtoff / 60), ngx_abs(tp->gmtoff % 60));


    ngx_memory_barrier();

    ngx_cached_time = tp;
    ngx_cached_http_time.data = p0;
    ngx_cached_err_log_time.data = p1;
    ngx_cached_http_log_time.data = p2;
    ngx_cached_http_log_iso8601.data = p3;

    ngx_unlock(&ngx_time_lock);
}
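
The %c%02d%02d and %c%02d:%02d pieces above format tp->gmtoff, the UTC offset in minutes: for gmtoff = -330 (UTC-05:30) the sign is '-', ngx_abs(-330 / 60) = 5 and ngx_abs(-330 % 60) = 30, giving "-0530" in the HTTP log time and "-05:30" in the ISO 8601 form. A tiny standalone check of that arithmetic, with plain printf standing in for ngx_sprintf:

#include <stdio.h>
#include <stdlib.h>                     /* abs() stands in for ngx_abs() */

/* format a UTC offset given in minutes the way ngx_time_update() does for
 * cached_http_log_time ("+0200") and cached_http_log_iso8601 ("+02:00") */
static void
print_gmtoff(int gmtoff)
{
    printf("%c%02d%02d  %c%02d:%02d\n",
           gmtoff < 0 ? '-' : '+', abs(gmtoff / 60), abs(gmtoff % 60),
           gmtoff < 0 ? '-' : '+', abs(gmtoff / 60), abs(gmtoff % 60));
}

int
main(void)
{
    print_gmtoff(120);                  /* UTC+02:00 -> "+0200  +02:00" */
    print_gmtoff(-330);                 /* UTC-05:30 -> "-0530  -05:30" */
    print_gmtoff(0);                    /* UTC       -> "+0000  +00:00" */
    return 0;
}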
Example #6
void
ngx_close_connection(ngx_connection_t *c)
{
    ngx_err_t     err;
    ngx_uint_t    log_error, level;
    ngx_socket_t  fd;

    if (c->fd == -1) {
        ngx_log_error(NGX_LOG_ALERT, c->log, 0, "connection already closed");
        return;
    }

    if (c->read->timer_set) {
        ngx_del_timer(c->read);
    }

    if (c->write->timer_set) {
        ngx_del_timer(c->write);
    }

    if (ngx_del_conn) {
        ngx_del_conn(c, NGX_CLOSE_EVENT);

    } else {
        if (c->read->active || c->read->disabled) {
            ngx_del_event(c->read, NGX_READ_EVENT, NGX_CLOSE_EVENT);
        }

        if (c->write->active || c->write->disabled) {
            ngx_del_event(c->write, NGX_WRITE_EVENT, NGX_CLOSE_EVENT);
        }
    }

#if (NGX_THREADS)

    /*
     * we have to clean the connection information before the closing
     * because another thread may reopen the same file descriptor
     * before we clean the connection
     */

    ngx_mutex_lock(ngx_posted_events_mutex);

    if (c->read->prev) {
        ngx_delete_posted_event(c->read);
    }

    if (c->write->prev) {
        ngx_delete_posted_event(c->write);
    }

    c->read->closed = 1;
    c->write->closed = 1;

    if (c->single_connection) {
        ngx_unlock(&c->lock);
        c->read->locked = 0;
        c->write->locked = 0;
    }

    ngx_mutex_unlock(ngx_posted_events_mutex);

#else

    if (c->read->prev) {
        ngx_delete_posted_event(c->read);
    }

    if (c->write->prev) {
        ngx_delete_posted_event(c->write);
    }

    c->read->closed = 1;
    c->write->closed = 1;

#endif

    ngx_reusable_connection(c, 0);

    log_error = c->log_error;

    ngx_free_connection(c);

    fd = c->fd;
    c->fd = (ngx_socket_t) -1;

    if (ngx_close_socket(fd) == -1) {

        err = ngx_socket_errno;

        if (err == NGX_ECONNRESET || err == NGX_ENOTCONN) {

            switch (log_error) {

            case NGX_ERROR_INFO:
                level = NGX_LOG_INFO;
                break;

            case NGX_ERROR_ERR:
                level = NGX_LOG_ERR;
                break;

            default:
                level = NGX_LOG_CRIT;
            }

        } else {
            level = NGX_LOG_CRIT;
        }

        /* we use ngx_cycle->log because c->log was in c->pool */

        ngx_log_error(level, ngx_cycle->log, err,
                      ngx_close_socket_n " %d failed", fd);
    }
}
Example #7
static void *
ngx_thread_pool_cycle(void *data)
{
    ngx_thread_pool_t *tp = data;
    int                 err;
    sigset_t            set;
    ngx_thread_task_t  *task;
#if 0
    ngx_time_update();
#endif
    ngx_log_debug1(NGX_LOG_DEBUG_CORE, tp->log, 0,
                   "thread in pool \"%V\" started", &tp->name);
    sigfillset(&set);
    sigdelset(&set, SIGILL);
    sigdelset(&set, SIGFPE);
    sigdelset(&set, SIGSEGV);
    sigdelset(&set, SIGBUS);
    err = pthread_sigmask(SIG_BLOCK, &set, NULL);
    if (err)
    {
        ngx_log_error(NGX_LOG_ALERT, tp->log, err, "pthread_sigmask() failed");
        return NULL;
    }
    for (;;)
    {
        if (ngx_thread_mutex_lock(&tp->mtx, tp->log) != NGX_OK)
        {
            return NULL;
        }
        /* the number may become negative */
        tp->waiting--;
        while (tp->queue.first == NULL)
        {
            if (ngx_thread_cond_wait(&tp->cond, &tp->mtx, tp->log)
                    != NGX_OK)
            {
                (void) ngx_thread_mutex_unlock(&tp->mtx, tp->log);
                return NULL;
            }
        }
        task = tp->queue.first;
        tp->queue.first = task->next;
        if (tp->queue.first == NULL)
        {
            tp->queue.last = &tp->queue.first;
        }
        if (ngx_thread_mutex_unlock(&tp->mtx, tp->log) != NGX_OK)
        {
            return NULL;
        }
#if 0
        ngx_time_update();
#endif
        ngx_log_debug2(NGX_LOG_DEBUG_CORE, tp->log, 0,
                       "run task #%ui in thread pool \"%V\"",
                       task->id, &tp->name);
        task->handler(task->ctx, tp->log);
        ngx_log_debug2(NGX_LOG_DEBUG_CORE, tp->log, 0,
                       "complete task #%ui in thread pool \"%V\"",
                       task->id, &tp->name);
        task->next = NULL;
        ngx_spinlock(&ngx_thread_pool_done_lock, 1, 2048);
        *ngx_thread_pool_done.last = task;
        ngx_thread_pool_done.last = &task->next;
        ngx_unlock(&ngx_thread_pool_done_lock);
        (void) ngx_notify(ngx_thread_pool_handler);
    }
}
//Read ngx_thread_pool_cycle together with ngx_thread_task_post.
static void *
ngx_thread_pool_cycle(void *data)
{
    ngx_thread_pool_t *tp = data; /* one of these structures per thread_pool configuration */

    int                 err;
    sigset_t            set;
    ngx_thread_task_t  *task;

#if 0
    ngx_time_update();
#endif

    ngx_log_debug1(NGX_LOG_DEBUG_CORE, tp->log, 0,
                   "thread in pool \"%V\" started", &tp->name);

    sigfillset(&set);

    sigdelset(&set, SIGILL);
    sigdelset(&set, SIGFPE);
    sigdelset(&set, SIGSEGV);
    sigdelset(&set, SIGBUS);

    err = pthread_sigmask(SIG_BLOCK, &set, NULL);
    if (err) {
        ngx_log_error(NGX_LOG_ALERT, tp->log, err, "pthread_sigmask() failed");
        return NULL;
    }

    /*
     * Normally the main thread adds tasks via ngx_thread_task_post and the pool
     * threads execute them. The main thread id equals the process id, while the
     * pool threads have different thread ids, e.g.:
     * 2016/01/07 12:38:01[               ngx_thread_task_post,   280][yangya  [debug] 20090#20090: ngx add task to thread, task id:183
     * In "20090#20090" the first number is the process id and the second is the
     * main thread id; here they are the same.
     */
    for ( ;; ) { /* after a task is finished, execution comes back here and loops */
        if (ngx_thread_mutex_lock(&tp->mtx, tp->log) != NGX_OK) {
            return NULL;
        }

        /* the number may become negative */
        tp->waiting--;

        /* if the queue already holds tasks they are executed right away,
           without waiting on the condition variable below */
        while (tp->queue.first == NULL) { /* queue empty: wait on the condition variable (cf. ngx_thread_task_post) */
            /* woken when a task is added: ngx_thread_task_post -> ngx_thread_cond_signal */
            if (ngx_thread_cond_wait(&tp->cond, &tp->mtx, tp->log) /* returns after ngx_thread_cond_signal */
                != NGX_OK)
            {
                (void) ngx_thread_mutex_unlock(&tp->mtx, tp->log);
                return NULL;
            }
        }

        /* take the task at the head of the queue and execute it */
        task = tp->queue.first;
        tp->queue.first = task->next;

        if (tp->queue.first == NULL) {
            tp->queue.last = &tp->queue.first;
        }

        /* this section is locked because the queue is shared: pool threads take
           tasks from it and the main thread adds tasks to it */
        if (ngx_thread_mutex_unlock(&tp->mtx, tp->log) != NGX_OK) {
            return NULL;
        }

#if 0
        ngx_time_update();
#endif

        ngx_log_debug2(NGX_LOG_DEBUG_CORE, tp->log, 0,
                       "run task #%ui in thread pool name:\"%V\"",
                       task->id, &tp->name);

        task->handler(task->ctx, tp->log); /* each task has its own ctx, so no locking is needed here */

        ngx_log_debug2(NGX_LOG_DEBUG_CORE, tp->log, 0,
                       "complete task #%ui in thread pool name: \"%V\"",
                       task->id, &tp->name);

        task->next = NULL;

        ngx_spinlock(&ngx_thread_pool_done_lock, 1, 2048);

        /* append the task to the tail of the done queue; repeated appends chain
           new tasks after earlier ones: first points at the first task and last
           at the next pointer of the most recently added one */
        *ngx_thread_pool_done.last = task;
        ngx_thread_pool_done.last = &task->next;

        ngx_unlock(&ngx_thread_pool_done_lock);

        /* ngx_notify tells the main thread that this task is finished;
           ngx_thread_pool_handler then runs in the main thread, i.e. when the
           process cycle{} returns from epoll_wait, not in a pool thread */
        (void) ngx_notify(ngx_thread_pool_handler); 
    }
}
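
The comments above describe the handshake between ngx_thread_task_post (the producer) and the wait loop at the top of ngx_thread_pool_cycle (the consumer). Stripped of nginx types, the same mutex/condition-variable pattern looks like this (a standalone pthread sketch, not nginx code):

#include <pthread.h>
#include <stddef.h>

typedef struct task_s  task_t;

struct task_s {
    task_t  *next;
    void   (*handler)(void *ctx);
    void    *ctx;
};

typedef struct {
    pthread_mutex_t   mtx;
    pthread_cond_t    cond;
    task_t           *first;
    task_t          **last;             /* pointer-to-pointer tail, as in the done queue above */
} pool_queue_t;

/* producer: what ngx_thread_task_post does in essence */
static void
queue_post(pool_queue_t *q, task_t *task)
{
    pthread_mutex_lock(&q->mtx);

    task->next = NULL;
    *q->last = task;                    /* append at the tail            */
    q->last = &task->next;

    pthread_cond_signal(&q->cond);      /* wake one waiting pool thread  */
    pthread_mutex_unlock(&q->mtx);
}

/* consumer: the wait loop at the top of ngx_thread_pool_cycle */
static task_t *
queue_take(pool_queue_t *q)
{
    task_t  *task;

    pthread_mutex_lock(&q->mtx);

    while (q->first == NULL) {          /* the loop also guards against spurious wakeups */
        pthread_cond_wait(&q->cond, &q->mtx);
    }

    task = q->first;
    q->first = task->next;

    if (q->first == NULL) {
        q->last = &q->first;
    }

    pthread_mutex_unlock(&q->mtx);

    return task;                        /* run task->handler(task->ctx) outside the lock */
}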
Example #9
void ngx_close_connection(ngx_connection_t *c)
{
    ngx_socket_t  fd;

    if (c->pool == NULL)
    {
        ngx_log_error(NGX_LOG_ALERT, c->log, 0, "connection already closed");
        return;
    }

#if (NGX_OPENSSL)

    if (c->ssl)
    {
        if (ngx_ssl_shutdown(c) == NGX_AGAIN)
        {
            c->read->event_handler = ngx_ssl_close_handler;
            c->write->event_handler = ngx_ssl_close_handler;
            return;
        }
    }

#endif

    if (c->read->timer_set)
    {
        ngx_del_timer(c->read);
    }

    if (c->write->timer_set)
    {
        ngx_del_timer(c->write);
    }

    if (ngx_del_conn)
    {
        ngx_del_conn(c, NGX_CLOSE_EVENT);

    }
    else
    {
        if (c->read->active || c->read->disabled)
        {
            ngx_del_event(c->read, NGX_READ_EVENT, NGX_CLOSE_EVENT);
        }

        if (c->write->active || c->write->disabled)
        {
            ngx_del_event(c->write, NGX_WRITE_EVENT, NGX_CLOSE_EVENT);
        }
    }

#if (NGX_THREADS)

    /*
     * we have to clean the connection information before the closing
     * because another thread may reopen the same file descriptor
     * before we clean the connection
     */

    if (ngx_mutex_lock(ngx_posted_events_mutex) == NGX_OK)
    {

        if (c->read->prev)
        {
            ngx_delete_posted_event(c->read);
        }

        if (c->write->prev)
        {
            ngx_delete_posted_event(c->write);
        }

        c->read->closed = 1;
        c->write->closed = 1;

        if (c->single_connection)
        {
            ngx_unlock(&c->lock);
            c->read->locked = 0;
            c->write->locked = 0;
        }

        ngx_mutex_unlock(ngx_posted_events_mutex);
    }

#else

    if (c->read->prev)
    {
        ngx_delete_posted_event(c->read);
    }

    if (c->write->prev)
    {
        ngx_delete_posted_event(c->write);
    }

    c->read->closed = 1;
    c->write->closed = 1;

#endif

    fd = c->fd;
    c->fd = (ngx_socket_t) -1;
    c->data = NULL;

    ngx_destroy_pool(c->pool);

    if (ngx_close_socket(fd) == -1)
    {

        /* we use ngx_cycle->log because c->log was in c->pool */

        ngx_log_error(NGX_LOG_ALERT, ngx_cycle->log, ngx_socket_errno,
                      ngx_close_socket_n " failed");
    }
}
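
When ngx_ssl_shutdown() returns NGX_AGAIN above, the close is deferred: both event handlers are pointed at ngx_ssl_close_handler and the function returns. Such a handler only has to retry the shutdown and close the connection once it stops getting NGX_AGAIN; a hedged sketch of that idea (illustrative only, not the actual nginx function body):

/* sketch: retry the deferred SSL shutdown from the event handler, then close */
static void
ssl_close_handler_sketch(ngx_event_t *ev)
{
    ngx_connection_t  *c;

    c = ev->data;                        /* read/write events carry their connection */

    if (ngx_ssl_shutdown(c) == NGX_AGAIN) {
        return;                          /* still waiting on the peer; handlers stay set */
    }

    ngx_close_connection(c);             /* shutdown finished (or failed): close for real */
}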